CombinedText stringlengths 4 3.42M |
|---|
import sys
import dbus
import core.module
import core.widget
import core.input
import core.decorators
"""Displays the current song being played and allows pausing, skipping ahead, and skipping back.
Requires the following library:
* python-dbus
Parameters:
* spotify-buttons.format: Format string (defaults to '{artist} - {title}')
Available values are: {album}, {title}, {artist}, {trackNumber}
* spotify-buttons.layout: Order in which widgets appear (defaults to song, previous, pause, next)
Widget names are: spotify-buttons.song, spotify-buttons.prev, spotify-buttons.pause, spotify-buttons.next
"""
class Module(core.module.Module):
    """Spotify widget: shows the current track and prev/pause/next buttons.

    Reads track metadata and playback state over the MPRIS D-Bus interface
    and drives playback by shelling out to dbus-send on button clicks.
    """

    def __init__(self, config, theme):
        super().__init__(config, theme, [])
        # Space-separated widget names; update() raises on unknown names.
        self.__layout = self.parameter("layout", "spotify-buttons.song spotify-buttons.prev spotify-buttons.pause spotify-buttons.next")
        self.__song = ""
        self.__pause = ""
        self.__format = self.parameter("format", "{artist} - {title}")
        # Base dbus-send command; a method name (Previous/PlayPause/Next)
        # is appended per button when click callbacks are registered.
        self.__cmd = "dbus-send --session --type=method_call --dest=org.mpris.MediaPlayer2.spotify \
/org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player."

    def hidden(self):
        # Hide the whole module when no song could be fetched (Spotify not running).
        return self.string_song == ""

    def update(self):
        try:
            self.clear_widgets()
            bus = dbus.SessionBus()
            spotify = bus.get_object(
                "org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2"
            )
            spotify_iface = dbus.Interface(spotify, "org.freedesktop.DBus.Properties")
            props = spotify_iface.Get("org.mpris.MediaPlayer2.Player", "Metadata")
            playback_status = str(
                spotify_iface.Get("org.mpris.MediaPlayer2.Player", "PlaybackStatus")
            )
            # Glyphs: two vertical bars for "pause", a triangle for "play".
            if playback_status == "Playing":
                self.__pause = "\u258D\u258D"
            else:
                self.__pause = "\u25B6"
            self.__song = self.__format.format(
                album=str(props.get("xesam:album")),
                title=str(props.get("xesam:title")),
                artist=",".join(props.get("xesam:artist")),
                trackNumber=str(props.get("xesam:trackNumber")),
            )
            # Widgets are re-created every update; callbacks are collected
            # first and registered in one pass at the end.
            widget_map = {}
            for widget_name in self.__layout.split():
                widget = self.add_widget(name = widget_name)
                if widget_name == "spotify-buttons.prev":
                    widget_map[widget] = {
                        "button": core.input.LEFT_MOUSE,
                        "cmd": self.__cmd + "Previous",
                    }
                    widget.full_text("\u258F\u25C0")
                elif widget_name == "spotify-buttons.pause":
                    widget_map[widget] = {
                        "button": core.input.LEFT_MOUSE,
                        "cmd": self.__cmd + "PlayPause",
                    }
                    widget.full_text(self.__pause)
                elif widget_name == "spotify-buttons.next":
                    widget_map[widget] = {
                        "button": core.input.LEFT_MOUSE,
                        "cmd": self.__cmd + "Next",
                    }
                    widget.full_text("\u25B6\u2595")
                elif widget_name == "spotify-buttons.song":
                    widget.full_text(self.__song)
                else:
                    raise KeyError(
                        "The spotify-buttons module does not have a {widget_name!r} widget".format(widget_name=widget_name)
                    )
            for widget, callback_options in widget_map.items():
                core.input.register(widget, **callback_options)
        except Exception:
            # Any failure (D-Bus down, Spotify closed) clears the song so
            # hidden() returns True and the module disappears from the bar.
            self.__song = ""

    @property
    def string_song(self):
        # Python 2 compatibility shim; on Python 3 this is just str().
        if sys.version_info.major < 3:
            return unicode(self.__song)
        return str(self.__song)
black -t py34
import sys
import dbus
import core.module
import core.widget
import core.input
import core.decorators
"""Displays the current song being played and allows pausing, skipping ahead, and skipping back.
Requires the following library:
* python-dbus
Parameters:
* spotify-buttons.format: Format string (defaults to '{artist} - {title}')
Available values are: {album}, {title}, {artist}, {trackNumber}
* spotify-buttons.layout: Order in which widgets appear (defaults to song, previous, pause, next)
Widget names are: spotify-buttons.song, spotify-buttons.prev, spotify-buttons.pause, spotify-buttons.next
"""
class Module(core.module.Module):
    """Spotify status widget with prev/pause/next control buttons.

    Track metadata and playback state come from Spotify's MPRIS D-Bus
    properties; button clicks shell out to dbus-send to control playback.
    """

    def __init__(self, config, theme):
        super().__init__(config, theme, [])
        self.__layout = self.parameter(
            "layout",
            "spotify-buttons.song spotify-buttons.prev spotify-buttons.pause spotify-buttons.next",
        )
        self.__song = ""
        self.__pause = ""
        self.__format = self.parameter("format", "{artist} - {title}")
        self.__cmd = "dbus-send --session --type=method_call --dest=org.mpris.MediaPlayer2.spotify \
/org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player."

    def hidden(self):
        # No song info means Spotify is unavailable -> hide the module.
        return not self.string_song

    def update(self):
        try:
            self.clear_widgets()
            session = dbus.SessionBus()
            player = session.get_object(
                "org.mpris.MediaPlayer2.spotify", "/org/mpris/MediaPlayer2"
            )
            properties = dbus.Interface(player, "org.freedesktop.DBus.Properties")
            metadata = properties.Get("org.mpris.MediaPlayer2.Player", "Metadata")
            status = str(
                properties.Get("org.mpris.MediaPlayer2.Player", "PlaybackStatus")
            )
            self.__pause = "\u258D\u258D" if status == "Playing" else "\u25B6"
            self.__song = self.__format.format(
                album=str(metadata.get("xesam:album")),
                title=str(metadata.get("xesam:title")),
                artist=",".join(metadata.get("xesam:artist")),
                trackNumber=str(metadata.get("xesam:trackNumber")),
            )
            # Per-button label and MPRIS method; the song widget is passive.
            buttons = {
                "spotify-buttons.prev": ("\u258F\u25C0", "Previous"),
                "spotify-buttons.pause": (self.__pause, "PlayPause"),
                "spotify-buttons.next": ("\u25B6\u2595", "Next"),
            }
            pending = {}
            for name in self.__layout.split():
                widget = self.add_widget(name=name)
                if name == "spotify-buttons.song":
                    widget.full_text(self.__song)
                elif name in buttons:
                    label, method = buttons[name]
                    widget.full_text(label)
                    pending[widget] = {
                        "button": core.input.LEFT_MOUSE,
                        "cmd": self.__cmd + method,
                    }
                else:
                    raise KeyError(
                        "The spotify-buttons module does not have a {widget_name!r} widget".format(
                            widget_name=name
                        )
                    )
            # Register all click callbacks once the widgets exist.
            for widget, options in pending.items():
                core.input.register(widget, **options)
        except Exception:
            self.__song = ""

    @property
    def string_song(self):
        # Python 2 shim; plain str() on Python 3.
        if sys.version_info.major < 3:
            return unicode(self.__song)
        return str(self.__song)
|
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from math import sin, cos, pi
# Output image geometry, read once at import time from the ROS parameter server.
image_width = rospy.get_param("image_width")    # image size in pixels
image_height = rospy.get_param("image_height")
pixels_per_m = rospy.get_param("pixels_per_m")  # scale factor: pixels per metre
# Image extent expressed in metres (unused locally; presumably for consumers).
image_width_m = image_width / pixels_per_m
image_height_m = image_height / pixels_per_m
lidar_publisher = rospy.Publisher('lidar_model', Image, queue_size=0)
bridge = CvBridge()  # converts between OpenCV images and ROS Image messages
def gen_lidar_image(laser_scan):
    """Convert a LaserScan message into an image and publish it.

    Pipeline: polar ranges -> cartesian pixel coordinates -> filled
    drivable-area image -> ROS Image message on lidar_publisher.
    """
    points = laser_scan_to_cartesian(laser_scan)
    frame = gen_point_image(points)
    lidar_publisher.publish(get_lidar_image_message(frame))
def laser_scan_to_cartesian(laser_scan):
    """ Generates a list of cartesian (x, y) pixel coordinates from LaserScan
    range and angle data. The origin is the bottom centre of the image,
    (width/2, height), with the scan rotated 90 degrees counterclockwise so
    that angle_min points 'up' in the image. """
    lidar_points = []
    ranges = laser_scan.ranges
    #effectively rotate the input data by 90 degrees counterclockwise
    angle = laser_scan.angle_min + pi / 2
    #convert points from polar to cartesian (origin at (width/2, height));
    #the y term is flipped because image rows grow downward
    for i in range(len(ranges)):
        lidar_points.append(
            (cos(angle) * ranges[i] * pixels_per_m + (image_width / 2),
            image_height - (sin(angle) * ranges[i] * pixels_per_m))
        )
        angle += laser_scan.angle_increment
    return lidar_points
def get_lidar_image_message(lidar_image):
    """Convert a single-channel OpenCV image to a BGR ROS Image message.

    Returns the Image message, or None if conversion fails; the failure
    is logged. NOTE(review): the caller publishes the return value
    directly, so a None return will still fail downstream -- confirm
    whether the error should propagate instead.
    """
    try:
        color_image = cv2.cvtColor(lidar_image, cv2.COLOR_GRAY2BGR)
        return bridge.cv2_to_imgmsg(color_image, encoding="bgr8")
    except CvBridgeError as e:
        # Previously logged at info level and discarded the exception detail;
        # log at error level and include the cause for diagnosability.
        rospy.logerr("Error converting lidar image to imgmsg: %s", e)
        return None
def gen_point_image(points):
    """ Generate a cv image from a list of tuple (x, y) coordinates.

    The area above/behind the scan outline (the undrivable portion) is
    filled black; the remainder stays white.
    """
    # White canvas the size of the configured output image.
    image = np.zeros((image_height, image_width), np.uint8)
    image.fill(255)
    # First and last scan points (~pi and ~0 after the 90 degree rotation).
    min_point = points[0]
    max_point = points[-1]
    # Build the closed polygon on a copy: the original version inserted the
    # boundary points into the caller's list, mutating it as a side effect.
    polygon = [(0, 0), (0, min_point[1])]
    polygon.extend(points)
    polygon.append((image_width, max_point[1]))
    polygon.append((image_width, 0))
    # Fill the undrivable portion of the image with black.
    cv2.fillPoly(image, [np.array(polygon, np.int32)], 0)
    return image
def lidar_node():
    # Subscribe to raw scans and republish them as images; queue_size=1
    # keeps only the freshest scan so processing never lags the sensor.
    rospy.init_node('lidar_node', anonymous=True)
    rospy.Subscriber('scan', LaserScan, gen_lidar_image, queue_size=1)
    rospy.spin()

if __name__ == '__main__':
    lidar_node()
rename lidar_node as lidar_to_frame (Fix #34)
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from math import sin, cos, pi
# Output image geometry, read once at import time from the ROS parameter server.
image_width = rospy.get_param("image_width")    # image size in pixels
image_height = rospy.get_param("image_height")
pixels_per_m = rospy.get_param("pixels_per_m")  # scale factor: pixels per metre
# Image extent in metres (not referenced below; presumably for other nodes).
image_width_m = image_width / pixels_per_m
image_height_m = image_height / pixels_per_m
lidar_publisher = rospy.Publisher('lidar_model', Image, queue_size=0)
bridge = CvBridge()  # converts between OpenCV images and ROS Image messages
def gen_lidar_image(laser_scan):
    """ Converts a LaserScan message to a cv image, then publishes it through
    a lidar_publisher """
    #convert points from polar to cartesian (origin at (width/2, height))
    lidar_points = laser_scan_to_cartesian(laser_scan)
    #generate the image from the cartesian LaserScan points
    lidar_image = gen_point_image(lidar_points)
    #publish the cv image as an imgmsg
    lidar_publisher.publish(get_lidar_image_message(lidar_image))
def laser_scan_to_cartesian(laser_scan):
    """Project LaserScan ranges to image-space (x, y) pixel coordinates.

    The origin is the bottom centre of the image, (width/2, height); the
    scan is rotated 90 degrees counterclockwise so angle_min points 'up',
    and y is flipped because image rows grow downward.
    """
    points = []
    theta = laser_scan.angle_min + pi / 2
    for distance in laser_scan.ranges:
        points.append(
            (cos(theta) * distance * pixels_per_m + (image_width / 2),
             image_height - (sin(theta) * distance * pixels_per_m))
        )
        theta += laser_scan.angle_increment
    return points
def get_lidar_image_message(lidar_image):
    """ Convert an opencv image (image) to an imgmsg.

    NOTE(review): on CvBridgeError this implicitly returns None, which the
    caller then publishes -- confirm that is the intended failure mode. """
    try:
        # expand grayscale to 3-channel BGR so it can be sent as bgr8
        color_image = cv2.cvtColor(lidar_image, cv2.COLOR_GRAY2BGR)
        return bridge.cv2_to_imgmsg(color_image, encoding="bgr8")
    except CvBridgeError as e:
        rospy.loginfo("Error converting lidar image to imgmsg")
def gen_point_image(points):
    """Render a list of (x, y) scan points as a drivable-area image.

    The region outside the scan outline is filled black; the rest stays
    white. Note: boundary points are added to the caller's list in place,
    matching the original behaviour.
    """
    canvas = np.zeros((image_height, image_width), np.uint8)
    canvas.fill(255)
    # endpoints of the scan (~pi and ~0 after the 90 degree rotation)
    first_y = points[0][1]
    last_y = points[-1][1]
    # close the polygon: top-left corner, left edge, scan, right edge, top-right
    points[:0] = [(0, 0), (0, first_y)]
    points.append((image_width, last_y))
    points.append((image_width, 0))
    cv2.fillPoly(canvas, [np.array(points, np.int32)], 0)
    return canvas
def lidar_node():
    # Node renamed to 'lidar_to_frame' (Fix #34); function name kept for the
    # entry point below. queue_size=1 drops stale scans instead of queueing.
    rospy.init_node('lidar_to_frame', anonymous=True)
    rospy.Subscriber('scan', LaserScan, gen_lidar_image, queue_size=1)
    rospy.spin()

if __name__ == '__main__':
    lidar_node()
|
from flask import Flask
from flask import render_template
from pygments import highlight
import pygments.lexers as pyg_lexers
from pygments.formatters import HtmlFormatter
import requests, re
from flask import request
from urlparse import urlparse
from flask import redirect, url_for
app = Flask(__name__)

@app.route("/")
def index():
    """Landing page.

    If ?url= was supplied, redirect to link discovery ('found'); if
    ?rawurl= was supplied, redirect straight to display ('show');
    otherwise render the form with the selectable font sizes.
    """
    target = request.args.get('url', None)
    size = request.args.get('fontsize', None)
    if target:
        return redirect(url_for('found', fontsize=size, url=target))
    raw_target = request.args.get('rawurl', None)
    if raw_target:
        return redirect(url_for('show', fontsize=size, url=raw_target))
    # percentage font sizes offered in the form
    return render_template('index.jinja', fontsizes=[100, 120, 150, 180, 200])
@app.route("/found/<fontsize>/<path:url>")
def found(fontsize = 100, url = None):
    """Fetch *url* and list hrefs that look like raw-file links.

    Every href containing 'raw' is kept (e.g. GitHub "raw" links);
    root-relative paths are resolved against the page's scheme + host.
    Each kept link points at the 'show' view with the chosen font size.
    """
    parsed_uri = urlparse(url)
    domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri).strip('/')
    r = requests.get(url)
    links = []
    # Removed the dead `output` accumulator: it was built up line by line
    # but never rendered (its template parameter was commented out).
    for urlfound in re.findall('href="(.*?)"', r.text):
        if 'raw' in urlfound:
            if urlfound.startswith('/'):
                urlfound = domain + urlfound
            links.append({'name': urlfound,
                          'url': url_for('show', fontsize = fontsize, url = urlfound)})
    return render_template('find_links.jinja', url = url, links = links)
@app.route("/show/<int:fontsize>/<path:url>")
def show(fontsize = 100, url = None):
    """Fetch *url* and render its contents with Pygments highlighting.

    The lexer is chosen from the file name; unknown extensions fall back
    to plain text.
    """
    r = requests.get(url)
    fname = url.split('/')[-1]
    try:
        lexer = pyg_lexers.get_lexer_for_filename(fname)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; fall back to the plain-text lexer.
        lexer = pyg_lexers.get_lexer_for_filename('.txt')
    pyg_css = HtmlFormatter().get_style_defs('.code')
    css = pyg_css.encode('utf8')
    formatter = HtmlFormatter(linenos=True, cssclass='code')
    code = highlight(r.text, lexer, formatter)
    return render_template('showcode.jinja', title = fname, code = code,
                           css = css, fontsize = fontsize)
if __name__ == "__main__":
    # Development entry point; debug mode enables the reloader and tracebacks.
    app.run(debug=True)
try to get content-type from request
from flask import Flask
from flask import render_template
from pygments import highlight
import pygments.lexers as pyg_lexers
from pygments.formatters import HtmlFormatter
import requests, re
from flask import request
from urlparse import urlparse
from flask import redirect, url_for
app = Flask(__name__)

@app.route("/")
def index():
    """Landing page: redirect to link discovery ('found') when ?url= is
    given, straight to display ('show') when ?rawurl= is given, otherwise
    render the form."""
    url = request.args.get('url', None)
    fontsize = request.args.get('fontsize', None)
    if url:
        return redirect(url_for('found', fontsize = fontsize, url = url))
    rawurl = request.args.get('rawurl', None)
    if rawurl:
        return redirect(url_for('show', fontsize = fontsize, url = rawurl))
    # percentage font sizes offered in the form
    fontsizes = [100, 120, 150, 180, 200]
    return render_template('index.jinja', fontsizes = fontsizes)
@app.route("/found/<fontsize>/<path:url>")
def found(fontsize = 100, url = None):
    """Fetch *url* and list hrefs that look like raw-file links.

    Hrefs containing 'raw' are kept; root-relative paths are resolved
    against the page's scheme + host. Each link targets the 'show' view.
    """
    parsed_uri = urlparse(url)
    domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri).strip('/')
    r = requests.get(url)
    links = []
    # Removed the dead `output` accumulator: it was assembled but its
    # template parameter was commented out, so it was never used.
    for urlfound in re.findall('href="(.*?)"', r.text):
        if 'raw' in urlfound:
            if urlfound.startswith('/'):
                urlfound = domain + urlfound
            links.append({'name': urlfound,
                          'url': url_for('show', fontsize = fontsize, url = urlfound)})
    return render_template('find_links.jinja', url = url, links = links)
@app.route("/show/<int:fontsize>/<path:url>")
def show(fontsize = 100, url = None):
    """Fetch *url* and render it with Pygments syntax highlighting.

    Lexer selection order: filename extension, then the response's
    Content-Type header, finally plain text.
    """
    r = requests.get(url)
    fname = url.split('/')[-1]
    contype = r.headers.get('content-type', None)
    if contype and ';' in contype:
        # strip parameters such as '; charset=utf-8'
        contype = contype.split(';')[0]
    try:
        lexer = pyg_lexers.get_lexer_for_filename(fname)
    except Exception:
        # Bare `except:` clauses replaced: they also swallowed SystemExit
        # and KeyboardInterrupt. get_lexer_for_mimetype(None) raises when
        # no Content-Type header was present, pushing us to the fallback.
        try:
            lexer = pyg_lexers.get_lexer_for_mimetype(contype)
        except Exception:
            lexer = pyg_lexers.get_lexer_for_filename('.txt')
    pyg_css = HtmlFormatter().get_style_defs('.code')
    css = pyg_css.encode('utf8')
    formatter = HtmlFormatter(linenos=True, cssclass='code')
    code = highlight(r.text, lexer, formatter)
    return render_template('showcode.jinja', title = fname, code = code,
                           css = css, fontsize = fontsize)
if __name__ == "__main__":
    # debug disabled for deployment; re-enable locally with app.run(debug=True)
    app.run()
from discord.ext import commands
from .utils import checks, db, fuzzy
import asyncio
import discord
import re
import lxml.etree as etree
from collections import Counter
DISCORD_API_ID = 81384788765712384
DISCORD_BOTS_ID = 110373943822540800
USER_BOTS_ROLE = 178558252869484544
CONTRIBUTORS_ROLE = 111173097888993280
DISCORD_PY_ID = 84319995256905728
def is_discord_api():
    """Command check: only allow invocation inside the Discord API guild."""
    return checks.is_in_guilds(DISCORD_API_ID)
def contributor_or_higher():
    """Command check: the author's top role must be >= the Contributors role."""
    def predicate(ctx):
        guild = ctx.guild
        if guild is None:
            return False
        contributor_role = discord.utils.find(
            lambda r: r.id == CONTRIBUTORS_ROLE, guild.roles
        )
        return contributor_role is not None and ctx.author.top_role >= contributor_role
    return commands.check(predicate)
class Feeds(db.Table):
    # One row per feed: a named, subscribable role tied to a channel.
    id = db.PrimaryKeyColumn()
    channel_id = db.Column(db.Integer(big=True))
    role_id = db.Column(db.Integer(big=True))
    name = db.Column(db.String)
class RTFM(db.Table):
    # Per-user usage counter for the ?rtfm command.
    id = db.PrimaryKeyColumn()
    user_id = db.Column(db.Integer(big=True), unique=True, index=True)
    count = db.Column(db.Integer, default=1)
class API:
    """Discord API exclusive things."""

    def __init__(self, bot):
        self.bot = bot
        # matches GitHub-style issue references such as ##123
        self.issue = re.compile(r'##(?P<number>[0-9]+)')

    # async def on_member_join(self, member):
    #     if member.guild.id != DISCORD_API_ID:
    #         return
    #     if member.bot:
    #         role = discord.Object(id=USER_BOTS_ROLE)
    #         await member.add_roles(role)

    # async def on_message(self, message):
    #     channel = message.channel
    #     author = message.author
    #     if channel.id != DISCORD_PY_ID:
    #         return
    #     if author.status is discord.Status.offline:
    #         fmt = f'{author} (ID: {author.id}) has been automatically blocked for 5 minutes for being invisible'
    #         await channel.set_permissions(author, read_messages=False, reason='invisible block')
    #         await channel.send(fmt)
    #         try:
    #             msg = f'Heya. You have been automatically blocked from <#{DISCORD_PY_ID}> for 5 minutes for being ' \
    #                   'invisible.\nTry chatting again in 5 minutes when you change your status. If you\'re curious ' \
    #                   'why invisible users are blocked, it is because they tend to break the client and cause them to ' \
    #                   'be hard to mention. Since we want to help you usually, we expect mentions to work without ' \
    #                   'headaches.\n\nSorry for the trouble.'
    #             await author.send(msg)
    #         except discord.HTTPException:
    #             pass
    #         await asyncio.sleep(300)
    #         await channel.set_permissions(author, overwrite=None, reason='invisible unblock')
    #         return
    #     m = self.issue.search(message.content)
    #     if m is not None:
    #         url = 'https://github.com/Rapptz/discord.py/issues/'
    #         await channel.send(url + m.group('number'))

    async def build_rtfm_lookup_table(self):
        """Scrape the docs pages and cache {entity name -> anchor URL} per branch."""
        cache = {}
        page_types = {
            'rewrite': (
                'http://discordpy.rtfd.io/en/rewrite/api.html',
                'http://discordpy.rtfd.io/en/rewrite/ext/commands/api.html'
            ),
            'latest': (
                'http://discordpy.rtfd.io/en/latest/api.html',
            )
        }
        for key, pages in page_types.items():
            sub = cache[key] = {}
            for page in pages:
                async with self.bot.session.get(page) as resp:
                    if resp.status != 200:
                        raise RuntimeError('Cannot build rtfm lookup table, try again later.')
                    text = await resp.text(encoding='utf-8')
                    root = etree.fromstring(text, etree.HTMLParser())
                    # each documented entity has a headerlink anchor under its <dt>
                    nodes = root.findall(".//dt/a[@class='headerlink']")
                    for node in nodes:
                        href = node.get('href', '')
                        as_key = href.replace('#discord.', '').replace('ext.commands.', '')
                        sub[as_key] = page + href
        self._rtfm_cache = cache

    async def do_rtfm(self, ctx, key, obj):
        """Fuzzy-search the docs for *obj* under branch *key* and reply with links."""
        base_url = f'http://discordpy.rtfd.io/en/{key}/'
        if obj is None:
            await ctx.send(base_url)
            return
        if not hasattr(self, '_rtfm_cache'):
            await ctx.trigger_typing()
            await self.build_rtfm_lookup_table()
        if key == 'rewrite':
            # common shorthand -> canonical class name
            pit_of_success_helpers = {
                'vc': 'VoiceClient',
                'msg': 'Message',
                'color': 'Colour',
                'perm': 'Permissions',
                'channel': 'TextChannel',
                'chan': 'TextChannel',
            }
            # point the abc.Messageable types properly:
            q = obj.lower()
            for name in dir(discord.abc.Messageable):
                if name[0] == '_':
                    continue
                if q == name:
                    obj = f'abc.Messageable.{name}'
                    break
            def replace(o):
                return pit_of_success_helpers.get(o.group(0), '')
            pattern = re.compile('|'.join(fr'\b{k}\b' for k in pit_of_success_helpers.keys()))
            obj = pattern.sub(replace, obj)
        cache = self._rtfm_cache[key]
        matches = fuzzy.extract_or_exact(obj, cache, scorer=fuzzy.token_sort_ratio, limit=5, score_cutoff=60)
        e = discord.Embed(colour=discord.Colour.blurple())
        if len(matches) == 0:
            return await ctx.send('Could not find anything. Sorry.')
        e.description = '\n'.join(f'[{key}]({url}) ({p}%)' for key, p, url in matches)
        await ctx.send(embed=e)
        # track per-user usage, but only inside the API guild
        if ctx.guild and ctx.guild.id == DISCORD_API_ID:
            query = 'INSERT INTO rtfm (user_id) VALUES ($1) ON CONFLICT (user_id) DO UPDATE SET count = rtfm.count + 1;'
            await ctx.db.execute(query, ctx.author.id)

    @commands.group(aliases=['rtfd'], invoke_without_command=True)
    async def rtfm(self, ctx, *, obj: str = None):
        """Gives you a documentation link for a discord.py entity.
        Events, objects, and functions are all supported through a
        a cruddy fuzzy algorithm.
        """
        await self.do_rtfm(ctx, 'latest', obj)

    @rtfm.command(name='rewrite')
    async def rtfm_rewrite(self, ctx, *, obj: str = None):
        """Gives you a documentation link for a rewrite discord.py entity."""
        await self.do_rtfm(ctx, 'rewrite', obj)

    async def _member_stats(self, ctx, member, total_uses):
        """Send an embed with one member's rtfm usage and share of the total."""
        e = discord.Embed(title='RTFM Stats')
        e.set_author(name=str(member), icon_url=member.avatar_url)
        query = 'SELECT count FROM rtfm WHERE user_id=$1;'
        record = await ctx.db.fetchrow(query, member.id)
        if record is None:
            count = 0
        else:
            count = record['count']
        e.add_field(name='Uses', value=count)
        # Guard against an empty table: SUM() over no rows yields NULL/0,
        # which previously caused a TypeError/ZeroDivisionError here.
        if total_uses:
            e.add_field(name='Percentage', value=f'{count/total_uses:.2%} out of {total_uses}')
        else:
            e.add_field(name='Percentage', value='No uses recorded yet')
        e.colour = discord.Colour.blurple()
        await ctx.send(embed=e)

    @rtfm.command()
    async def stats(self, ctx, *, member: discord.Member = None):
        """Tells you stats about the ?rtfm command."""
        query = 'SELECT SUM(count) AS total_uses FROM rtfm;'
        record = await ctx.db.fetchrow(query)
        # SUM() is NULL when the table is empty; normalise to 0
        total_uses = record['total_uses'] or 0
        if member is not None:
            return await self._member_stats(ctx, member, total_uses)
        query = 'SELECT user_id, count FROM rtfm ORDER BY count DESC LIMIT 10;'
        records = await ctx.db.fetch(query)
        output = []
        output.append(f'**Total uses**: {total_uses}')
        # first we get the most used users
        if records:
            output.append(f'**Top {len(records)} users**:')
            for rank, (user_id, count) in enumerate(records, 1):
                user = self.bot.get_user(user_id)
                if rank != 10:
                    output.append(f'{rank}\u20e3 {user}: {count}')
                else:
                    output.append(f'\N{KEYCAP TEN} {user}: {count}')
        await ctx.send('\n'.join(output))

    def library_name(self, channel):
        # channel naming convention: language_<name>, dashes encode dots
        name = channel.name
        index = name.find('_')
        if index != -1:
            name = name[index + 1:]
        return name.replace('-', '.')

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    @is_discord_api()
    async def block(self, ctx, *, member: discord.Member):
        """Blocks a user from your channel."""
        try:
            # Fixed: blocking must deny read access; this previously set
            # read_messages=True, which granted access instead of removing it.
            await ctx.channel.set_permissions(member, read_messages=False)
        except:
            await ctx.send('\N{THUMBS DOWN SIGN}')
        else:
            await ctx.send('\N{THUMBS UP SIGN}')

    async def get_feeds(self, channel_id):
        """Return {feed name: role id} for every feed in the channel."""
        query = 'SELECT name, role_id FROM feeds WHERE channel_id=$1;'
        feeds = await self.bot.pool.fetch(query, channel_id)
        return {f['name']: f['role_id'] for f in feeds}

    @commands.group(name='feeds', invoke_without_command=True)
    @commands.guild_only()
    async def _feeds(self, ctx):
        """Shows the list of feeds that the channel has.
        A feed is something that users can opt-in to
        to receive news about a certain feed by running
        the `sub` command (and opt-out by doing the `unsub` command).
        You can publish to a feed by using the `publish` command.
        """
        feeds = await self.get_feeds(ctx.channel.id)
        if len(feeds) == 0:
            await ctx.send('This channel has no feeds.')
            return
        names = '\n'.join(f'- {r}' for r in feeds)
        await ctx.send(f'Found {len(feeds)} feeds.\n{names}')

    @_feeds.command(name='create')
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def feeds_create(self, ctx, *, name: str):
        """Creates a feed with the specified name.
        You need Manage Roles permissions to create a feed.
        """
        name = name.lower()
        if name in ('@everyone', '@here'):
            return await ctx.send('That is an invalid feed name.')
        query = 'SELECT role_id FROM feeds WHERE channel_id=$1 AND name=$2;'
        # Fixed: this used an undefined name `con` (NameError at runtime);
        # the database handle lives on the invocation context as ctx.db.
        exists = await ctx.db.fetchrow(query, ctx.channel.id, name)
        if exists is not None:
            await ctx.send('This feed already exists.')
            return
        # create the role
        if ctx.guild.id == DISCORD_API_ID:
            role_name = self.library_name(ctx.channel) + ' ' + name
        else:
            role_name = name
        role = await ctx.guild.create_role(name=role_name, permissions=discord.Permissions.none())
        query = 'INSERT INTO feeds (role_id, channel_id, name) VALUES ($1, $2, $3);'
        await ctx.db.execute(query, role.id, ctx.channel.id, name)
        await ctx.send(f'{ctx.tick(True)} Successfully created feed.')

    @_feeds.command(name='delete', aliases=['remove'])
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def feeds_delete(self, ctx, *, feed: str):
        """Removes a feed from the channel.
        This will also delete the associated role so this
        action is irreversible.
        """
        query = 'DELETE FROM feeds WHERE channel_id=$1 AND name=$2 RETURNING *;'
        records = await ctx.db.fetch(query, ctx.channel.id, feed)
        if len(records) == 0:
            return await ctx.send('This feed does not exist.')
        for record in records:
            role = discord.utils.find(lambda r: r.id == record['role_id'], ctx.guild.roles)
            if role is not None:
                try:
                    await role.delete()
                except discord.HTTPException:
                    # best effort: the DB row is already gone
                    continue
        await ctx.send(f'{ctx.tick(True)} Removed feed.')

    async def do_subscription(self, ctx, feed, action):
        """Apply *action* (add/remove roles) for *feed* in the current channel."""
        feeds = await self.get_feeds(ctx.channel.id)
        if len(feeds) == 0:
            await ctx.send('This channel has no feeds set up.')
            return
        if feed not in feeds:
            await ctx.send(f'This feed does not exist.\nValid feeds: {", ".join(feeds)}')
            return
        role_id = feeds[feed]
        role = discord.utils.find(lambda r: r.id == role_id, ctx.guild.roles)
        if role is not None:
            await action(role)
            await ctx.send(ctx.tick(True), delete_after=8.0)
        else:
            await ctx.send(ctx.tick(False), delete_after=8.0)

    @commands.command()
    @commands.guild_only()
    async def sub(self, ctx, *, feed: str):
        """Subscribes to the publication of a feed.
        This will allow you to receive updates from the channel
        owner. To unsubscribe, see the `unsub` command.
        """
        await self.do_subscription(ctx, feed, ctx.author.add_roles)

    @commands.command()
    @commands.guild_only()
    async def unsub(self, ctx, *, feed: str):
        """Unsubscribe to the publication of a feed.
        This will remove you from notifications of a feed you
        are no longer interested in. You can always sub back by
        using the `sub` command.
        """
        await self.do_subscription(ctx, feed, ctx.author.remove_roles)

    @commands.command()
    @commands.has_permissions(manage_roles=True)
    @commands.guild_only()
    async def publish(self, ctx, feed: str, *, content: str):
        """Publishes content to a feed.
        Everyone who is subscribed to the feed will be notified
        with the content. Use this to notify people of important
        events or changes.
        """
        feeds = await self.get_feeds(ctx.channel.id)
        feed = feed.lower()
        if feed not in feeds:
            await ctx.send('This feed does not exist.')
            return
        role = discord.utils.get(ctx.guild.roles, id=feeds[feed])
        if role is None:
            fmt = 'Uh.. a fatal error occurred here. The role associated with ' \
                  'this feed has been removed or not found. ' \
                  'Please recreate the feed.'
            await ctx.send(fmt)
            return
        # delete the message we used to invoke it
        try:
            await ctx.message.delete()
        except:
            pass
        # make the role mentionable
        await role.edit(mentionable=True)
        # then send the message.. (clamped to Discord's 2000-char limit)
        await ctx.send(f'{role.mention}: {content}'[:2000])
        # then make the role unmentionable
        await role.edit(mentionable=False)

    async def refresh_faq_cache(self):
        """Scrape the FAQ page and cache {question text -> anchor URL}."""
        self.faq_entries = {}
        base_url = 'http://discordpy.readthedocs.io/en/rewrite/faq.html'
        async with self.bot.session.get(base_url) as resp:
            text = await resp.text(encoding='utf-8')
            root = etree.fromstring(text, etree.HTMLParser())
            nodes = root.findall(".//div[@id='questions']/ul[@class='simple']//ul/li/a")
            for node in nodes:
                self.faq_entries[''.join(node.itertext()).strip()] = base_url + node.get('href').strip()

    @commands.command()
    async def faq(self, ctx, *, query: str = None):
        """Shows an FAQ entry from the discord.py documentation"""
        if not hasattr(self, 'faq_entries'):
            await self.refresh_faq_cache()
        if query is None:
            # Fixed: `self.bot.say` does not exist on the rewrite bot
            # (old-API leftover); every other reply here uses ctx.send.
            return await ctx.send('http://discordpy.readthedocs.io/en/rewrite/faq.html')
        matches = fuzzy.extract_matches(query, self.faq_entries, scorer=fuzzy.partial_ratio, score_cutoff=40)
        if len(matches) == 0:
            return await ctx.send('Nothing found...')
        fmt = '\n'.join(f'**{key}**\n{value}' for key, _, value in matches)
        await ctx.send(fmt)
def setup(bot):
    """Extension entry point: load the API cog onto the bot."""
    bot.add_cog(API(bot))
Uncomment API specific code to prepare for deployment.
from discord.ext import commands
from .utils import checks, db, fuzzy
import asyncio
import discord
import re
import lxml.etree as etree
from collections import Counter
# Discord snowflake IDs used throughout this cog.
DISCORD_API_ID = 81384788765712384        # guild id (checked via is_in_guilds)
DISCORD_BOTS_ID = 110373943822540800      # not referenced in this file -- presumably the bots guild; verify
USER_BOTS_ROLE = 178558252869484544       # role granted to bot accounts in on_member_join
CONTRIBUTORS_ROLE = 111173097888993280    # role compared against author.top_role
DISCORD_PY_ID = 84319995256905728         # channel id guarded in on_message
def is_discord_api():
    """Command check: only allow invocation inside the Discord API guild."""
    return checks.is_in_guilds(DISCORD_API_ID)
def contributor_or_higher():
    """Command check: the author's top role must be >= the Contributors role."""
    def predicate(ctx):
        guild = ctx.guild
        if guild is None:
            # DMs have no roles, so the check can never pass
            return False
        role = discord.utils.find(lambda r: r.id == CONTRIBUTORS_ROLE, guild.roles)
        if role is None:
            return False
        return ctx.author.top_role >= role
    return commands.check(predicate)
class Feeds(db.Table):
    # One row per feed: a named, subscribable role tied to a channel.
    id = db.PrimaryKeyColumn()
    channel_id = db.Column(db.Integer(big=True))
    role_id = db.Column(db.Integer(big=True))
    name = db.Column(db.String)
class RTFM(db.Table):
    # Per-user usage counter for the ?rtfm command.
    id = db.PrimaryKeyColumn()
    user_id = db.Column(db.Integer(big=True), unique=True, index=True)
    count = db.Column(db.Integer, default=1)
class API:
"""Discord API exclusive things."""
def __init__(self, bot):
self.bot = bot
self.issue = re.compile(r'##(?P<number>[0-9]+)')
async def on_member_join(self, member):
if member.guild.id != DISCORD_API_ID:
return
if member.bot:
role = discord.Object(id=USER_BOTS_ROLE)
await member.add_roles(role)
async def on_message(self, message):
channel = message.channel
author = message.author
if channel.id != DISCORD_PY_ID:
return
if author.status is discord.Status.offline:
fmt = f'{author} (ID: {author.id}) has been automatically blocked for 5 minutes for being invisible'
await channel.set_permissions(author, read_messages=False, reason='invisible block')
await channel.send(fmt)
try:
msg = f'Heya. You have been automatically blocked from <#{DISCORD_PY_ID}> for 5 minutes for being ' \
'invisible.\nTry chatting again in 5 minutes when you change your status. If you\'re curious ' \
'why invisible users are blocked, it is because they tend to break the client and cause them to ' \
'be hard to mention. Since we want to help you usually, we expect mentions to work without ' \
'headaches.\n\nSorry for the trouble.'
await author.send(msg)
except discord.HTTPException:
pass
await asyncio.sleep(300)
await channel.set_permissions(author, overwrite=None, reason='invisible unblock')
return
m = self.issue.search(message.content)
if m is not None:
url = 'https://github.com/Rapptz/discord.py/issues/'
await channel.send(url + m.group('number'))
async def build_rtfm_lookup_table(self):
cache = {}
page_types = {
'rewrite': (
'http://discordpy.rtfd.io/en/rewrite/api.html',
'http://discordpy.rtfd.io/en/rewrite/ext/commands/api.html'
),
'latest': (
'http://discordpy.rtfd.io/en/latest/api.html',
)
}
for key, pages in page_types.items():
sub = cache[key] = {}
for page in pages:
async with self.bot.session.get(page) as resp:
if resp.status != 200:
raise RuntimeError('Cannot build rtfm lookup table, try again later.')
text = await resp.text(encoding='utf-8')
root = etree.fromstring(text, etree.HTMLParser())
nodes = root.findall(".//dt/a[@class='headerlink']")
for node in nodes:
href = node.get('href', '')
as_key = href.replace('#discord.', '').replace('ext.commands.', '')
sub[as_key] = page + href
self._rtfm_cache = cache
async def do_rtfm(self, ctx, key, obj):
    """Resolve *obj* against the rtfm cache for branch *key* and reply with
    up to 5 fuzzy-matched documentation links.

    With no *obj*, just links the branch's base documentation URL. Also
    bumps the caller's usage counter when invoked on the discord.py guild.
    """
    base_url = f'http://discordpy.rtfd.io/en/{key}/'
    if obj is None:
        await ctx.send(base_url)
        return
    if not hasattr(self, '_rtfm_cache'):
        # first use: building the table is slow, show a typing indicator
        await ctx.trigger_typing()
        await self.build_rtfm_lookup_table()
    if key == 'rewrite':
        # common shorthand -> canonical class names
        pit_of_success_helpers = {
            'vc': 'VoiceClient',
            'msg': 'Message',
            'color': 'Colour',
            'perm': 'Permissions',
            'channel': 'TextChannel',
            'chan': 'TextChannel',
        }
        # point the abc.Messageable types properly:
        q = obj.lower()
        for name in dir(discord.abc.Messageable):
            if name[0] == '_':
                continue
            if q == name:
                obj = f'abc.Messageable.{name}'
                break
        def replace(o):
            # pattern only matches dict keys, so the '' default never fires
            return pit_of_success_helpers.get(o.group(0), '')
        pattern = re.compile('|'.join(fr'\b{k}\b' for k in pit_of_success_helpers.keys()))
        obj = pattern.sub(replace, obj)
    cache = self._rtfm_cache[key]
    matches = fuzzy.extract_or_exact(obj, cache, scorer=fuzzy.token_sort_ratio, limit=5, score_cutoff=60)
    e = discord.Embed(colour=discord.Colour.blurple())
    if len(matches) == 0:
        return await ctx.send('Could not find anything. Sorry.')
    e.description = '\n'.join(f'[{key}]({url}) ({p}%)' for key, p, url in matches)
    await ctx.send(embed=e)
    if ctx.guild and ctx.guild.id == DISCORD_API_ID:
        # track per-user ?rtfm usage on the discord.py guild only
        query = 'INSERT INTO rtfm (user_id) VALUES ($1) ON CONFLICT (user_id) DO UPDATE SET count = rtfm.count + 1;'
        await ctx.db.execute(query, ctx.author.id)
@commands.group(aliases=['rtfd'], invoke_without_command=True)
async def rtfm(self, ctx, *, obj: str = None):
    """Gives you a documentation link for a discord.py entity.

    Events, objects, and functions are all supported through
    a cruddy fuzzy algorithm.
    """
    await self.do_rtfm(ctx, 'latest', obj)
@rtfm.command(name='rewrite')
async def rtfm_rewrite(self, ctx, *, obj: str = None):
    """Gives you a documentation link for a rewrite discord.py entity."""
    # same lookup as `rtfm`, targeting the rewrite branch docs
    await self.do_rtfm(ctx, 'rewrite', obj)
async def _member_stats(self, ctx, member, total_uses):
    """Send an embed with a single member's ?rtfm usage stats.

    Parameters
    -----------
    ctx: the invocation context (provides ``ctx.db``).
    member: the member whose stats to show.
    total_uses: total ?rtfm uses across all users; may be 0 or None
        when the table is empty.
    """
    e = discord.Embed(title='RTFM Stats')
    e.set_author(name=str(member), icon_url=member.avatar_url)
    query = 'SELECT count FROM rtfm WHERE user_id=$1;'
    record = await ctx.db.fetchrow(query, member.id)
    count = 0 if record is None else record['count']
    e.add_field(name='Uses', value=count)
    # guard against ZeroDivisionError when nobody has used ?rtfm yet
    total_uses = total_uses or 0
    percentage = count / total_uses if total_uses else 0.0
    e.add_field(name='Percentage', value=f'{percentage:.2%} out of {total_uses}')
    e.colour = discord.Colour.blurple()
    await ctx.send(embed=e)
@rtfm.command()
async def stats(self, ctx, *, member: discord.Member = None):
    """Tells you stats about the ?rtfm command."""
    query = 'SELECT SUM(count) AS total_uses FROM rtfm;'
    record = await ctx.db.fetchrow(query)
    # SUM() over an empty table yields NULL -> None; normalize to 0 so the
    # output (and _member_stats' division) never sees None
    total_uses = record['total_uses'] or 0
    if member is not None:
        return await self._member_stats(ctx, member, total_uses)
    query = 'SELECT user_id, count FROM rtfm ORDER BY count DESC LIMIT 10;'
    records = await ctx.db.fetch(query)
    output = [f'**Total uses**: {total_uses}']
    # first we get the most used users
    if records:
        output.append(f'**Top {len(records)} users**:')
        for rank, (user_id, count) in enumerate(records, 1):
            user = self.bot.get_user(user_id)
            # ranks 1-9 use COMBINING ENCLOSING KEYCAP; 10 needs KEYCAP TEN
            if rank != 10:
                output.append(f'{rank}\u20e3 {user}: {count}')
            else:
                output.append(f'\N{KEYCAP TEN} {user}: {count}')
    await ctx.send('\n'.join(output))
def library_name(self, channel):
    """Derive a library name from a channel named ``language_<name>``.

    Everything up to and including the first underscore is dropped, and
    dashes become dots (e.g. ``lang_discord-py`` -> ``discord.py``).
    """
    _prefix, sep, suffix = channel.name.partition('_')
    base = suffix if sep else channel.name
    return base.replace('-', '.')
@commands.command()
@commands.has_permissions(manage_roles=True)
@is_discord_api()
async def block(self, ctx, *, member: discord.Member):
    """Blocks a user from your channel."""
    try:
        # BUG FIX: was read_messages=True, which *grants* read access --
        # the opposite of blocking. Deny it instead.
        await ctx.channel.set_permissions(member, read_messages=False)
    except discord.HTTPException:
        # narrowed from a bare except: only API failures mean "could not block"
        await ctx.send('\N{THUMBS DOWN SIGN}')
    else:
        await ctx.send('\N{THUMBS UP SIGN}')
async def get_feeds(self, channel_id):
    """Return a mapping of feed name -> role id for the given channel."""
    query = 'SELECT name, role_id FROM feeds WHERE channel_id=$1;'
    records = await self.bot.pool.fetch(query, channel_id)
    mapping = {}
    for record in records:
        mapping[record['name']] = record['role_id']
    return mapping
@commands.group(name='feeds', invoke_without_command=True)
@commands.guild_only()
async def _feeds(self, ctx):
    """Shows the list of feeds that the channel has.
    A feed is something that users can opt-in to
    to receive news about a certain feed by running
    the `sub` command (and opt-out by doing the `unsub` command).
    You can publish to a feed by using the `publish` command.
    """
    feeds = await self.get_feeds(ctx.channel.id)
    if not feeds:
        return await ctx.send('This channel has no feeds.')
    names = '\n'.join(f'- {feed_name}' for feed_name in feeds)
    await ctx.send(f'Found {len(feeds)} feeds.\n{names}')
@_feeds.command(name='create')
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def feeds_create(self, ctx, *, name: str):
    """Creates a feed with the specified name.
    You need Manage Roles permissions to create a feed.
    """
    name = name.lower()
    if name in ('@everyone', '@here'):
        return await ctx.send('That is an invalid feed name.')
    query = 'SELECT role_id FROM feeds WHERE channel_id=$1 AND name=$2;'
    # BUG FIX: previously referenced an undefined name `con` (NameError);
    # the database connection lives on ctx.db like everywhere else in this cog.
    exists = await ctx.db.fetchrow(query, ctx.channel.id, name)
    if exists is not None:
        await ctx.send('This feed already exists.')
        return
    # create the role; on the discord.py guild, namespace it by library
    if ctx.guild.id == DISCORD_API_ID:
        role_name = self.library_name(ctx.channel) + ' ' + name
    else:
        role_name = name
    role = await ctx.guild.create_role(name=role_name, permissions=discord.Permissions.none())
    query = 'INSERT INTO feeds (role_id, channel_id, name) VALUES ($1, $2, $3);'
    await ctx.db.execute(query, role.id, ctx.channel.id, name)
    await ctx.send(f'{ctx.tick(True)} Successfully created feed.')
@_feeds.command(name='delete', aliases=['remove'])
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def feeds_delete(self, ctx, *, feed: str):
    """Removes a feed from the channel.
    This will also delete the associated role so this
    action is irreversible.
    """
    # feed names are stored lowercased by feeds_create; normalize the input
    # so `?feeds delete Foo` matches the stored `foo` (consistent with publish)
    feed = feed.lower()
    query = 'DELETE FROM feeds WHERE channel_id=$1 AND name=$2 RETURNING *;'
    records = await ctx.db.fetch(query, ctx.channel.id, feed)
    if len(records) == 0:
        return await ctx.send('This feed does not exist.')
    for record in records:
        role = discord.utils.find(lambda r: r.id == record['role_id'], ctx.guild.roles)
        if role is not None:
            try:
                await role.delete()
            except discord.HTTPException:
                # best-effort cleanup: the DB row is already gone
                continue
    await ctx.send(f'{ctx.tick(True)} Removed feed.')
async def do_subscription(self, ctx, feed, action):
    """Shared implementation for the `sub`/`unsub` commands.

    Parameters
    -----------
    feed: user-supplied feed name.
    action: coroutine taking a role, e.g. ``member.add_roles`` or
        ``member.remove_roles``.
    """
    feeds = await self.get_feeds(ctx.channel.id)
    if len(feeds) == 0:
        await ctx.send('This channel has no feeds set up.')
        return
    # feed names are stored lowercased by feeds_create; normalize the input
    # for consistency with the publish command
    feed = feed.lower()
    if feed not in feeds:
        await ctx.send(f'This feed does not exist.\nValid feeds: {", ".join(feeds)}')
        return
    role_id = feeds[feed]
    role = discord.utils.find(lambda r: r.id == role_id, ctx.guild.roles)
    if role is not None:
        await action(role)
        await ctx.send(ctx.tick(True), delete_after=8.0)
    else:
        # role was deleted out from under us; signal failure briefly
        await ctx.send(ctx.tick(False), delete_after=8.0)
@commands.command()
@commands.guild_only()
async def sub(self, ctx, *, feed: str):
    """Subscribes to the publication of a feed.
    This will allow you to receive updates from the channel
    owner. To unsubscribe, see the `unsub` command.
    """
    # subscribing == adding the feed's role to the invoker
    subscribe = ctx.author.add_roles
    await self.do_subscription(ctx, feed, subscribe)
@commands.command()
@commands.guild_only()
async def unsub(self, ctx, *, feed: str):
    """Unsubscribe to the publication of a feed.
    This will remove you from notifications of a feed you
    are no longer interested in. You can always sub back by
    using the `sub` command.
    """
    # unsubscribing == removing the feed's role from the invoker
    unsubscribe = ctx.author.remove_roles
    await self.do_subscription(ctx, feed, unsubscribe)
@commands.command()
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def publish(self, ctx, feed: str, *, content: str):
    """Publishes content to a feed.
    Everyone who is subscribed to the feed will be notified
    with the content. Use this to notify people of important
    events or changes.
    """
    feeds = await self.get_feeds(ctx.channel.id)
    feed = feed.lower()
    if feed not in feeds:
        await ctx.send('This feed does not exist.')
        return
    role = discord.utils.get(ctx.guild.roles, id=feeds[feed])
    if role is None:
        fmt = 'Uh.. a fatal error occurred here. The role associated with ' \
              'this feed has been removed or not found. ' \
              'Please recreate the feed.'
        await ctx.send(fmt)
        return
    # delete the message we used to invoke it
    try:
        await ctx.message.delete()
    except discord.HTTPException:
        pass
    # make the role mentionable only for the duration of the announcement;
    # the finally block guarantees it is reverted even if the send fails
    # (previously a failed send left the role permanently mentionable)
    await role.edit(mentionable=True)
    try:
        # Discord caps message length at 2000 characters
        await ctx.send(f'{role.mention}: {content}'[:2000])
    finally:
        await role.edit(mentionable=False)
async def refresh_faq_cache(self):
    """Scrape the discord.py FAQ page and cache {question text: anchor URL}
    in ``self.faq_entries``."""
    self.faq_entries = {}
    base_url = 'http://discordpy.readthedocs.io/en/rewrite/faq.html'
    async with self.bot.session.get(base_url) as resp:
        text = await resp.text(encoding='utf-8')
        root = etree.fromstring(text, etree.HTMLParser())
        # each FAQ question is a nested <li><a> in the table of contents
        nodes = root.findall(".//div[@id='questions']/ul[@class='simple']//ul/li/a")
        for node in nodes:
            # href is a fragment like '#how-do-i-...'; append to the page URL
            self.faq_entries[''.join(node.itertext()).strip()] = base_url + node.get('href').strip()
@commands.command()
async def faq(self, ctx, *, query: str = None):
    """Shows an FAQ entry from the discord.py documentation"""
    if not hasattr(self, 'faq_entries'):
        await self.refresh_faq_cache()
    if query is None:
        # BUG FIX: `self.bot.say` is the old async-branch API and does not
        # exist on rewrite bots; use ctx.send like the rest of this cog.
        return await ctx.send('http://discordpy.readthedocs.io/en/rewrite/faq.html')
    matches = fuzzy.extract_matches(query, self.faq_entries, scorer=fuzzy.partial_ratio, score_cutoff=40)
    if len(matches) == 0:
        return await ctx.send('Nothing found...')
    fmt = '\n'.join(f'**{key}**\n{value}' for key, _, value in matches)
    await ctx.send(fmt)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(API(bot))
|
#!/usr/bin/env python
import aiohttp
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from utils import aiohttp_wrap as aw
class AskAsk:
    """Cog that scrapes Ask.com search results and posts the top links."""

    def __init__(self, bot):
        self.bot = bot
        self.aio_session = bot.aio_session
        self.scrape_uri = 'http://www.ask.com/web?q={}&o=0&qo=homepageSearchBox'

    @staticmethod
    def _get_ask_links(html):
        """ Gets all result links from [REDACTED]

        Returns a list of result hrefs, or None when nothing was found.
        """
        soup = BeautifulSoup(html, 'lxml')
        link_list = []
        for link in soup.find_all('a', {'class': 'result-link'}, href=True):
            # BUG FIX: find_all yields bs4 Tag objects, never str, so the old
            # `type(link) is str` guard meant no link was EVER collected and
            # the search always reported no results. Filter protocol-relative
            # ('//...') links directly instead. Debug prints removed.
            if not link['href'].startswith('//'):
                link_list.append(link['href'])
        if not link_list:
            return None
        return link_list

    @commands.command(name='ask', aliases=['g'])
    async def ask_search(self, ctx, *, query):
        """ Get search results from [REDACTED], now that Google hates me. """
        # Handle no input
        if not query:
            return await ctx.send('Feel free to search something.')
        # Format query for search url
        search_query = query.replace(' ', '+')
        # Get response and store links (desktop UA avoids a mobile layout)
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        resp_html = await aw.aio_get_text(self.aio_session, self.scrape_uri.format(search_query), headers=headers)
        link_list = AskAsk._get_ask_links(resp_html)
        if not link_list:
            # BUG FIX: removed stray second backtick after `{}` in the message
            return await ctx.send("Sorry, I couldn't find anything for `{}`.".format(query))
        if len(link_list) >= 3:
            await ctx.send(f'**Top result:**\n{link_list[0]}\n**See Also:**\n1. <{link_list[1]}>\n2. <{link_list[2]}>')
        elif len(link_list) >= 2:
            await ctx.send(f'**Top result:**\n{link_list[0]}\n**See Also:**\n1. <{link_list[1]}>')
        else:
            await ctx.send(f'**Top result:**\n{link_list[0]}')
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(AskAsk(bot))
found the error
#!/usr/bin/env python
import aiohttp
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from utils import aiohttp_wrap as aw
class AskAsk:
    """Cog that scrapes Ask.com search results and posts the top links."""

    def __init__(self, bot):
        self.bot = bot
        self.aio_session = bot.aio_session
        self.scrape_uri = 'http://www.ask.com/web?q={}&o=0&qo=homepageSearchBox'

    @staticmethod
    def _get_ask_links(html):
        """ Gets all result links from [REDACTED]

        Returns a list of result hrefs, or None when nothing was found.
        Marked @staticmethod: it takes no self and is called as
        AskAsk._get_ask_links(...).
        """
        soup = BeautifulSoup(html, 'lxml')
        link_list = []
        for link in soup.find_all('a', {'class': 'result-link'}, href=True):
            # skip protocol-relative ('//...') internal links
            if not link['href'].startswith('//'):
                link_list.append(link['href'])
        if not link_list:
            return None
        return link_list

    @commands.command(name='ask', aliases=['g'])
    async def ask_search(self, ctx, *, query):
        """ Get search results from [REDACTED], now that Google hates me. """
        # Handle no input
        if not query:
            return await ctx.send('Feel free to search something.')
        # Format query for search url
        search_query = query.replace(' ', '+')
        # Get response and store links (desktop UA avoids a mobile layout)
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
        resp_html = await aw.aio_get_text(self.aio_session, self.scrape_uri.format(search_query), headers=headers)
        link_list = AskAsk._get_ask_links(resp_html)
        if not link_list:
            # BUG FIX: removed stray second backtick after `{}` in the message
            return await ctx.send("Sorry, I couldn't find anything for `{}`.".format(query))
        if len(link_list) >= 3:
            await ctx.send(f'**Top result:**\n{link_list[0]}\n**See Also:**\n1. <{link_list[1]}>\n2. <{link_list[2]}>')
        elif len(link_list) >= 2:
            await ctx.send(f'**Top result:**\n{link_list[0]}\n**See Also:**\n1. <{link_list[1]}>')
        else:
            await ctx.send(f'**Top result:**\n{link_list[0]}')
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(AskAsk(bot))
|
check_cd takes a Member, not an int
|
import random
import urllib.parse
import sqlite3
import asyncio
import aiohttp
import discord
from discord.ext import commands
import loadconfig
class fun(commands.Cog):
    """Assorted fun commands: meme replies, random numbers, cat pictures,
    XKCD comics, user-created image tags, and bad jokes.

    Docstrings were originally German; translated to English here.
    User-facing message strings remain German on purpose.
    """

    # SQLite database file used by the `tags` command
    db = 'reaction.db'

    def __init__(self, bot):
        self.bot = bot

    async def cog_command_error(self, ctx, error):
        # minimal cog-level error handler: log to stdout only
        print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))

    def userOnline(self, memberList):
        """Return all non-bot members whose status is exactly `online`
        (idle/dnd/offline members are excluded)."""
        online = []
        for i in memberList:
            if i.status == discord.Status.online and i.bot == False:
                online.append(i)
        return online

    @commands.command(aliases=['javascript', 'nodejs', 'js'])
    async def java(self, ctx):
        '''Because Java != Javascript'''
        await ctx.send(':interrobang: Meintest du jQuery, Javascript oder Node.js? https://abload.de/img/2016-05-102130191kzpu.png')

    @commands.command(aliases=['c++', 'c#', 'objective-c'])
    async def csharp(self, ctx):
        '''How is anyone supposed to keep these apart???'''
        await ctx.send(':interrobang: Meintest du C, C++, C# oder Objective-C? https://i.imgur.com/Nd4aAXO.png')

    @commands.command()
    async def praise(self, ctx):
        '''Praise the Sun'''
        await ctx.send('https://i.imgur.com/K8ySn3e.gif')

    @commands.command()
    async def css(self, ctx):
        '''Counter Strike: Source'''
        await ctx.send('http://i.imgur.com/TgPKFTz.gif')

    @commands.command()
    async def countdown(self, ctx):
        '''It's the final countdown'''
        countdown = ['five', 'four', 'three', 'two', 'one']
        for num in countdown:
            await ctx.send('**:{0}:**'.format(num))
            await asyncio.sleep(1)
        await ctx.send('**:ok:** DING DING DING')

    @commands.command(aliases=['cat', 'randomcat'])
    async def neko(self, ctx):
        '''Random cat pictures nyan~'''
        #http://discordpy.readthedocs.io/en/latest/faq.html#what-does-blocking-mean
        async with aiohttp.ClientSession() as cs:
            async with cs.get('http://aws.random.cat/meow') as r:
                res = await r.json()
                emojis = [':cat2: ', ':cat: ', ':heart_eyes_cat: ']
                await ctx.send(random.choice(emojis) + res['file'])

    @commands.command(aliases=['rand'])
    async def random(self, ctx, *arg):
        '''Outputs a random number or member
        Usage:
        -----------
        :random
            Random number between 1 and 100
        :random coin
            Flips a coin (heads or tails)
        :random 6
            Random number between 1 and 6
        :random 10 20
            Random number between 10 and 20
        :random user
            Picks a random member that is currently online
        :random choice Dani Eddy Shinobu
            Picks one name from the given list
        '''
        if ctx.invoked_subcommand is None:
            if not arg:
                start = 1
                end = 100
            elif arg[0] == 'flip' or arg[0] == 'coin':
                coin = ['Kopf', 'Zahl']
                await ctx.send(f':arrows_counterclockwise: {random.choice(coin)}')
                return
            elif arg[0] == 'choice':
                choices = list(arg)
                choices.pop(0)  # drop the 'choice' keyword itself
                await ctx.send(f':congratulations: The winner is {random.choice(choices)}')
                return
            elif arg[0] == 'user':
                online = self.userOnline(ctx.guild.members)
                randomuser = random.choice(online)
                # only mention the winner if the invoker could mention anyway
                if ctx.channel.permissions_for(ctx.author).mention_everyone:
                    user = randomuser.mention
                else:
                    user = randomuser.display_name
                await ctx.send(f':congratulations: The winner is {user}')
                return
            elif len(arg) == 1:
                start = 1
                end = int(arg[0])
            elif len(arg) == 2:
                start = int(arg[0])
                end = int(arg[1])
            # NOTE(review): non-numeric args reach int() and raise ValueError,
            # which is only printed by cog_command_error — no user feedback
            await ctx.send(f'**:arrows_counterclockwise:** Zufällige Zahl ({start} - {end}): {random.randint(start, end)}')

    @commands.command()
    async def steinigt(self, ctx, member:str):
        '''Monty Python'''
        await ctx.send(f'R.I.P. {member}\nhttps://media.giphy.com/media/l41lGAcThnMc29u2Q/giphy.gif')

    @commands.command(aliases=['hypu', 'train'])
    async def hype(self, ctx):
        '''HYPE TRAIN CHOO CHOO'''
        hypu = ['https://cdn.discordapp.com/attachments/102817255661772800/219514281136357376/tumblr_nr6ndeEpus1u21ng6o1_540.gif',
                'https://cdn.discordapp.com/attachments/102817255661772800/219518372839161859/tumblr_n1h2afSbCu1ttmhgqo1_500.gif',
                'https://gfycat.com/HairyFloweryBarebirdbat',
                'https://i.imgur.com/PFAQSLA.gif',
                'https://abload.de/img/ezgif-32008219442iq0i.gif',
                'https://i.imgur.com/vOVwq5o.jpg',
                'https://i.imgur.com/Ki12X4j.jpg',
                'https://media.giphy.com/media/b1o4elYH8Tqjm/giphy.gif']
        msg = f':train2: CHOO CHOO {random.choice(hypu)}'
        await ctx.send(msg)

    @commands.command()
    async def xkcd(self, ctx, *searchterm: str):
        '''Shows the latest or a random XKCD comic
        Example:
        -----------
        :xkcd
        :xkcd random
        '''
        apiUrl = 'https://xkcd.com{}info.0.json'
        async with aiohttp.ClientSession() as cs:
            async with cs.get(apiUrl.format('/')) as r:
                js = await r.json()
                if ''.join(searchterm) == 'random':
                    # NOTE(review): lower bound 0 can pick comic #0, which
                    # does not exist — should probably be randint(1, ...)
                    randomComic = random.randint(0, js['num'])
                    async with cs.get(apiUrl.format('/' + str(randomComic) + '/')) as r:
                        if r.status == 200:
                            js = await r.json()
                comicUrl = 'https://xkcd.com/{}/'.format(js['num'])
                date = '{}.{}.{}'.format(js['day'], js['month'], js['year'])
                msg = '**{}**\n{}\nAlt Text:```{}```XKCD Link: <{}> ({})'.format(js['safe_title'], js['img'], js['alt'], comicUrl, date)
                await ctx.send(msg)

    @commands.command(aliases=['tag'])
    async def tags(self, ctx, command: str, *arg):
        '''Creates tags or displays them
        Usage:
        -----------
        :tags COMMAND
            Sends a random image stored under that command
        :tags add COMMAND IMAGEURL
            Adds the given image to the given command
        :tags del ID
            Deletes the entry with the given ID; restricted to moderators
            and the entry's creator
        :tags list
            Shows the full list of commands and their links
        '''
        with sqlite3.connect(self.db) as con:
            c = con.cursor()
            if command == 'add' or command == 'new':
                if len(arg) > 1:
                    command = arg[0].lower()
                    content = list(arg[1:])
                    c.execute('INSERT INTO "reactions" ("command","url","author") VALUES (?, ?, ?)', (command, ' '.join(content), str(ctx.message.author)))
                    con.commit()
                    await ctx.send(':ok: Tag **{}** hinzugefügt!'.format(arg[0].lower()))
            elif command == 'del' or command == 'rm':
                # non-owners may only delete rows they authored themselves
                if await ctx.bot.is_owner(ctx.author):
                    c.execute('DELETE FROM "reactions" WHERE "id" in (?)', (int(arg[0]), ))
                else:
                    c.execute('DELETE FROM "reactions" WHERE "id" in (?) AND "author" IN (?)', (int(arg[0]), str(ctx.message.author)))
                con.commit()
                # NOTE(review): success is reported even if no row matched
                await ctx.send(':put_litter_in_its_place: Tag-ID #{} gelöscht!'.format(arg[0].lower()))
            elif command == 'list':
                lst = c.execute('SELECT * FROM "reactions"')
                msg = ''
                for i in lst:
                    msg += '**ID:** {:>3} | **Command:** {:>15} | **Author:** {}\n'.format(i[0], i[1], i[3])
                await ctx.send(msg)
            else:
                lst = c.execute('SELECT * FROM "reactions" WHERE "command" LIKE (?)', (command,))
                reaction = random.choice(lst.fetchall())
                await ctx.send(reaction[2])
            c.close()

    @commands.command(aliases=['witz', 'joke'])
    async def pun(self, ctx):
        '''Because everyone likes bad jokes'''
        puns = ['Was sagt das eine Streichholz zum anderen Streichholz?\n Komm, lass uns durchbrennen',
                'Wieviele Deutsche braucht man um eine Glühbirne zu wechseln?\n Einen, wir sind humorlos und effizient.',
                'Wo wohnt die Katze?\n Im Miezhaus.',
                'Wie begrüßen sich zwei plastische Chirurgen?\n "Was machst du denn heute für ein Gesicht?"',
                'Warum essen Veganer kein Huhn?\n Könnte Ei enthalten',
                '85% der Frauen finden ihren Arsch zu dick, 10% zu dünn, 5% finden ihn so ok, wie er ist und sind froh, dass sie ihn geheiratet haben...',
                'Meine Freundin meint, ich wär neugierig...\n...zumindest\' steht das in ihrem Tagebuch.',
                '"Schatz, Ich muss mein T-Shirt waschen! Welches Waschmaschinen Programm soll ich nehmen?" - "Was steht denn auf dem T-Shirt drauf?"\n "Slayer!"',
                'Gestern erzählte ich meinem Freund, dass ich schon immer dieses Ding aus Harry Potter reiten wollte.\n"einen Besen?" "nein, Hermine."',
                'Warum gehen Ameisen nicht in die Kirche?\nSie sind in Sekten.',
                'Was steht auf dem Grabstein eines Mathematikers?\n"Damit hat er nicht gerechnet."',
                'Wenn ein Yogalehrer seine Beine senkrecht nach oben streckt und dabei furzt, welche Yoga Figur stellt er da?\n Eine Duftkerze',
                'Warum ging der Luftballon kaputt?\n Aus Platzgründen.',
                'Ich wollte Spiderman anrufen, aber er hatte kein Netz.',
                'Was vermisst eine Schraube am meisten? Einen Vater']
        emojis = [':laughing:', ':smile:', ':joy:', ':sob:', ':rofl:']
        msg = f'{random.choice(emojis)} {random.choice(puns)}'
        await ctx.send(msg)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(fun(bot))
added funny joke 🤐
import random
import urllib.parse
import sqlite3
import asyncio
import aiohttp
import discord
from discord.ext import commands
import loadconfig
class fun(commands.Cog):
    """Assorted fun commands: meme replies, random numbers, cat pictures,
    XKCD comics, user-created image tags, and bad jokes.

    Docstrings were originally German; translated to English here.
    User-facing message strings remain German on purpose.
    """

    # SQLite database file used by the `tags` command
    db = 'reaction.db'

    def __init__(self, bot):
        self.bot = bot

    async def cog_command_error(self, ctx, error):
        # minimal cog-level error handler: log to stdout only
        print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))

    def userOnline(self, memberList):
        """Return all non-bot members whose status is exactly `online`
        (idle/dnd/offline members are excluded)."""
        online = []
        for i in memberList:
            if i.status == discord.Status.online and i.bot == False:
                online.append(i)
        return online

    @commands.command(aliases=['javascript', 'nodejs', 'js'])
    async def java(self, ctx):
        '''Because Java != Javascript'''
        await ctx.send(':interrobang: Meintest du jQuery, Javascript oder Node.js? https://abload.de/img/2016-05-102130191kzpu.png')

    @commands.command(aliases=['c++', 'c#', 'objective-c'])
    async def csharp(self, ctx):
        '''How is anyone supposed to keep these apart???'''
        await ctx.send(':interrobang: Meintest du C, C++, C# oder Objective-C? https://i.imgur.com/Nd4aAXO.png')

    @commands.command()
    async def praise(self, ctx):
        '''Praise the Sun'''
        await ctx.send('https://i.imgur.com/K8ySn3e.gif')

    @commands.command()
    async def css(self, ctx):
        '''Counter Strike: Source'''
        await ctx.send('http://i.imgur.com/TgPKFTz.gif')

    @commands.command()
    async def countdown(self, ctx):
        '''It's the final countdown'''
        countdown = ['five', 'four', 'three', 'two', 'one']
        for num in countdown:
            await ctx.send('**:{0}:**'.format(num))
            await asyncio.sleep(1)
        await ctx.send('**:ok:** DING DING DING')

    @commands.command(aliases=['cat', 'randomcat'])
    async def neko(self, ctx):
        '''Random cat pictures nyan~'''
        #http://discordpy.readthedocs.io/en/latest/faq.html#what-does-blocking-mean
        async with aiohttp.ClientSession() as cs:
            async with cs.get('http://aws.random.cat/meow') as r:
                res = await r.json()
                emojis = [':cat2: ', ':cat: ', ':heart_eyes_cat: ']
                await ctx.send(random.choice(emojis) + res['file'])

    @commands.command(aliases=['rand'])
    async def random(self, ctx, *arg):
        '''Outputs a random number or member
        Usage:
        -----------
        :random
            Random number between 1 and 100
        :random coin
            Flips a coin (heads or tails)
        :random 6
            Random number between 1 and 6
        :random 10 20
            Random number between 10 and 20
        :random user
            Picks a random member that is currently online
        :random choice Dani Eddy Shinobu
            Picks one name from the given list
        '''
        if ctx.invoked_subcommand is None:
            if not arg:
                start = 1
                end = 100
            elif arg[0] == 'flip' or arg[0] == 'coin':
                coin = ['Kopf', 'Zahl']
                await ctx.send(f':arrows_counterclockwise: {random.choice(coin)}')
                return
            elif arg[0] == 'choice':
                choices = list(arg)
                choices.pop(0)  # drop the 'choice' keyword itself
                await ctx.send(f':congratulations: The winner is {random.choice(choices)}')
                return
            elif arg[0] == 'user':
                online = self.userOnline(ctx.guild.members)
                randomuser = random.choice(online)
                # only mention the winner if the invoker could mention anyway
                if ctx.channel.permissions_for(ctx.author).mention_everyone:
                    user = randomuser.mention
                else:
                    user = randomuser.display_name
                await ctx.send(f':congratulations: The winner is {user}')
                return
            elif len(arg) == 1:
                start = 1
                end = int(arg[0])
            elif len(arg) == 2:
                start = int(arg[0])
                end = int(arg[1])
            # NOTE(review): non-numeric args reach int() and raise ValueError,
            # which is only printed by cog_command_error — no user feedback
            await ctx.send(f'**:arrows_counterclockwise:** Zufällige Zahl ({start} - {end}): {random.randint(start, end)}')

    @commands.command()
    async def steinigt(self, ctx, member:str):
        '''Monty Python'''
        await ctx.send(f'R.I.P. {member}\nhttps://media.giphy.com/media/l41lGAcThnMc29u2Q/giphy.gif')

    @commands.command(aliases=['hypu', 'train'])
    async def hype(self, ctx):
        '''HYPE TRAIN CHOO CHOO'''
        hypu = ['https://cdn.discordapp.com/attachments/102817255661772800/219514281136357376/tumblr_nr6ndeEpus1u21ng6o1_540.gif',
                'https://cdn.discordapp.com/attachments/102817255661772800/219518372839161859/tumblr_n1h2afSbCu1ttmhgqo1_500.gif',
                'https://gfycat.com/HairyFloweryBarebirdbat',
                'https://i.imgur.com/PFAQSLA.gif',
                'https://abload.de/img/ezgif-32008219442iq0i.gif',
                'https://i.imgur.com/vOVwq5o.jpg',
                'https://i.imgur.com/Ki12X4j.jpg',
                'https://media.giphy.com/media/b1o4elYH8Tqjm/giphy.gif']
        msg = f':train2: CHOO CHOO {random.choice(hypu)}'
        await ctx.send(msg)

    @commands.command()
    async def xkcd(self, ctx, *searchterm: str):
        '''Shows the latest or a random XKCD comic
        Example:
        -----------
        :xkcd
        :xkcd random
        '''
        apiUrl = 'https://xkcd.com{}info.0.json'
        async with aiohttp.ClientSession() as cs:
            async with cs.get(apiUrl.format('/')) as r:
                js = await r.json()
                if ''.join(searchterm) == 'random':
                    # NOTE(review): lower bound 0 can pick comic #0, which
                    # does not exist — should probably be randint(1, ...)
                    randomComic = random.randint(0, js['num'])
                    async with cs.get(apiUrl.format('/' + str(randomComic) + '/')) as r:
                        if r.status == 200:
                            js = await r.json()
                comicUrl = 'https://xkcd.com/{}/'.format(js['num'])
                date = '{}.{}.{}'.format(js['day'], js['month'], js['year'])
                msg = '**{}**\n{}\nAlt Text:```{}```XKCD Link: <{}> ({})'.format(js['safe_title'], js['img'], js['alt'], comicUrl, date)
                await ctx.send(msg)

    @commands.command(aliases=['tag'])
    async def tags(self, ctx, command: str, *arg):
        '''Creates tags or displays them
        Usage:
        -----------
        :tags COMMAND
            Sends a random image stored under that command
        :tags add COMMAND IMAGEURL
            Adds the given image to the given command
        :tags del ID
            Deletes the entry with the given ID; restricted to moderators
            and the entry's creator
        :tags list
            Shows the full list of commands and their links
        '''
        with sqlite3.connect(self.db) as con:
            c = con.cursor()
            if command == 'add' or command == 'new':
                if len(arg) > 1:
                    command = arg[0].lower()
                    content = list(arg[1:])
                    c.execute('INSERT INTO "reactions" ("command","url","author") VALUES (?, ?, ?)', (command, ' '.join(content), str(ctx.message.author)))
                    con.commit()
                    await ctx.send(':ok: Tag **{}** hinzugefügt!'.format(arg[0].lower()))
            elif command == 'del' or command == 'rm':
                # non-owners may only delete rows they authored themselves
                if await ctx.bot.is_owner(ctx.author):
                    c.execute('DELETE FROM "reactions" WHERE "id" in (?)', (int(arg[0]), ))
                else:
                    c.execute('DELETE FROM "reactions" WHERE "id" in (?) AND "author" IN (?)', (int(arg[0]), str(ctx.message.author)))
                con.commit()
                # NOTE(review): success is reported even if no row matched
                await ctx.send(':put_litter_in_its_place: Tag-ID #{} gelöscht!'.format(arg[0].lower()))
            elif command == 'list':
                lst = c.execute('SELECT * FROM "reactions"')
                msg = ''
                for i in lst:
                    msg += '**ID:** {:>3} | **Command:** {:>15} | **Author:** {}\n'.format(i[0], i[1], i[3])
                await ctx.send(msg)
            else:
                lst = c.execute('SELECT * FROM "reactions" WHERE "command" LIKE (?)', (command,))
                reaction = random.choice(lst.fetchall())
                await ctx.send(reaction[2])
            c.close()

    @commands.command(aliases=['witz', 'joke'])
    async def pun(self, ctx):
        '''Because everyone likes bad jokes'''
        puns = ['Was sagt das eine Streichholz zum anderen Streichholz?\n Komm, lass uns durchbrennen',
                'Wieviele Deutsche braucht man um eine Glühbirne zu wechseln?\n Einen, wir sind humorlos und effizient.',
                'Wo wohnt die Katze?\n Im Miezhaus.',
                'Wie begrüßen sich zwei plastische Chirurgen?\n "Was machst du denn heute für ein Gesicht?"',
                'Warum essen Veganer kein Huhn?\n Könnte Ei enthalten',
                '85% der Frauen finden ihren Arsch zu dick, 10% zu dünn, 5% finden ihn so ok, wie er ist und sind froh, dass sie ihn geheiratet haben...',
                'Meine Freundin meint, ich wär neugierig...\n...zumindest\' steht das in ihrem Tagebuch.',
                '"Schatz, Ich muss mein T-Shirt waschen! Welches Waschmaschinen Programm soll ich nehmen?" - "Was steht denn auf dem T-Shirt drauf?"\n "Slayer!"',
                'Gestern erzählte ich meinem Freund, dass ich schon immer dieses Ding aus Harry Potter reiten wollte.\n"einen Besen?" "nein, Hermine."',
                'Warum gehen Ameisen nicht in die Kirche?\nSie sind in Sekten.',
                'Was steht auf dem Grabstein eines Mathematikers?\n"Damit hat er nicht gerechnet."',
                'Wenn ein Yogalehrer seine Beine senkrecht nach oben streckt und dabei furzt, welche Yoga Figur stellt er da?\n Eine Duftkerze',
                'Warum ging der Luftballon kaputt?\n Aus Platzgründen.',
                'Ich wollte Spiderman anrufen, aber er hatte kein Netz.',
                'Was vermisst eine Schraube am meisten? Einen Vater',
                'Geht ein Panda über die Straße. Bam....Bus!']
        emojis = [':laughing:', ':smile:', ':joy:', ':sob:', ':rofl:']
        msg = f'{random.choice(emojis)} {random.choice(puns)}'
        await ctx.send(msg)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(fun(bot))
|
import random
from discord.ext.commands import bot
class Fun():
    """Fun commands: dice rolling, waifu ratings, coin flips, and
    full-width "aesthetic" text conversion."""

    def __init__(self, Bot):
        self.bot = Bot

    @bot.command(pass_context=True)
    async def roll(self, ctx, die):
        """
        Rolls a die using ndx format.
        Usage:
            {command_prefix}roll ndx
        Example:
            .roll 2d20 # Rolls two D20s
        """
        # TODO: Change to ndx format
        dice = 0
        # guard malformed input such as "d" or "5" before indexing die[1]
        if len(die) < 2:
            return await self.bot.say("Use the format 'ndx'.")
        if die[0].isdigit():
            # BUG FIX: was `die[0] == 0` — comparing a str to an int is always
            # False, so a zero multiplier slipped through. Compare to '0'.
            if die[1].isdigit() or die[0] == '0':
                return await self.bot.say("I only support multipliers from 1-9")
            multiplier = int(die[0])
        else:
            multiplier = 1
        if die[1].lower() != "d" and die[0].lower() != "d":
            return await self.bot.say("Use the format 'ndx'.")
        options = (4, 6, 8, 10, 12, 20, 100)
        for option in options:
            if die.endswith(str(option)):
                dice = option
        if dice == 0:
            return await self.bot.say("You didn't give a die to use.")
        rolls = []
        # a d100 rolls in steps of 10 (10, 20, ..., 100)
        if dice == 100:
            step = 10
        else:
            step = 1
        total = 0
        if multiplier > 1:
            for x in range(multiplier):
                rolls.append(random.randrange(step, dice+1, step))
            for r in rolls:
                total += r
            return await self.bot.say("{} rolled **{}**. Totaling **{}**".format(ctx.message.author.mention, rolls, total))
        else:
            roll = random.randrange(step, dice + 1, step)
            return await self.bot.say("{} rolled a **{}**".format(ctx.message.author.mention, roll))

    @bot.command(pass_context=True)
    async def suck(self, ctx):
        """
        Sucks the mentioned user ;)
        Usage:
            {command_prefix}suck @user#9999
        """
        if len(ctx.message.mentions) < 1:
            return await self.bot.say("You didn't mention someone for me to suck")
        user = ctx.message.mentions[0]
        return await self.bot.say(":eggplant: :sweat_drops: :tongue: {}".format(user.mention))

    @bot.command(pass_context=True, aliases=["wf"])
    async def waifurate(self, ctx):
        """
        Rates the mentioned waifu(s)
        Usage:
            {command_prefix}waifurate @user#9999
        """
        mentions = ctx.message.mentions
        if not mentions:
            return await self.bot.reply("You didn't mention anyone for me to rate.", delete_after=10)
        rating = random.randrange(1, 11)
        # map the 1-10 rating to a reaction emoji
        if rating <= 2:
            emoji = ":sob:"
        elif rating <= 4:
            emoji = ":disappointed:"
        elif rating <= 6:
            emoji = ":thinking:"
        elif rating <= 8:
            emoji = ":blush:"
        elif rating == 9:
            emoji = ":kissing_heart:"
        else:
            emoji = ":heart_eyes:"
        if len(mentions) > 1:
            return await self.bot.say("Oh poly waifu rating? :smirk: Your combined waifu rating is {}/10. {}".format(rating, emoji))
        else:
            return await self.bot.say("Oh that's your waifu? I rate them a {}/10. {}".format(rating, emoji))

    @bot.command(pass_context=True, aliases=["cf"])
    async def coinflip(self, ctx):
        """Flip a coin"""
        return await self.bot.reply("the coin landed on {}!".format(random.choice(["heads", "tails"])))

    @bot.command(pass_context=True)
    async def aesthetics(self, ctx, *convert):
        """Converts text to be more a e s t h e t i c s"""
        # map printable ASCII (0x21-0x7E) to full-width forms; space -> ideographic space
        WIDE_MAP = dict((i, i + 0xFEE0) for i in range(0x21, 0x7F))
        WIDE_MAP[0x20] = 0x3000
        convert = str(' '.join(convert)).translate(WIDE_MAP)
        return await self.bot.say(convert)
def setup(Bot):
    """Entry point used by discord.py's extension loader."""
    Bot.add_cog(Fun(Bot))
suck rewritten and decent
import discord
import random
from discord.ext.commands import bot
class Fun():
    """Fun commands: dice rolling, waifu ratings, coin flips, and
    full-width "aesthetic" text conversion."""

    def __init__(self, Bot):
        self.bot = Bot

    @bot.command(pass_context=True)
    async def roll(self, ctx, die):
        """
        Rolls a die using ndx format.
        Usage:
            {command_prefix}roll ndx
        Example:
            .roll 2d20 # Rolls two D20s
        """
        # TODO: Change to ndx format
        dice = 0
        # guard malformed input such as "d" or "5" before indexing die[1]
        if len(die) < 2:
            return await self.bot.say("Use the format 'ndx'.")
        if die[0].isdigit():
            # BUG FIX: was `die[0] == 0` — comparing a str to an int is always
            # False, so a zero multiplier slipped through. Compare to '0'.
            if die[1].isdigit() or die[0] == '0':
                return await self.bot.say("I only support multipliers from 1-9")
            multiplier = int(die[0])
        else:
            multiplier = 1
        if die[1].lower() != "d" and die[0].lower() != "d":
            return await self.bot.say("Use the format 'ndx'.")
        options = (4, 6, 8, 10, 12, 20, 100)
        for option in options:
            if die.endswith(str(option)):
                dice = option
        if dice == 0:
            return await self.bot.say("You didn't give a die to use.")
        rolls = []
        # a d100 rolls in steps of 10 (10, 20, ..., 100)
        if dice == 100:
            step = 10
        else:
            step = 1
        total = 0
        if multiplier > 1:
            for x in range(multiplier):
                rolls.append(random.randrange(step, dice+1, step))
            for r in rolls:
                total += r
            return await self.bot.say("{} rolled **{}**. Totaling **{}**".format(ctx.message.author.mention, rolls, total))
        else:
            roll = random.randrange(step, dice + 1, step)
            return await self.bot.say("{} rolled a **{}**".format(ctx.message.author.mention, roll))

    @bot.command(pass_context=True)
    async def suck(self, ctx, *, user: discord.User = None):
        """
        Sucks the mentioned user ;)
        Usage:
            {command_prefix}suck @RoxBot#4170
            {command_prefix}suck RoxBot
        """
        if not user:
            return await self.bot.say("You didn't mention someone for me to suck")
        return await self.bot.say(":eggplant: :sweat_drops: :tongue: *{} sucks {}*".format(self.bot.user.name, user.name))

    @bot.command(pass_context=True, aliases=["wf"])
    async def waifurate(self, ctx):
        """
        Rates the mentioned waifu(s)
        Usage:
            {command_prefix}waifurate @user#9999
        """
        mentions = ctx.message.mentions
        if not mentions:
            return await self.bot.reply("You didn't mention anyone for me to rate.", delete_after=10)
        rating = random.randrange(1, 11)
        # map the 1-10 rating to a reaction emoji
        if rating <= 2:
            emoji = ":sob:"
        elif rating <= 4:
            emoji = ":disappointed:"
        elif rating <= 6:
            emoji = ":thinking:"
        elif rating <= 8:
            emoji = ":blush:"
        elif rating == 9:
            emoji = ":kissing_heart:"
        else:
            emoji = ":heart_eyes:"
        if len(mentions) > 1:
            return await self.bot.say("Oh poly waifu rating? :smirk: Your combined waifu rating is {}/10. {}".format(rating, emoji))
        else:
            return await self.bot.say("Oh that's your waifu? I rate them a {}/10. {}".format(rating, emoji))

    @bot.command(pass_context=True, aliases=["cf"])
    async def coinflip(self, ctx):
        """Flip a coin"""
        return await self.bot.reply("the coin landed on {}!".format(random.choice(["heads", "tails"])))

    @bot.command(pass_context=True)
    async def aesthetics(self, ctx, *convert):
        """Converts text to be more a e s t h e t i c s"""
        # map printable ASCII (0x21-0x7E) to full-width forms; space -> ideographic space
        WIDE_MAP = dict((i, i + 0xFEE0) for i in range(0x21, 0x7F))
        WIDE_MAP[0x20] = 0x3000
        convert = str(' '.join(convert)).translate(WIDE_MAP)
        return await self.bot.say(convert)
def setup(Bot):
    """Extension entry point used by discord.py's load_extension."""
    Bot.add_cog(Fun(Bot))
#!/bin/env python3
import discord
from discord.ext import commands
class Moderator:
    """Cog providing basic moderation commands: kick, ban, unban and cleanup."""

    def __init__(self, bot):
        self.bot = bot

    async def on_command_error(self, ctx, error):
        # Converter failures (user could not be resolved) surface as
        # BadArgument; report those politely. Other errors propagate.
        if isinstance(error, commands.BadArgument):
            # FIX: "can not" -> "cannot" (typo in the user-facing message).
            return await ctx.send(f'Sorry, I cannot kick/ban that user.')

    @commands.command(aliases=['k'])
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, user: discord.User, *, reason=None):
        """ Kick a user from the server """
        await ctx.guild.kick(user, reason=reason)
        await ctx.send(f'User `{user}` kicked.\n'
                       f'Reason: `{reason}`.')

    @commands.command(aliases=['kb'])
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, user: discord.User, *, reason=None):
        """ Ban a user from the server """
        # delete_message_days=0: banning should not wipe message history.
        await ctx.guild.ban(user, reason=reason, delete_message_days=0)
        await ctx.send(f'User `{user}` banned.\n'
                       f'Reason: `{reason}`.')

    @commands.command(aliases=['ub'])
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, user: discord.User, *, reason=None):
        """ Unban a user from the server
        Since you can't highlight them anymore use their name#discrim """
        await ctx.guild.unban(user, reason=reason)
        await ctx.send(f'User `{user}` unbanned.\n'
                       f'Reason: `{reason}`.')

    @commands.command(aliases=['purge'])
    @commands.has_permissions(manage_messages=True)
    async def clean(self, ctx, num_msg: int):
        """ Remove bot messages from the last X messages """
        if num_msg > 100:
            return await ctx.send('Sorry, number of messages to be deleted must not exceed 100.')

        # Check so that only bot msgs are removed
        def check(message):
            return message.author.id == self.bot.user.id

        try:
            await ctx.channel.purge(check=check, limit=num_msg)
        except Exception as e:
            await ctx.send(f'Failed to delete messages.\n ```py\n{e}```')
def setup(bot):
    """Extension entry point: register the Moderator cog."""
    cog = Moderator(bot)
    bot.add_cog(cog)
Typo fix: "can not" -> "cannot"
#!/bin/env python3
import discord
from discord.ext import commands
class Moderator:
    """Moderation commands: kicking, banning, unbanning and bot-message cleanup."""

    def __init__(self, bot):
        self.bot = bot

    async def on_command_error(self, ctx, error):
        # Only converter failures are handled here; everything else propagates.
        if not isinstance(error, commands.BadArgument):
            return
        return await ctx.send(f'Sorry, I cannot kick/ban that user.')

    @commands.command(aliases=['k'])
    @commands.has_permissions(kick_members=True)
    async def kick(self, ctx, user: discord.User, *, reason=None):
        """ Kick a user from the server """
        await ctx.guild.kick(user, reason=reason)
        await ctx.send(f'User `{user}` kicked.\nReason: `{reason}`.')

    @commands.command(aliases=['kb'])
    @commands.has_permissions(ban_members=True)
    async def ban(self, ctx, user: discord.User, *, reason=None):
        """ Ban a user from the server """
        # Keep the banned user's message history (delete_message_days=0).
        await ctx.guild.ban(user, reason=reason, delete_message_days=0)
        await ctx.send(f'User `{user}` banned.\nReason: `{reason}`.')

    @commands.command(aliases=['ub'])
    @commands.has_permissions(ban_members=True)
    async def unban(self, ctx, user: discord.User, *, reason=None):
        """ Unban a user from the server
        Since you can't highlight them anymore use their name#discrim """
        await ctx.guild.unban(user, reason=reason)
        await ctx.send(f'User `{user}` unbanned.\nReason: `{reason}`.')

    @commands.command(aliases=['purge'])
    @commands.has_permissions(manage_messages=True)
    async def clean(self, ctx, num_msg: int):
        """ Remove bot messages from the last X messages """
        if num_msg > 100:
            return await ctx.send('Sorry, number of messages to be deleted must not exceed 100.')

        def is_own_message(message):
            # Only this bot's own messages qualify for deletion.
            return message.author.id == self.bot.user.id

        try:
            await ctx.channel.purge(check=is_own_message, limit=num_msg)
        except Exception as exc:
            await ctx.send(f'Failed to delete messages.\n ```py\n{exc}```')
def setup(bot):
    """discord.py extension hook: attach the Moderator cog to *bot*."""
    bot.add_cog(Moderator(bot))
|
from discord.ext import commands
from .utils import checks, db, time, cache, formats
from collections import Counter, defaultdict
from inspect import cleandoc
import re
import json
import discord
import enum
import datetime
import asyncio
import argparse, shlex
import logging
# Module-level logger, named after this module so records are attributable.
log = logging.getLogger(__name__)
## Misc utilities
class Arguments(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of printing and exiting."""

    def error(self, message):
        # argparse's default error() writes usage to stderr and calls
        # sys.exit(); inside a bot command we need a catchable exception.
        raise RuntimeError(message)
class RaidMode(enum.Enum):
    """Raid-protection level; the value is persisted in guild_mod_config.raid_mode."""
    off = 0
    on = 1
    strict = 2
    def __str__(self):
        # Display as the bare member name, e.g. "strict", for user-facing output.
        return self.name
## Tables
class GuildConfig(db.Table, table_name='guild_mod_config'):
    """Schema for per-guild moderation settings (one row per guild)."""
    id = db.Column(db.Integer(big=True), primary_key=True)  # guild ID
    raid_mode = db.Column(db.Integer(small=True))  # RaidMode enum value
    broadcast_channel = db.Column(db.Integer(big=True))  # join-broadcast channel ID
    mention_count = db.Column(db.Integer(small=True))  # mention-spam auto-ban threshold
    safe_mention_channel_ids = db.Column(db.Array(db.Integer(big=True)))  # channels exempt from mention-spam bans
## Configuration
class ModConfig:
    """In-memory view of one guild_mod_config row."""

    __slots__ = ('raid_mode', 'id', 'bot', 'broadcast_channel_id', 'mention_count', 'safe_mention_channel_ids')

    @classmethod
    async def from_record(cls, record, bot):
        """Build a ModConfig from a mapping-like database row."""
        self = cls()
        self.bot = bot
        self.id = record['id']
        self.raid_mode = record['raid_mode']
        self.mention_count = record['mention_count']
        self.broadcast_channel_id = record['broadcast_channel']
        # A NULL array comes back as None; normalise to an empty set.
        self.safe_mention_channel_ids = set(record['safe_mention_channel_ids'] or [])
        return self

    @property
    def broadcast_channel(self):
        """Resolved broadcast channel object, or a falsy value if unavailable."""
        guild = self.bot.get_guild(self.id)
        return guild and guild.get_channel(self.broadcast_channel_id)
## Converters
def can_execute_action(ctx, user, target):
    """True when *user* may act on *target*: bot owner, guild owner, or higher role."""
    if user.id == ctx.bot.owner_id:
        return True
    if user == ctx.guild.owner:
        return True
    return user.top_role > target.top_role
class MemberID(commands.Converter):
    """Resolve an argument to a member ID, accepting raw IDs for absent members."""

    async def convert(self, ctx, argument):
        try:
            member = await commands.MemberConverter().convert(ctx, argument)
        except commands.BadArgument:
            # Not resolvable to a guild member -- accept a plain numeric ID.
            try:
                return int(argument, base=10)
            except ValueError:
                raise commands.BadArgument(f"{argument} is not a valid member or member ID.") from None
        # Member resolved: enforce role hierarchy before allowing the action.
        if not can_execute_action(ctx, ctx.author, member):
            raise commands.BadArgument('You cannot do this action on this user due to role hierarchy.')
        return member.id
class BannedMember(commands.Converter):
    """Find a guild ban-list entry by user ID or by the name#discrim string."""

    async def convert(self, ctx, argument):
        bans = await ctx.guild.bans()
        try:
            user_id = int(argument, base=10)
        except ValueError:
            # Not numeric: match on the stringified user ("name#discrim").
            match = discord.utils.find(lambda entry: str(entry.user) == argument, bans)
        else:
            match = discord.utils.find(lambda entry: entry.user.id == user_id, bans)
        if match is None:
            raise commands.BadArgument("Not a valid previously-banned member.")
        return match
class ActionReason(commands.Converter):
    """Prefix a moderation reason with the invoking moderator's tag and ID."""

    async def convert(self, ctx, argument):
        ret = f'{ctx.author} (ID: {ctx.author.id}): {argument}'
        if len(ret) > 512:
            # Audit-log reasons cap at 512 characters. The room left for the
            # user-supplied part is 512 minus the prefix length, i.e.
            # 512 - (len(ret) - len(argument)).
            # BUGFIX: the original subtracted len(argument) instead of adding
            # it, reporting a bogus (often negative) maximum.
            reason_max = 512 - len(ret) + len(argument)
            raise commands.BadArgument(f'reason is too long ({len(argument)}/{reason_max})')
        return ret
## Spam detector
# TODO: add this to d.py maybe
class CooldownByContent(commands.CooldownMapping):
    """Cooldown mapping bucketed by (channel, message content), so identical
    messages share one bucket regardless of author -- catches alternating
    copy-paste spam bots."""
    def _bucket_key(self, message):
        return (message.channel.id, message.content)
class SpamChecker:
    """Per-guild spam heuristics.

    Tracks three signals:
    * one member sending 15+ messages within 17 seconds,
    * the same content appearing 15+ times within 17 seconds
      (catches alternating spam bots where the first check misses), and
    * 10+ member joins within a 10 second window (logging only).

    From experience these values aren't reached unless someone is actively
    spamming.
    """

    def __init__(self):
        self.by_content = CooldownByContent.from_cooldown(15, 17.0, commands.BucketType.member)
        self.by_user = commands.CooldownMapping.from_cooldown(15, 17.0, commands.BucketType.user)
        self.by_join = commands.Cooldown(10, 10.0, commands.BucketType.default)

    def is_spamming(self, message):
        """True when *message* trips either the per-user or per-content limit."""
        if message.guild is None:
            return False
        if self.by_user.get_bucket(message).update_rate_limit():
            return True
        if self.by_content.get_bucket(message).update_rate_limit():
            return True
        return False

    def is_fast_join(self):
        # Truthy (the retry-after value) when joins exceed the window.
        return self.by_join.update_rate_limit()
## The actual cog
class Mod(commands.Cog):
"""Moderation related commands."""
    def __init__(self, bot):
        self.bot = bot
        # guild_id: SpamChecker
        # defaultdict lazily creates one spam tracker per guild on first access.
        self._spam_check = defaultdict(SpamChecker)
    def __repr__(self):
        # Short fixed tag; shown e.g. in cog listings and debug output.
        return '<cogs.Mod>'
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send(error)
elif isinstance(error, commands.CommandInvokeError):
original = error.original
if isinstance(original, discord.Forbidden):
await ctx.send('I do not have permission to execute this action.')
elif isinstance(original, discord.NotFound):
await ctx.send(f'This entity does not exist: {original.text}')
elif isinstance(original, discord.HTTPException):
await ctx.send('Somehow, an unexpected error occurred. Try again later?')
    @cache.cache()
    async def get_guild_config(self, guild_id):
        """Fetch (and cache) the mod config for *guild_id*; None when unconfigured."""
        query = """SELECT * FROM guild_mod_config WHERE id=$1;"""
        async with self.bot.pool.acquire() as con:
            record = await con.fetchrow(query, guild_id)
            if record is not None:
                return await ModConfig.from_record(record, self.bot)
            return None
async def check_raid(self, config, guild_id, member, message):
if config.raid_mode != RaidMode.strict.value:
return
checker = self._spam_check[guild_id]
if not checker.is_spamming(message):
return
try:
await member.ban(reason='Auto-ban from spam (strict raid mode ban)')
except discord.HTTPException:
log.info(f'[Raid Mode] Failed to ban {member} (ID: {member.id}) from server {member.guild} via strict mode.')
else:
log.info(f'[Raid Mode] Banned {member} (ID: {member.id}) from server {member.guild} via strict mode.')
@commands.Cog.listener()
async def on_message(self, message):
author = message.author
if author.id in (self.bot.user.id, self.bot.owner_id):
return
if message.guild is None:
return
if not isinstance(author, discord.Member):
return
if author.bot:
return
# we're going to ignore members with roles
if len(author.roles) > 1:
return
guild_id = message.guild.id
config = await self.get_guild_config(guild_id)
if config is None:
return
# check for raid mode stuff
await self.check_raid(config, guild_id, author, message)
# auto-ban tracking for mention spams begin here
if len(message.mentions) <= 3:
return
if not config.mention_count:
return
# check if it meets the thresholds required
mention_count = sum(not m.bot and m.id != author.id for m in message.mentions)
if mention_count < config.mention_count:
return
if message.channel.id in config.safe_mention_channel_ids:
return
try:
await author.ban(reason=f'Spamming mentions ({mention_count} mentions)')
except Exception as e:
log.info(f'Failed to autoban member {author} (ID: {author.id}) in guild ID {guild_id}')
else:
await message.channel.send(f'Banned {author} (ID: {author.id}) for spamming {mention_count} mentions.')
log.info(f'Member {author} (ID: {author.id}) has been autobanned from guild ID {guild_id}')
    @commands.Cog.listener()
    async def on_voice_state_update(self, user, before, after):
        # Runs the raid check when a member first connects to a voice channel.
        if not isinstance(user, discord.Member):
            return
        # joined a voice channel
        if before.channel is None and after.channel is not None:
            config = await self.get_guild_config(user.guild.id)
            if config is None:
                return
            # NOTE(review): check_raid's signature is (config, guild_id, member,
            # message), but this call passes the guild object and a datetime.
            # Under strict raid mode is_spamming() would then receive a
            # datetime, not a message -- looks like a latent bug; confirm
            # intended behaviour before relying on this path.
            await self.check_raid(config, user.guild, user, datetime.datetime.utcnow())
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Broadcast a colour-coded join embed while raid mode is enabled."""
        guild_id = member.guild.id
        config = await self.get_guild_config(guild_id)
        if config is None or not config.raid_mode:
            return
        now = datetime.datetime.utcnow()
        # these are the dates in minutes
        created = (now - member.created_at).total_seconds() // 60
        checker = self._spam_check[guild_id]
        # Do the broadcasted message to the channel
        # Colour code: red = fast-join burst, yellow = new account, green = normal.
        title = 'Member Joined'
        if checker.is_fast_join():
            colour = 0xdd5f53 # red
            if created < 30:
                title = 'Member Joined (Very New Member)'
        else:
            colour = 0x53dda4 # green
            if created < 30:
                colour = 0xdda453 # yellow
                title = 'Member Joined (Very New Member)'
        e = discord.Embed(title=title, colour=colour)
        e.timestamp = now
        e.set_author(name=str(member), icon_url=member.avatar_url)
        e.add_field(name='ID', value=member.id)
        e.add_field(name='Joined', value=member.joined_at)
        e.add_field(name='Created', value=time.human_timedelta(member.created_at), inline=False)
        # broadcast_channel resolves via the cached config; falsy if deleted.
        if config.broadcast_channel:
            await config.broadcast_channel.send(embed=e)
@commands.command(aliases=['newmembers'])
@commands.guild_only()
async def newusers(self, ctx, *, count=5):
"""Tells you the newest members of the server.
This is useful to check if any suspicious members have
joined.
The count parameter can only be up to 25.
"""
count = max(min(count, 25), 5)
if not ctx.guild.chunked:
await self.bot.request_offline_members(ctx.guild)
members = sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)[:count]
e = discord.Embed(title='New Members', colour=discord.Colour.green())
for member in members:
body = f'joined {time.human_timedelta(member.joined_at)}, created {time.human_timedelta(member.created_at)}'
e.add_field(name=f'{member} (ID: {member.id})', value=body, inline=False)
await ctx.send(embed=e)
    @commands.group(aliases=['raids'], invoke_without_command=True)
    @checks.is_mod()
    async def raid(self, ctx):
        """Controls raid mode on the server.
        Calling this command with no arguments will show the current raid
        mode information.
        You must have Manage Server permissions to use this command or
        its subcommands.
        """
        query = "SELECT raid_mode, broadcast_channel FROM guild_mod_config WHERE id=$1;"
        row = await ctx.db.fetchrow(query, ctx.guild.id)
        # No row means raid mode was never configured for this guild.
        if row is None:
            fmt = 'Raid Mode: off\nBroadcast Channel: None'
        else:
            ch = f'<#{row[1]}>' if row[1] else None
            fmt = f'Raid Mode: {RaidMode(row[0])}\nBroadcast Channel: {ch}'
        await ctx.send(fmt)
    @raid.command(name='on', aliases=['enable', 'enabled'])
    @checks.is_mod()
    async def raid_on(self, ctx, *, channel: discord.TextChannel = None):
        """Enables basic raid mode on the server.
        When enabled, server verification level is set to table flip
        levels and allows the bot to broadcast new members joining
        to a specified channel.
        If no channel is given, then the bot will broadcast join
        messages on the channel this command was used in.
        """
        channel = channel or ctx.channel
        # Raising verification is best-effort; raid mode still turns on.
        try:
            await ctx.guild.edit(verification_level=discord.VerificationLevel.high)
        except discord.HTTPException:
            await ctx.send('\N{WARNING SIGN} Could not set verification level.')
        # Upsert the guild row, then invalidate the cached config so the new
        # settings take effect immediately.
        query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
                   VALUES ($1, $2, $3) ON CONFLICT (id)
                   DO UPDATE SET
                        raid_mode = EXCLUDED.raid_mode,
                        broadcast_channel = EXCLUDED.broadcast_channel;
                """
        await ctx.db.execute(query, ctx.guild.id, RaidMode.on.value, channel.id)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send(f'Raid mode enabled. Broadcasting join messages to {channel.mention}.')
    @raid.command(name='off', aliases=['disable', 'disabled'])
    @checks.is_mod()
    async def raid_off(self, ctx):
        """Disables raid mode on the server.
        When disabled, the server verification levels are set
        back to Low levels and the bot will stop broadcasting
        join messages.
        """
        # Lowering verification is best-effort; raid mode is disabled regardless.
        try:
            await ctx.guild.edit(verification_level=discord.VerificationLevel.low)
        except discord.HTTPException:
            await ctx.send('\N{WARNING SIGN} Could not set verification level.')
        query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
                   VALUES ($1, $2, NULL) ON CONFLICT (id)
                   DO UPDATE SET
                        raid_mode = EXCLUDED.raid_mode,
                        broadcast_channel = NULL;
                """
        await ctx.db.execute(query, ctx.guild.id, RaidMode.off.value)
        # Drop the guild's spam tracker and cached config so state resets cleanly.
        self._spam_check.pop(ctx.guild.id, None)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send('Raid mode disabled. No longer broadcasting join messages.')
@raid.command(name='strict')
@checks.is_mod()
async def raid_strict(self, ctx, *, channel: discord.TextChannel = None):
"""Enables strict raid mode on the server.
Strict mode is similar to regular enabled raid mode, with the added
benefit of auto-banning members that are spamming. The threshold for
spamming depends on a per-content basis and also on a per-user basis
of 15 messages per 17 seconds.
If this is considered too strict, it is recommended to fall back to regular
raid mode.
"""
channel = channel or ctx.channel
perms = ctx.me.guild_permissions
if not (perms.kick_members and perms.ban_members):
return await ctx.send('\N{NO ENTRY SIGN} I do not have permissions to kick and ban members.')
try:
await ctx.guild.edit(verification_level=discord.VerificationLevel.high)
except discord.HTTPException:
await ctx.send('\N{WARNING SIGN} Could not set verification level.')
query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
VALUES ($1, $2, $3) ON CONFLICT (id)
DO UPDATE SET
raid_mode = EXCLUDED.raid_mode,
broadcast_channel = EXCLUDED.broadcast_channel;
"""
await ctx.db.execute(query, ctx.guild.id, RaidMode.strict.value, ctx.channel.id)
self.get_guild_config.invalidate(self, ctx.guild.id)
await ctx.send(f'Raid mode enabled strictly. Broadcasting join messages to {channel.mention}.')
async def _basic_cleanup_strategy(self, ctx, search):
count = 0
async for msg in ctx.history(limit=search, before=ctx.message):
if msg.author == ctx.me:
await msg.delete()
count += 1
return { 'Bot': count }
async def _complex_cleanup_strategy(self, ctx, search):
prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild)) # thanks startswith
def check(m):
return m.author == ctx.me or m.content.startswith(prefixes)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
@commands.command()
@checks.has_permissions(manage_messages=True)
async def cleanup(self, ctx, search=100):
"""Cleans up the bot's messages from the channel.
If a search number is specified, it searches that many messages to delete.
If the bot has Manage Messages permissions then it will try to delete
messages that look like they invoked the bot as well.
After the cleanup is completed, the bot will send you a message with
which people got their messages deleted and their count. This is useful
to see which users are spammers.
You must have Manage Messages permission to use this.
"""
strategy = self._basic_cleanup_strategy
if ctx.me.permissions_in(ctx.channel).manage_messages:
strategy = self._complex_cleanup_strategy
spammers = await strategy(ctx, search)
deleted = sum(spammers.values())
messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'- **{author}**: {count}' for author, count in spammers)
await ctx.send('\n'.join(messages), delete_after=10)
@commands.command()
@commands.guild_only()
@checks.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason: ActionReason = None):
"""Kicks a member from the server.
In order for this to work, the bot must have Kick Member permissions.
To use this command you must have Kick Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await member.kick(reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def ban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Bans a member from the server.
You can also ban from ID to ban regardless whether they're
in the server or not.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(discord.Object(id=member), reason=reason)
await ctx.send('\N{OK HAND SIGN}')
    @commands.command()
    @commands.guild_only()
    @checks.has_permissions(ban_members=True)
    async def massban(self, ctx, *, args):
        """Mass bans multiple members from the server.
        This command has a powerful "command line" syntax. To use this command
        you and the bot must both have Ban Members permission. **Every option is optional.**
        Users are only banned **if and only if** all conditions are met.
        The following options are valid.
        `--channel` or `-c`: Channel to search for message history.
        `--reason` or `-r`: The reason for the ban.
        `--regex`: Regex that usernames must match.
        `--created`: Matches users whose accounts were created less than specified minutes ago.
        `--joined`: Matches users that joined less than specified minutes ago.
        `--joined-before`: Matches users who joined before the member given.
        `--joined-after`: Matches users who joined after the member given.
        `--no-avatar`: Matches users who have no avatar. (no arguments)
        `--no-roles`: Matches users that have no role. (no arguments)
        `--show`: Show members instead of banning them (no arguments).
        Message history filters (Requires `--channel`):
        `--contains`: A substring to search for in the message.
        `--starts`: A substring to search if the message starts with.
        `--ends`: A substring to search if the message ends with.
        `--match`: A regex to match the message content to.
        `--search`: How many messages to search. Default 100. Max 2000.
        `--after`: Messages must come after this message ID.
        `--before`: Messages must come before this message ID.
        `--files`: Checks if the message has attachments (no arguments).
        `--embeds`: Checks if the message has embeds (no arguments).
        """
        # Build the option parser; Arguments raises RuntimeError instead of
        # exiting the process on bad input, which is reported to the channel.
        parser = Arguments(add_help=False, allow_abbrev=False)
        parser.add_argument('--channel', '-c')
        parser.add_argument('--reason', '-r')
        parser.add_argument('--search', type=int, default=100)
        parser.add_argument('--regex')
        parser.add_argument('--no-avatar', action='store_true')
        parser.add_argument('--no-roles', action='store_true')
        parser.add_argument('--created', type=int)
        parser.add_argument('--joined', type=int)
        parser.add_argument('--joined-before')
        parser.add_argument('--joined-after')
        parser.add_argument('--contains')
        parser.add_argument('--starts')
        parser.add_argument('--ends')
        parser.add_argument('--match')
        parser.add_argument('--show', action='store_true')
        # store_const with a callable const: the parsed flag itself doubles as
        # the predicate appended below.
        parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
        parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
        parser.add_argument('--after', type=int)
        parser.add_argument('--before', type=int)
        try:
            args = parser.parse_args(shlex.split(args))
        except Exception as e:
            return await ctx.send(str(e))
        members = []
        # Candidate selection: authors of matching messages when --channel is
        # given, otherwise the whole member list.
        if args.channel:
            channel = await commands.TextChannelConverter().convert(ctx, args.channel)
            before = args.before and discord.Object(id=args.before)
            after = args.after and discord.Object(id=args.after)
            predicates = []
            if args.contains:
                predicates.append(lambda m: args.contains in m.content)
            if args.starts:
                predicates.append(lambda m: m.content.startswith(args.starts))
            if args.ends:
                predicates.append(lambda m: m.content.endswith(args.ends))
            if args.match:
                try:
                    _match = re.compile(args.match)
                except re.error as e:
                    return await ctx.send(f'Invalid regex passed to `--match`: {e}')
                else:
                    # Bind the compiled regex as a default argument to avoid
                    # the late-binding closure pitfall.
                    predicates.append(lambda m, x=_match: x.match(m.content))
            if args.embeds:
                predicates.append(args.embeds)
            if args.files:
                predicates.append(args.files)
            async for message in channel.history(limit=min(max(1, args.search), 2000), before=before, after=after):
                if all(p(message) for p in predicates):
                    members.append(message.author)
        else:
            members = ctx.guild.members
        # member filters
        predicates = [
            lambda m: can_execute_action(ctx, ctx.author, m), # Only if applicable
            lambda m: not m.bot, # No bots
            lambda m: m.discriminator != '0000', # No deleted users
        ]
        if args.regex:
            try:
                _regex = re.compile(args.regex)
            except re.error as e:
                return await ctx.send(f'Invalid regex passed to `--regex`: {e}')
            else:
                predicates.append(lambda m, x=_regex: x.match(m.name))
        if args.no_avatar:
            predicates.append(lambda m: m.avatar is None)
        if args.no_roles:
            predicates.append(lambda m: len(getattr(m, 'roles', [])) <= 1)
        now = datetime.datetime.utcnow()
        if args.created:
            def created(member, *, offset=now - datetime.timedelta(minutes=args.created)):
                return member.created_at > offset
            predicates.append(created)
        if args.joined:
            def joined(member, *, offset=now - datetime.timedelta(minutes=args.joined)):
                return member.joined_at and member.joined_at > offset
            predicates.append(joined)
        if args.joined_after:
            _joined_after_member = await commands.MemberConverter().convert(ctx, args.joined_after)
            def joined_after(member, *, _other=_joined_after_member):
                return member.joined_at and _other.joined_at and member.joined_at > _other.joined_at
            predicates.append(joined_after)
        if args.joined_before:
            _joined_before_member = await commands.MemberConverter().convert(ctx, args.joined_before)
            def joined_before(member, *, _other=_joined_before_member):
                return member.joined_at and _other.joined_at and member.joined_at < _other.joined_at
            predicates.append(joined_before)
        # Deduplicate candidates and require every predicate to pass.
        members = {m for m in members if all(p(m) for p in predicates)}
        if len(members) == 0:
            return await ctx.send('No members found matching criteria.')
        if args.show:
            # Dry run: upload the would-be-banned list instead of banning.
            fmt = "\n".join(f'{member.id}\t{member}' for member in members)
            content = f'Total members: {len(members)}\n{fmt}'
            async with self.bot.session.post('https://hastebin.com/documents', data=content) as resp:
                if resp.status != 200:
                    return await ctx.send('Sorry, failed to post data to hastebin.')
                js = await resp.json()
                return await ctx.send(f'https://hastebin.com/{js["key"]}.txt')
        confirm = await ctx.prompt(f'This will ban **{formats.Plural(member=len(members))}**. Are you sure?')
        if not confirm:
            return await ctx.send('Aborting.')
        if args.reason is None:
            reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
        else:
            reason = await ActionReason().convert(ctx, args.reason)
        # Ban one by one; individual HTTP failures are skipped, not fatal.
        count = 0
        for member in members:
            try:
                await ctx.guild.ban(member, reason=reason)
            except discord.HTTPException:
                pass
            else:
                count += 1
        await ctx.send(f'Banned {count}/{len(members)}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(kick_members=True)
async def softban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Soft bans a member from the server.
A softban is basically banning the member from the server but
then unbanning the member as well. This allows you to essentially
kick the member while removing their messages.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Kick Members permissions.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
obj = discord.Object(id=member)
await ctx.guild.ban(obj, reason=reason)
await ctx.guild.unban(obj, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def unban(self, ctx, member: BannedMember, *, reason: ActionReason = None):
"""Unbans a member from the server.
You can pass either the ID of the banned member or the Name#Discrim
combination of the member. Typically the ID is easiest to use.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permissions.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.unban(member.user, reason=reason)
if member.reason:
await ctx.send(f'Unbanned {member.user} (ID: {member.user.id}), previously banned for {member.reason}.')
else:
await ctx.send(f'Unbanned {member.user} (ID: {member.user.id}).')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def tempban(self, ctx, duration: time.FutureTime, member: MemberID, *, reason: ActionReason = None):
"""Temporarily bans a member for the specified duration.
The duration can be a a short time form, e.g. 30d or a more human
duration such as "until thursday at 3PM" or a more concrete time
such as "2017-12-31".
Note that times are in UTC.
You can also ban from ID to ban regardless whether they're
in the server or not.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
reminder = self.bot.get_cog('Reminder')
if reminder is None:
return await ctx.send('Sorry, this functionality is currently unavailable. Try again later?')
await ctx.guild.ban(discord.Object(id=member), reason=reason)
timer = await reminder.create_timer(duration.dt, 'tempban', ctx.guild.id, ctx.author.id, member, connection=ctx.db)
await ctx.send(f'Banned ID {member} for {time.human_timedelta(duration.dt)}.')
@commands.Cog.listener()
async def on_tempban_timer_complete(self, timer):
guild_id, mod_id, member_id = timer.args
guild = self.bot.get_guild(guild_id)
if guild is None:
# RIP
return
moderator = guild.get_member(mod_id)
if moderator is None:
try:
moderator = await self.bot.fetch_user(mod_id)
except:
# request failed somehow
moderator = f'Mod ID {mod_id}'
else:
moderator = f'{moderator} (ID: {mod_id})'
else:
moderator = f'{moderator} (ID: {mod_id})'
reason = f'Automatic unban from timer made on {timer.created_at} by {moderator}.'
await guild.unban(discord.Object(id=member_id), reason=reason)
    @commands.group(invoke_without_command=True)
    @commands.guild_only()
    @checks.has_permissions(ban_members=True)
    async def mentionspam(self, ctx, count: int=None):
        """Enables auto-banning accounts that spam mentions.
        If a message contains `count` or more mentions then the
        bot will automatically attempt to auto-ban the member.
        The `count` must be greater than 3. If the `count` is 0
        then this is disabled.
        This only applies for user mentions. Everyone or Role
        mentions are not included.
        To use this command you must have the Ban Members permission.
        """
        # No argument: just report the current configuration.
        if count is None:
            query = """SELECT mention_count, COALESCE(safe_mention_channel_ids, '{}') AS channel_ids
                       FROM guild_mod_config
                       WHERE id=$1;
                    """
            row = await ctx.db.fetchrow(query, ctx.guild.id)
            if row is None or not row['mention_count']:
                return await ctx.send('This server has not set up mention spam banning.')
            ignores = ', '.join(f'<#{e}>' for e in row['channel_ids']) or 'None'
            return await ctx.send(f'- Threshold: {row["mention_count"]} mentions\n- Ignored Channels: {ignores}')
        # A count of 0 disables the feature entirely (NULLs the threshold).
        if count == 0:
            query = """UPDATE guild_mod_config SET mention_count = NULL WHERE id=$1;"""
            await ctx.db.execute(query, ctx.guild.id)
            self.get_guild_config.invalidate(self, ctx.guild.id)
            return await ctx.send('Auto-banning members has been disabled.')
        if count <= 3:
            await ctx.send('\N{NO ENTRY SIGN} Auto-ban threshold must be greater than three.')
            return
        # Upsert the new threshold, then invalidate the cached config.
        query = """INSERT INTO guild_mod_config (id, mention_count, safe_mention_channel_ids)
                   VALUES ($1, $2, '{}')
                   ON CONFLICT (id) DO UPDATE SET
                       mention_count = $2;
                """
        await ctx.db.execute(query, ctx.guild.id, count)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send(f'Now auto-banning members that mention more than {count} users.')
@mentionspam.command(name='ignore', aliases=['bypass'])
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def mentionspam_ignore(self, ctx, *channels: discord.TextChannel):
"""Specifies what channels ignore mentionspam auto-bans.
If a channel is given then that channel will no longer be protected
by auto-banning from mention spammers.
To use this command you must have the Ban Members permission.
"""
query = """UPDATE guild_mod_config
SET safe_mention_channel_ids =
ARRAY(SELECT DISTINCT * FROM unnest(COALESCE(safe_mention_channel_ids, '{}') || $2::bigint[]))
WHERE id = $1;
"""
if len(channels) == 0:
return await ctx.send('Missing channels to ignore.')
channel_ids = [c.id for c in channels]
await ctx.db.execute(query, ctx.guild.id, channel_ids)
self.get_guild_config.invalidate(self, ctx.guild.id)
await ctx.send(f'Mentions are now ignored on {", ".join(c.mention for c in channels)}.')
@mentionspam.command(name='unignore', aliases=['protect'])
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def mentionspam_unignore(self, ctx, *channels: discord.TextChannel):
"""Specifies what channels to take off the ignore list.
To use this command you must have the Ban Members permission.
"""
if len(channels) == 0:
return await ctx.send('Missing channels to protect.')
query = """UPDATE guild_mod_config
SET safe_mention_channel_ids =
ARRAY(SELECT element FROM unnest(safe_mention_channel_ids) AS element
WHERE NOT(element = ANY($2::bigint[])))
WHERE id = $1;
"""
await ctx.db.execute(query, ctx.guild.id, [c.id for c in channels])
self.get_guild_config.invalidate(self, ctx.guild.id)
await ctx.send('Updated mentionspam ignore list.')
@commands.group(aliases=['purge'])
@commands.guild_only()
@checks.has_permissions(manage_messages=True)
async def remove(self, ctx):
"""Removes messages that meet a criteria.
In order to use this command, you must have Manage Messages permissions.
Note that the bot needs Manage Messages as well. These commands cannot
be used in a private message.
When the command is done doing its work, you will get a message
detailing which users got removed and how many messages got removed.
"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
async def do_removal(self, ctx, limit, predicate, *, before=None, after=None):
if limit > 2000:
return await ctx.send(f'Too many messages to search given ({limit}/2000)')
if before is None:
before = ctx.message
else:
before = discord.Object(id=before)
if after is not None:
after = discord.Object(id=after)
try:
deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=predicate)
except discord.Forbidden as e:
return await ctx.send('I do not have permissions to delete messages.')
except discord.HTTPException as e:
return await ctx.send(f'Error: {e} (try a smaller search?)')
spammers = Counter(m.author.display_name for m in deleted)
deleted = len(deleted)
messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
if deleted:
messages.append('')
spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
messages.extend(f'**{name}**: {count}' for name, count in spammers)
to_send = '\n'.join(messages)
if len(to_send) > 2000:
await ctx.send(f'Successfully removed {deleted} messages.', delete_after=10)
else:
await ctx.send(to_send, delete_after=10)
@remove.command()
async def embeds(self, ctx, search=100):
"""Removes messages that have embeds in them."""
await self.do_removal(ctx, search, lambda e: len(e.embeds))
@remove.command()
async def files(self, ctx, search=100):
"""Removes messages that have attachments in them."""
await self.do_removal(ctx, search, lambda e: len(e.attachments))
@remove.command()
async def images(self, ctx, search=100):
"""Removes messages that have embeds or attachments."""
await self.do_removal(ctx, search, lambda e: len(e.embeds) or len(e.attachments))
@remove.command(name='all')
async def _remove_all(self, ctx, search=100):
"""Removes all messages."""
await self.do_removal(ctx, search, lambda e: True)
@remove.command()
async def user(self, ctx, member: discord.Member, search=100):
"""Removes all messages by the member."""
await self.do_removal(ctx, search, lambda e: e.author == member)
@remove.command()
async def contains(self, ctx, *, substr: str):
"""Removes all messages containing a substring.
The substring must be at least 3 characters long.
"""
if len(substr) < 3:
await ctx.send('The substring length must be at least 3 characters.')
else:
await self.do_removal(ctx, 100, lambda e: substr in e.content)
@remove.command(name='bot')
async def _bot(self, ctx, prefix=None, search=100):
"""Removes a bot user's messages and messages with their optional prefix."""
def predicate(m):
return (m.webhook_id is None and m.author.bot) or (prefix and m.content.startswith(prefix))
await self.do_removal(ctx, search, predicate)
@remove.command(name='emoji')
async def _emoji(self, ctx, search=100):
"""Removes all messages containing custom emoji."""
custom_emoji = re.compile(r'<:(\w+):(\d+)>')
def predicate(m):
return custom_emoji.search(m.content)
await self.do_removal(ctx, search, predicate)
@remove.command(name='reactions')
async def _reactions(self, ctx, search=100):
"""Removes all reactions from messages that have them."""
if search > 2000:
return await ctx.send(f'Too many messages to search for ({search}/2000)')
total_reactions = 0
async for message in ctx.history(limit=search, before=ctx.message):
if len(message.reactions):
total_reactions += sum(r.count for r in message.reactions)
await message.clear_reactions()
await ctx.send(f'Successfully removed {total_reactions} reactions.')
    @remove.command()
    async def custom(self, ctx, *, args: str):
        """A more advanced purge command.
        This command uses a powerful "command line" syntax.
        Most options support multiple values to indicate 'any' match.
        If the value has spaces it must be quoted.
        The messages are only deleted if all options are met unless
        the `--or` flag is passed, in which case only if any is met.
        The following options are valid.
        `--user`: A mention or name of the user to remove.
        `--contains`: A substring to search for in the message.
        `--starts`: A substring to search if the message starts with.
        `--ends`: A substring to search if the message ends with.
        `--search`: How many messages to search. Default 100. Max 2000.
        `--after`: Messages must come after this message ID.
        `--before`: Messages must come before this message ID.
        Flag options (no arguments):
        `--bot`: Check if it's a bot user.
        `--embeds`: Check if the message has embeds.
        `--files`: Check if the message has attachments.
        `--emoji`: Check if the message has custom emoji.
        `--reactions`: Check if the message has reactions
        `--or`: Use logical OR for all options.
        `--not`: Use logical NOT for all options.
        """
        # Arguments raises RuntimeError instead of exiting the process on a
        # parse failure, so errors can be reported back to the invoker.
        parser = Arguments(add_help=False, allow_abbrev=False)
        parser.add_argument('--user', nargs='+')
        parser.add_argument('--contains', nargs='+')
        parser.add_argument('--starts', nargs='+')
        parser.add_argument('--ends', nargs='+')
        parser.add_argument('--or', action='store_true', dest='_or')
        parser.add_argument('--not', action='store_true', dest='_not')
        parser.add_argument('--emoji', action='store_true')
        # For these flags the stored const *is* the predicate to apply.
        parser.add_argument('--bot', action='store_const', const=lambda m: m.author.bot)
        parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
        parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
        parser.add_argument('--reactions', action='store_const', const=lambda m: len(m.reactions))
        parser.add_argument('--search', type=int, default=100)
        parser.add_argument('--after', type=int)
        parser.add_argument('--before', type=int)
        try:
            args = parser.parse_args(shlex.split(args))
        except Exception as e:
            await ctx.send(str(e))
            return
        predicates = []
        if args.bot:
            predicates.append(args.bot)
        if args.embeds:
            predicates.append(args.embeds)
        if args.files:
            predicates.append(args.files)
        if args.reactions:
            predicates.append(args.reactions)
        if args.emoji:
            custom_emoji = re.compile(r'<:(\w+):(\d+)>')
            predicates.append(lambda m: custom_emoji.search(m.content))
        if args.user:
            users = []
            converter = commands.MemberConverter()
            for u in args.user:
                try:
                    user = await converter.convert(ctx, u)
                    users.append(user)
                except Exception as e:
                    await ctx.send(str(e))
                    return
            predicates.append(lambda m: m.author in users)
        if args.contains:
            predicates.append(lambda m: any(sub in m.content for sub in args.contains))
        if args.starts:
            predicates.append(lambda m: any(m.content.startswith(s) for s in args.starts))
        if args.ends:
            predicates.append(lambda m: any(m.content.endswith(s) for s in args.ends))
        # Combine all predicates with AND (or OR when --or was passed) and
        # optionally invert the final verdict with --not.
        op = all if not args._or else any
        def predicate(m):
            r = op(p(m) for p in predicates)
            if args._not:
                return not r
            return r
        args.search = max(0, min(2000, args.search)) # clamp from 0-2000
        await self.do_removal(ctx, args.search, predicate, before=args.before, after=args.after)
def setup(bot):
    """Entry point used by discord.py's extension loader to register the Mod cog."""
    bot.add_cog(Mod(bot))
Fix mistake in ?raid strict not using the proper channel.
from discord.ext import commands
from .utils import checks, db, time, cache, formats
from collections import Counter, defaultdict
from inspect import cleandoc
import re
import json
import discord
import enum
import datetime
import asyncio
import argparse, shlex
import logging
log = logging.getLogger(__name__)
## Misc utilities
class Arguments(argparse.ArgumentParser):
    # argparse's default error() prints usage and exits the process; inside a
    # bot we want a catchable exception so commands can report it to the user.
    def error(self, message):
        raise RuntimeError(message)
class RaidMode(enum.Enum):
    # Values are stored verbatim in the guild_mod_config.raid_mode column.
    off = 0
    on = 1
    strict = 2

    def __str__(self):
        # Show just the mode name (e.g. 'strict'), not 'RaidMode.strict'.
        return self.name
## Tables
class GuildConfig(db.Table, table_name='guild_mod_config'):
    # Guild ID the configuration row belongs to.
    id = db.Column(db.Integer(big=True), primary_key=True)
    # Stored RaidMode enum value (0 off, 1 on, 2 strict); NULL means unset.
    raid_mode = db.Column(db.Integer(small=True))
    # Channel ID that raid-mode join messages are broadcast to.
    broadcast_channel = db.Column(db.Integer(big=True))
    # Mention threshold that triggers an auto-ban; NULL disables the feature.
    mention_count = db.Column(db.Integer(small=True))
    # Channel IDs exempt from mention-spam auto-bans.
    safe_mention_channel_ids = db.Column(db.Array(db.Integer(big=True)))
## Configuration
class ModConfig:
    """In-memory view of a guild's row in the guild_mod_config table."""

    __slots__ = ('raid_mode', 'id', 'bot', 'broadcast_channel_id', 'mention_count', 'safe_mention_channel_ids')

    @classmethod
    async def from_record(cls, record, bot):
        """Build a ModConfig from a database record and the running bot."""
        instance = cls()
        instance.bot = bot
        instance.id = record['id']
        instance.raid_mode = record['raid_mode']
        instance.broadcast_channel_id = record['broadcast_channel']
        instance.mention_count = record['mention_count']
        # A NULL array column becomes an empty set.
        instance.safe_mention_channel_ids = set(record['safe_mention_channel_ids'] or [])
        return instance

    @property
    def broadcast_channel(self):
        """The resolved broadcast channel, or None when the guild/channel is gone."""
        guild = self.bot.get_guild(self.id)
        if not guild:
            return guild
        return guild.get_channel(self.broadcast_channel_id)
## Converters
def can_execute_action(ctx, user, target):
    """Whether *user* outranks *target*: bot owner, guild owner, or higher top role."""
    if user.id == ctx.bot.owner_id:
        return True
    if user == ctx.guild.owner:
        return True
    return user.top_role > target.top_role
class MemberID(commands.Converter):
    """Converter resolving an argument to a bannable member's ID.

    Falls back to a raw integer ID when the argument is not a resolvable
    member, allowing actions on users no longer in the guild.
    """

    async def convert(self, ctx, argument):
        try:
            member = await commands.MemberConverter().convert(ctx, argument)
        except commands.BadArgument:
            # Not a member in this guild; accept a bare numeric ID instead.
            try:
                return int(argument, base=10)
            except ValueError:
                raise commands.BadArgument(f"{argument} is not a valid member or member ID.") from None
        # Resolved members must be below the invoker in the role hierarchy.
        if not can_execute_action(ctx, ctx.author, member):
            raise commands.BadArgument('You cannot do this action on this user due to role hierarchy.')
        return member.id
class BannedMember(commands.Converter):
    """Converter resolving an argument to a BanEntry from the guild ban list.

    Accepts either a numeric user ID or a Name#Discrim string.
    """

    async def convert(self, ctx, argument):
        bans = await ctx.guild.bans()
        try:
            target_id = int(argument, base=10)
        except ValueError:
            # Fall back to matching the rendered Name#Discrim form.
            match = discord.utils.find(lambda entry: str(entry.user) == argument, bans)
        else:
            match = discord.utils.find(lambda entry: entry.user.id == target_id, bans)
        if match is None:
            raise commands.BadArgument("Not a valid previously-banned member.")
        return match
class ActionReason(commands.Converter):
    """Converter that prefixes a moderation reason with the invoking author.

    Produces '<author> (ID: <id>): <argument>' and rejects reasons that would
    exceed Discord's 512-character audit-log reason limit.
    """

    async def convert(self, ctx, argument):
        ret = f'{ctx.author} (ID: {ctx.author.id}): {argument}'
        if len(ret) > 512:
            # The maximum user-supplied length is 512 minus the prefix length.
            # Since len(ret) == len(prefix) + len(argument), that cap is
            # 512 - len(ret) + len(argument). The previous code subtracted
            # len(argument), reporting a nonsensical (often negative) cap.
            reason_max = 512 - len(ret) + len(argument)
            raise commands.BadArgument(f'reason is too long ({len(argument)}/{reason_max})')
        return ret
## Spam detector
# TODO: add this to d.py maybe
class CooldownByContent(commands.CooldownMapping):
    # Bucket cooldowns by (channel, content) so the same message text spammed
    # by multiple accounts still shares one rate-limit bucket.
    def _bucket_key(self, message):
        return (message.channel.id, message.content)
class SpamChecker:
    """Heuristics for detecting raid-style spam in a guild.

    Two message-rate checks are tracked:

    1. Per-user: more than 15 messages within 17 seconds from one member.
    2. Per-content: identical content posted 15 times within 17 seconds,
       which catches alternating spam bots the per-user bucket would miss.

    From experience these values aren't reached unless someone is actively
    spamming. A third bucket (10 joins per 10 seconds) is consulted for
    logging purposes only.
    """

    def __init__(self):
        self.by_content = CooldownByContent.from_cooldown(15, 17.0, commands.BucketType.member)
        self.by_user = commands.CooldownMapping.from_cooldown(15, 17.0, commands.BucketType.user)
        self.by_join = commands.Cooldown(10, 10.0, commands.BucketType.default)

    def is_spamming(self, message):
        """Whether this message trips the per-user or per-content rate limit."""
        if message.guild is None:
            return False
        if self.by_user.get_bucket(message).update_rate_limit():
            return True
        return bool(self.by_content.get_bucket(message).update_rate_limit())

    def is_fast_join(self):
        """Whether joins are currently arriving faster than the join bucket allows."""
        return self.by_join.update_rate_limit()
## The actual cog
class Mod(commands.Cog):
"""Moderation related commands."""
    def __init__(self, bot):
        self.bot = bot
        # Lazily-created per-guild spam state: guild_id -> SpamChecker.
        self._spam_check = defaultdict(SpamChecker)
    def __repr__(self):
        # Short identifier used when the cog appears in debug output.
        return '<cogs.Mod>'
    async def cog_command_error(self, ctx, error):
        # Centralized error reporting for every command in this cog.
        if isinstance(error, commands.BadArgument):
            await ctx.send(error)
        elif isinstance(error, commands.CommandInvokeError):
            original = error.original
            if isinstance(original, discord.Forbidden):
                await ctx.send('I do not have permission to execute this action.')
            elif isinstance(original, discord.NotFound):
                await ctx.send(f'This entity does not exist: {original.text}')
            elif isinstance(original, discord.HTTPException):
                # Checked last so the more specific cases above win.
                await ctx.send('Somehow, an unexpected error occurred. Try again later?')
    @cache.cache()
    async def get_guild_config(self, guild_id):
        """Fetch (and cache) the ModConfig for a guild, or None if unconfigured."""
        query = """SELECT * FROM guild_mod_config WHERE id=$1;"""
        async with self.bot.pool.acquire() as con:
            record = await con.fetchrow(query, guild_id)
            if record is not None:
                return await ModConfig.from_record(record, self.bot)
            return None
    async def check_raid(self, config, guild_id, member, message):
        """Auto-ban *member* when strict raid mode is active and they are spamming."""
        # Only strict mode bans; regular raid mode merely broadcasts joins.
        if config.raid_mode != RaidMode.strict.value:
            return
        checker = self._spam_check[guild_id]
        if not checker.is_spamming(message):
            return
        try:
            await member.ban(reason='Auto-ban from spam (strict raid mode ban)')
        except discord.HTTPException:
            log.info(f'[Raid Mode] Failed to ban {member} (ID: {member.id}) from server {member.guild} via strict mode.')
        else:
            log.info(f'[Raid Mode] Banned {member} (ID: {member.id}) from server {member.guild} via strict mode.')
    @commands.Cog.listener()
    async def on_message(self, message):
        # Auto-moderation listener: strict raid-mode spam bans plus mention-spam bans.
        author = message.author
        # Never act on the bot itself or its owner.
        if author.id in (self.bot.user.id, self.bot.owner_id):
            return
        if message.guild is None:
            return
        if not isinstance(author, discord.Member):
            return
        if author.bot:
            return
        # we're going to ignore members with roles
        if len(author.roles) > 1:
            return
        guild_id = message.guild.id
        config = await self.get_guild_config(guild_id)
        if config is None:
            return
        # check for raid mode stuff
        await self.check_raid(config, guild_id, author, message)
        # auto-ban tracking for mention spams begin here
        # Cheap pre-filter: thresholds are always greater than 3.
        if len(message.mentions) <= 3:
            return
        if not config.mention_count:
            return
        # check if it meets the thresholds required
        # Bot accounts and self-mentions don't count toward the threshold.
        mention_count = sum(not m.bot and m.id != author.id for m in message.mentions)
        if mention_count < config.mention_count:
            return
        if message.channel.id in config.safe_mention_channel_ids:
            return
        try:
            await author.ban(reason=f'Spamming mentions ({mention_count} mentions)')
        except Exception as e:
            log.info(f'Failed to autoban member {author} (ID: {author.id}) in guild ID {guild_id}')
        else:
            await message.channel.send(f'Banned {author} (ID: {author.id}) for spamming {mention_count} mentions.')
            log.info(f'Member {author} (ID: {author.id}) has been autobanned from guild ID {guild_id}')
@commands.Cog.listener()
async def on_voice_state_update(self, user, before, after):
if not isinstance(user, discord.Member):
return
# joined a voice channel
if before.channel is None and after.channel is not None:
config = await self.get_guild_config(user.guild.id)
if config is None:
return
await self.check_raid(config, user.guild, user, datetime.datetime.utcnow())
    @commands.Cog.listener()
    async def on_member_join(self, member):
        # Broadcasts a join embed when any raid mode is enabled for the guild.
        guild_id = member.guild.id
        config = await self.get_guild_config(guild_id)
        if config is None or not config.raid_mode:
            return
        now = datetime.datetime.utcnow()
        # these are the dates in minutes
        created = (now - member.created_at).total_seconds() // 60
        checker = self._spam_check[guild_id]
        # Do the broadcasted message to the channel
        # Colour encodes the situation: red = join burst, yellow = very new
        # account, green = normal.
        title = 'Member Joined'
        if checker.is_fast_join():
            colour = 0xdd5f53 # red
            if created < 30:
                title = 'Member Joined (Very New Member)'
        else:
            colour = 0x53dda4 # green
            if created < 30:
                colour = 0xdda453 # yellow
                title = 'Member Joined (Very New Member)'
        e = discord.Embed(title=title, colour=colour)
        e.timestamp = now
        e.set_author(name=str(member), icon_url=member.avatar_url)
        e.add_field(name='ID', value=member.id)
        e.add_field(name='Joined', value=member.joined_at)
        e.add_field(name='Created', value=time.human_timedelta(member.created_at), inline=False)
        if config.broadcast_channel:
            await config.broadcast_channel.send(embed=e)
@commands.command(aliases=['newmembers'])
@commands.guild_only()
async def newusers(self, ctx, *, count=5):
"""Tells you the newest members of the server.
This is useful to check if any suspicious members have
joined.
The count parameter can only be up to 25.
"""
count = max(min(count, 25), 5)
if not ctx.guild.chunked:
await self.bot.request_offline_members(ctx.guild)
members = sorted(ctx.guild.members, key=lambda m: m.joined_at, reverse=True)[:count]
e = discord.Embed(title='New Members', colour=discord.Colour.green())
for member in members:
body = f'joined {time.human_timedelta(member.joined_at)}, created {time.human_timedelta(member.created_at)}'
e.add_field(name=f'{member} (ID: {member.id})', value=body, inline=False)
await ctx.send(embed=e)
    @commands.group(aliases=['raids'], invoke_without_command=True)
    @checks.is_mod()
    async def raid(self, ctx):
        """Controls raid mode on the server.
        Calling this command with no arguments will show the current raid
        mode information.
        You must have Manage Server permissions to use this command or
        its subcommands.
        """
        query = "SELECT raid_mode, broadcast_channel FROM guild_mod_config WHERE id=$1;"
        row = await ctx.db.fetchrow(query, ctx.guild.id)
        if row is None:
            fmt = 'Raid Mode: off\nBroadcast Channel: None'
        else:
            # row[0] is raid_mode, row[1] is broadcast_channel (may be NULL).
            ch = f'<#{row[1]}>' if row[1] else None
            fmt = f'Raid Mode: {RaidMode(row[0])}\nBroadcast Channel: {ch}'
        await ctx.send(fmt)
    @raid.command(name='on', aliases=['enable', 'enabled'])
    @checks.is_mod()
    async def raid_on(self, ctx, *, channel: discord.TextChannel = None):
        """Enables basic raid mode on the server.
        When enabled, server verification level is set to table flip
        levels and allows the bot to broadcast new members joining
        to a specified channel.
        If no channel is given, then the bot will broadcast join
        messages on the channel this command was used in.
        """
        channel = channel or ctx.channel
        # Raising the verification level is best-effort; continue even if it fails.
        try:
            await ctx.guild.edit(verification_level=discord.VerificationLevel.high)
        except discord.HTTPException:
            await ctx.send('\N{WARNING SIGN} Could not set verification level.')
        # Upsert raid mode and the broadcast channel for this guild.
        query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
                   VALUES ($1, $2, $3) ON CONFLICT (id)
                   DO UPDATE SET
                        raid_mode = EXCLUDED.raid_mode,
                        broadcast_channel = EXCLUDED.broadcast_channel;
                """
        await ctx.db.execute(query, ctx.guild.id, RaidMode.on.value, channel.id)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send(f'Raid mode enabled. Broadcasting join messages to {channel.mention}.')
    @raid.command(name='off', aliases=['disable', 'disabled'])
    @checks.is_mod()
    async def raid_off(self, ctx):
        """Disables raid mode on the server.
        When disabled, the server verification levels are set
        back to Low levels and the bot will stop broadcasting
        join messages.
        """
        # Lowering the verification level is best-effort; continue even if it fails.
        try:
            await ctx.guild.edit(verification_level=discord.VerificationLevel.low)
        except discord.HTTPException:
            await ctx.send('\N{WARNING SIGN} Could not set verification level.')
        # Clear raid mode and the broadcast channel for this guild.
        query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
                   VALUES ($1, $2, NULL) ON CONFLICT (id)
                   DO UPDATE SET
                        raid_mode = EXCLUDED.raid_mode,
                        broadcast_channel = NULL;
                """
        await ctx.db.execute(query, ctx.guild.id, RaidMode.off.value)
        # Discard accumulated spam state along with the cached config.
        self._spam_check.pop(ctx.guild.id, None)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send('Raid mode disabled. No longer broadcasting join messages.')
    @raid.command(name='strict')
    @checks.is_mod()
    async def raid_strict(self, ctx, *, channel: discord.TextChannel = None):
        """Enables strict raid mode on the server.
        Strict mode is similar to regular enabled raid mode, with the added
        benefit of auto-banning members that are spamming. The threshold for
        spamming depends on a per-content basis and also on a per-user basis
        of 15 messages per 17 seconds.
        If this is considered too strict, it is recommended to fall back to regular
        raid mode.
        """
        channel = channel or ctx.channel
        # Strict mode auto-bans, so the bot needs both kick and ban permissions.
        perms = ctx.me.guild_permissions
        if not (perms.kick_members and perms.ban_members):
            return await ctx.send('\N{NO ENTRY SIGN} I do not have permissions to kick and ban members.')
        # Raising the verification level is best-effort; continue even if it fails.
        try:
            await ctx.guild.edit(verification_level=discord.VerificationLevel.high)
        except discord.HTTPException:
            await ctx.send('\N{WARNING SIGN} Could not set verification level.')
        # Upsert strict raid mode and the broadcast channel for this guild.
        query = """INSERT INTO guild_mod_config (id, raid_mode, broadcast_channel)
                   VALUES ($1, $2, $3) ON CONFLICT (id)
                   DO UPDATE SET
                        raid_mode = EXCLUDED.raid_mode,
                        broadcast_channel = EXCLUDED.broadcast_channel;
                """
        await ctx.db.execute(query, ctx.guild.id, RaidMode.strict.value, channel.id)
        self.get_guild_config.invalidate(self, ctx.guild.id)
        await ctx.send(f'Raid mode enabled strictly. Broadcasting join messages to {channel.mention}.')
async def _basic_cleanup_strategy(self, ctx, search):
count = 0
async for msg in ctx.history(limit=search, before=ctx.message):
if msg.author == ctx.me:
await msg.delete()
count += 1
return { 'Bot': count }
async def _complex_cleanup_strategy(self, ctx, search):
prefixes = tuple(self.bot.get_guild_prefixes(ctx.guild)) # thanks startswith
def check(m):
return m.author == ctx.me or m.content.startswith(prefixes)
deleted = await ctx.channel.purge(limit=search, check=check, before=ctx.message)
return Counter(m.author.display_name for m in deleted)
    @commands.command()
    @checks.has_permissions(manage_messages=True)
    async def cleanup(self, ctx, search=100):
        """Cleans up the bot's messages from the channel.
        If a search number is specified, it searches that many messages to delete.
        If the bot has Manage Messages permissions then it will try to delete
        messages that look like they invoked the bot as well.
        After the cleanup is completed, the bot will send you a message with
        which people got their messages deleted and their count. This is useful
        to see which users are spammers.
        You must have Manage Messages permission to use this.
        """
        # Prefer the purge-based strategy when the bot can bulk-delete here.
        strategy = self._basic_cleanup_strategy
        if ctx.me.permissions_in(ctx.channel).manage_messages:
            strategy = self._complex_cleanup_strategy
        spammers = await strategy(ctx, search)
        deleted = sum(spammers.values())
        messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
        if deleted:
            messages.append('')
            spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
            messages.extend(f'- **{author}**: {count}' for author, count in spammers)
        await ctx.send('\n'.join(messages), delete_after=10)
@commands.command()
@commands.guild_only()
@checks.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason: ActionReason = None):
"""Kicks a member from the server.
In order for this to work, the bot must have Kick Member permissions.
To use this command you must have Kick Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await member.kick(reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def ban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Bans a member from the server.
You can also ban from ID to ban regardless whether they're
in the server or not.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Ban Members permission.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
await ctx.guild.ban(discord.Object(id=member), reason=reason)
await ctx.send('\N{OK HAND SIGN}')
    @commands.command()
    @commands.guild_only()
    @checks.has_permissions(ban_members=True)
    async def massban(self, ctx, *, args):
        """Mass bans multiple members from the server.
        This command has a powerful "command line" syntax. To use this command
        you and the bot must both have Ban Members permission. **Every option is optional.**
        Users are only banned **if and only if** all conditions are met.
        The following options are valid.
        `--channel` or `-c`: Channel to search for message history.
        `--reason` or `-r`: The reason for the ban.
        `--regex`: Regex that usernames must match.
        `--created`: Matches users whose accounts were created less than specified minutes ago.
        `--joined`: Matches users that joined less than specified minutes ago.
        `--joined-before`: Matches users who joined before the member given.
        `--joined-after`: Matches users who joined after the member given.
        `--no-avatar`: Matches users who have no avatar. (no arguments)
        `--no-roles`: Matches users that have no role. (no arguments)
        `--show`: Show members instead of banning them (no arguments).
        Message history filters (Requires `--channel`):
        `--contains`: A substring to search for in the message.
        `--starts`: A substring to search if the message starts with.
        `--ends`: A substring to search if the message ends with.
        `--match`: A regex to match the message content to.
        `--search`: How many messages to search. Default 100. Max 2000.
        `--after`: Messages must come after this message ID.
        `--before`: Messages must come before this message ID.
        `--files`: Checks if the message has attachments (no arguments).
        `--embeds`: Checks if the message has embeds (no arguments).
        """
        # Arguments raises RuntimeError instead of exiting on a parse failure.
        parser = Arguments(add_help=False, allow_abbrev=False)
        parser.add_argument('--channel', '-c')
        parser.add_argument('--reason', '-r')
        parser.add_argument('--search', type=int, default=100)
        parser.add_argument('--regex')
        parser.add_argument('--no-avatar', action='store_true')
        parser.add_argument('--no-roles', action='store_true')
        parser.add_argument('--created', type=int)
        parser.add_argument('--joined', type=int)
        parser.add_argument('--joined-before')
        parser.add_argument('--joined-after')
        parser.add_argument('--contains')
        parser.add_argument('--starts')
        parser.add_argument('--ends')
        parser.add_argument('--match')
        parser.add_argument('--show', action='store_true')
        # For these flags the stored const *is* the message predicate to apply.
        parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
        parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
        parser.add_argument('--after', type=int)
        parser.add_argument('--before', type=int)
        try:
            args = parser.parse_args(shlex.split(args))
        except Exception as e:
            return await ctx.send(str(e))
        members = []
        # Candidate pool: authors from channel history if --channel was given,
        # otherwise the entire member list.
        if args.channel:
            channel = await commands.TextChannelConverter().convert(ctx, args.channel)
            before = args.before and discord.Object(id=args.before)
            after = args.after and discord.Object(id=args.after)
            predicates = []
            if args.contains:
                predicates.append(lambda m: args.contains in m.content)
            if args.starts:
                predicates.append(lambda m: m.content.startswith(args.starts))
            if args.ends:
                predicates.append(lambda m: m.content.endswith(args.ends))
            if args.match:
                try:
                    _match = re.compile(args.match)
                except re.error as e:
                    return await ctx.send(f'Invalid regex passed to `--match`: {e}')
                else:
                    # Bind the compiled regex as a default to avoid late binding.
                    predicates.append(lambda m, x=_match: x.match(m.content))
            if args.embeds:
                predicates.append(args.embeds)
            if args.files:
                predicates.append(args.files)
            async for message in channel.history(limit=min(max(1, args.search), 2000), before=before, after=after):
                if all(p(message) for p in predicates):
                    members.append(message.author)
        else:
            members = ctx.guild.members
        # member filters
        predicates = [
            lambda m: can_execute_action(ctx, ctx.author, m), # Only if applicable
            lambda m: not m.bot, # No bots
            lambda m: m.discriminator != '0000', # No deleted users
        ]
        if args.regex:
            try:
                _regex = re.compile(args.regex)
            except re.error as e:
                return await ctx.send(f'Invalid regex passed to `--regex`: {e}')
            else:
                predicates.append(lambda m, x=_regex: x.match(m.name))
        if args.no_avatar:
            predicates.append(lambda m: m.avatar is None)
        if args.no_roles:
            predicates.append(lambda m: len(getattr(m, 'roles', [])) <= 1)
        now = datetime.datetime.utcnow()
        if args.created:
            def created(member, *, offset=now - datetime.timedelta(minutes=args.created)):
                return member.created_at > offset
            predicates.append(created)
        if args.joined:
            def joined(member, *, offset=now - datetime.timedelta(minutes=args.joined)):
                return member.joined_at and member.joined_at > offset
            predicates.append(joined)
        if args.joined_after:
            _joined_after_member = await commands.MemberConverter().convert(ctx, args.joined_after)
            def joined_after(member, *, _other=_joined_after_member):
                return member.joined_at and _other.joined_at and member.joined_at > _other.joined_at
            predicates.append(joined_after)
        if args.joined_before:
            _joined_before_member = await commands.MemberConverter().convert(ctx, args.joined_before)
            def joined_before(member, *, _other=_joined_before_member):
                return member.joined_at and _other.joined_at and member.joined_at < _other.joined_at
            predicates.append(joined_before)
        # A set de-duplicates authors that appeared multiple times in history.
        members = {m for m in members if all(p(m) for p in predicates)}
        if len(members) == 0:
            return await ctx.send('No members found matching criteria.')
        # --show: dump the would-be targets to hastebin instead of banning.
        if args.show:
            fmt = "\n".join(f'{member.id}\t{member}' for member in members)
            content = f'Total members: {len(members)}\n{fmt}'
            async with self.bot.session.post('https://hastebin.com/documents', data=content) as resp:
                if resp.status != 200:
                    return await ctx.send('Sorry, failed to post data to hastebin.')
                js = await resp.json()
                return await ctx.send(f'https://hastebin.com/{js["key"]}.txt')
        confirm = await ctx.prompt(f'This will ban **{formats.Plural(member=len(members))}**. Are you sure?')
        if not confirm:
            return await ctx.send('Aborting.')
        if args.reason is None:
            reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
        else:
            reason = await ActionReason().convert(ctx, args.reason)
        count = 0
        for member in members:
            try:
                await ctx.guild.ban(member, reason=reason)
            except discord.HTTPException:
                # Best-effort: keep going even if an individual ban fails.
                pass
            else:
                count += 1
        await ctx.send(f'Banned {count}/{len(members)}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(kick_members=True)
async def softban(self, ctx, member: MemberID, *, reason: ActionReason = None):
"""Soft bans a member from the server.
A softban is basically banning the member from the server but
then unbanning the member as well. This allows you to essentially
kick the member while removing their messages.
In order for this to work, the bot must have Ban Member permissions.
To use this command you must have Kick Members permissions.
"""
if reason is None:
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
obj = discord.Object(id=member)
await ctx.guild.ban(obj, reason=reason)
await ctx.guild.unban(obj, reason=reason)
await ctx.send('\N{OK HAND SIGN}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def unban(self, ctx, member: BannedMember, *, reason: ActionReason = None):
    """Unbans a member from the server.

    You can pass either the ID of the banned member or the Name#Discrim
    combination of the member. Typically the ID is easiest to use.

    In order for this to work, the bot must have Ban Member permissions.

    To use this command you must have Ban Members permissions.
    """
    if reason is None:
        reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
    await ctx.guild.unban(member.user, reason=reason)
    # Echo the original ban reason when the ban entry recorded one.
    if member.reason:
        suffix = f', previously banned for {member.reason}.'
    else:
        suffix = '.'
    await ctx.send(f'Unbanned {member.user} (ID: {member.user.id}){suffix}')
@commands.command()
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def tempban(self, ctx, duration: time.FutureTime, member: MemberID, *, reason: ActionReason = None):
    """Temporarily bans a member for the specified duration.

    The duration can be a short time form, e.g. 30d or a more human
    duration such as "until thursday at 3PM" or a more concrete time
    such as "2017-12-31". Note that times are in UTC.

    You can also ban from ID to ban regardless whether they're
    in the server or not.

    In order for this to work, the bot must have Ban Member permissions.

    To use this command you must have Ban Members permission.
    """
    if reason is None:
        reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
    reminder = self.bot.get_cog('Reminder')
    if reminder is None:
        return await ctx.send('Sorry, this functionality is currently unavailable. Try again later?')
    await ctx.guild.ban(discord.Object(id=member), reason=reason)
    # Schedule the automatic unban; on_tempban_timer_complete handles expiry.
    # (The returned timer object was previously bound but never used.)
    await reminder.create_timer(duration.dt, 'tempban', ctx.guild.id, ctx.author.id, member, connection=ctx.db)
    await ctx.send(f'Banned ID {member} for {time.human_timedelta(duration.dt)}.')
@commands.Cog.listener()
async def on_tempban_timer_complete(self, timer):
    """Unbans the stored member when a 'tempban' timer created by `tempban` fires."""
    guild_id, mod_id, member_id = timer.args
    guild = self.bot.get_guild(guild_id)
    if guild is None:
        # The bot left the guild (or it was deleted); nothing to unban.
        return
    moderator = guild.get_member(mod_id)
    if moderator is None:
        try:
            moderator = await self.bot.fetch_user(mod_id)
        except discord.HTTPException:
            # Narrowed from a bare `except:`; only the API request can fail here.
            moderator = f'Mod ID {mod_id}'
        else:
            moderator = f'{moderator} (ID: {mod_id})'
    else:
        moderator = f'{moderator} (ID: {mod_id})'
    reason = f'Automatic unban from timer made on {timer.created_at} by {moderator}.'
    await guild.unban(discord.Object(id=member_id), reason=reason)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def mentionspam(self, ctx, count: int=None):
    """Enables auto-banning accounts that spam mentions.

    If a message contains `count` or more mentions then the
    bot will automatically attempt to auto-ban the member.

    The `count` must be greater than 3. If the `count` is 0
    then this is disabled.

    This only applies for user mentions. Everyone or Role
    mentions are not included.

    To use this command you must have the Ban Members permission.
    """
    # No argument: report the current configuration instead of changing it.
    if count is None:
        query = """SELECT mention_count, COALESCE(safe_mention_channel_ids, '{}') AS channel_ids
                   FROM guild_mod_config
                   WHERE id=$1;
                """
        row = await ctx.db.fetchrow(query, ctx.guild.id)
        if row is None or not row['mention_count']:
            return await ctx.send('This server has not set up mention spam banning.')
        ignores = ', '.join(f'<#{e}>' for e in row['channel_ids']) or 'None'
        return await ctx.send(f'- Threshold: {row["mention_count"]} mentions\n- Ignored Channels: {ignores}')
    # Zero disables the feature entirely.
    if count == 0:
        query = """UPDATE guild_mod_config SET mention_count = NULL WHERE id=$1;"""
        await ctx.db.execute(query, ctx.guild.id)
        # Drop the cached per-guild config so the change takes effect immediately.
        self.get_guild_config.invalidate(self, ctx.guild.id)
        return await ctx.send('Auto-banning members has been disabled.')
    if count <= 3:
        await ctx.send('\N{NO ENTRY SIGN} Auto-ban threshold must be greater than three.')
        return
    # Upsert the threshold, preserving any existing ignored-channel list.
    query = """INSERT INTO guild_mod_config (id, mention_count, safe_mention_channel_ids)
               VALUES ($1, $2, '{}')
               ON CONFLICT (id) DO UPDATE SET
                   mention_count = $2;
            """
    await ctx.db.execute(query, ctx.guild.id, count)
    self.get_guild_config.invalidate(self, ctx.guild.id)
    await ctx.send(f'Now auto-banning members that mention more than {count} users.')
@mentionspam.command(name='ignore', aliases=['bypass'])
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def mentionspam_ignore(self, ctx, *channels: discord.TextChannel):
    """Specifies what channels ignore mentionspam auto-bans.

    If a channel is given then that channel will no longer be protected
    by auto-banning from mention spammers.

    To use this command you must have the Ban Members permission.
    """
    # Append the new channel IDs to the stored array, de-duplicating in SQL.
    query = """UPDATE guild_mod_config
               SET safe_mention_channel_ids =
                   ARRAY(SELECT DISTINCT * FROM unnest(COALESCE(safe_mention_channel_ids, '{}') || $2::bigint[]))
               WHERE id = $1;
            """
    if len(channels) == 0:
        return await ctx.send('Missing channels to ignore.')
    channel_ids = [c.id for c in channels]
    await ctx.db.execute(query, ctx.guild.id, channel_ids)
    # Invalidate the cached config so the new ignore list is used immediately.
    self.get_guild_config.invalidate(self, ctx.guild.id)
    await ctx.send(f'Mentions are now ignored on {", ".join(c.mention for c in channels)}.')
@mentionspam.command(name='unignore', aliases=['protect'])
@commands.guild_only()
@checks.has_permissions(ban_members=True)
async def mentionspam_unignore(self, ctx, *channels: discord.TextChannel):
    """Specifies what channels to take off the ignore list.

    To use this command you must have the Ban Members permission.
    """
    if len(channels) == 0:
        return await ctx.send('Missing channels to protect.')
    # Filter the given IDs out of the stored array in SQL.
    query = """UPDATE guild_mod_config
               SET safe_mention_channel_ids =
                   ARRAY(SELECT element FROM unnest(safe_mention_channel_ids) AS element
                         WHERE NOT(element = ANY($2::bigint[])))
               WHERE id = $1;
            """
    await ctx.db.execute(query, ctx.guild.id, [c.id for c in channels])
    # Invalidate the cached config so the change is visible immediately.
    self.get_guild_config.invalidate(self, ctx.guild.id)
    await ctx.send('Updated mentionspam ignore list.')
@commands.group(aliases=['purge'])
@commands.guild_only()
@checks.has_permissions(manage_messages=True)
async def remove(self, ctx):
    """Removes messages that meet a criteria.

    In order to use this command, you must have Manage Messages permissions.
    Note that the bot needs Manage Messages as well. These commands cannot
    be used in a private message.

    When the command is done doing its work, you will get a message
    detailing which users got removed and how many messages got removed.
    """
    # Bare `remove`/`purge` invocation: show the subcommand help instead.
    if ctx.invoked_subcommand is None:
        await ctx.send_help(ctx.command)
async def do_removal(self, ctx, limit, predicate, *, before=None, after=None):
    """Purges up to `limit` messages matching `predicate` and reports
    per-author removal counts.

    Parameters
    -----------
    limit: int
        Maximum number of messages to search; capped at 2000.
    predicate
        Callable taking a message and returning a truthy value to delete it.
    before: Optional[int]
        Only search messages before this message ID (defaults to the
        invoking message).
    after: Optional[int]
        Only search messages after this message ID.
    """
    if limit > 2000:
        return await ctx.send(f'Too many messages to search given ({limit}/2000)')
    before = ctx.message if before is None else discord.Object(id=before)
    if after is not None:
        after = discord.Object(id=after)
    try:
        deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=predicate)
    except discord.Forbidden:
        # Fixed: exception was bound `as e` but never used.
        return await ctx.send('I do not have permissions to delete messages.')
    except discord.HTTPException as e:
        return await ctx.send(f'Error: {e} (try a smaller search?)')
    spammers = Counter(m.author.display_name for m in deleted)
    deleted = len(deleted)
    messages = [f'{deleted} message{" was" if deleted == 1 else "s were"} removed.']
    if deleted:
        messages.append('')
        spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)
        messages.extend(f'**{name}**: {count}' for name, count in spammers)
    to_send = '\n'.join(messages)
    # Fall back to a short summary when the report exceeds Discord's
    # 2000-character message limit.
    if len(to_send) > 2000:
        await ctx.send(f'Successfully removed {deleted} messages.', delete_after=10)
    else:
        await ctx.send(to_send, delete_after=10)
@remove.command()
async def embeds(self, ctx, search=100):
    """Removes messages that have embeds in them."""
    def has_embeds(message):
        return len(message.embeds)
    await self.do_removal(ctx, search, has_embeds)
@remove.command()
async def files(self, ctx, search=100):
    """Removes messages that have attachments in them."""
    def has_attachments(message):
        return len(message.attachments)
    await self.do_removal(ctx, search, has_attachments)
@remove.command()
async def images(self, ctx, search=100):
    """Removes messages that have embeds or attachments."""
    def has_media(message):
        return len(message.embeds) or len(message.attachments)
    await self.do_removal(ctx, search, has_media)
@remove.command(name='all')
async def _remove_all(self, ctx, search=100):
    """Removes all messages."""
    def match_everything(_message):
        return True
    await self.do_removal(ctx, search, match_everything)
@remove.command()
async def user(self, ctx, member: discord.Member, search=100):
    """Removes all messages by the member."""
    def authored_by_member(message):
        return message.author == member
    await self.do_removal(ctx, search, authored_by_member)
@remove.command()
async def contains(self, ctx, *, substr: str):
    """Removes all messages containing a substring.

    The substring must be at least 3 characters long.
    """
    # Reject very short substrings to avoid accidental mass matches.
    if len(substr) >= 3:
        await self.do_removal(ctx, 100, lambda e: substr in e.content)
    else:
        await ctx.send('The substring length must be at least 3 characters.')
@remove.command(name='bot')
async def _bot(self, ctx, prefix=None, search=100):
    """Removes a bot user's messages and messages with their optional prefix."""
    def predicate(m):
        # Exclude webhook messages: they report author.bot but are not bot users.
        return (m.webhook_id is None and m.author.bot) or (prefix and m.content.startswith(prefix))
    await self.do_removal(ctx, search, predicate)
@remove.command(name='emoji')
async def _emoji(self, ctx, search=100):
    """Removes all messages containing custom emoji."""
    # Matches Discord custom-emoji markup, e.g. <:name:123456789>.
    custom_emoji = re.compile(r'<:(\w+):(\d+)>')
    await self.do_removal(ctx, search, lambda m: custom_emoji.search(m.content))
@remove.command(name='reactions')
async def _reactions(self, ctx, search=100):
    """Removes all reactions from messages that have them."""
    if search > 2000:
        return await ctx.send(f'Too many messages to search for ({search}/2000)')
    total_reactions = 0
    # Messages themselves are kept; only their reactions are cleared.
    async for message in ctx.history(limit=search, before=ctx.message):
        if len(message.reactions):
            total_reactions += sum(r.count for r in message.reactions)
            await message.clear_reactions()
    await ctx.send(f'Successfully removed {total_reactions} reactions.')
@remove.command()
async def custom(self, ctx, *, args: str):
    """A more advanced purge command.

    This command uses a powerful "command line" syntax.
    Most options support multiple values to indicate 'any' match.
    If the value has spaces it must be quoted.

    The messages are only deleted if all options are met unless
    the `--or` flag is passed, in which case only if any is met.

    The following options are valid.

    `--user`: A mention or name of the user to remove.
    `--contains`: A substring to search for in the message.
    `--starts`: A substring to search if the message starts with.
    `--ends`: A substring to search if the message ends with.
    `--search`: How many messages to search. Default 100. Max 2000.
    `--after`: Messages must come after this message ID.
    `--before`: Messages must come before this message ID.

    Flag options (no arguments):

    `--bot`: Check if it's a bot user.
    `--embeds`: Check if the message has embeds.
    `--files`: Check if the message has attachments.
    `--emoji`: Check if the message has custom emoji.
    `--reactions`: Check if the message has reactions
    `--or`: Use logical OR for all options.
    `--not`: Use logical NOT for all options.
    """
    # Flag options store their predicate directly via `const`, so args.bot
    # etc. are either None or a ready-to-use callable.
    parser = Arguments(add_help=False, allow_abbrev=False)
    parser.add_argument('--user', nargs='+')
    parser.add_argument('--contains', nargs='+')
    parser.add_argument('--starts', nargs='+')
    parser.add_argument('--ends', nargs='+')
    parser.add_argument('--or', action='store_true', dest='_or')
    parser.add_argument('--not', action='store_true', dest='_not')
    parser.add_argument('--emoji', action='store_true')
    parser.add_argument('--bot', action='store_const', const=lambda m: m.author.bot)
    parser.add_argument('--embeds', action='store_const', const=lambda m: len(m.embeds))
    parser.add_argument('--files', action='store_const', const=lambda m: len(m.attachments))
    parser.add_argument('--reactions', action='store_const', const=lambda m: len(m.reactions))
    parser.add_argument('--search', type=int, default=100)
    parser.add_argument('--after', type=int)
    parser.add_argument('--before', type=int)
    try:
        args = parser.parse_args(shlex.split(args))
    except Exception as e:
        # Arguments (a custom ArgumentParser) raises instead of exiting;
        # surface the parse error to the invoker.
        await ctx.send(str(e))
        return
    predicates = []
    if args.bot:
        predicates.append(args.bot)
    if args.embeds:
        predicates.append(args.embeds)
    if args.files:
        predicates.append(args.files)
    if args.reactions:
        predicates.append(args.reactions)
    if args.emoji:
        custom_emoji = re.compile(r'<:(\w+):(\d+)>')
        predicates.append(lambda m: custom_emoji.search(m.content))
    if args.user:
        users = []
        converter = commands.MemberConverter()
        for u in args.user:
            try:
                user = await converter.convert(ctx, u)
                users.append(user)
            except Exception as e:
                await ctx.send(str(e))
                return
        predicates.append(lambda m: m.author in users)
    if args.contains:
        predicates.append(lambda m: any(sub in m.content for sub in args.contains))
    if args.starts:
        predicates.append(lambda m: any(m.content.startswith(s) for s in args.starts))
    if args.ends:
        predicates.append(lambda m: any(m.content.endswith(s) for s in args.ends))
    # --or switches the combinator; --not inverts the final result.
    op = all if not args._or else any
    def predicate(m):
        r = op(p(m) for p in predicates)
        if args._not:
            return not r
        return r
    args.search = max(0, min(2000, args.search)) # clamp from 0-2000
    await self.do_removal(ctx, args.search, predicate, before=args.before, after=args.after)
def setup(bot):
    # discord.py extension entry point: register the Mod cog.
    bot.add_cog(Mod(bot))
|
import asyncio
import os
import random
import re
from concurrent import futures
from typing import Dict, List, Tuple, Union
import discord
from discord.ext import commands
from discord.ext.commands import Cog
from bot import BeattieBot
from context import BContext
from utils.genesys import die_names, genesysroller
# Positional argument tuple for roller(): (num, sides, lo_drop, hi_drop, mod, times).
RollArg = Tuple[int, int, int, int, int, int]
# Aliases for nested roll results: L2 is one pool of dice, L1 a list of pools.
L2 = List[int]
L1 = List[L2]
# Parses dice expressions like "4d6v1x6t": optional count, sides, +/- modifier,
# ^/v drop clause, xN repeat count, and trailing sort/total flags.
ROLL_EXPR = re.compile(
    r"^(?:(?P<num>\d*)d)?(?P<sides>\d+)(?:[+-](?P<mod>\d+))?"
    r"(?:[v^](?P<drop>\d+))?(?:x(?P<times>\d+))?(?:[ts]{1,2})?$"
)
# Shadowrun rolls: a dice count with optional trailing "e" (edge).
SHADOWRUN_EXPR = re.compile(r"^\d+e?$")
# Genesys rolls: one or more count+die-letter pairs, e.g. "3a2p".
GENESYS_ROLL_EXPR = re.compile(r"^(?:\d+[a-z])+$")
GENESYS_DIE_EXPR = re.compile(r"\d+[a-z]")
# Extracts the card name from a tarot image path, e.g. "data/tarot/major/II_name.jpg".
TAROT_EXPR = re.compile(r"(?:\w+/)+[IVX0_]*([\w_]+)\.jpg")
TAROT_URL = "https://www.trustedtarot.com/cards/{}/"
class RPG(Cog):
    """Dice-rolling, choice, and tarot commands."""

    def __init__(self, bot: BeattieBot):
        self.loop = bot.loop

    @commands.command()
    async def choose(self, ctx: BContext, *options: str) -> None:
        """Choose between some options. Use quotes if they have spaces."""
        len_ = len(options)
        if len_ == 0:
            await ctx.send("Choose nothing? Is this some sort of metaphor?")
        elif len_ == 1:
            await ctx.send("That's not much of a choice!")
        else:
            choice = random.choice(options)
            # Disable mentions in case an option contains a ping.
            await ctx.send(
                f"I choose {choice}",
                allowed_mentions=discord.AllowedMentions(
                    everyone=False, users=False, roles=False
                ),
            )

    @commands.command()
    async def tarot(self, ctx: BContext, *suits: str) -> None:
        """Get a random tarot card.

        You can specify the suits from which to pull, options are:
        minor:
            cups
            swords
            wands
            pentacles
        major"""
        async with ctx.typing():
            cards = []
            if not suits:
                suits = ("cups", "swords", "wands", "pentacles", "major")
            if "minor" in suits:
                suits = suits + ("cups", "swords", "wands", "pentacles")
            suit_set = set(suit.lower() for suit in suits)
            for root, _dirs, files in os.walk("data/tarot"):
                if any(suit in root for suit in suit_set):
                    cards += [f"{root}/{card}" for card in files]
            try:
                card = random.choice(cards).replace("\\", "/")
            except IndexError:
                await ctx.send("Please specify a valid suit, or no suit.")
                return
            match = TAROT_EXPR.match(card)
            assert match is not None
            name = match.groups()[0].replace("_", " ")
            url = TAROT_URL.format(name.lower().replace(" ", "-"))
            embed = discord.Embed()
            embed.title = name
            embed.url = url
            filename = card.rpartition("/")[2]
            # BUG FIX: the attachment URL must reference the uploaded file's
            # name; `filename` was computed but never interpolated, leaving a
            # broken embed image.
            embed.set_image(url=f"attachment://{filename}")
            await ctx.send(file=discord.File(card, filename=filename), embed=embed)

    @commands.command(aliases=["r"])
    async def roll(self, ctx: BContext, *, roll: str = "1d20") -> None:
        """Roll some dice!

        Can roll multiple dice of any size, with modifiers.
        Format: XdY([^v]Z)([+-]W)(xN)(s)(t)
        X is the number of dice
        Y is the number of sides
        ^ drops the Z highest dice
        v drops the Z lowest dice
        + adds W to the result
        - subtracts W from the result
        x repeats the roll N times
        s sorts the results
        t totals each roll
        """
        if roll == "stats":
            # Preset: D&D ability scores (4d6 drop lowest, six times, totaled).
            roll = "4d6v1x6t"
        roll = "".join(roll.split()).lower()
        if (match := ROLL_EXPR.match(roll)) is None:
            raise commands.BadArgument
        parts: Dict[str, int] = {
            k: int(v) if v else 0 for k, v in match.groupdict().items()
        }
        num = parts["num"] or 1
        if (sides := parts["sides"]) == 0:
            raise commands.BadArgument
        hi_drop = 0
        lo_drop = 0
        if (mod := parts["mod"]) and "-" in roll:
            mod = -mod
        if drop := parts["drop"]:
            if drop >= num:
                raise commands.BadArgument
            if "^" in roll:
                hi_drop = drop
            else:
                lo_drop = drop
        times = parts["times"] or 1
        # Distinct name for the positional tuple; the original rebound the
        # Dict `args` with an incompatible tuple type.
        roll_args: RollArg = (num, sides, lo_drop, hi_drop, mod, times)
        future = self.loop.run_in_executor(None, roller, *roll_args)
        async with ctx.typing():
            # The `loop` kwarg to wait_for was deprecated in 3.8 and removed
            # in 3.10; the running loop is inferred.
            result = await asyncio.wait_for(future, 10)
        # Normalize the echoed expression so it always reads "XdY...".
        if "d" not in roll:
            roll = f"1d{roll}"
        elif roll[0] == "d":
            roll = f"1{roll}"
        total = "t" in roll
        if total:
            result = [[sum(roll_)] for roll_ in result]
        if "s" in roll:
            for roll_ in result:
                roll_.sort()
            result.sort()
        out = denest(result)
        await ctx.reply(f"{roll}: {out}")

    @roll.error
    async def roll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly messages for bad input, slow rolls, and oversized output."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, (commands.MissingRequiredArgument, commands.BadArgument)):
            await ctx.send(
                "Invalid input. Valid input examples:"
                "\n1d20+3"
                "\n1d6"
                "\n2d8-4"
                "\n2d20+2v1"
                "\n4d6v1x6t"
            )
        elif isinstance(e, asyncio.TimeoutError):
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        elif isinstance(e, discord.HTTPException):
            await ctx.reply("Your results were too long. Maybe sum them?")
        else:
            await ctx.bot.handle_error(ctx, e)

    @commands.command(aliases=["shadroll", "sr"])
    async def shadowroll(self, ctx: BContext, *, inp: str) -> None:
        """Roll some dice - for Shadowrun!

        Format: N[e]
        Roll N six-sided dice and return the number of dice that rolled 5 or 6.
        If you put "e" after the number, 6s are counted and then rerolled."""
        inp = inp.strip()
        if not SHADOWRUN_EXPR.match(inp):
            raise commands.BadArgument
        edge = "e" in inp
        num = int(inp.rstrip("e"))
        future = self.loop.run_in_executor(None, shadowroller, num, edge)
        async with ctx.typing():
            result = await asyncio.wait_for(future, 10)
        await ctx.reply(result)

    @shadowroll.error
    async def shadowroll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly messages for bad input and slow rolls."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, (commands.MissingRequiredArgument, commands.BadArgument)):
            await ctx.send("Invalid input. Valid input examples:" "\n6" "\n13e")
        elif isinstance(e, asyncio.TimeoutError):
            # BUG FIX: asyncio.wait_for raises asyncio.TimeoutError, not
            # concurrent.futures.TimeoutError (distinct before Python 3.11),
            # so the original branch could never match.
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        else:
            await ctx.bot.handle_error(ctx, e)

    @commands.command(aliases=["gr"])
    async def genesysroll(self, ctx: BContext, *, inp: str) -> None:
        """Roll some dice - for Fantasy Flight Genesys!

        Available dice:
        b[oost]
        a[bility]
        p[roficiency]
        s[etback]
        d[ifficulty]
        c[hallenge]
        f[orce]

        Input examples:
        4a3d
        3a2p1b4d1c
        2f"""
        inp = inp.lower()
        if GENESYS_ROLL_EXPR.match(inp) is None:
            raise commands.BadArgument
        dice = {}
        # Renamed the loop variable; the original rebound `match` used above.
        for die_match in GENESYS_DIE_EXPR.finditer(inp):
            token = die_match.group(0)
            count = int(token[:-1])
            die_code = token[-1]
            try:
                die = die_names[die_code]
            except KeyError:
                await ctx.send(f'Die "{die_code}" does not exist.')
                return
            dice[die] = count
        future = self.loop.run_in_executor(None, lambda: genesysroller(**dice))
        async with ctx.typing():
            try:
                result = await asyncio.wait_for(future, 10)
            except ValueError:
                await ctx.send("Force dice cannot be used with other dice.")
            else:
                await ctx.reply(str(result))

    @genesysroll.error
    async def genesysroll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly message for slow rolls; defers everything else to the bot."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, asyncio.TimeoutError):
            # BUG FIX: match the exception actually raised by asyncio.wait_for.
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        else:
            await ctx.bot.handle_error(ctx, e)
def roller(
    num: int = 1,
    sides: int = 20,
    lo_drop: int = 0,
    hi_drop: int = 0,
    mod: int = 0,
    times: int = 1,
) -> List[List[int]]:
    """Roll `times` pools of `num` d`sides`, optionally dropping the lowest
    `lo_drop` / highest `hi_drop` dice, and collapsing each pool to a single
    modified total when `mod` is nonzero."""
    results: List[List[int]] = []
    for _ in range(times):
        pool = [random.randint(1, sides) for _ in range(num)]
        if lo_drop or hi_drop:
            ordered = sorted(pool)
            # Remove one occurrence of each dropped value from the pool.
            for val in ordered[:lo_drop] + ordered[num - hi_drop:]:
                pool.remove(val)
        if mod:
            # A modifier collapses the pool into one modified total.
            pool = [sum(pool) + mod]
        results.append(pool)
    return results
def shadowroller(num: int, edge: bool = False) -> str:
    """Count Shadowrun hits (5s and 6s) on `num` d6 and report glitches.

    With `edge`, sixes explode: they are counted and then rerolled until a
    batch produces no sixes. A glitch occurs when at least half of all dice
    rolled show 1; with zero hits it is a critical glitch."""
    rolls = 0
    hits = 0
    ones = 0
    while True:
        sixes = 0
        rolls += num
        for _ in range(num):
            die = random.randint(1, 6)
            if die > 4:
                hits += 1
                if die == 6:
                    sixes += 1
            elif die == 1:
                ones += 1
        if not (edge and sixes):
            break
        # Rule of six: reroll the exploding dice.
        num = sixes
    plural = "" if hits == 1 else "s"
    if ones < rolls / 2:
        return f"{hits} hit{plural}."
    if hits == 0:
        return "Critical glitch."
    return f"Glitch with {hits} hit{plural}."
def denest(rolls: List[List[int]]) -> str:
    """Strip redundant nesting from roll results before display."""
    # Collapse single-element inner lists, then a single-element outer list.
    flat: Union[List[List[int]], List[int]]
    flat = [roll[0] for roll in rolls] if len(rolls[0]) == 1 else rolls
    item: Union[List[List[int]], List[int], int]
    item = flat[0] if len(flat) == 1 else flat
    return str(item)
def setup(bot: BeattieBot) -> None:
    # discord.py extension entry point: register the RPG cog.
    bot.add_cog(RPG(bot))
Make the code just a bit less ugly
import asyncio
import os
import random
import re
from concurrent import futures
from typing import Dict, List, Tuple, Union
import discord
from discord.ext import commands
from discord.ext.commands import Cog
from bot import BeattieBot
from context import BContext
from utils.genesys import die_names, genesysroller
# Positional argument tuple for roller(): (num, sides, lo_drop, hi_drop, mod, times).
RollArg = Tuple[int, int, int, int, int, int]
# Aliases for nested roll results: L2 is one pool of dice, L1 a list of pools.
L2 = List[int]
L1 = List[L2]
# Parses dice expressions like "4d6v1x6t": optional count, sides, +/- modifier,
# ^/v drop clause, xN repeat count, and trailing sort/total flags.
ROLL_EXPR = re.compile(
    r"^(?:(?P<num>\d*)d)?(?P<sides>\d+)(?:[+-](?P<mod>\d+))?"
    r"(?:[v^](?P<drop>\d+))?(?:x(?P<times>\d+))?(?:[ts]{1,2})?$"
)
# Shadowrun rolls: a dice count with optional trailing "e" (edge).
SHADOWRUN_EXPR = re.compile(r"^\d+e?$")
# Genesys rolls: one or more count+die-letter pairs, e.g. "3a2p".
GENESYS_ROLL_EXPR = re.compile(r"^(?:\d+[a-z])+$")
GENESYS_DIE_EXPR = re.compile(r"\d+[a-z]")
# Extracts the card name from a tarot image path, e.g. "data/tarot/major/II_name.jpg".
TAROT_EXPR = re.compile(r"(?:\w+/)+[IVX0_]*([\w_]+)\.jpg")
TAROT_URL = "https://www.trustedtarot.com/cards/{}/"
class RPG(Cog):
    """Dice-rolling, choice, and tarot commands."""

    def __init__(self, bot: BeattieBot):
        self.loop = bot.loop

    @commands.command()
    async def choose(self, ctx: BContext, *options: str) -> None:
        """Choose between some options. Use quotes if they have spaces."""
        len_ = len(options)
        if len_ == 0:
            await ctx.send("Choose nothing? Is this some sort of metaphor?")
        elif len_ == 1:
            await ctx.send("That's not much of a choice!")
        else:
            choice = random.choice(options)
            # Disable mentions in case an option contains a ping.
            await ctx.send(
                f"I choose {choice}",
                allowed_mentions=discord.AllowedMentions(
                    everyone=False, users=False, roles=False
                ),
            )

    @commands.command()
    async def tarot(self, ctx: BContext, *suits: str) -> None:
        """Get a random tarot card.

        You can specify the suits from which to pull, options are:
        minor:
            cups
            swords
            wands
            pentacles
        major"""
        async with ctx.typing():
            cards = []
            if not suits:
                suits = ("cups", "swords", "wands", "pentacles", "major")
            if "minor" in suits:
                suits = suits + ("cups", "swords", "wands", "pentacles")
            suit_set = set(suit.lower() for suit in suits)
            for root, _dirs, files in os.walk("data/tarot"):
                if any(suit in root for suit in suit_set):
                    cards += [f"{root}/{card}" for card in files]
            try:
                card = random.choice(cards).replace("\\", "/")
            except IndexError:
                await ctx.send("Please specify a valid suit, or no suit.")
                return
            match = TAROT_EXPR.match(card)
            assert match is not None
            name = match.groups()[0].replace("_", " ")
            url = TAROT_URL.format(name.lower().replace(" ", "-"))
            embed = discord.Embed()
            embed.title = name
            embed.url = url
            filename = card.rpartition("/")[2]
            # BUG FIX: the attachment URL must reference the uploaded file's
            # name; `filename` was computed but never interpolated, leaving a
            # broken embed image.
            embed.set_image(url=f"attachment://{filename}")
            await ctx.send(file=discord.File(card, filename=filename), embed=embed)

    @commands.command(aliases=["r"])
    async def roll(self, ctx: BContext, *, roll: str = "1d20") -> None:
        """Roll some dice!

        Can roll multiple dice of any size, with modifiers.
        Format: XdY([^v]Z)([+-]W)(xN)(s)(t)
        X is the number of dice
        Y is the number of sides
        ^ drops the Z highest dice
        v drops the Z lowest dice
        + adds W to the result
        - subtracts W from the result
        x repeats the roll N times
        s sorts the results
        t totals each roll
        """
        if roll == "stats":
            # Preset: D&D ability scores (4d6 drop lowest, six times, totaled).
            roll = "4d6v1x6t"
        roll = "".join(roll.split()).lower()
        if (match := ROLL_EXPR.match(roll)) is None:
            raise commands.BadArgument
        parts: Dict[str, int] = {
            k: int(v) if v else 0 for k, v in match.groupdict().items()
        }
        num = parts["num"] or 1
        if (sides := parts["sides"]) == 0:
            raise commands.BadArgument
        hi_drop = 0
        lo_drop = 0
        if (mod := parts["mod"]) and "-" in roll:
            mod = -mod
        if drop := parts["drop"]:
            if drop >= num:
                raise commands.BadArgument
            if "^" in roll:
                hi_drop = drop
            else:
                lo_drop = drop
        times = parts["times"] or 1
        # Distinct name for the positional tuple; the original rebound the
        # Dict `args` with an incompatible tuple type.
        roll_args: RollArg = (num, sides, lo_drop, hi_drop, mod, times)
        future = self.loop.run_in_executor(None, roller, *roll_args)
        async with ctx.typing():
            # The `loop` kwarg to wait_for was deprecated in 3.8 and removed
            # in 3.10; the running loop is inferred.
            result = await asyncio.wait_for(future, 10)
        # Normalize the echoed expression so it always reads "XdY...".
        if "d" not in roll:
            roll = f"1d{roll}"
        elif roll[0] == "d":
            roll = f"1{roll}"
        total = "t" in roll
        if total:
            result = [[sum(roll_)] for roll_ in result]
        if "s" in roll:
            for roll_ in result:
                roll_.sort()
            result.sort()
        out = denest(result)
        await ctx.reply(f"{roll}: {out}")

    @roll.error
    async def roll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly messages for bad input, slow rolls, and oversized output."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, (commands.MissingRequiredArgument, commands.BadArgument)):
            await ctx.send(
                "Invalid input. Valid input examples:"
                "\n1d20+3"
                "\n1d6"
                "\n2d8-4"
                "\n2d20+2v1"
                "\n4d6v1x6t"
            )
        elif isinstance(e, asyncio.TimeoutError):
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        elif isinstance(e, discord.HTTPException):
            await ctx.reply("Your results were too long. Maybe sum them?")
        else:
            await ctx.bot.handle_error(ctx, e)

    @commands.command(aliases=["shadroll", "sr"])
    async def shadowroll(self, ctx: BContext, *, inp: str) -> None:
        """Roll some dice - for Shadowrun!

        Format: N[e]
        Roll N six-sided dice and return the number of dice that rolled 5 or 6.
        If you put "e" after the number, 6s are counted and then rerolled."""
        inp = inp.strip()
        if not SHADOWRUN_EXPR.match(inp):
            raise commands.BadArgument
        edge = "e" in inp
        num = int(inp.rstrip("e"))
        future = self.loop.run_in_executor(None, shadowroller, num, edge)
        async with ctx.typing():
            result = await asyncio.wait_for(future, 10)
        await ctx.reply(result)

    @shadowroll.error
    async def shadowroll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly messages for bad input and slow rolls."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, (commands.MissingRequiredArgument, commands.BadArgument)):
            await ctx.send("Invalid input. Valid input examples:" "\n6" "\n13e")
        elif isinstance(e, asyncio.TimeoutError):
            # BUG FIX: asyncio.wait_for raises asyncio.TimeoutError, not
            # concurrent.futures.TimeoutError (distinct before Python 3.11),
            # so the original branch could never match.
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        else:
            await ctx.bot.handle_error(ctx, e)

    @commands.command(aliases=["gr"])
    async def genesysroll(self, ctx: BContext, *, inp: str) -> None:
        """Roll some dice - for Fantasy Flight Genesys!

        Available dice:
        b[oost]
        a[bility]
        p[roficiency]
        s[etback]
        d[ifficulty]
        c[hallenge]
        f[orce]

        Input examples:
        4a3d
        3a2p1b4d1c
        2f"""
        inp = inp.lower()
        if GENESYS_ROLL_EXPR.match(inp) is None:
            raise commands.BadArgument
        dice = {}
        # Renamed the loop variable; the original rebound `match` used above.
        for die_match in GENESYS_DIE_EXPR.finditer(inp):
            token = die_match.group(0)
            count = int(token[:-1])
            die_code = token[-1]
            try:
                die = die_names[die_code]
            except KeyError:
                await ctx.send(f'Die "{die_code}" does not exist.')
                return
            dice[die] = count
        future = self.loop.run_in_executor(None, lambda: genesysroller(**dice))
        async with ctx.typing():
            try:
                result = await asyncio.wait_for(future, 10)
            except ValueError:
                await ctx.send("Force dice cannot be used with other dice.")
            else:
                await ctx.reply(str(result))

    @genesysroll.error
    async def genesysroll_error(self, ctx: BContext, e: Exception) -> None:
        """Friendly message for slow rolls; defers everything else to the bot."""
        if isinstance(e, commands.CommandInvokeError):
            e = e.original
        if isinstance(e, asyncio.TimeoutError):
            # BUG FIX: match the exception actually raised by asyncio.wait_for.
            await ctx.reply("Your execution took too long. Roll fewer dice.")
        else:
            await ctx.bot.handle_error(ctx, e)
def roller(
    num: int = 1,
    sides: int = 20,
    lo_drop: int = 0,
    hi_drop: int = 0,
    mod: int = 0,
    times: int = 1,
) -> List[List[int]]:
    """Roll `times` pools of `num` d`sides`, optionally dropping the lowest
    `lo_drop` / highest `hi_drop` dice, and collapsing each pool to a single
    modified total when `mod` is nonzero."""
    results: List[List[int]] = []
    for _ in range(times):
        pool = [random.randint(1, sides) for _ in range(num)]
        if lo_drop or hi_drop:
            ordered = sorted(pool)
            # Remove one occurrence of each dropped value from the pool.
            for val in ordered[:lo_drop] + ordered[num - hi_drop:]:
                pool.remove(val)
        if mod:
            # A modifier collapses the pool into one modified total.
            pool = [sum(pool) + mod]
        results.append(pool)
    return results
def shadowroller(num: int, edge: bool = False) -> str:
    """Count Shadowrun hits (5s and 6s) on `num` d6 and report glitches.

    With `edge`, sixes explode: they are counted and then rerolled until a
    batch produces no sixes. A glitch occurs when at least half of all dice
    rolled show 1; with zero hits it is a critical glitch."""
    rolls = 0
    hits = 0
    ones = 0
    while True:
        sixes = 0
        rolls += num
        for _ in range(num):
            die = random.randint(1, 6)
            if die > 4:
                hits += 1
                if die == 6:
                    sixes += 1
            elif die == 1:
                ones += 1
        if not (edge and sixes):
            break
        # Rule of six: reroll the exploding dice.
        num = sixes
    plural = "" if hits == 1 else "s"
    if ones < rolls / 2:
        return f"{hits} hit{plural}."
    if hits == 0:
        return "Critical glitch."
    return f"Glitch with {hits} hit{plural}."
def denest(rolls: List[List[int]]) -> str:
    """Strip redundant nesting from roll results before display.

    Fixed: the original rebound the `rolls` parameter twice with conflicting
    annotations, which is a redefinition error for type checkers; distinct
    names are used for each narrowing step instead.
    """
    # Collapse single-element inner lists, then a single-element outer list.
    flat: Union[List[List[int]], List[int]]
    flat = [roll[0] for roll in rolls] if len(rolls[0]) == 1 else rolls
    item: Union[List[List[int]], List[int], int]
    item = flat[0] if len(flat) == 1 else flat
    return str(item)
def setup(bot: BeattieBot) -> None:
    # discord.py extension entry point: register the RPG cog.
    bot.add_cog(RPG(bot))
|
"""
Class Name : ACL
Description:
Class that handles Access Control Lists for bot users
This class serves as a RESTful abstraction to the user database
Contributors:
- Patrick Hennessy
- Aleksandr Tihomirov
License:
Arcbot is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License v3; as published
by the Free Software Foundation
"""
from peewee import *
from core.Database import *
# Peewee model for one ACL entry, keyed by the user's ID.
class ACLUser(Model):
    id = IntegerField(primary_key=True)
    # Access level, clamped to 0..1000 by database CHECK constraints.
    access = IntegerField(default=0, constraints=[Check('access >= 0'), Check('access <= 1000') ])
    # Whether this user is the bot owner.
    owner = BooleanField(default=False)
class ACL():
    """Access-control layer backed by the ACLUser table."""

    def __init__(self):
        self.database = Database(databaseName="databases/ACL.db")
        self.database.addTable(ACLUser)

    def getOwner(self):
        """
        Summary:
            Returns the ID of the bot owner if there is one

        Args:
            None

        Returns:
            (String): ID of bot owner, or None when no owner is set
        """
        try:
            user = ACLUser.get(owner=True)
            return user.id
        except Exception:
            # No owner row exists; peewee's .get raises rather than returning None.
            return None

    def setOwner(self, uid):
        """
        Summary:
            Sets the bot owner

        Args:
            uid (str): ID of the target user

        Returns:
            None
        """
        user, created = self.database.ACLUser.get_or_create(id=uid)
        if created:
            # New rows get full access; get_or_create already set the id.
            user.access = 1000
        user.owner = True
        user.save()

    def setAccess(self, uid, access):
        """
        Summary:
            Sets the database access for a specific user

        Args:
            uid (str): ID of the target user
            access (int): new access level (0-1000)

        Returns:
            (Bool): Successful or Not
        """
        # BUG FIX: the original compared the bound method itself
        # (`self.getOwner == uid`), which is never true, so the owner's
        # access could be modified. Call the method instead.
        if self.getOwner() == uid:
            return False
        user, _created = self.database.ACLUser.get_or_create(id=uid)
        user.access = access
        user.save()
        return True

    def getAccess(self, uid):
        """
        Summary:
            Gets the database access for the specified user

        Args:
            uid (str): ID of the target user

        Returns:
            (Int): Access level of the target user, or -1 if not found
        """
        try:
            user = ACLUser.get(id=uid)
            return user.access
        except Exception:
            return -1

    def deleteUser(self, uid):
        """
        Summary:
            Deletes a user from the ACL database

        Args:
            uid (str): ID of the target user

        Returns:
            (Bool): Successful or Not
        """
        try:
            # NOTE(review): peewee's .get raises DoesNotExist for a missing
            # row, so the except path is what handles unknown users.
            user = self.database.ACLUser.get(id=uid)
            if user:
                user.delete_instance()
                return True
            else:
                return False
        except Exception:
            return False
Changed file header comments
"""
Class Name : ACL
Description:
Class that handles Access Control Lists for bot users
This class serves as a RESTful abstraction to the user database
Contributors:
- Patrick Hennessy
License:
Arcbot is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License v3; as published
by the Free Software Foundation
"""
from peewee import *
from core.Database import *
# Peewee model for one ACL entry, keyed by the user's ID.
class ACLUser(Model):
    id = IntegerField(primary_key=True)
    # Access level, clamped to 0..1000 by database CHECK constraints.
    access = IntegerField(default=0, constraints=[Check('access >= 0'), Check('access <= 1000') ])
    # Whether this user is the bot owner.
    owner = BooleanField(default=False)
class ACL():
    """Access-control layer backed by the ACLUser table."""

    def __init__(self):
        self.database = Database(databaseName="databases/ACL.db")
        self.database.addTable(ACLUser)

    def getOwner(self):
        """
        Summary:
            Returns the ID of the bot owner if there is one

        Args:
            None

        Returns:
            (String): ID of bot owner, or None when no owner is set
        """
        try:
            user = ACLUser.get(owner=True)
            return user.id
        except Exception:
            # No owner row exists; peewee's .get raises rather than returning None.
            return None

    def setOwner(self, uid):
        """
        Summary:
            Sets the bot owner

        Args:
            uid (str): ID of the target user

        Returns:
            None
        """
        user, created = self.database.ACLUser.get_or_create(id=uid)
        if created:
            # New rows get full access; get_or_create already set the id.
            user.access = 1000
        user.owner = True
        user.save()

    def setAccess(self, uid, access):
        """
        Summary:
            Sets the database access for a specific user

        Args:
            uid (str): ID of the target user
            access (int): new access level (0-1000)

        Returns:
            (Bool): Successful or Not
        """
        # BUG FIX: the original compared the bound method itself
        # (`self.getOwner == uid`), which is never true, so the owner's
        # access could be modified. Call the method instead.
        if self.getOwner() == uid:
            return False
        user, _created = self.database.ACLUser.get_or_create(id=uid)
        user.access = access
        user.save()
        return True

    def getAccess(self, uid):
        """
        Summary:
            Gets the database access for the specified user

        Args:
            uid (str): ID of the target user

        Returns:
            (Int): Access level of the target user, or -1 if not found
        """
        try:
            user = ACLUser.get(id=uid)
            return user.access
        except Exception:
            return -1

    def deleteUser(self, uid):
        """
        Summary:
            Deletes a user from the ACL database

        Args:
            uid (str): ID of the target user

        Returns:
            (Bool): Successful or Not
        """
        try:
            # NOTE(review): peewee's .get raises DoesNotExist for a missing
            # row, so the except path is what handles unknown users.
            user = self.database.ACLUser.get(id=uid)
            if user:
                user.delete_instance()
                return True
            else:
                return False
        except Exception:
            return False
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as onp
import six
from six.moves import reduce
from .. import core
from .. import linear_util as lu
from ..core import Trace, Tracer, Primitive, new_master
from ..abstract_arrays import ShapedArray, ConcreteArray, make_shaped_array
from ..util import safe_zip, unzip2, unzip3, partialmethod, prod
from ..lib import xla_bridge as xb
from . import partial_eval as pe
from . import batching
zip = safe_zip
def identity(x): return x
### pmap
def pmap(fun, name, in_vals, in_axes, out_axis_target):
  """Map `fun` over one axis of each input, binding `name` for collectives.

  Args:
    fun: a linear_util-wrapped function.
    name: axis name bound for collective primitives inside `fun`.
    in_vals: argument values.
    in_axes: per-argument mapped axis position (None for unmapped args).
    out_axis_target: position the mapped output axis is moved to.
  """
  sizes = reduce(set.union, map(batching.dimsize, in_axes, in_vals))
  if not sizes:
    # no argument is mapped: call straight through
    return fun.call_wrapped(*in_vals)
  elif len(sizes) == 1:
    fun, out_axis = pmap_transform(fun, name, in_axes)
    out_val = fun.call_wrapped(*in_vals)
    # move the mapped output axis to the requested position
    return batching.moveaxis(sizes.pop(), out_axis_target, out_axis(), out_val)
  else:
    raise TypeError("got inconsistent map dimension sizes: {}".format(sizes))
@lu.transformation_with_aux
def pmap_transform(name, axes, *vals):
  # Trace the wrapped function with PmapTracers; the aux output is the
  # mapped axis of the result.
  with new_master(PmapTrace) as master:
    trace = PmapTrace(master, core.cur_sublevel())
    in_tracers = map(partial(PmapTracer, trace, name), vals, axes)
    ans = yield in_tracers
    out_tracer = trace.full_raise(ans)
    out_val, out_axis = out_tracer.val, out_tracer.axis
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val, out_axis
@lu.transformation_with_aux
def pmap_subtrace(master, name, axes, *vals):
  # Like pmap_transform, but reuses an existing trace master (used when a
  # call primitive is encountered under an outer pmap trace).
  trace = PmapTrace(master, core.cur_sublevel())
  ans = yield map(partial(PmapTracer, trace, name), vals, axes)
  out_tracer = trace.full_raise(ans)
  out_val, out_axis = out_tracer.val, out_tracer.axis
  yield out_val, out_axis
class PmapTracer(Tracer):
  """Tracer carrying a value, its mapped axis, and the bound axis name."""

  def __init__(self, trace, name, val, axis):
    self.trace = trace
    self.name = name    # axis name this tracer is mapped under (or None)
    self.val = val      # the underlying (batched) value
    self.axis = axis    # mapped axis: int, tuple (for tuple values), or None

  @property
  def aval(self):
    # abstract value with the mapped axis removed
    batched_aval = batching.get_aval(self.val)
    return batching.remove_batch_dim_from_aval(self.axis, batched_aval)

  def unpack(self):
    t = type(self.axis)
    if t is tuple:
      # tuple value with a per-element axis
      axes = self.axis
    elif t is int:
      # same mapped axis for every element
      axes = [self.axis] * len(self.val)
    elif t is type(None):
      # not mapped: unpack the raw value
      return tuple(self.val)
    else:
      raise TypeError(t)
    return map(partial(PmapTracer, self.trace, self.name), self.val, axes)

  def full_lower(self):
    if self.axis is None:
      # unmapped tracers can be lowered out of this trace
      return core.full_lower(self.val)
    else:
      return self
class PmapTrace(Trace):
  """Trace that interprets mapped values, dispatching collective primitives
  to pmap rules and everything else to the vmap batching rules."""

  def pure(self, val):
    # constants are unmapped
    return PmapTracer(self, None, val, None)

  def lift(self, val):
    return PmapTracer(self, None, val, None)

  def sublift(self, val):
    return PmapTracer(self, val.name, val.val, val.axis)

  def process_primitive(self, primitive, tracers, params):
    names_in, vals_in, axes_in = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes_in):
      # no mapped operands: evaluate normally
      return primitive.bind(*vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)  # all same
      if primitive in pmap_primitive_rules:
        # if it's a pmap collective primitive, do something special
        val_in, = vals_in
        axis_in, = axes_in
        if name == params['axis_name']:
          # if the name matches this tracer's name, apply the pmap rule
          rule = pmap_primitive_rules[primitive]
          params = {k: params[k] for k in params if k != 'axis_name'}
          val_out, axis_out = rule(val_in, axis_in, **params)
          return PmapTracer(self, name, val_out, axis_out)
        else:
          # if not, bind the primitive so that any other pmap tracers can see it
          val_out = primitive.bind(val_in, **params)
          return PmapTracer(self, name, val_out, axis_in)
      else:
        # if it's not a pmap collective primitive, act just like vmap
        rule = batching.get_primitive_batcher(primitive)
        val_out, axis_out = rule(vals_in, axes_in, **params)
        return PmapTracer(self, name, val_out, axis_out)

  def process_call(self, call_primitive, f, tracers, params):
    names, vals, axes = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes):
      return call_primitive.bind(f, *vals, **params)
    else:
      name = next(name for name in names if name is not None)  # all same
      # push the trace into the called function
      f, axis_out = pmap_subtrace(f, self.master, name, axes)
      val_out = call_primitive.bind(f, *vals, **params)
      return PmapTracer(self, name, val_out, axis_out())

  def post_process_call(self, _, out_tracer):
    # re-wrap a value that escaped a call primitive in a fresh trace
    name, val, axis = out_tracer.name, out_tracer.val, out_tracer.axis
    master = self.master
    def todo(x):
      trace = PmapTrace(master, core.cur_sublevel())
      return PmapTracer(trace, name, x, axis)
    return val, todo

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    axis = tuple(t.axis for t in tracers)
    name = next(t.name for t in tracers if t.name)
    return PmapTracer(self, name, vals, axis)
def unbound_name_error(primitive_name, *args, **kwargs):
  """Impl rule for collective primitives: fail because the axis name is unbound."""
  template = "axis name '{}' is unbound for primitive {}."
  raise NameError(template.format(kwargs['axis_name'], primitive_name))
def PmapPrimitive(name):
  """Make a collective primitive whose impl raises if its axis name is unbound."""
  prim = Primitive(name)
  prim.def_impl(partial(unbound_name_error, name))
  prim.def_abstract_eval(lambda x, *args, **kwargs: x)  # default
  return prim
# rules for collective primitives under pmap, and their XLA translations
pmap_primitive_rules = {}
parallel_translation_rules = {}
def psum(x, axis_name):
  """Collective sum of x over the mapped axis named `axis_name`."""
  return psum_p.bind(x, axis_name=axis_name)
def psum_pmap_rule(val, axis):
  """Collapse the mapped axis with a local sum; the result has no mapped axis."""
  summed = val.sum(axis)
  return summed, None
def psum_parallel_translation_rule(c, val, device_groups):
  """Emit an XLA CrossReplicaSum, grouped when more than one device group."""
  if len(device_groups) <= 1:
    return c.CrossReplicaSum(val)
  return c.CrossReplicaSum(val, device_groups)
# register psum as a collective primitive with its pmap and XLA rules
psum_p = PmapPrimitive('psum')
pmap_primitive_rules[psum_p] = psum_pmap_rule
parallel_translation_rules[psum_p] = psum_parallel_translation_rule
def gather(x, axis_name):
  """Collective gather of x over the mapped axis named `axis_name`."""
  return gather_p.bind(x, axis_name=axis_name)
def gather_pmap_rule(val, axis):
  """Under pmap the full mapped value is already present, so gather is an
  identity that drops the mapped axis."""
  result = (val, None)
  return result
# register gather as a collective primitive (identity under pmap)
gather_p = PmapPrimitive('gather')
pmap_primitive_rules[gather_p] = gather_pmap_rule
### axis variable splitting and computation chunking
@lu.transformation
def axisvar_split(name, new_names, *args):
  # Trace the wrapped function, splitting collectives over axis `name` into
  # collectives over each of `new_names`.
  with new_master(SplitTrace) as master:
    trace = SplitTrace(master, core.cur_sublevel())
    in_tracers = map(partial(SplitTracer, trace, name, new_names), args)
    ans = yield in_tracers
    out_tracer = trace.full_raise(ans)
    out_val = out_tracer.val
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val
@lu.transformation
def axisvar_split_subtrace(master, name, new_names, *vals):
  # Like axisvar_split, but reuses an existing trace master (for call
  # primitives encountered under an outer split trace).
  trace = SplitTrace(master, core.cur_sublevel())
  ans = yield map(partial(SplitTracer, trace, name, new_names), vals)
  out_tracer = trace.full_raise(ans)
  out_val = out_tracer.val
  yield out_val
class SplitTracer(Tracer):
  """Tracer for axis-variable splitting: carries the original axis name and
  the tuple of new axis names it is being split into."""

  def __init__(self, trace, name, new_names, val):
    self.trace = trace
    self.name = name            # original axis name (None if untraced)
    self.new_names = new_names  # names the axis is split into
    self.val = val

  @property
  def aval(self):
    # splitting does not change the abstract value
    return core.get_aval(self.val)

  def unpack(self):
    if self.name is None:
      return self.full_lower()
    else:
      elt_tracer = partial(SplitTracer, self.trace, self.name, self.new_names)
      return map(elt_tracer, self.val)

  def full_lower(self):
    if self.name is None:
      return core.full_lower(self.val)
    else:
      return self
class SplitTrace(Trace):
  """Trace that rewrites each collective over one axis name into a sequence
  of collectives over several new axis names (axis-variable splitting)."""

  def pure(self, val):
    return SplitTracer(self, None, (), val)

  def lift(self, val):
    return SplitTracer(self, None, (), val)

  def sublift(self, val):
    return SplitTracer(self, val.name, val.new_names, val.val)

  def process_primitive(self, primitive, tracers, params):
    names_in, vals_in = unzip2((t.name, t.val) for t in tracers)
    if all(name is None for name in names_in):
      return primitive.bind(*vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)
      new_names = next(t.new_names for t in tracers if t.name is not None)
      if primitive in pmap_primitive_rules:
        val_in, = vals_in
        if name == params['axis_name']:
          # split one collective over `name` into one per new name
          new_params = {k: params[k] for k in params if k != 'axis_name'}
          val = val_in
          for new_name in new_names:
            val = primitive.bind(val, axis_name=new_name, **new_params)
          val_out = val
          return SplitTracer(self, name, new_names, val_out)
        else:
          # collective over a different name: pass it through
          val_out = primitive.bind(val_in, **params)
          return SplitTracer(self, name, new_names, val_out)
      else:
        val_out = primitive.bind(*vals_in, **params)
        return SplitTracer(self, name, new_names, val_out)

  def process_call(self, call_primitive, f, tracers, params):
    names_in, vals_in = unzip2((t.name, t.val) for t in tracers)
    if all(name is None for name in names_in):
      # BUG FIX: this used `*vals`, an undefined name (NameError at runtime);
      # the unpacked values are bound as `vals_in`.
      return call_primitive.bind(f, *vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)
      new_names = next(t.new_names for t in tracers if t.name is not None)
      f = axisvar_split_subtrace(f, self.master, name, new_names)
      # BUG FIX: `*vals` -> `*vals_in` here as well.
      val_out = call_primitive.bind(f, *vals_in, **params)
      return SplitTracer(self, name, new_names, val_out)

  def post_process_call(self, _, out_tracer):
    # re-wrap a value that escaped a call primitive in a fresh trace
    name, new_names, val = out_tracer.name, out_tracer.new_names, out_tracer.val
    master = self.master
    def todo(x):
      trace = SplitTrace(master, core.cur_sublevel())
      return SplitTracer(trace, name, new_names, x)
    return val, todo

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    name = next(t.name for t in tracers if t.name is not None)
    new_names = next(t.new_names for t in tracers if t.name is not None)
    return SplitTracer(self, name, new_names, vals)
def reshape_axis(chunksize, in_axis, arg):
  """Reshape `arg` so its axis `in_axis` of length n becomes two axes
  (n // chunksize, chunksize); recurses through abstract tuples."""
  aval = core.get_aval(arg)
  if type(aval) is core.AbstractTuple:
    if type(in_axis) is int:
      # same axis for every tuple element
      return core.pack(map(partial(reshape_axis, chunksize, in_axis), arg))
    elif isinstance(in_axis, (list, tuple)):
      # one axis per tuple element
      return core.pack(map(partial(reshape_axis, chunksize), in_axis, arg))
    else:
      raise TypeError("unexpected in_axis type: {}".format(type(in_axis)))
  elif isinstance(aval, ShapedArray):
    in_axis = in_axis % arg.ndim  # normalize a negative axis index
    split_shape = (arg.shape[in_axis] // chunksize, chunksize)
    new_shape = arg.shape[:in_axis] + split_shape + arg.shape[in_axis+1:]
    return arg.reshape(new_shape)
  else:
    raise TypeError(type(arg))
### papply
newvar = pe.gensym('_axis')
def papply(fun, name, in_vals, in_axes):
  """Trace `fun` applied to per-device shards of inputs split along `in_axes`,
  with `name` bound as the mapped axis name."""
  return papply_transform(fun).call_wrapped(name, in_vals, in_axes)
@lu.transformation
def papply_transform(name, args, axes):
  # Trace the wrapped function with PapplyTracers carrying split-axis info.
  with new_master(PapplyTrace) as master:
    trace = PapplyTrace(master, core.cur_sublevel())
    in_tracers = map(partial(PapplyTracer, trace, name), args, axes)
    out_tracer = yield in_tracers
    out_tracer = trace.full_raise(out_tracer)
    out_val = out_tracer.val
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val
class PapplyTracer(Tracer):
  """Tracer for papply: carries a shard value, the logical split axis, and
  the bound axis name."""

  def __init__(self, trace, name, val, axis):
    self.trace = trace
    self.name = name    # mapped axis name (None if unsplit)
    self.val = val      # this device's shard of the value
    self.axis = axis    # logical axis the value is split along (or None)

  @property
  def aval(self):
    return batching.get_aval(self.val)

  def unpack(self):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def full_lower(self):
    if self.axis is None:
      return core.full_lower(self.val)
    else:
      return self
class PapplyTrace(Trace):
  """Trace that dispatches primitives on split values to papply rules."""

  def pure(self, val):
    return PapplyTracer(self, None, val, None)

  def lift(self, val):
    return PapplyTracer(self, None, val, None)

  def sublift(self, val):
    return PapplyTracer(self, val.name, val.val, val.axis)

  def process_primitive(self, primitive, tracers, params):
    names, vals, axes = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes):
      # nothing is split: evaluate normally
      return primitive.bind(*vals, **params)
    else:
      name = next(n for n in names if n is not None)
      rule = papply_primitive_rules[primitive]
      val_out, axis_out = rule(name, vals, axes, **params)
      return PapplyTracer(self, name, val_out, axis_out)

  def process_call(self, call_primitive, f, tracers, params):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def post_process_call(self, _, out_tracer):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    axis = tuple(t.axis for t in tracers)
    # NOTE(review): unlike the other traces, this keeps a tuple of names
    # rather than picking the single non-None one — confirm intended.
    name = tuple(t.name for t in tracers)
    return PapplyTracer(self, name, vals, axis)
papply_primitive_rules = {}
def scatter_like(source, target):
  """Scatter `source` to match the sharding of `target`."""
  return scatter_like_p.bind(source, target)
def scatter_like_papply_rule(name, vals, axes):
  """Papply rule for scatter_like; the source operand must be unsplit."""
  source, target = vals
  source_axis, target_axis = axes
  assert source_axis is None
  # NOTE(review): `_scatter` is not defined in this module — confirm it is
  # provided elsewhere before relying on this rule.
  return _scatter(source, target, target_axis, name)
# register scatter_like; its abstract eval follows the source operand
scatter_like_p = Primitive('scatter_like')
scatter_like_p.def_abstract_eval(lambda source, target: source)
papply_primitive_rules[scatter_like_p] = scatter_like_papply_rule
def defvectorized(prim):
  """Register the elementwise (axis-preserving) papply rule for `prim`."""
  papply_primitive_rules[prim] = partial(vectorized_papply, prim)
def vectorized_papply(prim, name, vals, axes, **params):
  """Papply rule for elementwise primitives: every operand must share the
  same split axis, which the output keeps."""
  first_axis = axes[0]
  assert all(axis == first_axis for axis in axes[1:])
  return prim.bind(*vals, **params), first_axis
def defreducer(prim, collective_prim):
  """Register the reduction papply rule for `prim`, finishing with
  `collective_prim` when the split axis itself is reduced."""
  papply_primitive_rules[prim] = partial(reducer_papply, prim, collective_prim)
def reducer_papply(prim, cprim, name, vals, papply_axes, input_shape, axes):
  """Papply rule for reductions: reduce locally over the non-split axes, then
  use the collective `cprim` when the split axis is also reduced."""
  operand, = vals
  papply_axis, = papply_axes
  other_axes = [i for i in axes if i != papply_axis]
  if other_axes:
    # local reduction over the axes that are not the split axis
    result = prim.bind(operand, axes=other_axes, input_shape=input_shape)
  else:
    result = operand
  if not axes or papply_axis in axes:
    # the split axis is reduced: finish with the collective; no split axis out
    return cprim.bind(result, axis_name=name), None
  else:
    # the split axis survives; renumber it past the removed axes
    new_papply_axis = papply_axis - onp.sum(onp.less(other_axes, papply_axis))
    return result, new_papply_axis
def defbroadcasting(prim):
  """Register the broadcasting binary-op papply rule for `prim`."""
  papply_primitive_rules[prim] = partial(broadcasting_papply, prim)
def broadcasting_papply(prim, name, vals, axes, **params):
  """Papply rule for broadcasting binary primitives.

  When at most one operand is split, or both are split along the same axis,
  bind the primitive directly; mismatched split axes are unsupported.
  """
  x, y = vals
  xdim, ydim = axes
  if xdim is None:
    # only y is split; the result follows y's axis
    return prim.bind(x, y, **params), ydim
  elif ydim is None:
    return prim.bind(x, y, **params), xdim
  elif xdim == ydim:
    # both split along the same axis
    return prim.bind(x, y, **params), xdim
  else:
    # Mismatched split axes are not supported yet. The statements that used
    # to follow this raise were unreachable dead code and referenced an
    # undefined name (`rescatter`), so they have been removed.
    raise NotImplementedError  # this isn't right, need to think about names
Add parallel primitive and translation rule for all_to_all.
Use it to complete last case of the binop papply rule. Also add an all_gather based on all_to_all.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as onp
import six
from six.moves import reduce
from .. import core
from .. import linear_util as lu
from ..core import Trace, Tracer, Primitive, new_master
from ..abstract_arrays import ShapedArray, ConcreteArray, make_shaped_array
from ..util import safe_zip, unzip2, unzip3, partialmethod, prod
from ..lib import xla_bridge as xb
from . import partial_eval as pe
from . import batching
zip = safe_zip
def identity(x): return x
### pmap
def pmap(fun, name, in_vals, in_axes, out_axis_target):
  """Map `fun` over one axis of each input, binding `name` for collectives.

  Args:
    fun: a linear_util-wrapped function.
    name: axis name bound for collective primitives inside `fun`.
    in_vals: argument values.
    in_axes: per-argument mapped axis position (None for unmapped args).
    out_axis_target: position the mapped output axis is moved to.
  """
  sizes = reduce(set.union, map(batching.dimsize, in_axes, in_vals))
  if not sizes:
    # no argument is mapped: call straight through
    return fun.call_wrapped(*in_vals)
  elif len(sizes) == 1:
    fun, out_axis = pmap_transform(fun, name, in_axes)
    out_val = fun.call_wrapped(*in_vals)
    # move the mapped output axis to the requested position
    return batching.moveaxis(sizes.pop(), out_axis_target, out_axis(), out_val)
  else:
    raise TypeError("got inconsistent map dimension sizes: {}".format(sizes))
@lu.transformation_with_aux
def pmap_transform(name, axes, *vals):
  # Trace the wrapped function with PmapTracers; the aux output is the
  # mapped axis of the result.
  with new_master(PmapTrace) as master:
    trace = PmapTrace(master, core.cur_sublevel())
    in_tracers = map(partial(PmapTracer, trace, name), vals, axes)
    ans = yield in_tracers
    out_tracer = trace.full_raise(ans)
    out_val, out_axis = out_tracer.val, out_tracer.axis
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val, out_axis
@lu.transformation_with_aux
def pmap_subtrace(master, name, axes, *vals):
  # Like pmap_transform, but reuses an existing trace master (used when a
  # call primitive is encountered under an outer pmap trace).
  trace = PmapTrace(master, core.cur_sublevel())
  ans = yield map(partial(PmapTracer, trace, name), vals, axes)
  out_tracer = trace.full_raise(ans)
  out_val, out_axis = out_tracer.val, out_tracer.axis
  yield out_val, out_axis
class PmapTracer(Tracer):
  """Tracer carrying a value, its mapped axis, and the bound axis name."""

  def __init__(self, trace, name, val, axis):
    self.trace = trace
    self.name = name    # axis name this tracer is mapped under (or None)
    self.val = val      # the underlying (batched) value
    self.axis = axis    # mapped axis: int, tuple (for tuple values), or None

  @property
  def aval(self):
    # abstract value with the mapped axis removed
    batched_aval = batching.get_aval(self.val)
    return batching.remove_batch_dim_from_aval(self.axis, batched_aval)

  def unpack(self):
    t = type(self.axis)
    if t is tuple:
      # tuple value with a per-element axis
      axes = self.axis
    elif t is int:
      # same mapped axis for every element
      axes = [self.axis] * len(self.val)
    elif t is type(None):
      # not mapped: unpack the raw value
      return tuple(self.val)
    else:
      raise TypeError(t)
    return map(partial(PmapTracer, self.trace, self.name), self.val, axes)

  def full_lower(self):
    if self.axis is None:
      # unmapped tracers can be lowered out of this trace
      return core.full_lower(self.val)
    else:
      return self
class PmapTrace(Trace):
  """Trace that interprets mapped values, dispatching collective primitives
  to pmap rules and everything else to the vmap batching rules."""

  def pure(self, val):
    # constants are unmapped
    return PmapTracer(self, None, val, None)

  def lift(self, val):
    return PmapTracer(self, None, val, None)

  def sublift(self, val):
    return PmapTracer(self, val.name, val.val, val.axis)

  def process_primitive(self, primitive, tracers, params):
    names_in, vals_in, axes_in = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes_in):
      # no mapped operands: evaluate normally
      return primitive.bind(*vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)  # all same
      if primitive in pmap_primitive_rules:
        # if it's a pmap collective primitive, do something special
        val_in, = vals_in
        axis_in, = axes_in
        if name == params['axis_name']:
          # if the name matches this tracer's name, apply the pmap rule
          rule = pmap_primitive_rules[primitive]
          params = {k: params[k] for k in params if k != 'axis_name'}
          val_out, axis_out = rule(val_in, axis_in, **params)
          return PmapTracer(self, name, val_out, axis_out)
        else:
          # if not, bind the primitive so that any other pmap tracers can see it
          val_out = primitive.bind(val_in, **params)
          return PmapTracer(self, name, val_out, axis_in)
      else:
        # if it's not a pmap collective primitive, act just like vmap
        rule = batching.get_primitive_batcher(primitive)
        val_out, axis_out = rule(vals_in, axes_in, **params)
        return PmapTracer(self, name, val_out, axis_out)

  def process_call(self, call_primitive, f, tracers, params):
    names, vals, axes = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes):
      return call_primitive.bind(f, *vals, **params)
    else:
      name = next(name for name in names if name is not None)  # all same
      # push the trace into the called function
      f, axis_out = pmap_subtrace(f, self.master, name, axes)
      val_out = call_primitive.bind(f, *vals, **params)
      return PmapTracer(self, name, val_out, axis_out())

  def post_process_call(self, _, out_tracer):
    # re-wrap a value that escaped a call primitive in a fresh trace
    name, val, axis = out_tracer.name, out_tracer.val, out_tracer.axis
    master = self.master
    def todo(x):
      trace = PmapTrace(master, core.cur_sublevel())
      return PmapTracer(trace, name, x, axis)
    return val, todo

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    axis = tuple(t.axis for t in tracers)
    name = next(t.name for t in tracers if t.name)
    return PmapTracer(self, name, vals, axis)
def unbound_name_error(primitive_name, *args, **kwargs):
  """Impl rule for collective primitives: fail because the axis name is unbound."""
  template = "axis name '{}' is unbound for primitive {}."
  raise NameError(template.format(kwargs['axis_name'], primitive_name))
def PmapPrimitive(name):
  """Make a collective primitive whose impl raises if its axis name is unbound."""
  prim = Primitive(name)
  prim.def_impl(partial(unbound_name_error, name))
  prim.def_abstract_eval(lambda x, *args, **kwargs: x)  # default
  return prim
# rules for collective primitives under pmap, and their XLA translations
pmap_primitive_rules = {}
parallel_translation_rules = {}
def psum(x, axis_name):
  """Collective sum of x over the mapped axis named `axis_name`."""
  return psum_p.bind(x, axis_name=axis_name)
def psum_pmap_rule(val, axis):
  """Collapse the mapped axis with a local sum; the result has no mapped axis."""
  summed = val.sum(axis)
  return summed, None
def psum_parallel_translation_rule(c, val, device_groups):
  """Emit an XLA CrossReplicaSum, grouped when more than one device group."""
  if len(device_groups) <= 1:
    return c.CrossReplicaSum(val)
  return c.CrossReplicaSum(val, device_groups)
psum_p = PmapPrimitive('psum')
pmap_primitive_rules[psum_p] = psum_pmap_rule
parallel_translation_rules[psum_p] = psum_parallel_translation_rule
def all_to_all(x, split_dim, concat_dim):
  """Collective: split x along `split_dim` across replicas and concatenate
  the received pieces along `concat_dim`."""
  return all_to_all_p.bind(x, split_dim=split_dim, concat_dim=concat_dim)
def all_to_all_translation_rule(c, x, split_dim, concat_dim):
  """Lower the all_to_all collective to XLA's AllToAll op."""
  return c.AllToAll(x, split_dim, concat_dim)
# register all_to_all; its abstract eval preserves the operand's aval
all_to_all_p = PmapPrimitive('all_to_all')
all_to_all_p.def_abstract_eval(lambda x, **kwargs: x)
parallel_translation_rules[all_to_all_p] = all_to_all_translation_rule
def all_gather(x, xdim):
  """Gather all shards of x (split along `xdim`) onto every replica.

  Implemented by broadcasting x over a new leading axis sized by the replica
  count, then exchanging with all_to_all.
  """
  x = x.broadcast((xb.get_replica_count(),))
  return all_to_all(x, 0, xdim)
def gather(x, axis_name):
  """Collective gather of x over the mapped axis named `axis_name`."""
  return gather_p.bind(x, axis_name=axis_name)
def gather_pmap_rule(val, axis):
  """Under pmap the full mapped value is already present, so gather is an
  identity that drops the mapped axis."""
  result = (val, None)
  return result
# register gather as a collective primitive (identity under pmap)
gather_p = PmapPrimitive('gather')
pmap_primitive_rules[gather_p] = gather_pmap_rule
### axis variable splitting and computation chunking
@lu.transformation
def axisvar_split(name, new_names, *args):
  # Trace the wrapped function, splitting collectives over axis `name` into
  # collectives over each of `new_names`.
  with new_master(SplitTrace) as master:
    trace = SplitTrace(master, core.cur_sublevel())
    in_tracers = map(partial(SplitTracer, trace, name, new_names), args)
    ans = yield in_tracers
    out_tracer = trace.full_raise(ans)
    out_val = out_tracer.val
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val
@lu.transformation
def axisvar_split_subtrace(master, name, new_names, *vals):
  # Like axisvar_split, but reuses an existing trace master (for call
  # primitives encountered under an outer split trace).
  trace = SplitTrace(master, core.cur_sublevel())
  ans = yield map(partial(SplitTracer, trace, name, new_names), vals)
  out_tracer = trace.full_raise(ans)
  out_val = out_tracer.val
  yield out_val
class SplitTracer(Tracer):
  """Tracer for axis-variable splitting: carries the original axis name and
  the tuple of new axis names it is being split into."""

  def __init__(self, trace, name, new_names, val):
    self.trace = trace
    self.name = name            # original axis name (None if untraced)
    self.new_names = new_names  # names the axis is split into
    self.val = val

  @property
  def aval(self):
    # splitting does not change the abstract value
    return core.get_aval(self.val)

  def unpack(self):
    if self.name is None:
      return self.full_lower()
    else:
      elt_tracer = partial(SplitTracer, self.trace, self.name, self.new_names)
      return map(elt_tracer, self.val)

  def full_lower(self):
    if self.name is None:
      return core.full_lower(self.val)
    else:
      return self
class SplitTrace(Trace):
  """Trace that rewrites each collective over one axis name into a sequence
  of collectives over several new axis names (axis-variable splitting)."""

  def pure(self, val):
    return SplitTracer(self, None, (), val)

  def lift(self, val):
    return SplitTracer(self, None, (), val)

  def sublift(self, val):
    return SplitTracer(self, val.name, val.new_names, val.val)

  def process_primitive(self, primitive, tracers, params):
    names_in, vals_in = unzip2((t.name, t.val) for t in tracers)
    if all(name is None for name in names_in):
      return primitive.bind(*vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)
      new_names = next(t.new_names for t in tracers if t.name is not None)
      if primitive in pmap_primitive_rules:
        val_in, = vals_in
        if name == params['axis_name']:
          # split one collective over `name` into one per new name
          new_params = {k: params[k] for k in params if k != 'axis_name'}
          val = val_in
          for new_name in new_names:
            val = primitive.bind(val, axis_name=new_name, **new_params)
          val_out = val
          return SplitTracer(self, name, new_names, val_out)
        else:
          # collective over a different name: pass it through
          val_out = primitive.bind(val_in, **params)
          return SplitTracer(self, name, new_names, val_out)
      else:
        val_out = primitive.bind(*vals_in, **params)
        return SplitTracer(self, name, new_names, val_out)

  def process_call(self, call_primitive, f, tracers, params):
    names_in, vals_in = unzip2((t.name, t.val) for t in tracers)
    if all(name is None for name in names_in):
      # BUG FIX: this used `*vals`, an undefined name (NameError at runtime);
      # the unpacked values are bound as `vals_in`.
      return call_primitive.bind(f, *vals_in, **params)
    else:
      name = next(name for name in names_in if name is not None)
      new_names = next(t.new_names for t in tracers if t.name is not None)
      f = axisvar_split_subtrace(f, self.master, name, new_names)
      # BUG FIX: `*vals` -> `*vals_in` here as well.
      val_out = call_primitive.bind(f, *vals_in, **params)
      return SplitTracer(self, name, new_names, val_out)

  def post_process_call(self, _, out_tracer):
    # re-wrap a value that escaped a call primitive in a fresh trace
    name, new_names, val = out_tracer.name, out_tracer.new_names, out_tracer.val
    master = self.master
    def todo(x):
      trace = SplitTrace(master, core.cur_sublevel())
      return SplitTracer(trace, name, new_names, x)
    return val, todo

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    name = next(t.name for t in tracers if t.name is not None)
    new_names = next(t.new_names for t in tracers if t.name is not None)
    return SplitTracer(self, name, new_names, vals)
def reshape_axis(chunksize, in_axis, arg):
  """Reshape `arg` so its axis `in_axis` of length n becomes two axes
  (n // chunksize, chunksize); recurses through abstract tuples."""
  aval = core.get_aval(arg)
  if type(aval) is core.AbstractTuple:
    if type(in_axis) is int:
      # same axis for every tuple element
      return core.pack(map(partial(reshape_axis, chunksize, in_axis), arg))
    elif isinstance(in_axis, (list, tuple)):
      # one axis per tuple element
      return core.pack(map(partial(reshape_axis, chunksize), in_axis, arg))
    else:
      raise TypeError("unexpected in_axis type: {}".format(type(in_axis)))
  elif isinstance(aval, ShapedArray):
    in_axis = in_axis % arg.ndim  # normalize a negative axis index
    split_shape = (arg.shape[in_axis] // chunksize, chunksize)
    new_shape = arg.shape[:in_axis] + split_shape + arg.shape[in_axis+1:]
    return arg.reshape(new_shape)
  else:
    raise TypeError(type(arg))
### papply
newvar = pe.gensym('_axis')
def papply(fun, name, in_vals, in_axes):
  """Trace `fun` applied to per-device shards of inputs split along `in_axes`,
  with `name` bound as the mapped axis name."""
  return papply_transform(fun).call_wrapped(name, in_vals, in_axes)
@lu.transformation
def papply_transform(name, args, axes):
  # Trace the wrapped function with PapplyTracers carrying split-axis info.
  with new_master(PapplyTrace) as master:
    trace = PapplyTrace(master, core.cur_sublevel())
    in_tracers = map(partial(PapplyTracer, trace, name), args, axes)
    out_tracer = yield in_tracers
    out_tracer = trace.full_raise(out_tracer)
    out_val = out_tracer.val
    # drop references so the master/trace are not kept alive
    del master, out_tracer
  yield out_val
class PapplyTracer(Tracer):
  """Tracer for papply: carries a shard value, the logical split axis, and
  the bound axis name."""

  def __init__(self, trace, name, val, axis):
    self.trace = trace
    self.name = name    # mapped axis name (None if unsplit)
    self.val = val      # this device's shard of the value
    self.axis = axis    # logical axis the value is split along (or None)

  @property
  def aval(self):
    return batching.get_aval(self.val)

  def unpack(self):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def full_lower(self):
    if self.axis is None:
      return core.full_lower(self.val)
    else:
      return self
class PapplyTrace(Trace):
  """Trace that dispatches primitives on split values to papply rules."""

  def pure(self, val):
    return PapplyTracer(self, None, val, None)

  def lift(self, val):
    return PapplyTracer(self, None, val, None)

  def sublift(self, val):
    return PapplyTracer(self, val.name, val.val, val.axis)

  def process_primitive(self, primitive, tracers, params):
    names, vals, axes = unzip3((t.name, t.val, t.axis) for t in tracers)
    if all(axis is None for axis in axes):
      # nothing is split: evaluate normally
      return primitive.bind(*vals, **params)
    else:
      name = next(n for n in names if n is not None)
      rule = papply_primitive_rules[primitive]
      val_out, axis_out = rule(name, vals, axes, **params)
      return PapplyTracer(self, name, val_out, axis_out)

  def process_call(self, call_primitive, f, tracers, params):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def post_process_call(self, _, out_tracer):
    raise NotImplementedError  # TODO(mattjj,frostig)

  def pack(self, tracers):
    vals = core.pack([t.val for t in tracers])
    axis = tuple(t.axis for t in tracers)
    # NOTE(review): unlike the other traces, this keeps a tuple of names
    # rather than picking the single non-None one — confirm intended.
    name = tuple(t.name for t in tracers)
    return PapplyTracer(self, name, vals, axis)
papply_primitive_rules = {}
def scatter_like(source, target):
  """Scatter `source` to match the sharding of `target`."""
  return scatter_like_p.bind(source, target)
def scatter_like_papply_rule(name, vals, axes):
  """Papply rule for scatter_like; the source operand must be unsplit."""
  source, target = vals
  source_axis, target_axis = axes
  assert source_axis is None
  # NOTE(review): `_scatter` is not defined in this module — confirm it is
  # provided elsewhere before relying on this rule.
  return _scatter(source, target, target_axis, name)
# register scatter_like; its abstract eval follows the source operand
scatter_like_p = Primitive('scatter_like')
scatter_like_p.def_abstract_eval(lambda source, target: source)
papply_primitive_rules[scatter_like_p] = scatter_like_papply_rule
def defvectorized(prim):
  """Register the elementwise (axis-preserving) papply rule for `prim`."""
  papply_primitive_rules[prim] = partial(vectorized_papply, prim)
def vectorized_papply(prim, name, vals, axes, **params):
  """Papply rule for elementwise primitives: every operand must share the
  same split axis, which the output keeps."""
  first_axis = axes[0]
  assert all(axis == first_axis for axis in axes[1:])
  return prim.bind(*vals, **params), first_axis
def defreducer(prim, collective_prim):
  """Register the reduction papply rule for `prim`, finishing with
  `collective_prim` when the split axis itself is reduced."""
  papply_primitive_rules[prim] = partial(reducer_papply, prim, collective_prim)
def reducer_papply(prim, cprim, name, vals, papply_axes, input_shape, axes):
  """Papply rule for reductions: reduce locally over the non-split axes, then
  use the collective `cprim` when the split axis is also reduced."""
  operand, = vals
  papply_axis, = papply_axes
  other_axes = [i for i in axes if i != papply_axis]
  if other_axes:
    # local reduction over the axes that are not the split axis
    result = prim.bind(operand, axes=other_axes, input_shape=input_shape)
  else:
    result = operand
  if not axes or papply_axis in axes:
    # the split axis is reduced: finish with the collective; no split axis out
    return cprim.bind(result, axis_name=name), None
  else:
    # the split axis survives; renumber it past the removed axes
    new_papply_axis = papply_axis - onp.sum(onp.less(other_axes, papply_axis))
    return result, new_papply_axis
def defbroadcasting(prim):
  """Register the broadcasting binary-op papply rule for `prim`."""
  papply_primitive_rules[prim] = partial(broadcasting_papply, prim)
def broadcasting_papply(prim, name, vals, axes, **params):
  """Papply rule for broadcasting binary primitives."""
  x, y = vals
  xdim, ydim = axes
  if xdim is None:
    # only y is split; the result follows y's axis
    return prim.bind(x, y, **params), ydim
  elif ydim is None:
    return prim.bind(x, y, **params), xdim
  elif xdim == ydim:
    # both split along the same axis
    return prim.bind(x, y, **params), xdim
  else:
    # mismatched split axes: re-shard x onto y's axis with an all_to_all
    x = all_to_all(x, xdim, ydim)
    return prim.bind(x, y, **params), ydim
|
"""The tests for the person component."""
from unittest.mock import Mock
from homeassistant.components.person import (
ATTR_SOURCE, ATTR_USER_ID, DOMAIN, PersonManager)
from homeassistant.const import (
ATTR_ID, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_GPS_ACCURACY,
STATE_UNKNOWN, EVENT_HOMEASSISTANT_START)
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE, SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER)
from homeassistant.core import CoreState, State
from homeassistant.setup import async_setup_component
import pytest
from tests.common import mock_component, mock_restore_cache, mock_coro_func
DEVICE_TRACKER = 'device_tracker.test_tracker'
DEVICE_TRACKER_2 = 'device_tracker.test_tracker_2'
@pytest.fixture
def storage_setup(hass, hass_storage, hass_admin_user):
    """Pre-populate storage with one person tied to the admin user and
    DEVICE_TRACKER, then set the person component up from that storage."""
    hass_storage[DOMAIN] = {
        'key': DOMAIN,
        'version': 1,
        'data': {
            'persons': [
                {
                    'id': '1234',
                    'name': 'tracked person',
                    'user_id': hass_admin_user.id,
                    'device_trackers': [DEVICE_TRACKER]
                }
            ]
        }
    }
    # The fixture itself is synchronous, so drive async setup on the loop.
    assert hass.loop.run_until_complete(
        async_setup_component(hass, DOMAIN, {})
    )
async def test_minimal_setup(hass):
    """Test minimal config with only id and name.

    With no trackers or user configured, the state is unknown and the
    location/source/user attributes are all absent.
    """
    config = {DOMAIN: {'id': '1234', 'name': 'test person'}}
    assert await async_setup_component(hass, DOMAIN, config)

    state = hass.states.get('person.test_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) is None
async def test_setup_no_id(hass):
    """Setup must be rejected when the required id is missing."""
    bad_config = {DOMAIN: {'name': 'test user'}}
    setup_ok = await async_setup_component(hass, DOMAIN, bad_config)
    assert not setup_ok
async def test_setup_no_name(hass):
    """Setup must be rejected when the required name is missing."""
    bad_config = {DOMAIN: {'id': '1234'}}
    setup_ok = await async_setup_component(hass, DOMAIN, bad_config)
    assert not setup_ok
async def test_setup_user_id(hass, hass_admin_user):
    """Test config with a valid user id.

    The user id must be exposed as an attribute; location attributes stay
    unset without a tracker.
    """
    user_id = hass_admin_user.id
    config = {
        DOMAIN: {'id': '1234', 'name': 'test person', 'user_id': user_id}}
    assert await async_setup_component(hass, DOMAIN, config)

    state = hass.states.get('person.test_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_valid_invalid_user_ids(hass, hass_admin_user):
    """Test a person with a valid user id and one with an invalid user id.

    Only the person with a valid user id gets an entity.
    """
    user_id = hass_admin_user.id
    config = {DOMAIN: [
        {'id': '1234', 'name': 'test valid user', 'user_id': user_id},
        {'id': '5678', 'name': 'test bad user', 'user_id': 'bad_user_id'}]}
    assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.test_valid_user')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # The person referencing a non-existing user is not created at all.
    state = hass.states.get('person.test_bad_user')
    assert state is None
async def test_setup_tracker(hass, hass_admin_user):
    """Test set up person with one device tracker.

    Tracker updates before Home Assistant has started must be ignored;
    once started, the person mirrors the tracker's state and GPS
    attributes.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': DEVICE_TRACKER}}
    assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # Tracker change before startup: person stays unknown.
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    # After startup the tracker state is picked up.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # GPS attributes from the tracker are copied onto the person.
    hass.states.async_set(
        DEVICE_TRACKER, 'not_home', {
            ATTR_LATITUDE: 10.123456,
            ATTR_LONGITUDE: 11.123456,
            ATTR_GPS_ACCURACY: 10})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 10.123456
    assert state.attributes.get(ATTR_LONGITUDE) == 11.123456
    assert state.attributes.get(ATTR_GPS_ACCURACY) == 10
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_setup_two_trackers(hass, hass_admin_user):
    """Test set up person with two device trackers.

    Exercises the source-selection rules: a GPS tracker is preferred
    over a router tracker unless the router tracker reports 'home'.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
    assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    # Only the router tracker has a state: it is used.
    hass.states.async_set(
        DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_GPS_ACCURACY) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # GPS tracker wins over the router tracker reporting 'not_home',
    # even though the router update is newer.
    hass.states.async_set(
        DEVICE_TRACKER_2, 'not_home', {
            ATTR_LATITUDE: 12.123456,
            ATTR_LONGITUDE: 13.123456,
            ATTR_GPS_ACCURACY: 12,
            ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    hass.states.async_set(
        DEVICE_TRACKER, 'not_home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 12.123456
    assert state.attributes.get(ATTR_LONGITUDE) == 13.123456
    assert state.attributes.get(ATTR_GPS_ACCURACY) == 12
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
    assert state.attributes.get(ATTR_USER_ID) == user_id
    hass.states.async_set(
        DEVICE_TRACKER_2, 'zone1', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'zone1'
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
    # A router tracker that reports 'home' overrides the GPS tracker.
    hass.states.async_set(
        DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    hass.states.async_set(
        DEVICE_TRACKER_2, 'zone2', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
async def test_ignore_unavailable_states(hass, hass_admin_user):
    """Test set up person with two device trackers, one unavailable.

    Trackers in 'unavailable'/'unknown' are ignored when choosing the
    person's state.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
    assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'unavailable')
    await hass.async_block_till_done()
    # Unknown, as only 1 device tracker has a state, but we ignore that one
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    hass.states.async_set(DEVICE_TRACKER_2, 'not_home')
    await hass.async_block_till_done()
    # Take state of tracker 2
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    # state 1 is newer but ignored, keep tracker 2 state
    hass.states.async_set(DEVICE_TRACKER, 'unknown')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
async def test_restore_home_state(hass, hass_admin_user):
    """Test that the state is restored for a person on startup.

    A cached state is replayed before any tracker reports, so the
    person starts in the restored state with the restored coordinates.
    """
    user_id = hass_admin_user.id
    attrs = {
        ATTR_ID: '1234', ATTR_LATITUDE: 10.12346, ATTR_LONGITUDE: 11.12346,
        ATTR_SOURCE: DEVICE_TRACKER, ATTR_USER_ID: user_id}
    state = State('person.tracked_person', 'home', attrs)
    mock_restore_cache(hass, (state, ))
    hass.state = CoreState.not_running
    # Restore-state only kicks in when the recorder is (mocked as) set up.
    mock_component(hass, 'recorder')
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': DEVICE_TRACKER}}
    assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 10.12346
    assert state.attributes.get(ATTR_LONGITUDE) == 11.12346
    # When restoring state the entity_id of the person will be used as source.
    assert state.attributes.get(ATTR_SOURCE) == 'person.tracked_person'
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_duplicate_ids(hass, hass_admin_user):
    """Test we don't allow duplicate IDs.

    Only the first person with a given id is created.
    """
    persons = [
        {'id': '1234', 'name': 'test user 1'},
        {'id': '1234', 'name': 'test user 2'},
    ]
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: persons})
    assert len(hass.states.async_entity_ids('person')) == 1
    assert hass.states.get('person.test_user_1') is not None
    assert hass.states.get('person.test_user_2') is None
async def test_create_person_during_run(hass):
    """Test that person is updated if created while hass is running.

    The tracker already has a state before the person exists; the new
    person must pick it up immediately.
    """
    config = {DOMAIN: {}}
    assert await async_setup_component(hass, DOMAIN, config)
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    await hass.components.person.async_create_person(
        'tracked person', device_trackers=[DEVICE_TRACKER])
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
async def test_load_person_storage(hass, hass_admin_user, storage_setup):
    """Test set up person from storage.

    Relies on the storage_setup fixture; the stored person should track
    the configured device tracker once Home Assistant starts.
    """
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
async def test_load_person_storage_two_nonlinked(hass, hass_storage):
    """Test loading two persons that have no user linked.

    Both storage persons must get their own entity.
    """
    hass_storage[DOMAIN] = {
        'key': DOMAIN,
        'version': 1,
        'data': {
            'persons': [
                {
                    'id': '1234',
                    'name': 'tracked person 1',
                    'user_id': None,
                    'device_trackers': []
                },
                {
                    'id': '5678',
                    'name': 'tracked person 2',
                    'user_id': None,
                    'device_trackers': []
                },
            ]
        }
    }
    # Assert the setup result like every other test in this module does,
    # so a failed setup surfaces here instead of as a confusing state miss.
    assert await async_setup_component(hass, DOMAIN, {})
    assert len(hass.states.async_entity_ids('person')) == 2
    assert hass.states.get('person.tracked_person_1') is not None
    assert hass.states.get('person.tracked_person_2') is not None
async def test_ws_list(hass, hass_ws_client, storage_setup):
    """Test listing persons over the WebSocket API."""
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    await client.send_json({'id': 6, 'type': 'person/list'})
    msg = await client.receive_json()
    assert msg['success']
    result = msg['result']
    assert result['storage'] == manager.storage_persons
    assert len(result['storage']) == 1
    assert len(result['config']) == 0
async def test_ws_create(hass, hass_ws_client, storage_setup,
                         hass_read_only_user):
    """Test creating a person via the WebSocket API.

    The new person is appended after the one seeded by storage_setup.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    resp = await client.send_json({
        'id': 6,
        'type': 'person/create',
        'name': 'Hello',
        'device_trackers': [DEVICE_TRACKER],
        'user_id': hass_read_only_user.id,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 2
    assert resp['success']
    assert resp['result'] == persons[1]
async def test_ws_create_requires_admin(hass, hass_ws_client, storage_setup,
                                        hass_admin_user, hass_read_only_user):
    """Test creating via WS requires admin.

    Strips admin rights from the connected user; the create call must
    fail and leave storage untouched.
    """
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    resp = await client.send_json({
        'id': 6,
        'type': 'person/create',
        'name': 'Hello',
        'device_trackers': [DEVICE_TRACKER],
        'user_id': hass_read_only_user.id,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 1
    assert not resp['success']
async def test_ws_update(hass, hass_ws_client, storage_setup):
    """Test updating a person via the WebSocket API.

    Updates name, device trackers and user id, then verifies both the
    stored person and the entity state reflect the change.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    persons = manager.storage_persons
    resp = await client.send_json({
        'id': 6,
        'type': 'person/update',
        'person_id': persons[0]['id'],
        'name': 'Updated Name',
        'device_trackers': [DEVICE_TRACKER_2],
        'user_id': None,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 1
    assert resp['success']
    assert resp['result'] == persons[0]
    # Was asserted twice in a copy/paste slip; once is enough.
    assert persons[0]['name'] == 'Updated Name'
    assert persons[0]['device_trackers'] == [DEVICE_TRACKER_2]
    assert persons[0]['user_id'] is None
    state = hass.states.get('person.tracked_person')
    assert state.name == 'Updated Name'
async def test_ws_update_require_admin(hass, hass_ws_client, storage_setup,
                                       hass_admin_user):
    """Test updating via WS requires admin.

    A non-admin update must fail and leave the stored person unchanged.
    """
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    # Snapshot the person before the attempted update.
    original = dict(manager.storage_persons[0])
    resp = await client.send_json({
        'id': 6,
        'type': 'person/update',
        'person_id': original['id'],
        'name': 'Updated Name',
        'device_trackers': [DEVICE_TRACKER_2],
        'user_id': None,
    })
    resp = await client.receive_json()
    assert not resp['success']
    not_updated = dict(manager.storage_persons[0])
    assert original == not_updated
async def test_ws_delete(hass, hass_ws_client, storage_setup):
    """Test deleting a person via the WebSocket API.

    The person must disappear from storage, the state machine and the
    entity registry.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    persons = manager.storage_persons
    resp = await client.send_json({
        'id': 6,
        'type': 'person/delete',
        'person_id': persons[0]['id'],
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 0
    assert resp['success']
    assert len(hass.states.async_entity_ids('person')) == 0
    ent_reg = await hass.helpers.entity_registry.async_get_registry()
    assert not ent_reg.async_is_registered('person.tracked_person')
async def test_ws_delete_require_admin(hass, hass_ws_client, storage_setup,
                                       hass_admin_user):
    """Test deleting via WS requires admin.

    A non-admin delete must fail and leave the person in storage.
    """
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    # Only person_id belongs in a delete request. The extra update
    # fields ('name', 'device_trackers', 'user_id') copy-pasted from the
    # update test were removed so the command fails on authorization,
    # not on payload validation.
    resp = await client.send_json({
        'id': 6,
        'type': 'person/delete',
        'person_id': manager.storage_persons[0]['id'],
    })
    resp = await client.receive_json()
    assert not resp['success']
    persons = manager.storage_persons
    assert len(persons) == 1
async def test_create_invalid_user_id(hass):
    """Test we do not allow an invalid user ID during creation."""
    person_manager = PersonManager(hass, Mock(), [])
    await person_manager.async_initialize()
    with pytest.raises(ValueError):
        await person_manager.async_create_person(
            name='Hello', user_id='non-existing')
async def test_create_duplicate_user_id(hass, hass_admin_user):
    """Test we do not allow duplicate user ID during creation.

    A second person linked to the same user must raise ValueError.
    """
    manager = PersonManager(
        hass, Mock(async_add_entities=mock_coro_func()), []
    )
    await manager.async_initialize()
    await manager.async_create_person(
        name='Hello',
        user_id=hass_admin_user.id
    )
    with pytest.raises(ValueError):
        await manager.async_create_person(
            name='Hello',
            user_id=hass_admin_user.id
        )
async def test_update_double_user_id(hass, hass_admin_user):
    """Test we do not allow double user ID during update.

    Updating a second person to a user already linked elsewhere must
    raise ValueError.
    """
    manager = PersonManager(
        hass, Mock(async_add_entities=mock_coro_func()), []
    )
    await manager.async_initialize()
    await manager.async_create_person(
        name='Hello',
        user_id=hass_admin_user.id
    )
    person = await manager.async_create_person(
        name='Hello',
    )
    with pytest.raises(ValueError):
        await manager.async_update_person(
            person_id=person['id'],
            user_id=hass_admin_user.id
        )
async def test_update_invalid_user_id(hass):
    """Test updating a person to a non-existing user ID raises."""
    manager = PersonManager(
        hass, Mock(async_add_entities=mock_coro_func()), []
    )
    await manager.async_initialize()
    person = await manager.async_create_person(
        name='Hello',
    )
    with pytest.raises(ValueError):
        await manager.async_update_person(
            person_id=person['id'],
            user_id='non-existing'
        )
async def test_update_person_when_user_removed(hass, hass_read_only_user):
    """Test the person's user link is cleared when the user is removed."""
    manager = PersonManager(
        hass, Mock(async_add_entities=mock_coro_func()), []
    )
    await manager.async_initialize()
    person = await manager.async_create_person(
        name='Hello',
        user_id=hass_read_only_user.id
    )
    await hass.auth.async_remove_user(hass_read_only_user)
    await hass.async_block_till_done()
    # The manager listens for user removal and unlinks the person.
    assert person['user_id'] is None
# Person tests - split from #21703 (#22663)
"""The tests for the person component."""
from unittest.mock import Mock
import pytest
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE, SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER)
from homeassistant.components.person import (
ATTR_SOURCE, ATTR_USER_ID, DOMAIN, PersonManager)
from homeassistant.const import (
ATTR_GPS_ACCURACY, ATTR_ID, ATTR_LATITUDE, ATTR_LONGITUDE,
EVENT_HOMEASSISTANT_START, STATE_UNKNOWN)
from homeassistant.core import CoreState, State
from homeassistant.setup import async_setup_component
from tests.common import (
assert_setup_component, mock_component, mock_coro_func, mock_restore_cache)
DEVICE_TRACKER = 'device_tracker.test_tracker'
DEVICE_TRACKER_2 = 'device_tracker.test_tracker_2'
# pylint: disable=redefined-outer-name
@pytest.fixture
def storage_setup(hass, hass_storage, hass_admin_user):
    """Set up the person component backed by a pre-seeded storage entry.

    Seeds storage with one person linked to the admin user and one
    device tracker, then sets up the component so it loads that data.
    """
    hass_storage[DOMAIN] = {
        'key': DOMAIN,
        'version': 1,
        'data': {
            'persons': [
                {
                    'id': '1234',
                    'name': 'tracked person',
                    'user_id': hass_admin_user.id,
                    'device_trackers': [DEVICE_TRACKER]
                }
            ]
        }
    }
    # The fixture is synchronous, so drive the async setup on the loop.
    assert hass.loop.run_until_complete(
        async_setup_component(hass, DOMAIN, {})
    )
async def test_minimal_setup(hass):
    """Test minimal config with only id and name.

    With no trackers or user linked, the person is unknown and all
    location-related attributes are absent.
    """
    config = {DOMAIN: {'id': '1234', 'name': 'test person'}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.test_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) is None
async def test_setup_no_id(hass):
    """Test that setup fails when the person config omits the id."""
    assert not await async_setup_component(
        hass, DOMAIN, {DOMAIN: {'name': 'test user'}})
async def test_setup_no_name(hass):
    """Test that setup fails when the person config omits the name."""
    assert not await async_setup_component(
        hass, DOMAIN, {DOMAIN: {'id': '1234'}})
async def test_setup_user_id(hass, hass_admin_user):
    """Test config with a linked user id.

    The user id must be exposed as a state attribute; location
    attributes stay unset without a tracker.
    """
    user_id = hass_admin_user.id
    config = {
        DOMAIN: {'id': '1234', 'name': 'test person', 'user_id': user_id}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.test_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_valid_invalid_user_ids(hass, hass_admin_user):
    """Test a person with a valid user id and one with an invalid user id.

    Only the person with a valid user id gets an entity.
    """
    user_id = hass_admin_user.id
    config = {DOMAIN: [
        {'id': '1234', 'name': 'test valid user', 'user_id': user_id},
        {'id': '5678', 'name': 'test bad user', 'user_id': 'bad_user_id'}]}
    with assert_setup_component(2):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.test_valid_user')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # The person referencing a non-existing user is not created at all.
    state = hass.states.get('person.test_bad_user')
    assert state is None
async def test_setup_tracker(hass, hass_admin_user):
    """Test set up person with one device tracker.

    Tracker updates before Home Assistant has started must be ignored;
    once started, the person mirrors the tracker's state and GPS
    attributes.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': DEVICE_TRACKER}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # Tracker change before startup: person stays unknown.
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    # After startup the tracker state is picked up.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # GPS attributes from the tracker are copied onto the person.
    hass.states.async_set(
        DEVICE_TRACKER, 'not_home', {
            ATTR_LATITUDE: 10.123456,
            ATTR_LONGITUDE: 11.123456,
            ATTR_GPS_ACCURACY: 10})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 10.123456
    assert state.attributes.get(ATTR_LONGITUDE) == 11.123456
    assert state.attributes.get(ATTR_GPS_ACCURACY) == 10
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_setup_two_trackers(hass, hass_admin_user):
    """Test set up person with two device trackers.

    Exercises the source-selection rules: a GPS tracker is preferred
    over a router tracker unless the router tracker reports 'home'.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == user_id
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    # Only the router tracker has a state: it is used.
    hass.states.async_set(
        DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_GPS_ACCURACY) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == user_id
    # GPS tracker wins over the router tracker reporting 'not_home',
    # even though the router update is newer.
    hass.states.async_set(
        DEVICE_TRACKER_2, 'not_home', {
            ATTR_LATITUDE: 12.123456,
            ATTR_LONGITUDE: 13.123456,
            ATTR_GPS_ACCURACY: 12,
            ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    hass.states.async_set(
        DEVICE_TRACKER, 'not_home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 12.123456
    assert state.attributes.get(ATTR_LONGITUDE) == 13.123456
    assert state.attributes.get(ATTR_GPS_ACCURACY) == 12
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
    assert state.attributes.get(ATTR_USER_ID) == user_id
    hass.states.async_set(
        DEVICE_TRACKER_2, 'zone1', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'zone1'
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER_2
    # A router tracker that reports 'home' overrides the GPS tracker.
    hass.states.async_set(
        DEVICE_TRACKER, 'home', {ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER})
    await hass.async_block_till_done()
    hass.states.async_set(
        DEVICE_TRACKER_2, 'zone2', {ATTR_SOURCE_TYPE: SOURCE_TYPE_GPS})
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
async def test_ignore_unavailable_states(hass, hass_admin_user):
    """Test set up person with two device trackers, one unavailable.

    Trackers in 'unavailable'/'unknown' are ignored when choosing the
    person's state.
    """
    hass.state = CoreState.not_running
    user_id = hass_admin_user.id
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': [DEVICE_TRACKER, DEVICE_TRACKER_2]}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'unavailable')
    await hass.async_block_till_done()
    # Unknown, as only 1 device tracker has a state, but we ignore that one
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    hass.states.async_set(DEVICE_TRACKER_2, 'not_home')
    await hass.async_block_till_done()
    # Take state of tracker 2
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
    # state 1 is newer but ignored, keep tracker 2 state
    hass.states.async_set(DEVICE_TRACKER, 'unknown')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'not_home'
async def test_restore_home_state(hass, hass_admin_user):
    """Test that the state is restored for a person on startup.

    A cached state is replayed before any tracker reports, so the
    person starts in the restored state with the restored coordinates.
    """
    user_id = hass_admin_user.id
    attrs = {
        ATTR_ID: '1234', ATTR_LATITUDE: 10.12346, ATTR_LONGITUDE: 11.12346,
        ATTR_SOURCE: DEVICE_TRACKER, ATTR_USER_ID: user_id}
    state = State('person.tracked_person', 'home', attrs)
    mock_restore_cache(hass, (state, ))
    hass.state = CoreState.not_running
    # Restore-state only kicks in when the recorder is (mocked as) set up.
    mock_component(hass, 'recorder')
    config = {DOMAIN: {
        'id': '1234', 'name': 'tracked person', 'user_id': user_id,
        'device_trackers': DEVICE_TRACKER}}
    with assert_setup_component(1):
        assert await async_setup_component(hass, DOMAIN, config)
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) == 10.12346
    assert state.attributes.get(ATTR_LONGITUDE) == 11.12346
    # When restoring state the entity_id of the person will be used as source.
    assert state.attributes.get(ATTR_SOURCE) == 'person.tracked_person'
    assert state.attributes.get(ATTR_USER_ID) == user_id
async def test_duplicate_ids(hass, hass_admin_user):
    """Test we don't allow duplicate IDs.

    Only the first person with a given id is created.
    """
    config = {DOMAIN: [
        {'id': '1234', 'name': 'test user 1'},
        {'id': '1234', 'name': 'test user 2'}]}
    with assert_setup_component(2):
        assert await async_setup_component(hass, DOMAIN, config)
    assert len(hass.states.async_entity_ids('person')) == 1
    assert hass.states.get('person.test_user_1') is not None
    assert hass.states.get('person.test_user_2') is None
async def test_create_person_during_run(hass):
    """Test that person is updated if created while hass is running.

    The tracker already has a state before the person exists; the new
    person must pick it up immediately.
    """
    config = {DOMAIN: {}}
    with assert_setup_component(0):
        assert await async_setup_component(hass, DOMAIN, config)
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    await hass.components.person.async_create_person(
        'tracked person', device_trackers=[DEVICE_TRACKER])
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
async def test_load_person_storage(hass, hass_admin_user, storage_setup):
    """Test set up person from storage.

    Relies on the storage_setup fixture; the stored person should track
    the configured device tracker once Home Assistant starts.
    """
    state = hass.states.get('person.tracked_person')
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) is None
    assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    await hass.async_block_till_done()
    hass.states.async_set(DEVICE_TRACKER, 'home')
    await hass.async_block_till_done()
    state = hass.states.get('person.tracked_person')
    assert state.state == 'home'
    assert state.attributes.get(ATTR_ID) == '1234'
    assert state.attributes.get(ATTR_LATITUDE) is None
    assert state.attributes.get(ATTR_LONGITUDE) is None
    assert state.attributes.get(ATTR_SOURCE) == DEVICE_TRACKER
    assert state.attributes.get(ATTR_USER_ID) == hass_admin_user.id
async def test_load_person_storage_two_nonlinked(hass, hass_storage):
    """Test loading two persons that have no user linked.

    Both storage persons must get their own entity.
    """
    hass_storage[DOMAIN] = {
        'key': DOMAIN,
        'version': 1,
        'data': {
            'persons': [
                {
                    'id': '1234',
                    'name': 'tracked person 1',
                    'user_id': None,
                    'device_trackers': []
                },
                {
                    'id': '5678',
                    'name': 'tracked person 2',
                    'user_id': None,
                    'device_trackers': []
                },
            ]
        }
    }
    # Assert the setup result like every other test in this module does,
    # so a failed setup surfaces here instead of as a confusing state miss.
    assert await async_setup_component(hass, DOMAIN, {})
    assert len(hass.states.async_entity_ids('person')) == 2
    assert hass.states.get('person.tracked_person_1') is not None
    assert hass.states.get('person.tracked_person_2') is not None
async def test_ws_list(hass, hass_ws_client, storage_setup):
    """Test listing persons over the WebSocket API.

    Expects the single storage person from the fixture and no
    configuration.yaml persons.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    resp = await client.send_json({
        'id': 6,
        'type': 'person/list',
    })
    resp = await client.receive_json()
    assert resp['success']
    assert resp['result']['storage'] == manager.storage_persons
    assert len(resp['result']['storage']) == 1
    assert len(resp['result']['config']) == 0
async def test_ws_create(hass, hass_ws_client, storage_setup,
                         hass_read_only_user):
    """Test creating a person via the WebSocket API.

    The new person is appended after the one seeded by storage_setup.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    resp = await client.send_json({
        'id': 6,
        'type': 'person/create',
        'name': 'Hello',
        'device_trackers': [DEVICE_TRACKER],
        'user_id': hass_read_only_user.id,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 2
    assert resp['success']
    assert resp['result'] == persons[1]
async def test_ws_create_requires_admin(hass, hass_ws_client, storage_setup,
                                        hass_admin_user, hass_read_only_user):
    """Test creating via WS requires admin.

    Strips admin rights from the connected user; the create call must
    fail and leave storage untouched.
    """
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    resp = await client.send_json({
        'id': 6,
        'type': 'person/create',
        'name': 'Hello',
        'device_trackers': [DEVICE_TRACKER],
        'user_id': hass_read_only_user.id,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 1
    assert not resp['success']
async def test_ws_update(hass, hass_ws_client, storage_setup):
    """Test updating a person via the WebSocket API.

    Updates name, device trackers and user id, then verifies both the
    stored person and the entity state reflect the change.
    """
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    persons = manager.storage_persons
    resp = await client.send_json({
        'id': 6,
        'type': 'person/update',
        'person_id': persons[0]['id'],
        'name': 'Updated Name',
        'device_trackers': [DEVICE_TRACKER_2],
        'user_id': None,
    })
    resp = await client.receive_json()
    persons = manager.storage_persons
    assert len(persons) == 1
    assert resp['success']
    assert resp['result'] == persons[0]
    # Was asserted twice in a copy/paste slip; once is enough.
    assert persons[0]['name'] == 'Updated Name'
    assert persons[0]['device_trackers'] == [DEVICE_TRACKER_2]
    assert persons[0]['user_id'] is None
    state = hass.states.get('person.tracked_person')
    assert state.name == 'Updated Name'
async def test_ws_update_require_admin(hass, hass_ws_client, storage_setup,
                                       hass_admin_user):
    """Test updating via WS requires admin.

    A non-admin update must fail and leave the stored person unchanged.
    """
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    # Snapshot the person before the attempted update.
    original = dict(manager.storage_persons[0])
    resp = await client.send_json({
        'id': 6,
        'type': 'person/update',
        'person_id': original['id'],
        'name': 'Updated Name',
        'device_trackers': [DEVICE_TRACKER_2],
        'user_id': None,
    })
    resp = await client.receive_json()
    assert not resp['success']
    not_updated = dict(manager.storage_persons[0])
    assert original == not_updated
async def test_ws_delete(hass, hass_ws_client, storage_setup):
    """Test deleting via WS."""
    person_manager = hass.data[DOMAIN]
    ws_client = await hass_ws_client(hass)
    target_id = person_manager.storage_persons[0]['id']
    await ws_client.send_json({
        'id': 6,
        'type': 'person/delete',
        'person_id': target_id,
    })
    msg = await ws_client.receive_json()
    assert msg['success']
    # Storage, state machine and entity registry must all be cleaned up.
    assert len(person_manager.storage_persons) == 0
    assert len(hass.states.async_entity_ids('person')) == 0
    ent_reg = await hass.helpers.entity_registry.async_get_registry()
    assert not ent_reg.async_is_registered('person.tracked_person')
async def test_ws_delete_require_admin(hass, hass_ws_client, storage_setup,
                                       hass_admin_user):
    """Test deleting via WS requires admin."""
    hass_admin_user.groups = []
    manager = hass.data[DOMAIN]
    client = await hass_ws_client(hass)
    # Only send the fields a delete command takes. The original payload
    # carried copy-pasted update fields (name/device_trackers/user_id),
    # which could make the command fail on schema validation instead of
    # on the admin check this test is about.
    resp = await client.send_json({
        'id': 6,
        'type': 'person/delete',
        'person_id': manager.storage_persons[0]['id'],
    })
    resp = await client.receive_json()
    assert not resp['success']
    persons = manager.storage_persons
    assert len(persons) == 1
async def test_create_invalid_user_id(hass):
    """Test we do not allow invalid user ID during creation."""
    manager = PersonManager(hass, Mock(), [])
    await manager.async_initialize()
    # A user id unknown to the auth store must be rejected.
    with pytest.raises(ValueError):
        await manager.async_create_person(name='Hello', user_id='non-existing')
async def test_create_duplicate_user_id(hass, hass_admin_user):
    """Test we do not allow duplicate user ID during creation."""
    manager = PersonManager(hass, Mock(async_add_entities=mock_coro_func()), [])
    await manager.async_initialize()
    await manager.async_create_person(name='Hello', user_id=hass_admin_user.id)
    # Linking a second person to the same user must fail.
    with pytest.raises(ValueError):
        await manager.async_create_person(name='Hello',
                                          user_id=hass_admin_user.id)
async def test_update_double_user_id(hass, hass_admin_user):
    """Test we do not allow double user ID during update."""
    manager = PersonManager(hass, Mock(async_add_entities=mock_coro_func()), [])
    await manager.async_initialize()
    await manager.async_create_person(name='Hello', user_id=hass_admin_user.id)
    unlinked = await manager.async_create_person(name='Hello')
    # Re-linking the already-linked user to a second person must fail.
    with pytest.raises(ValueError):
        await manager.async_update_person(person_id=unlinked['id'],
                                          user_id=hass_admin_user.id)
async def test_update_invalid_user_id(hass):
    """Test updating to invalid user ID."""
    manager = PersonManager(hass, Mock(async_add_entities=mock_coro_func()), [])
    await manager.async_initialize()
    person = await manager.async_create_person(name='Hello')
    # Updating to a user id unknown to the auth store must fail.
    with pytest.raises(ValueError):
        await manager.async_update_person(person_id=person['id'],
                                          user_id='non-existing')
async def test_update_person_when_user_removed(hass, hass_read_only_user):
    """Update person when user is removed."""
    manager = PersonManager(hass, Mock(async_add_entities=mock_coro_func()), [])
    await manager.async_initialize()
    person = await manager.async_create_person(
        name='Hello', user_id=hass_read_only_user.id)
    # Removing the linked auth user detaches it from the person record.
    await hass.auth.async_remove_user(hass_read_only_user)
    await hass.async_block_till_done()
    assert person['user_id'] is None
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~
Contains fixtures used by tests.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import re
import sys
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
from jinja2 import Environment
@pytest.fixture
def env():
    '''Return a fresh, default-configured jinja2 Environment.'''
    return Environment()
@pytest.fixture
def dict_loader():
    '''Return a DictLoader serving one in-memory template.'''
    return loaders.DictLoader({
        'justdict.html': 'FOO'
    })
@pytest.fixture
def package_loader():
    '''Return a PackageLoader reading templates from the test resources package.'''
    return loaders.PackageLoader('jinja2.testsuite.res', 'templates')
@pytest.fixture
def filesystem_loader():
    '''Return a FileSystemLoader rooted at res/templates next to this file.'''
    here = os.path.dirname(os.path.abspath(__file__))
    return loaders.FileSystemLoader(here + '/res/templates')
@pytest.fixture
def function_loader():
    '''Return a FunctionLoader backed by a dict lookup for one template.'''
    return loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
@pytest.fixture
def choice_loader(dict_loader, package_loader):
    '''Return a ChoiceLoader that tries the dict loader before the package loader.'''
    return loaders.ChoiceLoader([dict_loader, package_loader])
@pytest.fixture
def prefix_loader(filesystem_loader, dict_loader):
    '''Return a PrefixLoader dispatching on the 'a'/'b' path prefixes.'''
    return loaders.PrefixLoader({
        'a': filesystem_loader,
        'b': dict_loader
    })
Fix errors caused by the removal of __init__.py from the test suite
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~
Contains fixtures used by tests.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import re
import sys
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2
from jinja2 import Environment
@pytest.fixture
def env():
    '''Return a fresh, default-configured jinja2 Environment.'''
    return Environment()
@pytest.fixture
def dict_loader():
    '''Return a DictLoader serving one in-memory template.'''
    return loaders.DictLoader({
        'justdict.html': 'FOO'
    })
@pytest.fixture
def package_loader():
    '''Return a PackageLoader reading templates from the 'res' package.'''
    return loaders.PackageLoader('res', 'templates')
@pytest.fixture
def filesystem_loader():
    '''Return a FileSystemLoader rooted at res/templates next to this file.'''
    here = os.path.dirname(os.path.abspath(__file__))
    return loaders.FileSystemLoader(here + '/res/templates')
@pytest.fixture
def function_loader():
    '''Return a FunctionLoader backed by a dict lookup for one template.'''
    return loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
@pytest.fixture
def choice_loader(dict_loader, package_loader):
    '''Return a ChoiceLoader that tries the dict loader before the package loader.'''
    return loaders.ChoiceLoader([dict_loader, package_loader])
@pytest.fixture
def prefix_loader(filesystem_loader, dict_loader):
    '''Return a PrefixLoader dispatching on the 'a'/'b' path prefixes.'''
    return loaders.PrefixLoader({
        'a': filesystem_loader,
        'b': dict_loader
    })
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from opps.channels.models import Channel
from opps.core.permissions.models import Permission
def get_url(app_label, model_name):
    """Build the autocomplete lookup URL for the given app/model pair."""
    base = reverse('permissions:grp_autocomplete_lookup')
    return "{}?term=&app_label={}&model_name={}".format(
        base, app_label, model_name)
class OppsAutocompleteLookupTest(TestCase):
    """Exercise the grp_autocomplete_lookup view against channel permissions."""

    def setUp(self):
        User = get_user_model()
        self.user = User.objects.create(username='test', is_staff=True)
        self.user.set_password('test')
        self.user.save()
        self.user2 = User.objects.create(username='test2', is_staff=True)
        self.user2.set_password('test')
        self.user2.save()
        self.site = Site.objects.all()[0]
        self.channel_allowed = Channel.objects.create(
            name='Home',
            slug='home',
            site=self.site,
            user=self.user
        )
        self.channel_not_allowed = Channel.objects.create(
            name='Top secret',
            slug='top-secret',
            site=self.site,
            user=self.user
        )
        # Grant the first user access to the 'Home' channel only.
        self.permission = Permission.objects.create(user=self.user)
        self.permission.channel.add(self.channel_allowed)
        self.permission.save()
        self.client = Client()

    def test_user_has_permission(self):
        self.client.login(username='test', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        pk = self.channel_allowed.pk
        self.assertEqual(response.status_code, 200)
        # len(filter(...)) raises TypeError on Python 3 (filter returns a
        # lazy iterator); a list comprehension works on Python 2 and 3.
        self.assertEqual(len([x for x in result if x['value'] == pk]), 1)

    def test_user_hasnt_permission(self):
        self.client.login(username='test', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        pk = self.channel_not_allowed.pk
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len([x for x in result if x['value'] == pk]), 0)

    def test_user_without_permission(self):
        self.client.login(username='test2', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['value'], None)
        self.assertEqual(result[0]['label'], '0 results')
Add test 'test_user_has_permission_on_site' for opps.core.permissions.views
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from opps.channels.models import Channel
from opps.core.permissions.models import Permission
def get_url(app_label, model_name):
    """Build the autocomplete lookup URL for the given app/model pair."""
    base = reverse('permissions:grp_autocomplete_lookup')
    return "{}?term=&app_label={}&model_name={}".format(
        base, app_label, model_name)
class OppsAutocompleteLookupTest(TestCase):
    """Exercise grp_autocomplete_lookup filtering by channel and site permissions."""

    def setUp(self):
        User = get_user_model()
        self.user = User.objects.create(username='test', is_staff=True)
        self.user.set_password('test')
        self.user.save()
        self.user2 = User.objects.create(username='test2', is_staff=True)
        self.user2.set_password('test')
        self.user2.save()
        self.site = Site.objects.all()[0]
        self.another_site = Site.objects.create(domain='oppsproject.org')
        self.allowed_channel = Channel.objects.create(
            name='Home',
            slug='home',
            site=self.site,
            user=self.user
        )
        self.another_allowed_channel = Channel.objects.create(
            name='Contact',
            slug='contact',
            site=self.another_site,
            user=self.user
        )
        self.not_allowed_channel = Channel.objects.create(
            name='Top secret',
            slug='top-secret',
            site=self.site,
            user=self.user
        )
        # Grant access to one channel directly and to a whole second site.
        self.permission = Permission.objects.create(user=self.user)
        self.permission.channel.add(self.allowed_channel)
        self.permission.site.add(self.another_site)
        self.client = Client()

    def test_user_has_permission_on_channel(self):
        self.client.login(username='test', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        pk = self.allowed_channel.pk
        self.assertEqual(response.status_code, 200)
        # len(filter(...)) raises TypeError on Python 3 (filter returns a
        # lazy iterator); a list comprehension works on Python 2 and 3.
        self.assertEqual(len([x for x in result if x['value'] == pk]), 1)

    def test_user_has_permission_on_site(self):
        self.client.login(username='test', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        pk = self.another_allowed_channel.pk
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len([x for x in result if x['value'] == pk]), 1)

    def test_user_hasnt_permission(self):
        self.client.login(username='test', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        pk = self.not_allowed_channel.pk
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len([x for x in result if x['value'] == pk]), 0)

    def test_user_without_permission(self):
        self.client.login(username='test2', password='test')
        response = self.client.get(get_url('channels', 'Channel'))
        result = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['value'], None)
        self.assertEqual(result[0]['label'], '0 results')
|
#!/usr/bin/python
import baseclass
import os
import unittest
import time
import blivet.devicelibs.mdraid as mdraid
import blivet.errors as errors
from blivet.size import Size
class MDRaidTestCase(unittest.TestCase):
    """Tests for mdraid helper functions that do not require root."""

    def testMDRaid(self):
        ##
        ## getRaidLevel
        ##
        # Aliases ("stripe", "mirror"), plain ints and mixed-case names all
        # resolve to canonical raid level objects.
        self.assertEqual(mdraid.getRaidLevel("container").name, "container")
        self.assertEqual(mdraid.getRaidLevel("stripe").name, "raid0")
        self.assertEqual(mdraid.getRaidLevel("mirror").name, "raid1")
        self.assertEqual(mdraid.getRaidLevel("4").name, "raid4")
        self.assertEqual(mdraid.getRaidLevel(5).name, "raid5")
        self.assertEqual(mdraid.getRaidLevel("RAID6").name, "raid6")
        self.assertEqual(mdraid.getRaidLevel("raid10").name, "raid10")
        ##
        ## get_raid_superblock_size
        ##
        # Superblock size steps down with decreasing member size.
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="256 GiB")),
                         Size(spec="128 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="128 GiB")),
                         Size(spec="128 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="64 GiB")),
                         Size(spec="64 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="63 GiB")),
                         Size(spec="32 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="10 GiB")),
                         Size(spec="8 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="1 GiB")),
                         Size(spec="1 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="1023 MiB")),
                         Size(spec="1 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="512 MiB")),
                         Size(spec="1 MiB"))
        # An unrecognized metadata version falls back to the fixed default.
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="257 MiB"),
                                                         version="version"),
                         mdraid.MD_SUPERBLOCK_SIZE)
class MDRaidAsRootTestCase(baseclass.DevicelibsTestCase):
    """Destructive mdraid tests operating on loop devices; require root."""

    def __init__(self, *args, **kwargs):
        super(MDRaidAsRootTestCase, self).__init__(*args, **kwargs)
        # Name of the array the test assembles; torn down afterwards.
        self._dev_name = "/dev/md0"

    def tearDown(self):
        # Best-effort cleanup: without it, a failed test leaves the array
        # assembled and the loop devices holding md metadata, breaking
        # subsequent runs.
        try:
            mdraid.mddeactivate(self._dev_name)
        except mdraid.MDRaidError:
            pass
        for loop in self._LOOP_DEVICES:
            try:
                mdraid.mddestroy(self._loopMap[loop])
            except mdraid.MDRaidError:
                pass
        super(MDRaidAsRootTestCase, self).tearDown()

    @unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
    def testMDRaidAsRoot(self):
        _LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]]
        _LOOP_DEV1 = self._loopMap[self._LOOP_DEVICES[1]]
        ##
        ## mdcreate
        ##
        # pass
        self.assertEqual(mdraid.mdcreate(self._dev_name, 1, [_LOOP_DEV0, _LOOP_DEV1]), None)
        # wait for raid to settle
        time.sleep(2)
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mdcreate, "/dev/md1", 1, ["/not/existing/dev0", "/not/existing/dev1"])
        ##
        ## mddeactivate
        ##
        # pass
        self.assertEqual(mdraid.mddeactivate(self._dev_name), None)
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mddeactivate, "/not/existing/md")
        ##
        ## mdadd
        ##
        # pass
        # TODO
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mdadd, "/not/existing/device")
        ##
        ## mdactivate
        ##
        self.assertRaises(mdraid.MDRaidError, mdraid.mdactivate, "/not/existing/md", uuid=32)
        # requires uuid
        self.assertRaises(mdraid.MDRaidError, mdraid.mdactivate, "/dev/md1")
        ##
        ## mddestroy
        ##
        # pass
        self.assertEqual(mdraid.mddestroy(_LOOP_DEV0), None)
        self.assertEqual(mdraid.mddestroy(_LOOP_DEV1), None)
        # pass
        # Note that these should fail because mdadm is unable to locate the
        # device. The mdadm Kill function does return 2, but the mdadm process
        # returns 0 for both tests.
        self.assertIsNone(mdraid.mddestroy(self._dev_name))
        self.assertIsNone(mdraid.mddestroy("/not/existing/device"))
def suite():
    """Collect every test case in this module into a single suite."""
    loader = unittest.TestLoader()
    cases = (MDRaidTestCase, MDRaidAsRootTestCase)
    return unittest.TestSuite([loader.loadTestsFromTestCase(c) for c in cases])
if __name__ == "__main__":
    # Allow running this module directly as a script.
    unittest.main()
Teardown RAID device once testing is over
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
#!/usr/bin/python
import baseclass
import os
import unittest
import time
import blivet.devicelibs.mdraid as mdraid
import blivet.errors as errors
from blivet.size import Size
class MDRaidTestCase(unittest.TestCase):
    """Tests for mdraid helper functions that do not require root."""

    def testMDRaid(self):
        ##
        ## getRaidLevel
        ##
        # Aliases ("stripe", "mirror"), plain ints and mixed-case names all
        # resolve to canonical raid level objects.
        self.assertEqual(mdraid.getRaidLevel("container").name, "container")
        self.assertEqual(mdraid.getRaidLevel("stripe").name, "raid0")
        self.assertEqual(mdraid.getRaidLevel("mirror").name, "raid1")
        self.assertEqual(mdraid.getRaidLevel("4").name, "raid4")
        self.assertEqual(mdraid.getRaidLevel(5).name, "raid5")
        self.assertEqual(mdraid.getRaidLevel("RAID6").name, "raid6")
        self.assertEqual(mdraid.getRaidLevel("raid10").name, "raid10")
        ##
        ## get_raid_superblock_size
        ##
        # Superblock size steps down with decreasing member size.
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="256 GiB")),
                         Size(spec="128 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="128 GiB")),
                         Size(spec="128 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="64 GiB")),
                         Size(spec="64 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="63 GiB")),
                         Size(spec="32 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="10 GiB")),
                         Size(spec="8 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="1 GiB")),
                         Size(spec="1 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="1023 MiB")),
                         Size(spec="1 MiB"))
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="512 MiB")),
                         Size(spec="1 MiB"))
        # An unrecognized metadata version falls back to the fixed default.
        self.assertEqual(mdraid.get_raid_superblock_size(Size(spec="257 MiB"),
                                                         version="version"),
                         mdraid.MD_SUPERBLOCK_SIZE)
class MDRaidAsRootTestCase(baseclass.DevicelibsTestCase):
    """Destructive mdraid tests operating on loop devices; require root."""

    def __init__(self, *args, **kwargs):
        """Set up the structure of the mdraid array."""
        super(MDRaidAsRootTestCase, self).__init__(*args, **kwargs)
        # Name of the array the test assembles; torn down afterwards.
        self._dev_name = "/dev/md0"

    def tearDown(self):
        # BUG FIX: the original referenced _LOOP_DEV0/_LOOP_DEV1 here, but
        # those are locals of testMDRaidAsRoot — undefined in tearDown, so
        # cleanup raised NameError, which the MDRaidError handler did not
        # catch. Resolve the loop devices from self._loopMap instead, and
        # make each cleanup step independently best-effort.
        try:
            mdraid.mddeactivate(self._dev_name)
        except mdraid.MDRaidError:
            pass
        for loop in self._LOOP_DEVICES:
            try:
                mdraid.mddestroy(self._loopMap[loop])
            except mdraid.MDRaidError:
                pass
        super(MDRaidAsRootTestCase, self).tearDown()

    @unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
    def testMDRaidAsRoot(self):
        _LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]]
        _LOOP_DEV1 = self._loopMap[self._LOOP_DEVICES[1]]
        ##
        ## mdcreate
        ##
        # pass
        self.assertEqual(mdraid.mdcreate(self._dev_name, 1, [_LOOP_DEV0, _LOOP_DEV1]), None)
        # wait for raid to settle
        time.sleep(2)
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mdcreate, "/dev/md1", 1, ["/not/existing/dev0", "/not/existing/dev1"])
        ##
        ## mddeactivate
        ##
        # pass
        self.assertEqual(mdraid.mddeactivate(self._dev_name), None)
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mddeactivate, "/not/existing/md")
        ##
        ## mdadd
        ##
        # pass
        # TODO
        # fail
        self.assertRaises(mdraid.MDRaidError, mdraid.mdadd, "/not/existing/device")
        ##
        ## mdactivate
        ##
        self.assertRaises(mdraid.MDRaidError, mdraid.mdactivate, "/not/existing/md", uuid=32)
        # requires uuid
        self.assertRaises(mdraid.MDRaidError, mdraid.mdactivate, "/dev/md1")
        ##
        ## mddestroy
        ##
        # pass
        self.assertEqual(mdraid.mddestroy(_LOOP_DEV0), None)
        self.assertEqual(mdraid.mddestroy(_LOOP_DEV1), None)
        # pass
        # Note that these should fail because mdadm is unable to locate the
        # device. The mdadm Kill function does return 2, but the mdadm process
        # returns 0 for both tests.
        self.assertIsNone(mdraid.mddestroy(self._dev_name))
        self.assertIsNone(mdraid.mddestroy("/not/existing/device"))
def suite():
    """Collect every test case in this module into a single suite."""
    loader = unittest.TestLoader()
    cases = (MDRaidTestCase, MDRaidAsRootTestCase)
    return unittest.TestSuite([loader.loadTestsFromTestCase(c) for c in cases])
if __name__ == "__main__":
    # Allow running this module directly as a script.
    unittest.main()
|
#!/usr/bin/env python3

"""
Created on 3 May 2021

@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""

# Manual round-trip check: scan an installation for package versions,
# serialize to JSON, rebuild from the JSON and compare for equality.

import json

from scs_core.data.json import JSONify
from scs_core.estate.package_version import PackageVersions
from scs_host.sys.host import Host

# --------------------------------------------------------------------------------------------------------------------

# NOTE(review): hard-coded developer path — presumably should come from
# Host (imported above but unused here); confirm before running elsewhere.
root = '/Users/bruno/Python/MacProject'

pvs1 = PackageVersions.construct_from_installation(root)
print(pvs1)

jstr = JSONify.dumps(pvs1)
print(jstr)
print("-")

# Rebuild from the serialized form and verify the round trip is lossless.
pvs2 = PackageVersions.construct_from_jdict(json.loads(jstr))
print(pvs2)

print(pvs2 == pvs1)
Added PackageVersion class
#!/usr/bin/env python3

"""
Created on 3 May 2021

@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""

# Manual round-trip check: scan the host installation for package versions,
# serialize to JSON, rebuild from the JSON and compare for equality.

import json

from scs_core.data.json import JSONify
from scs_core.estate.package_version import PackageVersions
from scs_host.sys.host import Host

# --------------------------------------------------------------------------------------------------------------------

root = Host.scs_path()

pvs1 = PackageVersions.construct_from_installation(root)
print(pvs1)

jstr = JSONify.dumps(pvs1, indent=4)
print(jstr)
print("-")

# Rebuild from the serialized form and verify the round trip is lossless.
pvs2 = PackageVersions.construct_from_jdict(json.loads(jstr))
print(pvs2)

print(pvs2 == pvs1)
|
from . import functions
import os
import plistlib
import re
import sublime
import yaml
ST_SUPPORT_SYNTAX = int(sublime.version()) >= 3084
ST_LANGUAGES = [".sublime-syntax", ".tmLanguage"] if ST_SUPPORT_SYNTAX else [".tmLanguage"]
class SyntaxMappings:
    """Collects first-line-match syntax mappings from user settings and
    from the syntax definitions shipped with ST packages.

    Defect fixed: four bare ``except:`` clauses, which would also swallow
    SystemExit/KeyboardInterrupt, are narrowed to ``except Exception:``.
    """

    settings = None
    logger = None

    # contents of this list are dict whose keys are
    #     file_extensions
    #     file_path
    #     first_line_match
    #     first_line_match_compiled
    syntax_mappings = []

    # the path of all syntax files
    syntax_files = []

    def __init__(self, settings, logger):
        self.settings = settings
        self.logger = logger
        self.syntax_files = self._find_syntax_file_paths(True)
        self.syntax_mappings = self._build_syntax_mappings()
        self.logger.debug("found syntax files: {0}".format(self.syntax_files))

    def __iter__(self):
        return iter(self.value())

    def __len__(self):
        return len(self.value())

    def value(self, val=None):
        """Return the mapping list, or replace it when val is given."""
        if val is None:
            return self.syntax_mappings
        else:
            self.syntax_mappings = val

    def _find_syntax_file_paths(self, drop_duplicated=False):
        """
        @brief find the path of all syntax files

        @param drop_duplicated if True, for a syntax, only the highest priority resource will be returned

        @return list<string> the path of all syntax files
        """
        if drop_duplicated is False:
            syntax_files = []
            for syntax_file_ext in ST_LANGUAGES:
                syntax_files += sublime.find_resources("*" + syntax_file_ext)
        else:
            # key = syntax resource path without extension
            # value = the corresponding extension
            # example: { 'Packages/Java/Java': '.sublime-syntax' }
            syntax_griddle = {}
            for syntax_file_ext in ST_LANGUAGES:
                resources = sublime.find_resources("*" + syntax_file_ext)
                for resource in resources:
                    resource_name, resource_ext = os.path.splitext(resource)
                    if resource_name not in syntax_griddle:
                        syntax_griddle[resource_name] = resource_ext
            # combine a name and an extension back into a full path
            syntax_files = [n + e for n, e in syntax_griddle.items()]
        return syntax_files

    def _build_syntax_mappings(self):
        # user-defined mappings take priority over package-derived ones
        return self._build_syntax_mappings_from_user() + self._build_syntax_mappings_from_st()

    def _build_syntax_mappings_from_user(self):
        """ load from user settings """
        mapping_settings = self.settings.get("syntax_mapping", {}).items()
        syntax_mappings = []
        for syntax_file_partial, first_line_matches in mapping_settings:
            first_line_match_regexes = []
            for first_line_match in first_line_matches:
                try:
                    first_line_match_regexes.append(re.compile(first_line_match))
                except Exception:  # narrowed from a bare "except:"
                    self.logger.error(
                        'regex compilation failed in user settings "{0}": {1}'.format(
                            syntax_file_partial, first_line_match
                        )
                    )
            if first_line_match_regexes:
                # syntax_file_partial could be partial path
                # we try to get the real path here
                is_syntax_file_found = False
                for syntax_file in self.syntax_files:
                    if syntax_file.find(syntax_file_partial) >= 0:
                        self.logger.info(
                            'match syntax file "{0}" with "{1}"'.format(
                                syntax_file_partial, syntax_file
                            )
                        )
                        is_syntax_file_found = True
                        syntax_mappings.append(
                            {
                                "file_extensions": None,
                                "file_path": syntax_file,
                                "first_line_match": first_line_matches,
                                "first_line_match_compiled": first_line_match_regexes,
                            }
                        )
                        break
                if is_syntax_file_found is False:
                    self.logger.error(
                        'cannot find a syntax file in user settings "{0}"'.format(
                            syntax_file_partial
                        )
                    )
        return syntax_mappings

    def _build_syntax_mappings_from_st(self):
        """ load from ST packages (one-time job, unless restart ST) """
        syntax_mappings = []
        for syntax_file in self.syntax_files:
            syntax_file_content = sublime.load_resource(syntax_file).strip()
            attrs = self._get_attributes_from_syntax_file_content(
                syntax_file_content,
                [
                    "file_extensions",
                    "file_types",  # i.e., the 'file_extensions' in XML
                    "first_line_match",
                ],
            )
            if attrs is None:
                self.logger.error("fail parsing file: {0}".format(syntax_file))
                continue
            # use 'file_extensions' as the formal key
            if attrs["file_types"] is not None:
                attrs["file_extensions"] = attrs["file_types"]
            attrs.pop("file_types")
            attrs.update({"file_path": syntax_file, "first_line_match_compiled": None})
            if attrs["first_line_match"] is not None:
                try:
                    attrs["first_line_match_compiled"] = [re.compile(attrs["first_line_match"])]
                except Exception:  # narrowed from a bare "except:"
                    self.logger.error(
                        'regex compilation failed in "{0}": {1}'.format(
                            syntax_file, attrs["first_line_match"]
                        )
                    )
                attrs["first_line_match"] = [attrs["first_line_match"]]
            syntax_mappings.append(attrs)
        return syntax_mappings

    def _get_attributes_from_syntax_file_content(self, content="", attrs=[]):
        """ find the given attributes in syntax file content """
        if content.lstrip().startswith("<"):
            return self._get_attributes_from_xml_syntax_file_content(content, attrs)
        else:
            return self._get_attributes_from_yaml_syntax_file_content(content, attrs)

    def _get_attributes_from_yaml_syntax_file_content(self, content="", attrs=[]):
        """ find attributes in .sublime-syntax content """
        results = {}
        try:
            # "contexts:" is usually the last (and largest) part of a syntax definition.
            # to speed up searching, strip everything behind "contexts:"
            cut_pos = content.find("contexts:")
            if cut_pos >= 0:
                content = content[:cut_pos]
            parsed = yaml.safe_load(content)
            if parsed is None:
                raise Exception("fail parsing YAML content")
        except Exception:  # narrowed from a bare "except:"
            return None
        for attr in attrs:
            results[attr] = parsed[attr] if attr in parsed else None
        return results

    def _get_attributes_from_xml_syntax_file_content(self, content="", attrs=[]):
        """ find attributes in .tmLanguage content """
        attrs = [functions.snake_to_camel(attr) for attr in attrs]
        results = {}
        try:
            # "<key>patterns</key>" is usually the last (and largest) part of a syntax definition.
            # to speed up searching, strip everything behind "<key>patterns</key>"
            cut_pos = content.find("<key>patterns</key>")
            if cut_pos >= 0:
                content = content[:cut_pos] + r"</dict></plist>"
            parsed = plistlib.readPlistFromBytes(content.encode("UTF-8"))
        except Exception:  # narrowed from a bare "except:"
            return None
        for attr in attrs:
            attr_snake = functions.camel_to_snake(attr)
            results[attr_snake] = parsed[attr] if attr in parsed else None
        return results
Specify an Exception to "except:" statement
Signed-off-by: Jack Cherng <159f0f32a62cc912ca55f89bb5e06807cf019bc7@gmail.com>
from . import functions
import os
import plistlib
import re
import sublime
import yaml
ST_SUPPORT_SYNTAX = int(sublime.version()) >= 3084
ST_LANGUAGES = [".sublime-syntax", ".tmLanguage"] if ST_SUPPORT_SYNTAX else [".tmLanguage"]
class SyntaxMappings:
settings = None
logger = None
# contents of this list are dict whose keys are
# file_extensions
# file_path
# first_line_match
# first_line_match_compiled
syntax_mappings = []
# the path of all syntax files
syntax_files = []
def __init__(self, settings, logger):
self.settings = settings
self.logger = logger
self.syntax_files = self._find_syntax_file_paths(True)
self.syntax_mappings = self._build_syntax_mappings()
self.logger.debug("found syntax files: {0}".format(self.syntax_files))
def __iter__(self):
return iter(self.value())
def __len__(self):
return len(self.value())
def value(self, val=None):
if val is None:
return self.syntax_mappings
else:
self.syntax_mappings = val
def _find_syntax_file_paths(self, drop_duplicated=False):
"""
@brief find the path of all syntax files
@param drop_duplicated if True, for a syntax, only the highest priority resource will be returned
@return list<string> the path of all syntax files
"""
if drop_duplicated is False:
syntax_files = []
for syntax_file_ext in ST_LANGUAGES:
syntax_files += sublime.find_resources("*" + syntax_file_ext)
else:
# key = syntax resource path without extension
# value = the corresponding extension
# example: { 'Packages/Java/Java': '.sublime-syntax' }
syntax_griddle = {}
for syntax_file_ext in ST_LANGUAGES:
resources = sublime.find_resources("*" + syntax_file_ext)
for resource in resources:
resource_name, resource_ext = os.path.splitext(resource)
if resource_name not in syntax_griddle:
syntax_griddle[resource_name] = resource_ext
# combine a name and an extension back into a full path
syntax_files = [n + e for n, e in syntax_griddle.items()]
return syntax_files
def _build_syntax_mappings(self):
return self._build_syntax_mappings_from_user() + self._build_syntax_mappings_from_st()
def _build_syntax_mappings_from_user(self):
""" load from user settings """
mapping_settings = self.settings.get("syntax_mapping", {}).items()
syntax_mappings = []
for syntax_file_partial, first_line_matches in mapping_settings:
first_line_match_regexes = []
for first_line_match in first_line_matches:
try:
first_line_match_regexes.append(re.compile(first_line_match))
except Exception:
self.logger.error(
'regex compilation failed in user settings "{0}": {1}'.format(
syntax_file_partial, first_line_match
)
)
if first_line_match_regexes:
# syntax_file_partial could be partial path
# we try to get the real path here
is_syntax_file_found = False
for syntax_file in self.syntax_files:
if syntax_file.find(syntax_file_partial) >= 0:
self.logger.info(
'match syntax file "{0}" with "{1}"'.format(
syntax_file_partial, syntax_file
)
)
is_syntax_file_found = True
syntax_mappings.append(
{
"file_extensions": None,
"file_path": syntax_file,
"first_line_match": first_line_matches,
"first_line_match_compiled": first_line_match_regexes,
}
)
break
if is_syntax_file_found is False:
self.logger.error(
'cannot find a syntax file in user settings "{0}"'.format(
syntax_file_partial
)
)
return syntax_mappings
def _build_syntax_mappings_from_st(self):
""" load from ST packages (one-time job, unless restart ST) """
syntax_mappings = []
for syntax_file in self.syntax_files:
syntax_file_content = sublime.load_resource(syntax_file).strip()
attrs = self._get_attributes_from_syntax_file_content(
syntax_file_content,
[
"file_extensions",
"file_types", # i.e., the 'file_extensions' in XML
"first_line_match",
],
)
if attrs is None:
self.logger.error("fail parsing file: {0}".format(syntax_file))
continue
# use 'file_extensions' as the formal key
if attrs["file_types"] is not None:
attrs["file_extensions"] = attrs["file_types"]
attrs.pop("file_types")
attrs.update({"file_path": syntax_file, "first_line_match_compiled": None})
if attrs["first_line_match"] is not None:
try:
attrs["first_line_match_compiled"] = [re.compile(attrs["first_line_match"])]
except Exception:
self.logger.error(
'regex compilation failed in "{0}": {1}'.format(
syntax_file, attrs["first_line_match"]
)
)
attrs["first_line_match"] = [attrs["first_line_match"]]
syntax_mappings.append(attrs)
return syntax_mappings
def _get_attributes_from_syntax_file_content(self, content="", attrs=[]):
""" find "first_line_match" or "first_line_match" in syntax file content """
if content.lstrip().startswith("<"):
return self._get_attributes_from_xml_syntax_file_content(content, attrs)
else:
return self._get_attributes_from_yaml_syntax_file_content(content, attrs)
def _get_attributes_from_yaml_syntax_file_content(self, content="", attrs=[]):
""" find attributes in .sublime-syntax content """
results = {}
try:
# "contexts:" is usually the last (and largest) part of a syntax deinition.
# to speed up searching, strip everything behinds "contexts:"
cut_pos = content.find("contexts:")
if cut_pos >= 0:
content = content[:cut_pos]
parsed = yaml.safe_load(content)
if parsed is None:
raise Exception("fail parsing YAML content")
except Exception:
return None
for attr in attrs:
results[attr] = parsed[attr] if attr in parsed else None
return results
def _get_attributes_from_xml_syntax_file_content(self, content="", attrs=[]):
""" find attributes in .tmLanguage content """
attrs = [functions.snake_to_camel(attr) for attr in attrs]
results = {}
try:
# "<key>patterns</key>" is usually the last (and largest) part of a syntax deinition.
# to speed up searching, strip everything behinds "<key>patterns</key>"
cut_pos = content.find("<key>patterns</key>")
if cut_pos >= 0:
content = content[:cut_pos] + r"</dict></plist>"
parsed = plistlib.readPlistFromBytes(content.encode("UTF-8"))
except Exception:
return None
for attr in attrs:
attr_snake = functions.camel_to_snake(attr)
results[attr_snake] = parsed[attr] if attr in parsed else None
return results
|
import pytest
from mfr.extensions.video import VideoRenderer
@pytest.fixture
def url():
    """Sample file URL; the render test checks it appears as the video src."""
    return 'http://osf.io/file/video.mp4'
@pytest.fixture
def download_url():
    """Sample tokenized download URL passed to VideoRenderer."""
    return 'http://wb.osf.io/file/video.mp4?token=1234'
@pytest.fixture
def file_path():
    """Sample local file path passed to VideoRenderer."""
    return '/tmp/video.mp4'
@pytest.fixture
def assets_url():
    """Sample assets base URL passed to VideoRenderer."""
    return 'http://mfr.osf.io/assets/video/'
@pytest.fixture
def extension():
    """Sample extension passed to VideoRenderer.

    NOTE(review): '.mp3' in a video-renderer test looks like a leftover
    from the audio tests — confirm whether '.mp4' was intended.
    """
    return '.mp3'
@pytest.fixture
def renderer(url, download_url, file_path, assets_url, extension):
    """VideoRenderer wired with the sample fixture values."""
    return VideoRenderer(url, download_url, file_path, assets_url, extension)
class TestRenderVideo:
    """Tests for VideoRenderer.render and its file/cache flags."""

    def test_render_video(self, renderer, url):
        body = renderer.render()
        assert '<video controls' in body
        assert 'src="{}"'.format(url) in body

    # Renamed from test_render_audio_*: this module exercises the video
    # renderer, not the audio one.
    def test_render_video_file_required(self, renderer):
        assert renderer.file_required is False

    def test_render_video_cache_result(self, renderer):
        assert renderer.cache_result is False
video/audio tests
import pytest
from mfr.extensions.video import VideoRenderer
# --- fixtures feeding the VideoRenderer under test -------------------------
@pytest.fixture
def url():
    # canonical URL of the file being rendered
    return 'http://osf.io/file/video.mp4'
@pytest.fixture
def download_url():
    # download URL carrying an access token
    return 'http://wb.osf.io/file/video.mp4?token=1234'
@pytest.fixture
def file_path():
    # local path the file would occupy if it were downloaded
    return '/tmp/video.mp4'
@pytest.fixture
def assets_url():
    # base URL for the extension's static assets
    return 'http://mfr.osf.io/assets/video/'
@pytest.fixture
def extension():
    # NOTE(review): '.mp3' in a video test module — confirm '.mp4' was
    # not intended.
    return '.mp3'
@pytest.fixture
def renderer(url, download_url, file_path, assets_url, extension):
    # the VideoRenderer under test, assembled from the fixtures above
    return VideoRenderer(url, download_url, file_path, assets_url, extension)
class TestRenderVideo:
    """Checks VideoRenderer's rendered markup and its renderer flags."""
    def test_render_video(self, renderer, url):
        markup = renderer.render()
        expected_src = 'src="{}"'.format(url)
        # the renderer must emit a <video> tag pointing at the display URL
        assert '<video controls' in markup
        assert expected_src in markup
    def test_render_video_file_required(self, renderer):
        # the renderer never needs the file on disk
        assert renderer.file_required is False
    def test_render_video_cache_result(self, renderer):
        # rendered output is not cached
        assert renderer.cache_result is False
|
# encoding: utf-8
""" library for the thunk.us API """
import urllib
import urllib2
import json
class Thunk:
    """ class for creating an object which can talk to the
        thunk.us API
    """
    # NOTE(review): urllib.urlencode / urllib2 make this Python 2 only;
    # urllib.parse / urllib.request are the Python 3 equivalents.
    def __init__(self):
        # all endpoints are relative to this base URL
        self.base_url = "http://thunk.us/"
    def create(self, name=None):
        """ method for creating a thunk
            Parameters:
            name -> optional name of the thunk
            Returns:
            parsed JSON response as a dict, or None on connection error
        """
        values = {}
        if name is not None:
            values["name"] = name
        # values is always a dict here, so _query performs a POST
        data = self._query(self.base_url, values)
        return data
    def poke(self, uid):
        """ poke a thunk with the given UID
            Parameters:
            uid -> uid of the thunk to poke
        """
        # not implemented yet
        pass
    def destroy(self, uid):
        """ method to destroy a thunk with the given UID """
        # not implemented yet
        pass
    def check(self, uid):
        """ method for checking the status of a given UID
            Parameters:
            uid -> the UID to check for
            Returns:
            parsed JSON response as a dict, or None on connection error
        """
        url = self.base_url + uid
        return self._query(url)
    def _query(self, url, data = None):
        """ query method to do HTTP POST/GET
            Parameters:
            url -> the url to POST/GET
            data -> header_data as a dict (only for POST)
            Returns:
            Parsed JSON data as dict
            or
            None on error
        """
        if data is not None: # we have POST data if there is data
            values = urllib.urlencode(data)
            request = urllib2.Request(url, values)
        else: # do a GET otherwise
            request = urllib2.Request(url)
        try:
            response = urllib2.urlopen(request)
        except IOError: # no connection
            return None
        json_data = response.read()
        data = json.loads(json_data)
        return data
if __name__ == '__main__':
    import doctest
    # the doctest module has no `doctest()` function — the original call
    # raised AttributeError; testmod() runs this module's doctests.
    doctest.testmod()
add exception class for wrong poke state
# encoding: utf-8
""" library for the thunk.us API """
import urllib
import urllib2
import json
class Thunk:
    """ class for creating an object which can talk to the
        thunk.us API
    """
    # NOTE(review): relies on urllib2, so this file targets Python 2.
    def __init__(self):
        # every endpoint is built from this base URL
        self.base_url = "http://thunk.us/"
    def create(self, name=None):
        """ method for creating a thunk
            Parameters:
            name -> optional name of the thunk
            Returns:
            parsed JSON response as a dict, or None on connection error
        """
        values = {}
        if name is not None:
            values["name"] = name
        # values is a dict (possibly empty), so _query performs a POST
        data = self._query(self.base_url, values)
        return data
    def poke(self, uid):
        """ poke a thunk with the given UID
            Parameters:
            uid -> uid of the thunk to poke
        """
        # not implemented yet
        pass
    def destroy(self, uid):
        """ method to destroy a thunk with the given UID """
        # not implemented yet
        pass
    def check(self, uid):
        """ method for checking the status of a given UID
            Parameters:
            uid -> the UID to check for
            Returns:
            parsed JSON response as a dict, or None on connection error
        """
        url = self.base_url + uid
        return self._query(url)
    def _query(self, url, data = None):
        """ query method to do HTTP POST/GET
            Parameters:
            url -> the url to POST/GET
            data -> header_data as a dict (only for POST)
            Returns:
            Parsed JSON data as dict
            or
            None on error
        """
        if data is not None: # we have POST data if there is data
            values = urllib.urlencode(data)
            request = urllib2.Request(url, values)
        else: # do a GET otherwise
            request = urllib2.Request(url)
        try:
            response = urllib2.urlopen(request)
        except IOError: # no connection
            return None
        json_data = response.read()
        data = json.loads(json_data)
        return data
if __name__ == '__main__':
    import doctest
    # doctest.doctest() does not exist (AttributeError at runtime);
    # testmod() is the correct entry point for running module doctests.
    doctest.testmod()
# Custom Exception Classes
class PokeStateError(Exception):
    """ exception to raise if wrong poke state was provided

        The offending value is kept in `arg` and echoed (repr'd) by str().
    """
    def __init__(self, arg):
        # also hand the argument to the Exception base class so that
        # e.args is populated like any standard exception
        Exception.__init__(self, arg)
        self.arg = arg
    def __str__(self):
        return repr(self.arg)
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
"""
This module represent all of behaviors used in the
Idea management process definition.
"""
import datetime
from pyramid.httpexceptions import HTTPFound
from persistent.list import PersistentList
from dace.util import (
getSite,
getBusinessAction,
copy)
from dace.objectofcollaboration.principal.util import (
has_role,
grant_roles,
get_current)
from dace.processinstance.activity import InfiniteCardinality, ActionType
from novaideo.ips.mailer import mailer_send
from novaideo.mail import (
ARCHIVEIDEA_SUBJECT,
ARCHIVEIDEA_MESSAGE,
PUBLISHEDIDEA_SUBJECT,
PUBLISHEDIDEA_MESSAGE)
from novaideo.content.interface import INovaIdeoApplication, Iidea
from ..user_management.behaviors import global_user_processsecurity
from novaideo import _
from novaideo.content.idea import Idea
from ..comment_management.behaviors import VALIDATOR_BY_CONTEXT
from novaideo.core import acces_action
from novaideo.utilities.util import connect
from novaideo.event import ObjectPublished, CorrelableRemoved
try:
basestring
except NameError:
basestring = str
def createidea_roles_validation(process, context):
    # idea creation is open to any member
    return has_role(role=('Member',))
def createidea_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
class CreateIdea(InfiniteCardinality):
    # Behavior: persist a new idea in 'to work' state, owned and authored
    # by the current user.
    submission_title = _('Save')
    context = INovaIdeoApplication
    roles_validation = createidea_roles_validation
    processsecurity_validation = createidea_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # resolve keyword ids; keywords unknown to the root are persisted first
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        idea = appstruct['_object_data']
        root.addtoproperty('ideas', idea)
        idea.setproperty('keywords_ref', result)
        idea.state.append('to work')
        # the creator becomes the idea's owner
        grant_roles(roles=(('Owner', idea), ))
        idea.setproperty('author', get_current())
        idea.reindex()
        return {'newcontext': idea}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def duplicate_processsecurity_validation(process, context):
    # NOTE(review): `and` binds tighter than `or`, so this evaluates as
    # ((Owner and not archived) or 'version' in state or 'published' in
    # state) and the global check — confirm the grouping matches the
    # intended access rule.
    return ((has_role(role=('Owner', context)) and \
            (not ('archived' in context.state)) or \
            'version' in context.state) or \
            'published' in context.state) and \
            global_user_processsecurity(process, context)
class DuplicateIdea(InfiniteCardinality):
    # Behavior: clone an idea; the copy restarts in 'to work' and keeps a
    # link to the original through the 'originalentity' relation.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-resize-full'
    style_order = 5
    submission_title = _('Save')
    context = Iidea
    processsecurity_validation = duplicate_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        copy_of_idea = copy(context, (root, 'ideas'))
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        appstruct['keywords_ref'] = result
        # unwrap the deform file widgets into their payload objects
        files = [f['_object_data'] for f in appstruct.pop('attached_files')]
        appstruct['attached_files'] = files
        copy_of_idea.setproperty('originalentity', context)
        copy_of_idea.state = PersistentList(['to work'])
        copy_of_idea.setproperty('author', get_current())
        grant_roles(roles=(('Owner', copy_of_idea), ))
        copy_of_idea.set_data(appstruct)
        copy_of_idea.reindex()
        context.reindex()
        return {'newcontext': copy_of_idea}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def del_roles_validation(process, context):
    # only the idea's owner may delete it
    return has_role(role=('Owner', context))
def del_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def del_state_validation(process, context):
    # deletable only once archived, and never for an archived *version*
    return 'archived' in context.state and \
           not('version' in context.state)
class DelIdea(InfiniteCardinality):
    # Behavior: permanently remove an archived idea from the root.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-trash'
    style_order = 4
    submission_title = _('Continue')
    context = Iidea
    roles_validation = del_roles_validation
    processsecurity_validation = del_processsecurity_validation
    state_validation = del_state_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # let listeners clean up correlations before the idea disappears
        request.registry.notify(CorrelableRemoved(object=context))
        root.delfromproperty('ideas', context)
        return {}
    def redirect(self, context, request, **kw):
        root = getSite()
        return HTTPFound(request.resource_url(root))
def edit_roles_validation(process, context):
    return has_role(role=('Owner', context))
def edit_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def edit_state_validation(process, context):
    return "to work" in context.state
class EditIdea(InfiniteCardinality):
    # Behavior: edit an idea in place, snapshotting the previous content
    # as an archived 'version' reachable through the 'version' property.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_picto = 'glyphicon glyphicon-pencil'
    style_order = 1
    submission_title = _('Save')
    context = Iidea
    roles_validation = edit_roles_validation
    processsecurity_validation = edit_processsecurity_validation
    state_validation = edit_state_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        last_version = context.version
        # snapshot the current content; the copy becomes the new 'version'
        copy_of_idea = copy(context,
                            (context, 'version'),
                            new_name=context.__name__,
                            select=('modified_at',),
                            omit=('created_at',),
                            roles=True)
        # NOTE(review): this `newkeywords` is immediately shadowed below —
        # the second element of this call is effectively unused.
        copy_keywords, newkeywords = root.get_keywords(context.keywords)
        copy_of_idea.setproperty('keywords_ref', copy_keywords)
        copy_of_idea.setproperty('version', last_version)
        copy_of_idea.setproperty('originalentity', context.originalentity)
        if last_version is not None:
            grant_roles(roles=(('Owner', last_version), ))
        files = [f['_object_data'] for f in appstruct.pop('attached_files')]
        appstruct['attached_files'] = files
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        appstruct['keywords_ref'] = result
        copy_of_idea.state = PersistentList(['archived', 'version'])
        copy_of_idea.setproperty('author', get_current())
        context.set_data(appstruct)
        context.modified_at = datetime.datetime.today()
        # NOTE(review): any previous 'note' on the idea is discarded here —
        # confirm this is intended.
        if hasattr(context, 'note'):
            del context.note
        copy_of_idea.reindex()
        context.reindex()
        # if the idea was archived meanwhile, trigger 'recuperate' so the
        # freshly edited idea goes back to work
        if 'archived' in context.state:
            recuperate_actions = getBusinessAction(context,
                                                   request,
                                                   'ideamanagement',
                                                   'recuperate')
            if recuperate_actions:
                recuperate_actions[0].execute(context, request, appstruct, **kw)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def submit_roles_validation(process, context):
    return has_role(role=('Owner', context))
def submit_processsecurity_validation(process, context):
    # a duplicated idea may not be submitted until its text differs from
    # the original entity's text
    if getattr(context, 'originalentity', None):
        originalentity = getattr(context, 'originalentity')
        if originalentity.text == context.text:
            return False
    return global_user_processsecurity(process, context)
def submit_state_validation(process, context):
    return 'to work' in context.state
class SubmitIdea(InfiniteCardinality):
    # Behavior: hand the idea over to moderation ('to work' -> 'submited').
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = submit_roles_validation
    processsecurity_validation = submit_processsecurity_validation
    state_validation = submit_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('to work')
        # 'submited' [sic] is the state token used throughout this module
        context.state.append('submited')
        context.reindex()
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def decision_roles_validation(process, context):
    # archive/publish decisions are reserved to moderators
    return has_role(role=('Moderator',))
def decision_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def decision_state_validation(process, context):
    return 'submited' in context.state
class ArchiveIdea(InfiniteCardinality):
    # Behavior: a moderator rejects a submitted idea and mails the author
    # the explanation entered in the form.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = decision_roles_validation
    processsecurity_validation = decision_processsecurity_validation
    state_validation = decision_state_validation
    def start(self, context, request, appstruct, **kw):
        explanation = appstruct['explanation']
        context.state.remove('submited')
        context.state.append('archived')
        context.reindex()
        user = context.author
        localizer = request.localizer
        subject = ARCHIVEIDEA_SUBJECT.format(subject_title=context.title)
        message = ARCHIVEIDEA_MESSAGE.format(
            recipient_title=localizer.translate(_(getattr(user, 'user_title',''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name',''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            explanation=explanation,
            novaideo_title=request.root.title
            )
        mailer_send(subject=subject,
                    recipients=[user.email],
                    body=message)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class PublishIdea(InfiniteCardinality):
    # Behavior: a moderator accepts a submitted idea; the author is mailed
    # and an ObjectPublished event is emitted.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = decision_roles_validation
    processsecurity_validation = decision_processsecurity_validation
    state_validation = decision_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('submited')
        context.state.append('published')
        context.reindex()
        user = context.author
        localizer = request.localizer
        subject = PUBLISHEDIDEA_SUBJECT.format(subject_title=context.title)
        message = PUBLISHEDIDEA_MESSAGE.format(
            recipient_title=localizer.translate(_(getattr(user, 'user_title',''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name',''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            novaideo_title=request.root.title
            )
        mailer_send(subject=subject,
                    recipients=[user.email],
                    body=message)
        request.registry.notify(ObjectPublished(object=context))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def ab_roles_validation(process, context):
    return has_role(role=('Owner', context))
def ab_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def ab_state_validation(process, context):
    return 'to work' in context.state
class AbandonIdea(InfiniteCardinality):
    # Behavior: the owner abandons a draft ('to work' -> 'archived').
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-stop'
    style_order = 2
    context = Iidea
    roles_validation = ab_roles_validation
    processsecurity_validation = ab_processsecurity_validation
    state_validation = ab_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('to work')
        context.state.append('archived')
        context.reindex()
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def re_roles_validation(process, context):
    return has_role(role=('Owner', context))
def re_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def re_state_validation(process, context):
    # a plain archived idea can be recovered; archived *versions* cannot
    return 'archived' in context.state and \
           not('version' in context.state)
class RecuperateIdea(InfiniteCardinality):
    # Behavior: the owner restores an archived idea back to 'to work'.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-play'
    style_order = 3
    context = Iidea
    roles_validation = re_roles_validation
    processsecurity_validation = re_processsecurity_validation
    state_validation = re_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('archived')
        context.state.append('to work')
        context.reindex()
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def comm_roles_validation(process, context):
    return has_role(role=('Member',))
def comm_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def comm_state_validation(process, context):
    return 'published' in context.state
class CommentIdea(InfiniteCardinality):
    # Behavior: any member comments a published idea; the comment can be
    # correlated with related contents chosen in the form.
    isSequential = False
    context = Iidea
    roles_validation = comm_roles_validation
    processsecurity_validation = comm_processsecurity_validation
    state_validation = comm_state_validation
    def start(self, context, request, appstruct, **kw):
        comment = appstruct['_object_data']
        context.addtoproperty('comments', comment)
        user = get_current()
        comment.setproperty('author', user)
        if appstruct['related_contents']['associate']:
            related_contents = appstruct['related_contents']['related_contents']
            correlation = connect(context,
                                  list(related_contents),
                                  {'comment': comment.comment,
                                   'type': comment.intention},
                                  user,
                                  unique=True)
            comment.setproperty('related_correlation', correlation)
        context.reindex()
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def present_roles_validation(process, context):
    return has_role(role=('Member',))
def present_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def present_state_validation(process, context):
    return 'published' in context.state
class PresentIdea(InfiniteCardinality):
    # Behavior: e-mail a published idea to a list of recipients (member
    # objects or raw address strings), optionally copying the sender.
    submission_title = _('Send')
    context = Iidea
    roles_validation = present_roles_validation
    processsecurity_validation = present_processsecurity_validation
    state_validation = present_state_validation
    def start(self, context, request, appstruct, **kw):
        send_to_me = appstruct['send_to_me']
        members = list(appstruct['members'])
        user = get_current()
        if send_to_me:
            members.append(user)
        localizer = request.localizer
        user_title = localizer.translate(_(getattr(user, 'user_title','')))
        user_first_name = getattr(user, 'first_name', user.name)
        user_last_name = getattr(user, 'last_name','')
        url = request.resource_url(context, "@@index")
        presentation_subject = appstruct['subject']
        presentation_message = appstruct['message']
        subject = presentation_subject.format(subject_title=context.title)
        for member in members:
            recipient_title = ''
            recipient_first_name = ''
            recipient_last_name = ''
            member_email = ''
            # recipients are either user objects or plain e-mail strings
            if not isinstance(member, basestring):
                recipient_title = localizer.translate(_(getattr(member, 'user_title','')))
                recipient_first_name = getattr(member, 'first_name', member.name)
                recipient_last_name = getattr(member, 'last_name','')
                member_email = member.email
            else:
                member_email = member
            message = presentation_message.format(
                recipient_title=recipient_title,
                recipient_first_name=recipient_first_name,
                recipient_last_name=recipient_last_name,
                subject_url=url,
                subject_title=getattr(context, 'title', context.name),
                my_title=user_title,
                my_first_name=user_first_name,
                my_last_name=user_last_name,
                novaideo_title=request.root.title
                )
            mailer_send(subject=subject,
                        recipients=[member_email],
                        body=message)
            # record who was contacted; the sender's own copy is excluded
            if not (member is user):
                context._email_persons_contacted.append(member_email)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def associate_processsecurity_validation(process, context):
    # the owner can always associate; other members only once published
    return (has_role(role=('Owner', context)) or \
           (has_role(role=('Member',)) and 'published' in context.state)) and \
           global_user_processsecurity(process, context)
class Associate(InfiniteCardinality):
    # Behavior: create a correlation whose source is this idea.
    context = Iidea
    processsecurity_validation = associate_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        correlation = appstruct['_object_data']
        correlation.setproperty('source', context)
        correlation.setproperty('author', get_current())
        root = getSite()
        root.addtoproperty('correlations', correlation)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def seeidea_processsecurity_validation(process, context):
    # published ideas are visible to everyone passing this check; drafts
    # only to their owner; submitted ideas also to moderators
    return ('published' in context.state or has_role(role=('Owner', context))) or \
           ('submited' in context.state and has_role(role=('Moderator',)))
@acces_action()
class SeeIdea(InfiniteCardinality):
    """SeeIdea is the behavior allowing access to context"""
    title = _('Details')
    context = Iidea
    actionType = ActionType.automatic
    processsecurity_validation = seeidea_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def compare_roles_validation(process, context):
    return has_role(role=('Owner', context))
def compare_processsecurity_validation(process, context):
    # comparison only makes sense when an earlier version exists
    return getattr(context, 'version', None) is not None
class CompareIdea(InfiniteCardinality):
    # Behavior: UI entry point for comparing an idea with its last version.
    title = _('Compare')
    context = Iidea
    roles_validation = compare_roles_validation
    processsecurity_validation = compare_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#TODO behaviors
# comments made on Idea objects are validated by the CommentIdea behavior
VALIDATOR_BY_CONTEXT[Idea] = CommentIdea
Fix edit idea
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
"""
This module represent all of behaviors used in the
Idea management process definition.
"""
import datetime
from pyramid.httpexceptions import HTTPFound
from persistent.list import PersistentList
from dace.util import (
getSite,
getBusinessAction,
copy)
from dace.objectofcollaboration.principal.util import (
has_role,
grant_roles,
get_current)
from dace.processinstance.activity import InfiniteCardinality, ActionType
from novaideo.ips.mailer import mailer_send
from novaideo.mail import (
ARCHIVEIDEA_SUBJECT,
ARCHIVEIDEA_MESSAGE,
PUBLISHEDIDEA_SUBJECT,
PUBLISHEDIDEA_MESSAGE)
from novaideo.content.interface import INovaIdeoApplication, Iidea
from ..user_management.behaviors import global_user_processsecurity
from novaideo import _
from novaideo.content.idea import Idea
from ..comment_management.behaviors import VALIDATOR_BY_CONTEXT
from novaideo.core import acces_action
from novaideo.utilities.util import connect
from novaideo.event import ObjectPublished, CorrelableRemoved
try:
basestring
except NameError:
basestring = str
def createidea_roles_validation(process, context):
    # idea creation is open to any member
    return has_role(role=('Member',))
def createidea_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
class CreateIdea(InfiniteCardinality):
    # Behavior: persist a new idea in 'to work' state, owned and authored
    # by the current user.
    submission_title = _('Save')
    context = INovaIdeoApplication
    roles_validation = createidea_roles_validation
    processsecurity_validation = createidea_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # resolve keyword ids; keywords unknown to the root are persisted first
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        idea = appstruct['_object_data']
        root.addtoproperty('ideas', idea)
        idea.setproperty('keywords_ref', result)
        idea.state.append('to work')
        # the creator becomes the idea's owner
        grant_roles(roles=(('Owner', idea), ))
        idea.setproperty('author', get_current())
        idea.reindex()
        return {'newcontext': idea}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def duplicate_processsecurity_validation(process, context):
    # NOTE(review): `and` binds tighter than `or` — verify the grouping
    # matches the intended access rule.
    return ((has_role(role=('Owner', context)) and \
            (not ('archived' in context.state)) or \
            'version' in context.state) or \
            'published' in context.state) and \
            global_user_processsecurity(process, context)
class DuplicateIdea(InfiniteCardinality):
    # Behavior: clone an idea; the copy restarts in 'to work' and keeps a
    # link to the original through 'originalentity'.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-resize-full'
    style_order = 5
    submission_title = _('Save')
    context = Iidea
    processsecurity_validation = duplicate_processsecurity_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        copy_of_idea = copy(context, (root, 'ideas'))
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        appstruct['keywords_ref'] = result
        # unwrap the deform file widgets into their payload objects
        files = [f['_object_data'] for f in appstruct.pop('attached_files')]
        appstruct['attached_files'] = files
        copy_of_idea.setproperty('originalentity', context)
        copy_of_idea.state = PersistentList(['to work'])
        copy_of_idea.setproperty('author', get_current())
        grant_roles(roles=(('Owner', copy_of_idea), ))
        copy_of_idea.set_data(appstruct)
        copy_of_idea.reindex()
        context.reindex()
        return {'newcontext': copy_of_idea}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(kw['newcontext'], "@@index"))
def del_roles_validation(process, context):
    # only the idea's owner may delete it
    return has_role(role=('Owner', context))
def del_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def del_state_validation(process, context):
    # deletable only once archived, and never for an archived *version*
    return 'archived' in context.state and \
           not('version' in context.state)
class DelIdea(InfiniteCardinality):
    # Behavior: permanently remove an archived idea from the root.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-trash'
    style_order = 4
    submission_title = _('Continue')
    context = Iidea
    roles_validation = del_roles_validation
    processsecurity_validation = del_processsecurity_validation
    state_validation = del_state_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        # let listeners clean up correlations before the idea disappears
        request.registry.notify(CorrelableRemoved(object=context))
        root.delfromproperty('ideas', context)
        return {}
    def redirect(self, context, request, **kw):
        root = getSite()
        return HTTPFound(request.resource_url(root))
def edit_roles_validation(process, context):
    return has_role(role=('Owner', context))
def edit_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def edit_state_validation(process, context):
    return "to work" in context.state
class EditIdea(InfiniteCardinality):
    # Behavior: edit an idea in place, snapshotting the previous content
    # as an archived 'version'; the edit note is stored on the snapshot.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'text-action'
    style_picto = 'glyphicon glyphicon-pencil'
    style_order = 1
    submission_title = _('Save')
    context = Iidea
    roles_validation = edit_roles_validation
    processsecurity_validation = edit_processsecurity_validation
    state_validation = edit_state_validation
    def start(self, context, request, appstruct, **kw):
        root = getSite()
        last_version = context.version
        # snapshot the current content; the copy becomes the new 'version'
        copy_of_idea = copy(context,
                            (context, 'version'),
                            new_name=context.__name__,
                            select=('modified_at',),
                            omit=('created_at',),
                            roles=True)
        # NOTE(review): this `newkeywords` is immediately shadowed below —
        # the second element of this call is effectively unused.
        copy_keywords, newkeywords = root.get_keywords(context.keywords)
        copy_of_idea.setproperty('keywords_ref', copy_keywords)
        copy_of_idea.setproperty('version', last_version)
        copy_of_idea.setproperty('originalentity', context.originalentity)
        if last_version is not None:
            grant_roles(roles=(('Owner', last_version), ))
        files = [f['_object_data'] for f in appstruct.pop('attached_files')]
        appstruct['attached_files'] = files
        keywords_ids = appstruct.pop('keywords')
        result, newkeywords = root.get_keywords(keywords_ids)
        for nkw in newkeywords:
            root.addtoproperty('keywords', nkw)
        result.extend(newkeywords)
        appstruct['keywords_ref'] = result
        copy_of_idea.state = PersistentList(['archived', 'version'])
        copy_of_idea.setproperty('author', get_current())
        # the edit note travels with the archived snapshot, not the idea
        note = appstruct.pop('note', '')
        copy_of_idea.note = note
        context.set_data(appstruct)
        context.modified_at = datetime.datetime.today()
        copy_of_idea.reindex()
        context.reindex()
        # if the idea was archived meanwhile, trigger 'recuperate' so the
        # freshly edited idea goes back to work
        if 'archived' in context.state:
            recuperate_actions = getBusinessAction(context,
                                                   request,
                                                   'ideamanagement',
                                                   'recuperate')
            if recuperate_actions:
                recuperate_actions[0].execute(context, request, appstruct, **kw)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def submit_roles_validation(process, context):
    return has_role(role=('Owner', context))
def submit_processsecurity_validation(process, context):
    # a duplicated idea may not be submitted until its text differs from
    # the original entity's text
    if getattr(context, 'originalentity', None):
        originalentity = getattr(context, 'originalentity')
        if originalentity.text == context.text:
            return False
    return global_user_processsecurity(process, context)
def submit_state_validation(process, context):
    return 'to work' in context.state
class SubmitIdea(InfiniteCardinality):
    # Behavior: hand the idea over to moderation ('to work' -> 'submited').
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = submit_roles_validation
    processsecurity_validation = submit_processsecurity_validation
    state_validation = submit_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('to work')
        # 'submited' [sic] is the state token used throughout this module
        context.state.append('submited')
        context.reindex()
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def decision_roles_validation(process, context):
    # archive/publish decisions are reserved to moderators
    return has_role(role=('Moderator',))
def decision_processsecurity_validation(process, context):
    return global_user_processsecurity(process, context)
def decision_state_validation(process, context):
    return 'submited' in context.state
class ArchiveIdea(InfiniteCardinality):
    # Behavior: a moderator rejects a submitted idea and mails the author
    # the explanation entered in the form.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = decision_roles_validation
    processsecurity_validation = decision_processsecurity_validation
    state_validation = decision_state_validation
    def start(self, context, request, appstruct, **kw):
        explanation = appstruct['explanation']
        context.state.remove('submited')
        context.state.append('archived')
        context.reindex()
        user = context.author
        localizer = request.localizer
        subject = ARCHIVEIDEA_SUBJECT.format(subject_title=context.title)
        message = ARCHIVEIDEA_MESSAGE.format(
            recipient_title=localizer.translate(_(getattr(user, 'user_title',''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name',''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            explanation=explanation,
            novaideo_title=request.root.title
            )
        mailer_send(subject=subject,
                    recipients=[user.email],
                    body=message)
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
class PublishIdea(InfiniteCardinality):
    # Behavior: a moderator accepts a submitted idea; the author is mailed
    # and an ObjectPublished event is emitted.
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-share'
    style_order = 1
    submission_title = _('Continue')
    context = Iidea
    roles_validation = decision_roles_validation
    processsecurity_validation = decision_processsecurity_validation
    state_validation = decision_state_validation
    def start(self, context, request, appstruct, **kw):
        context.state.remove('submited')
        context.state.append('published')
        context.reindex()
        user = context.author
        localizer = request.localizer
        subject = PUBLISHEDIDEA_SUBJECT.format(subject_title=context.title)
        message = PUBLISHEDIDEA_MESSAGE.format(
            recipient_title=localizer.translate(_(getattr(user, 'user_title',''))),
            recipient_first_name=getattr(user, 'first_name', user.name),
            recipient_last_name=getattr(user, 'last_name',''),
            subject_title=context.title,
            subject_url=request.resource_url(context, "@@index"),
            novaideo_title=request.root.title
            )
        mailer_send(subject=subject,
                    recipients=[user.email],
                    body=message)
        request.registry.notify(ObjectPublished(object=context))
        return {}
    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def ab_roles_validation(process, context):
    """Only the idea's owner may abandon it."""
    owner_role = ('Owner', context)
    return has_role(role=owner_role)


def ab_processsecurity_validation(process, context):
    """Defer to the site-wide process security policy."""
    return global_user_processsecurity(process, context)


def ab_state_validation(process, context):
    """Abandoning only applies to an idea currently being worked on."""
    return any(state == 'to work' for state in context.state)
class AbandonIdea(InfiniteCardinality):
    """Owner action that shelves a work-in-progress idea.

    Swaps the 'to work' state for 'archived' and refreshes the catalog
    entry. RecuperateIdea is the inverse operation.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-stop'
    style_order = 2
    context = Iidea
    roles_validation = ab_roles_validation
    processsecurity_validation = ab_processsecurity_validation
    state_validation = ab_state_validation

    def start(self, context, request, appstruct, **kw):
        states = context.state
        states.remove('to work')
        states.append('archived')
        context.reindex()
        return {}

    def redirect(self, context, request, **kw):
        target = request.resource_url(context, "@@index")
        return HTTPFound(target)
def re_roles_validation(process, context):
    """Only the idea's owner may recuperate it."""
    owner_role = ('Owner', context)
    return has_role(role=owner_role)


def re_processsecurity_validation(process, context):
    """Defer to the site-wide process security policy."""
    return global_user_processsecurity(process, context)


def re_state_validation(process, context):
    """Recuperation applies to archived ideas, but never to archived
    *versions* of an idea."""
    states = context.state
    return 'archived' in states and 'version' not in states
class RecuperateIdea(InfiniteCardinality):
    """Owner action that revives an archived idea back into 'to work'.

    Inverse of AbandonIdea.
    """
    style = 'button' #TODO add style abstract class
    style_descriminator = 'global-action'
    style_picto = 'glyphicon glyphicon-play'
    style_order = 3
    context = Iidea
    roles_validation = re_roles_validation
    processsecurity_validation = re_processsecurity_validation
    state_validation = re_state_validation

    def start(self, context, request, appstruct, **kw):
        states = context.state
        states.remove('archived')
        states.append('to work')
        context.reindex()
        return {}

    def redirect(self, context, request, **kw):
        target = request.resource_url(context, "@@index")
        return HTTPFound(target)
def comm_roles_validation(process, context):
    """Any authenticated member may comment."""
    return has_role(role=('Member',))


def comm_processsecurity_validation(process, context):
    """Defer to the site-wide process security policy."""
    return global_user_processsecurity(process, context)


def comm_state_validation(process, context):
    """Comments are only accepted on published ideas."""
    return any(state == 'published' for state in context.state)
class CommentIdea(InfiniteCardinality):
    """Member action that attaches a comment to a published idea.

    Optionally also correlates the idea with other contents selected in
    the comment form.
    """
    # Comments may be posted concurrently; no workflow sequencing needed.
    isSequential = False
    context = Iidea
    roles_validation = comm_roles_validation
    processsecurity_validation = comm_processsecurity_validation
    state_validation = comm_state_validation

    def start(self, context, request, appstruct, **kw):
        # The form machinery supplies an already-built Comment object.
        comment = appstruct['_object_data']
        context.addtoproperty('comments', comment)
        user = get_current()
        comment.setproperty('author', user)
        if appstruct['related_contents']['associate']:
            related_contents = appstruct['related_contents']['related_contents']
            # Correlate the idea with the selected contents, tagged with
            # the comment text and its intention.
            correlation = connect(context,
                                  list(related_contents),
                                  {'comment': comment.comment,
                                   'type': comment.intention},
                                  user,
                                  unique=True)
            comment.setproperty('related_correlation', correlation)
        context.reindex()
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def present_roles_validation(process, context):
    """Any authenticated member may present an idea."""
    return has_role(role=('Member',))


def present_processsecurity_validation(process, context):
    """Defer to the site-wide process security policy."""
    return global_user_processsecurity(process, context)


def present_state_validation(process, context):
    """Only published ideas can be presented."""
    return any(state == 'published' for state in context.state)
class PresentIdea(InfiniteCardinality):
    """Member action that e-mails a published idea to a list of people.

    Recipients may be user objects or bare e-mail strings; everyone
    except the sender is recorded in ``_email_persons_contacted``.
    """
    submission_title = _('Send')
    context = Iidea
    roles_validation = present_roles_validation
    processsecurity_validation = present_processsecurity_validation
    state_validation = present_state_validation

    def start(self, context, request, appstruct, **kw):
        send_to_me = appstruct['send_to_me']
        members = list(appstruct['members'])
        user = get_current()
        if send_to_me:
            members.append(user)
        localizer = request.localizer
        # Sender identity, substituted into each message below; profile
        # fields are fetched defensively since they may be unset.
        user_title = localizer.translate(_(getattr(user, 'user_title','')))
        user_first_name = getattr(user, 'first_name', user.name)
        user_last_name = getattr(user, 'last_name','')
        url = request.resource_url(context, "@@index")
        presentation_subject = appstruct['subject']
        presentation_message = appstruct['message']
        subject = presentation_subject.format(subject_title=context.title)
        for member in members:
            recipient_title = ''
            recipient_first_name = ''
            recipient_last_name = ''
            member_email = ''
            # NOTE(review): `basestring` is Python 2 only -- this module
            # predates a Python 3 port.
            if not isinstance(member, basestring):
                # Registered user: personalize the greeting.
                recipient_title = localizer.translate(_(getattr(member, 'user_title','')))
                recipient_first_name = getattr(member, 'first_name', member.name)
                recipient_last_name = getattr(member, 'last_name','')
                member_email = member.email
            else:
                # Bare e-mail address with no account.
                member_email = member
            message = presentation_message.format(
                recipient_title=recipient_title,
                recipient_first_name=recipient_first_name,
                recipient_last_name=recipient_last_name,
                subject_url=url,
                subject_title=getattr(context, 'title', context.name),
                my_title=user_title,
                my_first_name=user_first_name,
                my_last_name=user_last_name,
                novaideo_title=request.root.title
                )
            mailer_send(subject=subject,
                        recipients=[member_email],
                        body=message)
            # Don't record the sender among the contacted addresses.
            if not (member is user):
                context._email_persons_contacted.append(member_email)
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def associate_processsecurity_validation(process, context):
    """Owners may always associate; other members only once the idea is
    published. Site-wide process security applies in either case."""
    allowed = has_role(role=('Owner', context)) or \
              (has_role(role=('Member',)) and 'published' in context.state)
    return allowed and global_user_processsecurity(process, context)
class Associate(InfiniteCardinality):
    """Create a correlation linking this idea to other contents."""
    context = Iidea
    processsecurity_validation = associate_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # The form machinery supplies an already-built correlation object.
        correlation = appstruct['_object_data']
        correlation.setproperty('source', context)
        correlation.setproperty('author', get_current())
        # Correlations live on the site root.
        getSite().addtoproperty('correlations', correlation)
        return {}

    def redirect(self, context, request, **kw):
        target = request.resource_url(context, "@@index")
        return HTTPFound(target)
def seeidea_processsecurity_validation(process, context):
    """Anyone may view a published idea; owners always may; moderators
    may additionally view ideas still in the 'submited' state."""
    published_or_owner = 'published' in context.state or \
                         has_role(role=('Owner', context))
    return published_or_owner or \
           ('submited' in context.state and has_role(role=('Moderator',)))
@acces_action()
class SeeIdea(InfiniteCardinality):
    """SeeIdea is the behavior allowing access to context."""
    title = _('Details')
    context = Iidea
    # Triggered automatically by the framework, not via a UI button.
    actionType = ActionType.automatic
    processsecurity_validation = seeidea_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # Viewing has no side effects.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
def compare_roles_validation(process, context):
    """Only the idea's owner may compare versions."""
    owner_role = ('Owner', context)
    return has_role(role=owner_role)


def compare_processsecurity_validation(process, context):
    """Comparison requires that a previous version exists."""
    version = getattr(context, 'version', None)
    return version is not None
class CompareIdea(InfiniteCardinality):
    """Owner action that opens the version-comparison view for an idea."""
    title = _('Compare')
    context = Iidea
    roles_validation = compare_roles_validation
    processsecurity_validation = compare_processsecurity_validation

    def start(self, context, request, appstruct, **kw):
        # The comparison itself is rendered by the view; nothing to do here.
        return {}

    def redirect(self, context, request, **kw):
        return HTTPFound(request.resource_url(context, "@@index"))
#TODO behaviors

# Register CommentIdea as the validator action for Idea contents.
VALIDATOR_BY_CONTEXT[Idea] = CommentIdea
|
#!/usr/bin/python
from __future__ import with_statement
import sys
import os
import datetime
from itertools import repeat
from pyrap.quanta import quantity
from lofarpipe.support.control import control
from lofarpipe.support.utilities import log_time
from lofarpipe.support.parset import patched_parset
from tkp.database.database import DataBase
from tkp.database.dataset import DataSet
SECONDS_IN_DAY = 86400.
class SIP(control):
    """Standard Imaging Pipeline.

    Maps input MeasurementSets onto compute nodes, runs NDPPP flagging,
    BBS calibration and post-BBS NDPPP clipping, then slices the data in
    time and, per slice, images it and runs source extraction, transient
    search, feature extraction and classification, recording results in
    the TKP database.

    NOTE(review): the skymodel lookup is currently commented out, so BBS
    runs with an unpatched parset -- confirm that is intentional.
    """

    def pipeline_logic(self):
        # Read the datafiles; datafiles is a list of MS paths.
        from ms_to_process import datafiles
        with log_time(self.logger):
            storage_mapfile = self.run_task("datamapper_storage", datafiles)['mapfile'] # generate a mapfile mapping them to compute nodes
            self.logger.info('storage mapfile = %s' % storage_mapfile)

            # Produce a GVDS file describing the data on the storage nodes.
            gvds_file = self.run_task('vdsmaker', storage_mapfile)['gvds']

            # Read metadata (start, end times, pointing direction) from GVDS.
            vdsinfo = self.run_task("vdsreader")

            # NDPPP reads the data from the storage nodes, according to the
            # map. It returns a new map, describing the location of data on
            # the compute nodes.
            ndppp_results = self.run_task(
                "ndppp",
                storage_mapfile,
            )

            # Remove baselines which have been fully-flagged in any
            # individual subband.
            compute_mapfile = self.run_task(
                "flag_baseline",
                ndppp_results['mapfile'],
                baselines=ndppp_results['fullyflagged']
            )['mapfile']

            #ra = quantity(vdsinfo['pointing']['ra']).get_value('deg')
            #dec = quantity(vdsinfo['pointing']['dec']).get_value('deg')
            #central = self.run_task(
            #    "skymodel", ra=ra, dec=dec, search_size=2.5
            #    )

            parmdb_mapfile = self.run_task("parmdb", compute_mapfile)['mapfile']
            sourcedb_mapfile = self.run_task("sourcedb", compute_mapfile)['mapfile']

            #
            # # Patch the name of the central source into the BBS parset for
            # # subtraction.
            with patched_parset(
                self.task_definitions.get("bbs", "parset"),
                {
                    ## 'Step.correct.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.solve1.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.solve2.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.subtract.Model.Sources': '[ "%s" ]' % (central["source_name"])
                }
            ) as bbs_parset:
                self.logger.info("bbs patched parset = %s" % bbs_parset)
                # BBS modifies data in place, so the map produced by NDPPP
                # remains valid.
                bbs_mapfile = self.run_task(
                    "bbs", compute_mapfile,
                    parset=bbs_parset,
                    instrument_mapfile=parmdb_mapfile,
                    sky_mapfile=sourcedb_mapfile)['mapfile']

            # Now, run DPPP three times on the output of BBS. We'll run
            # this twice: once on CORRECTED_DATA, and once on
            # SUBTRACTED_DATA.
            with patched_parset(
                os.path.join(
                    self.config.get("layout", "parset_directory"),
                    "ndppp.postbbs.parset"
                ),
                {
                    # "clip1.amplmax": str(5 * central["source_flux"])
                },
                output_dir=self.config.get("layout", "parset_directory")
            ) as corrected_ndppp_parset:
                for i in repeat(None, 2):
                    self.run_task(
                        "ndppp",
                        compute_mapfile,
                        parset=corrected_ndppp_parset,
                        suffix=""
                    )

            # Produce a GVDS file describing the data on the storage nodes.
            gvds_file = self.run_task('vdsmaker', compute_mapfile)['gvds']
            self.logger.info("GVDS file = %s" % gvds_file)

            # Create the dataset in the database
            database = DataBase()
            dataset = DataSet(data={'dsinname': self.inputs['job_name']},
                              database=database)
            dsid = dataset.id  # (currently unused)

            # Slice the observation into timesteps; each slice is imaged
            # and searched independently below.
            outputs = self.run_task("time_slicing", gvds_file=gvds_file)
            mapfiles = outputs['mapfiles']
            subdirs = ["%d" % int(starttime) for starttime, endtime in
                       outputs['timesteps']]
            self.logger.info("directories with sliced MSs = %s", str(subdirs))

            for iteration, (mapfile, subdir) in enumerate(zip(mapfiles,
                                                              subdirs)):
                self.logger.info("Starting time slice iteration #%d" %
                                 (iteration+1,))
                outputs = {}
                results_dir = os.path.join(
                    self.config.get('DEFAULT', 'working_directory'),
                    self.inputs['job_name'],
                    subdir
                )
                # Image the slice, convert to FITS, then run the TKP chain.
                outputs = self.run_task('awimager', mapfile)
                outputs.update(
                    self.run_task('img2fits', outputs['images'],
                                  results_dir=os.path.join(
                                      self.config.get('layout', 'results_directory'),
                                      subdir))
                )
                outputs.update(self.run_task("source_extraction",
                                             [outputs['combined_fitsfile']],
                                             dataset_id=dataset.id))
                outputs.update(
                    self.run_task("monitoringlist", outputs['image_ids']))
                outputs.update(
                    self.run_task("transient_search", [dataset.id],
                                  image_ids=outputs['image_ids']))
                outputs.update(
                    self.run_task("feature_extraction", outputs['transients']))
                outputs.update(
                    self.run_task("classification", outputs['transients']))
                self.run_task("prettyprint", outputs['transients'])

            # Stamp the dataset as processed and release the DB connection.
            dataset.process_ts = datetime.datetime.utcnow()
            database.close()
if __name__ == '__main__':
    # Run the pipeline framework's standard entry point.
    sys.exit(SIP().main())
Uncomment automatic skymodel generation; BBS needs at least some sky model
git-svn-id: 8739e81f5e357e0bd4e6b1e18f358058a57987f8@2827 2b73c8c1-3922-0410-90dd-bc0a5c6f2ac6
#!/usr/bin/python
from __future__ import with_statement
import sys
import os
import datetime
from itertools import repeat
from pyrap.quanta import quantity
from lofarpipe.support.control import control
from lofarpipe.support.utilities import log_time
from lofarpipe.support.parset import patched_parset
from tkp.database.database import DataBase
from tkp.database.dataset import DataSet
SECONDS_IN_DAY = 86400.
class SIP(control):
    """Standard Imaging Pipeline.

    Maps input MeasurementSets onto compute nodes, runs NDPPP flagging,
    looks up the central sky-model source, runs BBS calibration and
    post-BBS NDPPP clipping, then slices the data in time and, per slice,
    images it and runs source extraction, transient search, feature
    extraction and classification, recording results in the TKP database.

    NOTE(review): `central` is computed but the parset patches using it
    are still commented out -- confirm whether they should be enabled.
    """

    def pipeline_logic(self):
        # Read the datafiles; datafiles is a list of MS paths.
        from ms_to_process import datafiles
        with log_time(self.logger):
            storage_mapfile = self.run_task("datamapper_storage", datafiles)['mapfile'] # generate a mapfile mapping them to compute nodes
            self.logger.info('storage mapfile = %s' % storage_mapfile)

            # Produce a GVDS file describing the data on the storage nodes.
            gvds_file = self.run_task('vdsmaker', storage_mapfile)['gvds']

            # Read metadata (start, end times, pointing direction) from GVDS.
            vdsinfo = self.run_task("vdsreader")

            # NDPPP reads the data from the storage nodes, according to the
            # map. It returns a new map, describing the location of data on
            # the compute nodes.
            ndppp_results = self.run_task(
                "ndppp",
                storage_mapfile,
            )

            # Remove baselines which have been fully-flagged in any
            # individual subband.
            compute_mapfile = self.run_task(
                "flag_baseline",
                ndppp_results['mapfile'],
                baselines=ndppp_results['fullyflagged']
            )['mapfile']

            # Look up the brightest source near the pointing centre; BBS
            # needs at least some sky model to calibrate against.
            ra = quantity(vdsinfo['pointing']['ra']).get_value('deg')
            dec = quantity(vdsinfo['pointing']['dec']).get_value('deg')
            central = self.run_task(
                "skymodel", ra=ra, dec=dec, search_size=2.5
            )

            parmdb_mapfile = self.run_task("parmdb", compute_mapfile)['mapfile']
            sourcedb_mapfile = self.run_task("sourcedb", compute_mapfile)['mapfile']

            #
            # # Patch the name of the central source into the BBS parset for
            # # subtraction.
            with patched_parset(
                self.task_definitions.get("bbs", "parset"),
                {
                    ## 'Step.correct.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.solve1.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.solve2.Model.Sources': '[ "%s" ]' % (central["source_name"]),
                    ## 'Step.subtract.Model.Sources': '[ "%s" ]' % (central["source_name"])
                }
            ) as bbs_parset:
                self.logger.info("bbs patched parset = %s" % bbs_parset)
                # BBS modifies data in place, so the map produced by NDPPP
                # remains valid.
                bbs_mapfile = self.run_task(
                    "bbs", compute_mapfile,
                    parset=bbs_parset,
                    instrument_mapfile=parmdb_mapfile,
                    sky_mapfile=sourcedb_mapfile)['mapfile']

            # Now, run DPPP three times on the output of BBS. We'll run
            # this twice: once on CORRECTED_DATA, and once on
            # SUBTRACTED_DATA.
            with patched_parset(
                os.path.join(
                    self.config.get("layout", "parset_directory"),
                    "ndppp.postbbs.parset"
                ),
                {
                    # "clip1.amplmax": str(5 * central["source_flux"])
                },
                output_dir=self.config.get("layout", "parset_directory")
            ) as corrected_ndppp_parset:
                for i in repeat(None, 2):
                    self.run_task(
                        "ndppp",
                        compute_mapfile,
                        parset=corrected_ndppp_parset,
                        suffix=""
                    )

            # Produce a GVDS file describing the data on the storage nodes.
            gvds_file = self.run_task('vdsmaker', compute_mapfile)['gvds']
            self.logger.info("GVDS file = %s" % gvds_file)

            # Create the dataset in the database
            database = DataBase()
            dataset = DataSet(data={'dsinname': self.inputs['job_name']},
                              database=database)
            dsid = dataset.id  # (currently unused)

            # Slice the observation into timesteps; each slice is imaged
            # and searched independently below.
            outputs = self.run_task("time_slicing", gvds_file=gvds_file)
            mapfiles = outputs['mapfiles']
            subdirs = ["%d" % int(starttime) for starttime, endtime in
                       outputs['timesteps']]
            self.logger.info("directories with sliced MSs = %s", str(subdirs))

            for iteration, (mapfile, subdir) in enumerate(zip(mapfiles,
                                                              subdirs)):
                self.logger.info("Starting time slice iteration #%d" %
                                 (iteration+1,))
                outputs = {}
                results_dir = os.path.join(
                    self.config.get('DEFAULT', 'working_directory'),
                    self.inputs['job_name'],
                    subdir
                )
                # Image the slice, convert to FITS, then run the TKP chain.
                outputs = self.run_task('awimager', mapfile)
                outputs.update(
                    self.run_task('img2fits', outputs['images'],
                                  results_dir=os.path.join(
                                      self.config.get('layout', 'results_directory'),
                                      subdir))
                )
                outputs.update(self.run_task("source_extraction",
                                             [outputs['combined_fitsfile']],
                                             dataset_id=dataset.id))
                outputs.update(
                    self.run_task("monitoringlist", outputs['image_ids']))
                outputs.update(
                    self.run_task("transient_search", [dataset.id],
                                  image_ids=outputs['image_ids']))
                outputs.update(
                    self.run_task("feature_extraction", outputs['transients']))
                outputs.update(
                    self.run_task("classification", outputs['transients']))
                self.run_task("prettyprint", outputs['transients'])

            # Stamp the dataset as processed and release the DB connection.
            dataset.process_ts = datetime.datetime.utcnow()
            database.close()
if __name__ == '__main__':
    # Run the pipeline framework's standard entry point.
    sys.exit(SIP().main())
|
"""
Classes for generating random sets of intervals over larger regions.
"""
from bx.bitset import *
import bisect
random = __import__( 'random' )
class MaxtriesException( Exception ):
    """Raised when no remaining region can accommodate a requested interval length."""
    pass
def throw_random_list( lengths, mask, allow_overlap=False ):
    """Throw random intervals into the clear regions of *mask* and return
    them as a list of (start, end) tuples."""
    intervals = []

    def collect( start, end ):
        intervals.append( ( start, end ) )

    throw_random_gap_list( lengths, mask, collect, allow_overlap )
    # Sanity check: total placed bases must equal the requested total.
    assert sum( end - start for start, end in intervals ) == sum( lengths )
    return intervals
def throw_random_bits( lengths, mask, allow_overlap=False ):
    """Throw random intervals into the clear regions of *mask* and return
    them as a BitSet with the placed positions set."""
    bits = BitSet( mask.size )

    def mark( start, end ):
        bits.set_range( start, end - start )

    throw_random_gap_list( lengths, mask, mark, allow_overlap )
    if not allow_overlap:
        # With no overlap allowed, every thrown base must be distinct.
        assert bits.count_range( 0, bits.size ) == sum( lengths )
    return bits
def throw_random_gap_list( lengths, mask, save_interval_func, allow_overlap=False ):
    """Place random intervals in the unset ("gap") regions of *mask*.

    `lengths`: lengths of the intervals to generate, ideally sorted
               longest-first (reduces the chance of MaxtriesException and
               helps performance when allow_overlap and lengths repeat).
    `mask`: BitSet whose set bits mark forbidden positions; its size also
            defines the overall region.
    Each placed interval is reported via save_interval_func(start, end).
    """
    lengths = [piece for piece in lengths if piece > 0]
    shortest = min( lengths )
    # Scan the mask for clear runs big enough to hold the shortest piece;
    # each usable gap becomes a (length, start, None) triple.
    gaps = []
    cursor = 0
    while True:
        gap_start = mask.next_clear( cursor )
        if gap_start == mask.size:
            break
        cursor = mask.next_set( gap_start )
        gap_size = cursor - gap_start
        if gap_size >= shortest:
            gaps.append( ( gap_size, gap_start, None ) )
    # Longest gaps first, as throw_random_private expects.
    gaps.sort( reverse=True )
    throw_random_private( lengths, gaps, save_interval_func, allow_overlap, three_args=False )
def throw_random_intervals( lengths, regions, save_interval_func=None, allow_overlap=False ):
    """Place random intervals inside *regions*, a list of (start, end, ...)
    tuples or lists.

    When save_interval_func is given it is called as f(start, stop, region)
    for each placed interval; otherwise the (start, stop, region) triples
    are collected and returned as a list.
    """
    # Convert to (length, start, original-region) triples, longest first,
    # as throw_random_private expects. (It may mutate this list.)
    sized = [ ( rgn[1] - rgn[0], rgn[0], rgn ) for rgn in regions ]
    sized.sort( reverse=True )
    if save_interval_func is not None:
        throw_random_private( lengths, sized, save_interval_func, allow_overlap )
        return
    collected = []
    throw_random_private( lengths, sized,
                          lambda s, e, rgn: collected.append( ( s, e, rgn ) ),
                          allow_overlap )
    return collected
def throw_random_private( lengths, regions, save_interval_func, allow_overlap=False, three_args=True ):
    """(Internal; call only through the interface functions above.)

    `lengths`: lengths of the intervals to generate.
    `regions`: (length, start, extra) triples sorted by decreasing length;
               this list CAN BE MODIFIED by this function.
    `save_interval_func`: called with (start, stop, extra) per placed
               interval, or (start, stop) when three_args is False.
    Raises MaxtriesException when no region can fit a requested length.

    Implementation: for each length we build a candidate-counts array cc,
    where cc[i] is the number of candidate start positions in regions
    preceding region i. A uniform candidate index is drawn, mapped to its
    region by binary search over cc, then to a start offset within it.
    When overlap is disallowed, the chosen region is split around the
    placed interval and cc is rebuilt on the next iteration.
    """
    min_length = min( lengths )
    prev_length = None # (force initial cc array construction)
    cc = [0] * (len( regions ) + len(lengths) - 1)
    for length in lengths:
        # Rebuild cc only when the length changed (or the region list was
        # modified, which resets prev_length below).
        if length != prev_length:
            prev_length = length
            assert len( cc ) >= len( regions )
            candidates = 0
            hi_rgn = 0
            for region in regions:
                rgn_len = region[0]
                if rgn_len < length:
                    break  # regions are sorted; nothing further fits
                cc[hi_rgn] = candidates
                candidates += rgn_len - length + 1
                hi_rgn += 1
            if candidates == 0:
                raise MaxtriesException( "No region can fit an interval of length %d" % length )
            hi_rgn -= 1
        # Select a candidate uniformly over all feasible placements.
        s = random.randrange( candidates )
        # Locate the region containing that candidate, by binary search.
        lo = 0
        hi = hi_rgn
        while hi > lo:
            # Use floor division (//): under Python 3, / yields a float
            # and would break the cc[] indexing below.
            mid = (lo + hi + 1) // 2   # (round up to prevent infinite loop)
            if s < cc[mid]:
                hi = mid - 1           # (s < num candidates from 0..mid-1)
            else:
                lo = mid               # (s >= num candidates from 0..mid-1)
        s -= cc[lo]
        if allow_overlap:
            # BUGFIX: these names were previously only bound in the
            # non-overlap branch, raising NameError when allow_overlap=True.
            rgn_length, rgn_start, rgn_extra = regions[lo]
        else:
            # Remove the chosen region and split it around the placement.
            rgn_length, rgn_start, rgn_extra = regions.pop( lo )
            rgn_end = rgn_start + rgn_length
            assert s >= 0
            assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % ( rgn_start, s, length, rgn_start + s + length, rgn_end )
            # regions is sorted descending; reverse so bisect.insort
            # (which assumes ascending order) can be used, then restore.
            regions.reverse()
            if s >= min_length:
                bisect.insort( regions, ( s, rgn_start, rgn_extra ) )
            if s + length <= rgn_length - min_length:
                bisect.insort( regions, ( rgn_length - ( s + length ), rgn_start + s + length, rgn_extra ) )
            regions.reverse()
            prev_length = None # (force cc array construction)
        # Report the new interval.
        if three_args:
            save_interval_func( rgn_start + s, rgn_start + s + length, rgn_extra )
        else:
            save_interval_func( rgn_start + s, rgn_start + s + length )
Fix crash (unbound region variables) when overlap is allowed; also simplify the caller's burden when generating genomic intervals.
"""
Classes for generating random sets of intervals over larger regions.
"""
from bx.bitset import *
import bisect
random = __import__( 'random' )
class MaxtriesException( Exception ):
    """Raised when no remaining region can accommodate a requested interval length."""
    pass
def throw_random_list( lengths, mask, allow_overlap=False ):
    """Throw random intervals into the clear regions of *mask* and return
    them as a list of (start, end) tuples."""
    intervals = []

    def collect( start, end ):
        intervals.append( ( start, end ) )

    throw_random_gap_list( lengths, mask, collect, allow_overlap )
    # Sanity check: total placed bases must equal the requested total.
    assert sum( end - start for start, end in intervals ) == sum( lengths )
    return intervals
def throw_random_bits( lengths, mask, allow_overlap=False ):
    """Throw random intervals into the clear regions of *mask* and return
    them as a BitSet with the placed positions set."""
    bits = BitSet( mask.size )

    def mark( start, end ):
        bits.set_range( start, end - start )

    throw_random_gap_list( lengths, mask, mark, allow_overlap )
    if not allow_overlap:
        # With no overlap allowed, every thrown base must be distinct.
        assert bits.count_range( 0, bits.size ) == sum( lengths )
    return bits
def throw_random_gap_list( lengths, mask, save_interval_func, allow_overlap=False ):
    """Place random intervals in the unset ("gap") regions of *mask*.

    `lengths`: lengths of the intervals to generate, ideally sorted
               longest-first (reduces the chance of MaxtriesException and
               helps performance when allow_overlap and lengths repeat).
    `mask`: BitSet whose set bits mark forbidden positions; its size also
            defines the overall region.
    Each placed interval is reported via save_interval_func(start, end).
    """
    lengths = [piece for piece in lengths if piece > 0]
    shortest = min( lengths )
    # Scan the mask for clear runs big enough to hold the shortest piece;
    # each usable gap becomes a (length, start, None) triple.
    gaps = []
    cursor = 0
    while True:
        gap_start = mask.next_clear( cursor )
        if gap_start == mask.size:
            break
        cursor = mask.next_set( gap_start )
        gap_size = cursor - gap_start
        if gap_size >= shortest:
            gaps.append( ( gap_size, gap_start, None ) )
    # Longest gaps first, as throw_random_private expects.
    gaps.sort( reverse=True )
    throw_random_private( lengths, gaps, save_interval_func, allow_overlap, three_args=False )
def throw_random_intervals( lengths, regions, save_interval_func=None, allow_overlap=False ):
    """Place random intervals inside *regions*, a list of (start, end, ...)
    tuples or lists.

    When save_interval_func is given it is called as f(start, stop, region)
    for each placed interval; otherwise the function returns a list of
    copies of the chosen regions with their start and end overwritten by
    the placed interval's coordinates.
    """
    # Convert to (length, start, original-region) triples, longest first,
    # as throw_random_private expects. (It may mutate this list.)
    sized = [ ( rgn[1] - rgn[0], rgn[0], rgn ) for rgn in regions ]
    sized.sort( reverse=True )
    if save_interval_func is not None:
        throw_random_private( lengths, sized, save_interval_func, allow_overlap )
        return
    collected = []
    throw_random_private( lengths, sized,
                          lambda s, e, rgn: collected.append( overwrite_start_end( s, e, rgn ) ),
                          allow_overlap )
    return collected
def overwrite_start_end( s, e, rgn ):
    """Return a tuple copy of region *rgn* whose first two fields are
    replaced with start *s* and end *e*."""
    fields = list( rgn )
    fields[0] = s
    fields[1] = e
    return tuple( fields )
def throw_random_private( lengths, regions, save_interval_func, allow_overlap=False, three_args=True ):
    """(Internal; call only through the interface functions above.)

    `lengths`: lengths of the intervals to generate.
    `regions`: (length, start, extra) triples sorted by decreasing length;
               this list CAN BE MODIFIED by this function.
    `save_interval_func`: called with (start, stop, extra) per placed
               interval, or (start, stop) when three_args is False.
    Raises MaxtriesException when no region can fit a requested length.

    Implementation: for each length we build a candidate-counts array cc,
    where cc[i] is the number of candidate start positions in regions
    preceding region i. A uniform candidate index is drawn, mapped to its
    region by binary search over cc, then to a start offset within it.
    When overlap is disallowed, the chosen region is split around the
    placed interval and cc is rebuilt on the next iteration.
    """
    min_length = min( lengths )
    prev_length = None # (force initial cc array construction)
    cc = [0] * (len( regions ) + len(lengths) - 1)
    for length in lengths:
        # Rebuild cc only when the length changed (or the region list was
        # modified, which resets prev_length below).
        if length != prev_length:
            prev_length = length
            assert len( cc ) >= len( regions )
            candidates = 0
            hi_rgn = 0
            for region in regions:
                rgn_len = region[0]
                if rgn_len < length:
                    break  # regions are sorted; nothing further fits
                cc[hi_rgn] = candidates
                candidates += rgn_len - length + 1
                hi_rgn += 1
            if candidates == 0:
                raise MaxtriesException( "No region can fit an interval of length %d" % length )
            hi_rgn -= 1
        # Select a candidate uniformly over all feasible placements.
        s = random.randrange( candidates )
        # Locate the region containing that candidate, by binary search.
        lo = 0
        hi = hi_rgn
        while hi > lo:
            # BUGFIX: use floor division (//); under Python 3, / yields a
            # float and cc[mid] would raise TypeError.
            mid = (lo + hi + 1) // 2   # (round up to prevent infinite loop)
            if s < cc[mid]:
                hi = mid - 1           # (s < num candidates from 0..mid-1)
            else:
                lo = mid               # (s >= num candidates from 0..mid-1)
        s -= cc[lo]
        if allow_overlap:
            rgn_length, rgn_start, rgn_extra = regions[lo]
        else:
            # Remove the chosen region and split it around the placement.
            rgn_length, rgn_start, rgn_extra = regions.pop( lo )
            rgn_end = rgn_start + rgn_length
            assert s >= 0
            assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % ( rgn_start, s, length, rgn_start + s + length, rgn_end )
            # regions is sorted descending; reverse so bisect.insort
            # (which assumes ascending order) can be used, then restore.
            regions.reverse()
            if s >= min_length:
                bisect.insort( regions, ( s, rgn_start, rgn_extra ) )
            if s + length <= rgn_length - min_length:
                bisect.insort( regions, ( rgn_length - ( s + length ), rgn_start + s + length, rgn_extra ) )
            regions.reverse()
            prev_length = None # (force cc array construction)
        # Report the new interval.
        if three_args:
            save_interval_func( rgn_start + s, rgn_start + s + length, rgn_extra )
        else:
            save_interval_func( rgn_start + s, rgn_start + s + length )
|
from flask import render_template, safe_join, send_file
from flask_login import login_required
from redash import settings
from redash.handlers import routes
from redash.handlers.authentication import base_href
from redash.handlers.base import org_scoped_rule
def render_index():
    """Serve the front-end entry point: the multi-org template when
    MULTI_ORG is enabled, otherwise the static SPA index.html."""
    if settings.MULTI_ORG:
        return render_template("multi_org.html", base_href=base_href())
    # safe_join guards against path traversal outside the assets dir.
    index_path = safe_join(settings.STATIC_ASSETS_PATH, 'index.html')
    return send_file(index_path, cache_timeout=0, conditional=True)
@routes.route(org_scoped_rule('/<path:path>'))
@routes.route(org_scoped_rule('/'))
@login_required
def index(**kwargs):
    # Catch-all: every authenticated front-end route serves the SPA shell;
    # client-side routing takes over from there.
    return render_index()
Add explicit route for dashboards to allow embedding in iframes. (#3957)
* Add explicit route for dashboards to allow embedding in iframes.
* Add missing blank lines
from flask import render_template, safe_join, send_file
from flask_login import login_required
from redash import settings
from redash.handlers import routes
from redash.handlers.authentication import base_href
from redash.handlers.base import org_scoped_rule
from redash.security import csp_allows_embeding
def render_index():
    """Serve the application shell.

    Returns the static ``index.html`` (never cached, so new asset builds
    are picked up immediately) unless this is a multi-org deployment, in
    which case the server-rendered ``multi_org.html`` template is used.
    """
    if not settings.MULTI_ORG:
        asset_path = safe_join(settings.STATIC_ASSETS_PATH, 'index.html')
        return send_file(asset_path, cache_timeout=0, conditional=True)
    return render_template("multi_org.html", base_href=base_href())
@routes.route(org_scoped_rule('/dashboard/<slug>'), methods=['GET'])
@login_required
@csp_allows_embeding
def dashboard(slug, org_slug=None):
    # Explicit dashboard route so csp_allows_embeding can relax the
    # Content-Security-Policy headers and allow embedding in iframes;
    # the catch-all `index` route below would not apply that decorator.
    # NOTE(review): "embeding" is misspelled in the upstream decorator name.
    return render_index()
@routes.route(org_scoped_rule('/<path:path>'))
@routes.route(org_scoped_rule('/'))
@login_required
def index(**kwargs):
    # Catch-all route: any authenticated URL serves the application shell;
    # actual routing happens client-side. **kwargs absorbs path/org_slug
    # captured by the URL rules.
    return render_index()
|
from r2.lib.plugin import Plugin
from r2.lib.js import Module
class Adzerk(Plugin):
    """reddit plugin that wires in Adzerk ad serving."""
    # Plugin ships JS assets, so it must participate in the static build.
    needs_static_build = True
    # Append the Adzerk client code to the main 'reddit' JS module.
    js = {
        'reddit': Module('reddit.js',
            'adzerk/adzerk.js',
        )
    }
    def load_controllers(self):
        """Swap the stock Ads page component for the Adzerk one."""
        # replace the standard Ads view with an Adzerk specific one.
        # Imports are local — presumably to avoid import cycles at plugin
        # load time; confirm before hoisting them.
        import r2.lib.pages.pages
        from adzerkads import Ads as AdzerkAds
        r2.lib.pages.pages.Ads = AdzerkAds
Move adzerk.js into reddit-init to fix race condition.
This should ensure that the Adzerk postMessage receiver is loaded before
Adzerk gets its payloads.
from r2.lib.plugin import Plugin
from r2.lib.js import Module
class Adzerk(Plugin):
    """reddit plugin that wires in Adzerk ad serving."""
    # Plugin ships JS assets, so it must participate in the static build.
    needs_static_build = True
    # Ship adzerk.js inside reddit-init.js so its postMessage receiver is
    # registered before any Adzerk payload can arrive (fixes a load-order
    # race that existed when it was bundled into the later reddit.js).
    js = {
        'reddit-init': Module('reddit-init.js',
            'adzerk/adzerk.js',
        )
    }
    def load_controllers(self):
        """Swap the stock Ads page component for the Adzerk one."""
        # replace the standard Ads view with an Adzerk specific one.
        # Imports are local — presumably to avoid import cycles at plugin
        # load time; confirm before hoisting them.
        import r2.lib.pages.pages
        from adzerkads import Ads as AdzerkAds
        r2.lib.pages.pages.Ads = AdzerkAds
|
# -*- coding: utf-8 -*-
"""
sets
~~~~~
The `sets` module contains a standard collection, :class:`Set`, which is based
on Python's built-in set type.
Its elements are stored in a Redis `set <http://redis.io/commands#set>`_
structure.
"""
from __future__ import division, print_function, unicode_literals
import collections
from functools import reduce
import operator
import random
import six
from .base import RedisCollection
class Set(RedisCollection, collections.MutableSet):
    """
    Collection based on the built-in Python :class:`set` type.
    Items are stored in a Redis set structure.
    See Python's `set documentation
    <https://docs.python.org/3/library/stdtypes.html#set>`_ for usage notes.
    """
    # NOTE(review): collections.MutableSet is the pre-3.3 spelling; the ABC
    # lives in collections.abc and the old alias is removed in Python 3.10.
    # Bind the pickling helpers matching the running interpreter.
    if six.PY2:
        _pickle = RedisCollection._pickle_2
        _unpickle = RedisCollection._unpickle_2
    else:
        _pickle = RedisCollection._pickle_3
    def __init__(self, *args, **kwargs):
        """
        Create a new Set object.
        If the first argument (*data*) is an iterable object, create the new
        Set with its elements as the initial data.
        :param data: Initial data.
        :type data: iterable
        :param redis: Redis client instance. If not provided, default Redis
                      connection is used.
        :type redis: :class:`redis.StrictRedis`
        :param key: Redis key for the collection. Collections with the same key
                    point to the same data. If not provided, a random
                    string is generated.
        :type key: str
        """
        # *data* may be given positionally or as a keyword argument.
        data = args[0] if args else kwargs.pop('data', None)
        super(Set, self).__init__(**kwargs)
        if data:
            self.update(data)
    def _data(self, pipe=None):
        # Fetch all members with SMEMBERS; unpickle lazily via a generator.
        pipe = self.redis if pipe is None else pipe
        return (self._unpickle(x) for x in pipe.smembers(self.key))
    def _repr_data(self):
        # Render like a Python set literal: {elem1, elem2, ...}.
        items = (repr(v) for v in self.__iter__())
        return '{{{}}}'.format(', '.join(items))
    # Magic methods
    def __contains__(self, value, pipe=None):
        """Test for membership of *value* in the set."""
        pipe = self.redis if pipe is None else pipe
        return bool(pipe.sismember(self.key, self._pickle(value)))
    def __iter__(self, pipe=None):
        """Return an iterator over elements of the set."""
        pipe = self.redis if pipe is None else pipe
        return self._data(pipe)
    def __len__(self, pipe=None):
        """Return cardinality of the set."""
        pipe = self.redis if pipe is None else pipe
        return pipe.scard(self.key)
    # Named methods
    def add(self, value):
        """Add element *value* to the set."""
        # Raise TypeError if value is not hashable
        hash(value)
        self.redis.sadd(self.key, self._pickle(value))
    def copy(self, key=None):
        # Create a new Set on the same Redis server (optionally under a
        # caller-chosen key) containing the same elements.
        other = self.__class__(redis=self.redis, key=key)
        other.update(self)
        return other
    def clear(self, pipe=None):
        """Remove all elements from the set."""
        self._clear(pipe)
    def discard(self, value):
        """Remove element *value* from the set if it is present."""
        # Raise TypeError if value is not hashable
        hash(value)
        self.redis.srem(self.key, self._pickle(value))
    def isdisjoint(self, other):
        """
        Return ``True`` if the set has no elements in common with *other*.
        Sets are disjoint if and only if their intersection is the empty set.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        # Pure path: both operands live on the same Redis server, so the
        # intersection is computed server-side with SINTER.
        def isdisjoint_trans_pure(pipe):
            return not pipe.sinter(self.key, other.key)
        # Mixed path: materialize both operands as Python sets and compare
        # locally.  use_redis is assigned below, before the transaction
        # invokes this closure.
        def isdisjoint_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            if use_redis:
                other_values = set(other.__iter__(pipe))
            else:
                other_values = set(other)
            return self_values.isdisjoint(other_values)
        if self._same_redis(other):
            return self._transaction(isdisjoint_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(isdisjoint_trans_mixed, other.key)
        use_redis = False
        return self._transaction(isdisjoint_trans_mixed)
    def pop(self):
        """
        Remove and return an arbitrary element from the set.
        Raises :exc:`KeyError` if the set is empty.
        """
        # SPOP returns None when the key is empty or missing.
        result = self.redis.spop(self.key)
        if result is None:
            raise KeyError
        return self._unpickle(result)
    def random_sample(self, k=1):
        """
        Return a *k* length list of unique elements chosen from the Set.
        Elements are not removed. Similar to :func:`random.sample` function
        from standard library.
        :param k: Size of the sample, defaults to 1.
        :rtype: :class:`list`
        .. note::
            This method is not available on the Python :class:`set`.
            When Redis version < 2.6 is being used the whole set is stored
            in memory and the sample is computed in Python.
        """
        # k == 0: no work to do
        if k == 0:
            results = []
        # k == 1: same behavior on all versions of Redis
        elif k == 1:
            # NOTE(review): on an empty set SRANDMEMBER returns None, which
            # is then passed to _unpickle — confirm that is intended.
            results = [self.redis.srandmember(self.key)]
        # k != 1, Redis version >= 2.6: compute in Redis
        elif self.redis_version >= (2, 6, 0):
            results = self.redis.srandmember(self.key, k)
        # positive k, Redis version < 2.6: sample without replacement
        elif k > 1:
            seq = list(self.__iter__())
            return random.sample(seq, min(k, len(seq)))
        # negative k, Redis version < 2.6: sample with replacement
        else:
            seq = list(self.__iter__())
            return [random.choice(seq) for __ in six.moves.xrange(abs(k))]
        return [self._unpickle(x) for x in results]
    def remove(self, value):
        """
        Remove element *value* from the set. Raises :exc:`KeyError` if it
        is not contained in the set.
        """
        # Raise TypeError if value is not hashable
        hash(value)
        # SREM returns the number of removed members; 0 means it was absent.
        result = self.redis.srem(self.key, self._pickle(value))
        if not result:
            raise KeyError(value)
    def scan_elements(self):
        """
        Yield each of the elements from the collection, without pulling them
        all into memory.
        .. warning::
            This method is not available on the set collections provided
            by Python.
            This method may return the element multiple times.
            See the `Redis SCAN documentation
            <http://redis.io/commands/scan#scan-guarantees>`_ for details.
        """
        for x in self.redis.sscan_iter(self.key):
            yield self._unpickle(x)
    # Comparison and set operation helpers
    def _ge_helper(self, other, op, check_type=False):
        # Shared implementation of >=, > and issuperset: *op* compares the
        # cardinalities, then every element of *other* is checked for
        # membership in self.
        if check_type and not isinstance(other, collections.Set):
            raise TypeError
        # Pure path: SDIFF(other, self) is empty iff other is a subset.
        def ge_trans_pure(pipe):
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            return not pipe.sdiff(other.key, self.key)
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def ge_trans_mixed(pipe):
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(self.__contains__(v, pipe=pipe) for v in values)
        if self._same_redis(other):
            return self._transaction(ge_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(ge_trans_mixed, other.key)
        use_redis = False
        return self._transaction(ge_trans_mixed)
    def _le_helper(self, other, op, check_type=False):
        # Shared implementation of <=, <, == and issubset (mirror image of
        # _ge_helper).
        if check_type and not isinstance(other, collections.Set):
            raise TypeError
        # Pure path: SDIFF(self, other) is empty iff self is a subset.
        def le_trans_pure(pipe):
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            return not pipe.sdiff(self.key, other.key)
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def le_trans_mixed(pipe):
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(v in values for v in self.__iter__(pipe))
        if self._same_redis(other):
            return self._transaction(le_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(le_trans_mixed, other.key)
        use_redis = False
        return self._transaction(le_trans_mixed)
    def _op_update_helper(
        self, others, op, redis_op, update=False, check_type=False
    ):
        # Shared implementation of union/intersection/difference and their
        # in-place variants.  *op* is the Python operator, *redis_op* the
        # name of the corresponding Redis command (e.g. 'sunionstore').
        if (
            check_type and
            not all(isinstance(x, collections.Set) for x in others)
        ):
            raise TypeError
        # Pure path: every operand is a Set on the same server, so the whole
        # operation runs server-side.  For updates the result is built under
        # a temporary key and RENAMEd over self.key, so the destination is
        # never observed half-written.
        def op_update_trans_pure(pipe):
            method = getattr(pipe, redis_op)
            if not update:
                result = method(self.key, *other_keys)
                return {self._unpickle(x) for x in result}
            temp_key = self._create_key()
            pipe.multi()
            method(temp_key, self.key, *other_keys)
            pipe.rename(temp_key, self.key)
        # Mixed path: pull every operand into a Python set and fold with *op*.
        def op_update_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            other_values = []
            for other in others:
                if isinstance(other, RedisCollection):
                    other_values.append(set(other.__iter__(pipe)))
                else:
                    other_values.append(set(other))
            if not update:
                return reduce(op, other_values, self_values)
            new_values = reduce(op, other_values, self_values)
            pipe.multi()
            pipe.delete(self.key)
            for v in new_values:
                pipe.sadd(self.key, self._pickle(v))
        # Classify operands: the pure path needs all of them to be Sets on
        # this server; any other RedisCollection still contributes its key
        # so the transaction can watch it.
        other_keys = []
        all_redis_sets = True
        for other in others:
            if self._same_redis(other):
                other_keys.append(other.key)
            elif self._same_redis(other, RedisCollection):
                other_keys.append(other.key)
                all_redis_sets = False
            else:
                all_redis_sets = False
        if all_redis_sets:
            return self._transaction(op_update_trans_pure, *other_keys)
        return self._transaction(op_update_trans_mixed, *other_keys)
    def _rop_helper(self, other, op):
        # Reflected operators (e.g. plain_set | redis_set): compute locally.
        if not isinstance(other, collections.Set):
            raise TypeError
        return op(set(other), set(self.__iter__()))
    def _xor_helper(self, other, update=False, check_type=False):
        # Symmetric difference, optionally in place.
        if check_type and not isinstance(other, collections.Set):
            raise TypeError
        # Pure path: build both one-sided differences under temporary keys,
        # union them, then delete the temporaries.
        def xor_trans_pure(pipe):
            diff_1_key = self._create_key()
            pipe.sdiffstore(diff_1_key, self.key, other.key)
            diff_2_key = self._create_key()
            pipe.sdiffstore(diff_2_key, other.key, self.key)
            if update:
                pipe.sunionstore(self.key, diff_1_key, diff_2_key)
                ret = None
            else:
                ret = pipe.sunion(diff_1_key, diff_2_key)
                ret = {self._unpickle(x) for x in ret}
            pipe.delete(diff_1_key, diff_2_key)
            return ret
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def xor_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            if use_redis:
                other_values = set(other.__iter__(pipe))
            else:
                other_values = set(other)
            result = self_values ^ other_values
            if update:
                pipe.delete(self.key)
                pipe.sadd(self.key, *(self._pickle(x) for x in result))
                return None
            return result
        if self._same_redis(other):
            return self._transaction(xor_trans_pure, other.key)
        elif self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(xor_trans_mixed, other.key)
        use_redis = False
        return self._transaction(xor_trans_mixed)
    # Intersection
    def __and__(self, other):
        return self._op_update_helper(
            (other,), operator.and_, 'sinter', check_type=True
        )
    def __rand__(self, other):
        return self._rop_helper(other, operator.and_)
    def __iand__(self, other):
        self._op_update_helper(
            (other,),
            operator.and_,
            'sinterstore',
            update=True,
            check_type=True,
        )
        return self
    def intersection(self, *others):
        """
        Return a new set with elements common to the set and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._op_update_helper(tuple(others), operator.and_, 'sinter')
    def intersection_update(self, *others):
        """
        Update the set, keeping only elements found in it and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            The same behavior as at :func:`difference_update` applies.
        """
        return self._op_update_helper(
            tuple(others), operator.and_, 'sinterstore', update=True
        )
    # Comparison
    def __ge__(self, other):
        return self._ge_helper(other, operator.ge, check_type=True)
    def issuperset(self, other):
        """
        Test whether every element in other is in the set.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._ge_helper(other, operator.ge)
    def __gt__(self, other):
        return self._ge_helper(other, operator.gt, check_type=True)
    def __eq__(self, other):
        # NOTE(review): raises TypeError for non-Set operands instead of
        # returning NotImplemented/False — confirm this is intentional.
        return self._le_helper(other, operator.eq, check_type=True)
    def __le__(self, other):
        return self._le_helper(other, operator.le, check_type=True)
    def issubset(self, other):
        """
        Test whether every element in the set is in *other*.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._le_helper(other, operator.le)
    def __lt__(self, other):
        return self._le_helper(other, operator.lt)
    # Union
    def __or__(self, other):
        return self._op_update_helper(
            (other,), operator.or_, 'sunion', check_type=True
        )
    def __ror__(self, other):
        return self._rop_helper(other, operator.or_)
    def __ior__(self, other):
        self._op_update_helper(
            (other,), operator.or_, 'sunionstore', update=True, check_type=True
        )
        return self
    def union(self, *others):
        """
        Return a new set with elements from the set and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            If all *others* are :class:`Set` instances, the operation
            is performed completely in Redis. Otherwise, values are retrieved
            from Redis and the operation is performed in Python.
        """
        return self._op_update_helper(tuple(others), operator.or_, 'sunion')
    def update(self, *others):
        """
        Update the set, adding elements from all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            If all *others* are :class:`Set` instances, the operation
            is performed completely in Redis. Otherwise, values are retrieved
            from Redis and the operation is performed in Python.
        """
        return self._op_update_helper(
            tuple(others), operator.or_, 'sunionstore', update=True
        )
    # Difference
    def __sub__(self, other):
        return self._op_update_helper(
            (other,), operator.sub, 'sdiff', check_type=True
        )
    def __rsub__(self, other):
        return self._rop_helper(other, operator.sub)
    def __isub__(self, other):
        self._op_update_helper(
            (other,), operator.sub, 'sdiffstore', update=True, check_type=True
        )
        return self
    def difference(self, *others):
        """
        Return a new set with elements in the set that are not in the *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._op_update_helper(tuple(others), operator.sub, 'sdiff')
    def difference_update(self, *others):
        """
        Update the set, removing elements found in *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            The same behavior as at :func:`update` applies.
        """
        return self._op_update_helper(
            tuple(others), operator.sub, 'sdiffstore', update=True
        )
    # Symmetric difference
    def __xor__(self, other):
        return self._xor_helper(other, check_type=True)
    def __ixor__(self, other):
        self._xor_helper(other, update=True, check_type=True)
        return self
    def symmetric_difference(self, other):
        """
        Return a new set with elements in either the set or *other* but not
        both.
        :param other: Any kind of iterable.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._xor_helper(other)
    def symmetric_difference_update(self, other):
        """
        Update the set, keeping only elements found in either set, but not
        in both.
        :param other: Any kind of iterable.
        :rtype: None
        .. note::
            The same behavior as at :func:`update` applies.
        """
        self._xor_helper(other, update=True)
        return self
Switch to collections.abc where possible
# -*- coding: utf-8 -*-
"""
sets
~~~~~
The `sets` module contains a standard collection, :class:`Set`, which is based
on Python's built-in set type.
Its elements are stored in a Redis `set <http://redis.io/commands#set>`_
structure.
"""
from __future__ import division, print_function, unicode_literals
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
from functools import reduce
import operator
import random
import six
from .base import RedisCollection
class Set(RedisCollection, collections_abc.MutableSet):
    """
    Collection based on the built-in Python :class:`set` type.
    Items are stored in a Redis set structure.
    See Python's `set documentation
    <https://docs.python.org/3/library/stdtypes.html#set>`_ for usage notes.
    """
    # collections_abc resolves to collections.abc (or plain collections on
    # very old Pythons) via the module-level try/except import.
    # Bind the pickling helpers matching the running interpreter.
    if six.PY2:
        _pickle = RedisCollection._pickle_2
        _unpickle = RedisCollection._unpickle_2
    else:
        _pickle = RedisCollection._pickle_3
    def __init__(self, *args, **kwargs):
        """
        Create a new Set object.
        If the first argument (*data*) is an iterable object, create the new
        Set with its elements as the initial data.
        :param data: Initial data.
        :type data: iterable
        :param redis: Redis client instance. If not provided, default Redis
                      connection is used.
        :type redis: :class:`redis.StrictRedis`
        :param key: Redis key for the collection. Collections with the same key
                    point to the same data. If not provided, a random
                    string is generated.
        :type key: str
        """
        # *data* may be given positionally or as a keyword argument.
        data = args[0] if args else kwargs.pop('data', None)
        super(Set, self).__init__(**kwargs)
        if data:
            self.update(data)
    def _data(self, pipe=None):
        # Fetch all members with SMEMBERS; unpickle lazily via a generator.
        pipe = self.redis if pipe is None else pipe
        return (self._unpickle(x) for x in pipe.smembers(self.key))
    def _repr_data(self):
        # Render like a Python set literal: {elem1, elem2, ...}.
        items = (repr(v) for v in self.__iter__())
        return '{{{}}}'.format(', '.join(items))
    # Magic methods
    def __contains__(self, value, pipe=None):
        """Test for membership of *value* in the set."""
        pipe = self.redis if pipe is None else pipe
        return bool(pipe.sismember(self.key, self._pickle(value)))
    def __iter__(self, pipe=None):
        """Return an iterator over elements of the set."""
        pipe = self.redis if pipe is None else pipe
        return self._data(pipe)
    def __len__(self, pipe=None):
        """Return cardinality of the set."""
        pipe = self.redis if pipe is None else pipe
        return pipe.scard(self.key)
    # Named methods
    def add(self, value):
        """Add element *value* to the set."""
        # Raise TypeError if value is not hashable
        hash(value)
        self.redis.sadd(self.key, self._pickle(value))
    def copy(self, key=None):
        # Create a new Set on the same Redis server (optionally under a
        # caller-chosen key) containing the same elements.
        other = self.__class__(redis=self.redis, key=key)
        other.update(self)
        return other
    def clear(self, pipe=None):
        """Remove all elements from the set."""
        self._clear(pipe)
    def discard(self, value):
        """Remove element *value* from the set if it is present."""
        # Raise TypeError if value is not hashable
        hash(value)
        self.redis.srem(self.key, self._pickle(value))
    def isdisjoint(self, other):
        """
        Return ``True`` if the set has no elements in common with *other*.
        Sets are disjoint if and only if their intersection is the empty set.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        # Pure path: both operands live on the same Redis server, so the
        # intersection is computed server-side with SINTER.
        def isdisjoint_trans_pure(pipe):
            return not pipe.sinter(self.key, other.key)
        # Mixed path: materialize both operands as Python sets and compare
        # locally.  use_redis is assigned below, before the transaction
        # invokes this closure.
        def isdisjoint_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            if use_redis:
                other_values = set(other.__iter__(pipe))
            else:
                other_values = set(other)
            return self_values.isdisjoint(other_values)
        if self._same_redis(other):
            return self._transaction(isdisjoint_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(isdisjoint_trans_mixed, other.key)
        use_redis = False
        return self._transaction(isdisjoint_trans_mixed)
    def pop(self):
        """
        Remove and return an arbitrary element from the set.
        Raises :exc:`KeyError` if the set is empty.
        """
        # SPOP returns None when the key is empty or missing.
        result = self.redis.spop(self.key)
        if result is None:
            raise KeyError
        return self._unpickle(result)
    def random_sample(self, k=1):
        """
        Return a *k* length list of unique elements chosen from the Set.
        Elements are not removed. Similar to :func:`random.sample` function
        from standard library.
        :param k: Size of the sample, defaults to 1.
        :rtype: :class:`list`
        .. note::
            This method is not available on the Python :class:`set`.
            When Redis version < 2.6 is being used the whole set is stored
            in memory and the sample is computed in Python.
        """
        # k == 0: no work to do
        if k == 0:
            results = []
        # k == 1: same behavior on all versions of Redis
        elif k == 1:
            # NOTE(review): on an empty set SRANDMEMBER returns None, which
            # is then passed to _unpickle — confirm that is intended.
            results = [self.redis.srandmember(self.key)]
        # k != 1, Redis version >= 2.6: compute in Redis
        elif self.redis_version >= (2, 6, 0):
            results = self.redis.srandmember(self.key, k)
        # positive k, Redis version < 2.6: sample without replacement
        elif k > 1:
            seq = list(self.__iter__())
            return random.sample(seq, min(k, len(seq)))
        # negative k, Redis version < 2.6: sample with replacement
        else:
            seq = list(self.__iter__())
            return [random.choice(seq) for __ in six.moves.xrange(abs(k))]
        return [self._unpickle(x) for x in results]
    def remove(self, value):
        """
        Remove element *value* from the set. Raises :exc:`KeyError` if it
        is not contained in the set.
        """
        # Raise TypeError if value is not hashable
        hash(value)
        # SREM returns the number of removed members; 0 means it was absent.
        result = self.redis.srem(self.key, self._pickle(value))
        if not result:
            raise KeyError(value)
    def scan_elements(self):
        """
        Yield each of the elements from the collection, without pulling them
        all into memory.
        .. warning::
            This method is not available on the set collections provided
            by Python.
            This method may return the element multiple times.
            See the `Redis SCAN documentation
            <http://redis.io/commands/scan#scan-guarantees>`_ for details.
        """
        for x in self.redis.sscan_iter(self.key):
            yield self._unpickle(x)
    # Comparison and set operation helpers
    def _ge_helper(self, other, op, check_type=False):
        # Shared implementation of >=, > and issuperset: *op* compares the
        # cardinalities, then every element of *other* is checked for
        # membership in self.
        if check_type and not isinstance(other, collections_abc.Set):
            raise TypeError
        # Pure path: SDIFF(other, self) is empty iff other is a subset.
        def ge_trans_pure(pipe):
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            return not pipe.sdiff(other.key, self.key)
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def ge_trans_mixed(pipe):
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(self.__contains__(v, pipe=pipe) for v in values)
        if self._same_redis(other):
            return self._transaction(ge_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(ge_trans_mixed, other.key)
        use_redis = False
        return self._transaction(ge_trans_mixed)
    def _le_helper(self, other, op, check_type=False):
        # Shared implementation of <=, <, == and issubset (mirror image of
        # _ge_helper).
        if check_type and not isinstance(other, collections_abc.Set):
            raise TypeError
        # Pure path: SDIFF(self, other) is empty iff self is a subset.
        def le_trans_pure(pipe):
            if not op(self.__len__(pipe), other.__len__(pipe)):
                return False
            return not pipe.sdiff(self.key, other.key)
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def le_trans_mixed(pipe):
            len_other = other.__len__(pipe) if use_redis else len(other)
            if not op(self.__len__(pipe), len_other):
                return False
            values = set(other.__iter__(pipe)) if use_redis else set(other)
            return all(v in values for v in self.__iter__(pipe))
        if self._same_redis(other):
            return self._transaction(le_trans_pure, other.key)
        if self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(le_trans_mixed, other.key)
        use_redis = False
        return self._transaction(le_trans_mixed)
    def _op_update_helper(
        self, others, op, redis_op, update=False, check_type=False
    ):
        # Shared implementation of union/intersection/difference and their
        # in-place variants.  *op* is the Python operator, *redis_op* the
        # name of the corresponding Redis command (e.g. 'sunionstore').
        if (
            check_type and
            not all(isinstance(x, collections_abc.Set) for x in others)
        ):
            raise TypeError
        # Pure path: every operand is a Set on the same server, so the whole
        # operation runs server-side.  For updates the result is built under
        # a temporary key and RENAMEd over self.key, so the destination is
        # never observed half-written.
        def op_update_trans_pure(pipe):
            method = getattr(pipe, redis_op)
            if not update:
                result = method(self.key, *other_keys)
                return {self._unpickle(x) for x in result}
            temp_key = self._create_key()
            pipe.multi()
            method(temp_key, self.key, *other_keys)
            pipe.rename(temp_key, self.key)
        # Mixed path: pull every operand into a Python set and fold with *op*.
        def op_update_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            other_values = []
            for other in others:
                if isinstance(other, RedisCollection):
                    other_values.append(set(other.__iter__(pipe)))
                else:
                    other_values.append(set(other))
            if not update:
                return reduce(op, other_values, self_values)
            new_values = reduce(op, other_values, self_values)
            pipe.multi()
            pipe.delete(self.key)
            for v in new_values:
                pipe.sadd(self.key, self._pickle(v))
        # Classify operands: the pure path needs all of them to be Sets on
        # this server; any other RedisCollection still contributes its key
        # so the transaction can watch it.
        other_keys = []
        all_redis_sets = True
        for other in others:
            if self._same_redis(other):
                other_keys.append(other.key)
            elif self._same_redis(other, RedisCollection):
                other_keys.append(other.key)
                all_redis_sets = False
            else:
                all_redis_sets = False
        if all_redis_sets:
            return self._transaction(op_update_trans_pure, *other_keys)
        return self._transaction(op_update_trans_mixed, *other_keys)
    def _rop_helper(self, other, op):
        # Reflected operators (e.g. plain_set | redis_set): compute locally.
        if not isinstance(other, collections_abc.Set):
            raise TypeError
        return op(set(other), set(self.__iter__()))
    def _xor_helper(self, other, update=False, check_type=False):
        # Symmetric difference, optionally in place.
        if check_type and not isinstance(other, collections_abc.Set):
            raise TypeError
        # Pure path: build both one-sided differences under temporary keys,
        # union them, then delete the temporaries.
        def xor_trans_pure(pipe):
            diff_1_key = self._create_key()
            pipe.sdiffstore(diff_1_key, self.key, other.key)
            diff_2_key = self._create_key()
            pipe.sdiffstore(diff_2_key, other.key, self.key)
            if update:
                pipe.sunionstore(self.key, diff_1_key, diff_2_key)
                ret = None
            else:
                ret = pipe.sunion(diff_1_key, diff_2_key)
                ret = {self._unpickle(x) for x in ret}
            pipe.delete(diff_1_key, diff_2_key)
            return ret
        # Mixed path; use_redis is assigned below, before the transaction
        # invokes this closure.
        def xor_trans_mixed(pipe):
            self_values = set(self.__iter__(pipe))
            if use_redis:
                other_values = set(other.__iter__(pipe))
            else:
                other_values = set(other)
            result = self_values ^ other_values
            if update:
                pipe.delete(self.key)
                pipe.sadd(self.key, *(self._pickle(x) for x in result))
                return None
            return result
        if self._same_redis(other):
            return self._transaction(xor_trans_pure, other.key)
        elif self._same_redis(other, RedisCollection):
            use_redis = True
            return self._transaction(xor_trans_mixed, other.key)
        use_redis = False
        return self._transaction(xor_trans_mixed)
    # Intersection
    def __and__(self, other):
        return self._op_update_helper(
            (other,), operator.and_, 'sinter', check_type=True
        )
    def __rand__(self, other):
        return self._rop_helper(other, operator.and_)
    def __iand__(self, other):
        self._op_update_helper(
            (other,),
            operator.and_,
            'sinterstore',
            update=True,
            check_type=True,
        )
        return self
    def intersection(self, *others):
        """
        Return a new set with elements common to the set and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._op_update_helper(tuple(others), operator.and_, 'sinter')
    def intersection_update(self, *others):
        """
        Update the set, keeping only elements found in it and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            The same behavior as at :func:`difference_update` applies.
        """
        return self._op_update_helper(
            tuple(others), operator.and_, 'sinterstore', update=True
        )
    # Comparison
    def __ge__(self, other):
        return self._ge_helper(other, operator.ge, check_type=True)
    def issuperset(self, other):
        """
        Test whether every element in other is in the set.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._ge_helper(other, operator.ge)
    def __gt__(self, other):
        return self._ge_helper(other, operator.gt, check_type=True)
    def __eq__(self, other):
        # NOTE(review): raises TypeError for non-Set operands instead of
        # returning NotImplemented/False — confirm this is intentional.
        return self._le_helper(other, operator.eq, check_type=True)
    def __le__(self, other):
        return self._le_helper(other, operator.le, check_type=True)
    def issubset(self, other):
        """
        Test whether every element in the set is in *other*.
        :param other: Any kind of iterable.
        :rtype: boolean
        """
        return self._le_helper(other, operator.le)
    def __lt__(self, other):
        return self._le_helper(other, operator.lt)
    # Union
    def __or__(self, other):
        return self._op_update_helper(
            (other,), operator.or_, 'sunion', check_type=True
        )
    def __ror__(self, other):
        return self._rop_helper(other, operator.or_)
    def __ior__(self, other):
        self._op_update_helper(
            (other,), operator.or_, 'sunionstore', update=True, check_type=True
        )
        return self
    def union(self, *others):
        """
        Return a new set with elements from the set and all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            If all *others* are :class:`Set` instances, the operation
            is performed completely in Redis. Otherwise, values are retrieved
            from Redis and the operation is performed in Python.
        """
        return self._op_update_helper(tuple(others), operator.or_, 'sunion')
    def update(self, *others):
        """
        Update the set, adding elements from all *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            If all *others* are :class:`Set` instances, the operation
            is performed completely in Redis. Otherwise, values are retrieved
            from Redis and the operation is performed in Python.
        """
        return self._op_update_helper(
            tuple(others), operator.or_, 'sunionstore', update=True
        )
    # Difference
    def __sub__(self, other):
        return self._op_update_helper(
            (other,), operator.sub, 'sdiff', check_type=True
        )
    def __rsub__(self, other):
        return self._rop_helper(other, operator.sub)
    def __isub__(self, other):
        self._op_update_helper(
            (other,), operator.sub, 'sdiffstore', update=True, check_type=True
        )
        return self
    def difference(self, *others):
        """
        Return a new set with elements in the set that are not in the *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._op_update_helper(tuple(others), operator.sub, 'sdiff')
    def difference_update(self, *others):
        """
        Update the set, removing elements found in *others*.
        :param others: Iterables, each one as a single positional argument.
        :rtype: None
        .. note::
            The same behavior as at :func:`update` applies.
        """
        return self._op_update_helper(
            tuple(others), operator.sub, 'sdiffstore', update=True
        )
    # Symmetric difference
    def __xor__(self, other):
        return self._xor_helper(other, check_type=True)
    def __ixor__(self, other):
        self._xor_helper(other, update=True, check_type=True)
        return self
    def symmetric_difference(self, other):
        """
        Return a new set with elements in either the set or *other* but not
        both.
        :param other: Any kind of iterable.
        :rtype: :class:`set`
        .. note::
            The same behavior as at :func:`union` applies.
        """
        return self._xor_helper(other)
    def symmetric_difference_update(self, other):
        """
        Update the set, keeping only elements found in either set, but not
        in both.
        :param other: Any kind of iterable.
        :rtype: None
        .. note::
            The same behavior as at :func:`update` applies.
        """
        self._xor_helper(other, update=True)
        return self
|
"""Base stuff for testing.
"""
import subprocess
assert not 'twill' in subprocess.__file__
from flask.ext.testing import TestCase
from abilian.application import Application
from abilian.core.entities import db
__all__ = ['TestConfig', 'BaseTestCase']
class TestConfig(object):
    """Flask/SQLAlchemy configuration used by :class:`BaseTestCase`."""
    # In-memory SQLite: each test run starts from an empty, throwaway DB.
    SQLALCHEMY_DATABASE_URI = "sqlite://"
    SQLALCHEMY_ECHO = False
    TESTING = True
    SECRET_KEY = "SECRET"
class BaseTestCase(TestCase):
    """Common base for application tests: builds an app from
    :class:`TestConfig` and gives every test a fresh database."""
    # Subclasses may override these to exercise another app or config.
    config_class = TestConfig
    application_class = Application
    def create_app(self):
        # Called by flask-testing to construct the application under test.
        config = self.config_class()
        self.app = self.application_class(config)
        return self.app
    def setUp(self):
        # Create the schema and expose the shared session to tests.
        self.app.create_db()
        self.session = db.session
    def tearDown(self):
        # Drop everything and dispose the engine so the next test starts
        # from a clean (in-memory) database.
        db.session.remove()
        db.drop_all()
        db.engine.dispose()
Add debugging helper.
"""Base stuff for testing.
"""
import subprocess
assert not 'twill' in subprocess.__file__
from flask.ext.testing import TestCase
from abilian.application import Application
from abilian.core.entities import db
__all__ = ['TestConfig', 'BaseTestCase']
class TestConfig(object):
    """Flask/SQLAlchemy configuration used by :class:`BaseTestCase`."""
    # In-memory SQLite: each test run starts from an empty, throwaway DB.
    SQLALCHEMY_DATABASE_URI = "sqlite://"
    SQLALCHEMY_ECHO = False
    TESTING = True
    SECRET_KEY = "SECRET"
class BaseTestCase(TestCase):
    # Subclasses may override these to exercise a custom config/application.
    config_class = TestConfig
    application_class = Application

    def create_app(self):
        # Required by flask-testing's TestCase: build the app under test.
        config = self.config_class()
        self.app = self.application_class(config)
        return self.app

    def setUp(self):
        # Fresh schema for every test; self.session is a convenience alias.
        self.app.create_db()
        self.session = db.session

    def tearDown(self):
        # Drop everything and release connections so tests stay isolated.
        db.session.remove()
        db.drop_all()
        db.engine.dispose()

    # Useful for debugging
    def dump_routes(self):
        # Print every registered URL rule, sorted by pattern.
        # (Python 2 print statement — this module predates py3.)
        rules = list(self.app.url_map.iter_rules())
        rules.sort(key=lambda x: x.rule)
        for rule in rules:
            print rule, rule.methods, rule.endpoint
|
# -*- coding: utf-8 -*-
import os
import logging
import httplib as http
import math
from collections import defaultdict
from itertools import islice
from bs4 import BeautifulSoup
from flask import request
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db.models import Count, Q, OuterRef, Exists, Subquery
from framework import status
from framework.utils import iso8601format
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError
from osf.models.nodelog import NodeLog
from website import language
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.tokens import process_token_or_pass
from website.util.permissions import ADMIN, READ, WRITE, CREATOR_PERMISSIONS
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.metadata.utils import serialize_meta_schemas
from osf.models import AbstractNode, PrivateLink, Contributor, Node, NodeRelation
from osf.models.contributor import get_contributor_permissions
from osf.models.licenses import serialize_node_license_record
from website import settings
from website.views import find_bookmark_collection, validate_page_num
from website.views import serialize_node_summary
from website.profile import utils
from website.util.sanitize import strip_html
from website.util import rapply
from addons.forward.utils import serialize_settings, settings_complete
def r_strip_html(collection):
    """Recursively strip HTML from every string found in *collection*.

    Replaces the previous ``lambda`` assignment (PEP 8 E731: use ``def``
    so tracebacks carry a real function name); callers still invoke
    ``r_strip_html(collection)`` exactly as before.
    """
    return rapply(collection, strip_html)


logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
    """Edit a single node attribute ('title', 'description' or 'category')
    posted by the x-editable widget; returns the sanitized new value.
    """
    post_data = request.json
    edited_field = post_data.get('name')
    value = post_data.get('value', '')

    new_val = None
    if edited_field == 'title':
        try:
            node.set_title(value, auth=auth)
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        new_val = node.title
    elif edited_field == 'description':
        node.set_description(value, auth=auth)
        new_val = node.description
    elif edited_field == 'category':
        node.category = new_val = value

    try:
        # save() re-runs model validation — notably for the 'category'
        # branch, which is not validated above.
        node.save()
    except ValidationError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=e.message)
        )
    return {
        'status': 'success',
        'newValue': new_val  # Used by x-editable widget to reflect changes made by sanitizer
    }
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Render the "create new project" page; the template needs no context."""
    return dict()
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new top-level project from the posted JSON.

    Accepts ``title``, optional ``category`` (defaults to 'project'),
    ``description`` and ``template`` (guid of a node to copy). Returns the
    new project's URL plus its summary serialization, with HTTP 201.
    """
    user = auth.user
    data = request.get_json()
    title = strip_html(data.get('title'))
    # NOTE(review): if the payload omits 'title', strip_html presumably
    # yields None and .strip() raises AttributeError — confirm the client
    # always sends a title.
    title = title.strip()
    category = data.get('category', 'project')
    template = data.get('template')
    description = strip_html(data.get('description'))
    new_project = {}

    if template:
        original_node = AbstractNode.load(template)
        changes = {
            'title': title,
            'category': category,
            'template_node': original_node,
        }

        if description:
            changes['description'] = description

        # use_as_template takes changes keyed by the template node's guid.
        project = original_node.use_as_template(
            auth=auth,
            changes={
                template: changes,
            }
        )

    else:
        try:
            project = new_node(category, title, user, description)
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        new_project = _view_project(project, auth)
    return {
        'projectUrl': project.url,
        # Only populated on the non-template branch above.
        'newNode': new_project['node'] if new_project else None
    }, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
    """Create a new node templated on *node* and return its URL (HTTP 201).

    FIX: the local result used to be named ``new_node``, shadowing the
    imported ``new_node()`` factory from ``website.project`` inside this
    function; renamed to avoid the confusing shadow.
    """
    templated_node = node.use_as_template(
        auth=auth,
        changes=dict(),
    )
    return {'url': templated_node.url}, http.CREATED, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
    """Create a child component of *node* from the posted NewNodeForm.

    When ``inherit_contributors`` is checked (and the user has WRITE on the
    parent), copies the parent's contributors onto the new component and
    redirects to its contributors page; otherwise redirects to the parent.
    """
    form = NewNodeForm(request.form)
    user = auth.user
    if form.validate():
        try:
            new_component = new_node(
                title=strip_html(form.title.data),
                user=user,
                category=form.category.data,
                parent=node,
            )
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        redirect_url = node.url
        message = (
            'Your component was created successfully. You can keep working on the project page below, '
            'or go to the new <u><a href={component_url}>component</a></u>.'
        ).format(component_url=new_component.url)
        if form.inherit_contributors.data and node.has_permission(user, WRITE):
            for contributor in node.contributors:
                # The creator keeps creator-level permissions; everyone else
                # keeps whatever they had on the parent node.
                perm = CREATOR_PERMISSIONS if contributor._id == user._id else node.get_permissions(contributor)
                if contributor._id == user._id and not contributor.is_registered:
                    new_component.add_unregistered_contributor(
                        fullname=contributor.fullname, email=contributor.email,
                        permissions=perm, auth=auth, existing_user=contributor
                    )
                else:
                    new_component.add_contributor(contributor, permissions=perm, auth=auth)
            new_component.save()
            redirect_url = new_component.url + 'contributors/'
            message = (
                'Your component was created successfully. You can edit the contributor permissions below, '
                'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
                '<a href="{project_url}">project page</a></u>.'
            ).format(component_url=new_component.url, project_url=node.url)
        status.push_status_message(message, kind='info', trust=True)

        return {
            'status': 'success',
        }, 201, None, redirect_url
    else:
        # TODO: This function doesn't seem to exist anymore?
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
    """Collect the warning prompts shown to the user before forking *node*."""
    prompts = node.callback('before_fork', user=auth.user)

    # Forks copy node links as well, so warn when any exist in the tree.
    if node.has_pointers_recursive:
        prompts.append(
            language.BEFORE_FORK_HAS_POINTERS.format(
                category=node.project_or_component
            )
        )

    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
    """List the full names of node-configurable add-ons on *node*, shown as
    prompts before templating (add-on settings are not copied).

    FIX: serialize each add-on once — the old code called
    ``addon.to_json(auth.user)`` twice per add-on (once to test the value,
    once to append the very same value).
    """
    prompts = []

    for addon in node.get_addons():
        if 'node' in addon.config.configs:
            full_name = addon.to_json(auth.user)['addon_full_name']
            if full_name:
                prompts.append(full_name)

    return {'prompts': prompts}
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_registrations(auth, node, **kwargs):
    # "Registrations" tab: standard project context plus the node's
    # serialized registrations.
    return _view_project(node, auth, primary=True, embed_registrations=True)
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_forks(auth, node, **kwargs):
    # "Forks" tab: standard project context plus the node's serialized forks.
    return _view_project(node, auth, primary=True, embed_forks=True)
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_setting(auth, node, **kwargs):
    """Serialize the context for the project settings page.

    Adds wiki/comment settings and the category choices to the standard
    project serialization.
    """
    # Refresh institution affiliations derived from the user's email domains.
    auth.user.update_affiliated_institutions_by_email_domain()
    auth.user.save()

    ret = _view_project(node, auth, primary=True)

    ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
    ret['wiki_enabled'] = 'wiki' in node.get_addon_names()

    ret['comments'] = {
        'level': node.comment_level,
    }

    # BUGFIX: copy before adding 'project' — the old code called
    # ret['categories'].update(...) on settings.NODE_CATEGORY_MAP itself,
    # mutating the shared module-level map for every subsequent request.
    categories = dict(settings.NODE_CATEGORY_MAP)
    categories['project'] = 'Project'
    ret['categories'] = categories

    return ret
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_addons(auth, node, **kwargs):
    """Serialize the context for the node add-ons configuration page."""
    ret = _view_project(node, auth, primary=True)

    addon_settings = []

    # Configurable add-ons, excluding system-added ones and those that have
    # dedicated settings pages (wiki, forward, twofactor).
    addons_available = [addon for addon in settings.ADDONS_AVAILABLE
                        if addon not in settings.SYSTEM_ADDED_ADDONS['node']
                        and addon.short_name not in ('wiki', 'forward', 'twofactor')]

    for addon in addons_available:
        addon_config = apps.get_app_config('addons_{}'.format(addon.short_name))
        config = addon_config.to_json()
        config['template_lookup'] = addon_config.template_lookup
        config['addon_icon_url'] = addon_config.icon_url
        config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
        config['addon_short_name'] = addon.short_name
        config['addon_full_name'] = addon.full_name
        config['categories'] = addon.categories
        config['enabled'] = node.has_addon(addon.short_name)
        config['default'] = addon.short_name in ['osfstorage']
        addon_settings.append(config)

    # NOTE(review): sorts on config['full_name'], which must come from
    # addon_config.to_json() — only 'addon_full_name' is assigned above;
    # confirm the serialized config includes a 'full_name' key.
    addon_settings = sorted(addon_settings, key=lambda addon: addon['full_name'].lower())

    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    ret['addon_categories'] = set([item for addon in addon_settings for item in addon['categories']])
    ret['addon_settings'] = addon_settings
    ret['addon_js'] = collect_node_config_js([addon for addon in addon_settings if addon['enabled']])

    return ret
def collect_node_config_js(addons):
    """Collect webpack bundles for each of the addons' node-cfg.js modules.

    Return the URLs for each of the JS modules to be included on the node
    addons config page.

    :param list addons: List of node's addon config records.
    """
    candidates = (
        os.path.join('/', 'static', 'public', 'js', entry['short_name'], 'node-cfg.js')
        for entry in addons
    )
    # Only ship bundles that actually exist on disk.
    return [path for path in candidates if os.path.exists(path)]
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
    # Enable/disable add-ons from the posted JSON selection; no response body.
    node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
    """Serialize the contributors page context."""
    ret = _view_project(node, auth, primary=True)
    ret['contributors'] = utils.serialize_contributors(node.contributors, node)
    # Admins inherited from ancestor nodes are listed separately.
    ret['adminContributors'] = utils.serialize_contributors(node.admin_contributors, node, admin=True)
    return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
    """Set the node's comment level ('public'/'private'), or clear it."""
    level = request.json.get('commentLevel')
    if not level:
        node.comment_level = None
    elif level not in ('public', 'private'):
        raise HTTPError(http.BAD_REQUEST)
    else:
        node.comment_level = level
    node.save()
##############################################################################
# View Project
##############################################################################
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def view_project(auth, node, **kwargs):
    """Serialize the project dashboard, including widget data for the wiki,
    citation (mendeley/zotero), forward and dataverse add-ons.
    """
    primary = '/api/v1' not in request.path
    ret = _view_project(node, auth,
                        primary=primary,
                        embed_contributors=True,
                        embed_descendants=True
                        )

    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    # Collect the URIs to the static assets for addons that have widgets
    ret['addon_widget_js'] = list(collect_addon_js(
        node,
        filename='widget-cfg.js',
        config_entry='widget'
    ))
    ret.update(rubeus.collect_addon_assets(node))

    addons_widget_data = {
        'wiki': None,
        'mendeley': None,
        'zotero': None,
        'forward': None,
        'dataverse': None
    }

    if 'wiki' in ret['addons']:
        wiki = node.get_addon('wiki')
        wiki_page = node.get_wiki_page('home')

        # Show "Read more" link if there are multiple pages or has > 400 characters
        more = len(node.wiki_pages_current.keys()) >= 2
        MAX_DISPLAY_LENGTH = 400
        rendered_before_update = False
        if wiki_page and wiki_page.html(node):
            # BUGFIX: measure and truncate the raw HTML string.  The old code
            # wrapped the markup in BeautifulSoup first, so len() counted
            # top-level parse-tree children (not characters) and slicing the
            # soup object failed outright.
            wiki_html = wiki_page.html(node)
            if len(wiki_html) > MAX_DISPLAY_LENGTH:
                wiki_html = BeautifulSoup(wiki_html[:MAX_DISPLAY_LENGTH] + '...', 'html.parser')
                more = True
            rendered_before_update = wiki_page.rendered_before_update
        else:
            wiki_html = None

        wiki_widget_data = {
            'complete': True,
            'wiki_content': unicode(wiki_html) if wiki_html else None,
            'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
            'rendered_before_update': rendered_before_update,
            'more': more,
            'include': False,
        }
        wiki_widget_data.update(wiki.config.to_json())
        addons_widget_data['wiki'] = wiki_widget_data

    if 'dataverse' in ret['addons']:
        node_addon = node.get_addon('dataverse')
        widget_url = node.api_url_for('dataverse_get_widget_contents')

        dataverse_widget_data = {
            'complete': node_addon.complete,
            'widget_url': widget_url,
        }
        dataverse_widget_data.update(node_addon.config.to_json())
        addons_widget_data['dataverse'] = dataverse_widget_data

    if 'forward' in ret['addons']:
        node_addon = node.get_addon('forward')
        forward_widget_data = serialize_settings(node_addon)
        forward_widget_data['complete'] = settings_complete(node_addon)
        forward_widget_data.update(node_addon.config.to_json())
        addons_widget_data['forward'] = forward_widget_data

    if 'zotero' in ret['addons']:
        node_addon = node.get_addon('zotero')
        zotero_widget_data = node_addon.config.to_json()
        zotero_widget_data.update({
            'complete': node_addon.complete,
            'list_id': node_addon.list_id,
        })
        addons_widget_data['zotero'] = zotero_widget_data

    if 'mendeley' in ret['addons']:
        node_addon = node.get_addon('mendeley')
        mendeley_widget_data = node_addon.config.to_json()
        mendeley_widget_data.update({
            'complete': node_addon.complete,
            'list_id': node_addon.list_id,
        })
        addons_widget_data['mendeley'] = mendeley_widget_data

    ret.update({'addons_widget_data': addons_widget_data})
    return ret
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
    """Reorders the components in a project's component list.

    :param-json list new_list: List of strings that include node GUIDs.
    """
    ordered_guids = request.get_json().get('new_list', [])
    node_relations = (
        node.node_relations
        .select_related('child')
        .filter(child__is_deleted=False)
    )
    deleted_node_relation_ids = list(
        node.node_relations.select_related('child')
        .filter(child__is_deleted=True)
        .values_list('pk', flat=True)
    )

    if len(ordered_guids) > len(node_relations):
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='Too many node IDs'))

    # Ordered NodeRelation pks, sorted according the order of guids passed in the request payload
    try:
        new_node_relation_ids = [
            each.id for each in sorted(node_relations,
                                       key=lambda nr: ordered_guids.index(nr.child._id))
        ]
    except ValueError:
        # ROBUSTNESS: a child guid missing from the payload used to escape
        # as an unhandled ValueError (HTTP 500); treat it as a bad request,
        # mirroring the invalid-list branch below.
        logger.error('Got invalid node list in reorder components')
        raise HTTPError(http.BAD_REQUEST)

    if len(node_relations) == len(ordered_guids):
        node.set_noderelation_order(new_node_relation_ids + deleted_node_relation_ids)
        node.save()
        return {'nodes': ordered_guids}

    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
    """Analytics page: standard project context plus the Keen.io read key."""
    ret = _view_project(node, auth, primary=True)
    ret['node']['keenio_read_key'] = node.keenio_read_key
    return ret
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
    """Make a node public or private; *permissions* comes from the URL rule."""
    permissions = kwargs.get('permissions')
    if permissions is None:
        raise HTTPError(http.BAD_REQUEST)

    try:
        node.set_privacy(permissions, auth)
    except NodeStateError as e:
        error_data = dict(
            message_short="Can't change privacy",
            message_long=e.message
        )
        raise HTTPError(http.BAD_REQUEST, data=error_data)

    return dict(status='success', permissions=permissions)
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    """Bulk-update editable node attributes from the posted JSON and return
    the fields that actually changed.
    """
    # node.update() consults node.WRITABLE_WHITELIST, so write-permission
    # contributors may only modify category, title and description.
    data = r_strip_html(request.get_json())
    try:
        updated_field_names = node.update(data, auth=auth)
    except NodeUpdateError as e:
        raise HTTPError(400, data=dict(
            message_short="Failed to update attribute '{0}'".format(e.key),
            message_long=e.reason
        ))

    excluded = ('logs', 'modified', 'last_logged')
    updated_fields_dict = {}
    for key in updated_field_names:
        if key in excluded:
            continue
        if key == 'tags':
            # Tags must be stringified to stay JSON-serializable.
            updated_fields_dict[key] = [str(tag) for tag in node.tags]
        else:
            updated_fields_dict[key] = getattr(node, key)

    return {'updated_fields': updated_fields_dict}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.
    """
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    node.save()

    status.push_status_message(
        '{} has been successfully deleted.'.format(
            node.project_or_component.capitalize()
        ),
        kind='success',
        trust=False,
    )

    parent = node.parent_node
    if parent and parent.can_view(auth):
        destination = parent.url
    else:
        destination = '/dashboard/'
    return {'url': destination}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
    """Soft-delete a view-only link and log the removal on every node it
    was attached to.
    """
    auth = kwargs.get('auth')
    link_id = request.json['private_link_id']

    try:
        link = PrivateLink.objects.get(_id=link_id)
    except PrivateLink.DoesNotExist:
        raise HTTPError(http.NOT_FOUND)

    link.is_deleted = True
    link.save()

    for node in link.nodes.all():
        node.add_log(
            NodeLog.VIEW_ONLY_LINK_REMOVED,
            {
                'project': node.parent_id,
                'node': node._id,
                'user': auth.user._id,
                'anonymous_link': link.anonymous,
            },
            auth=auth
        )
# TODO: Split into separate functions
def _render_addons(addons):
widgets = {}
configs = {}
js = []
css = []
for addon in addons:
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, contributor):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if contributor and contributor.write and not node.is_registration:
return has_wiki
else:
return has_wiki and wiki_page and wiki_page.html(node)
def _view_project(node, auth, primary=False,
                  embed_contributors=False, embed_descendants=False,
                  embed_registrations=False, embed_forks=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param node: the node to serialize
    :param auth: Auth object for the current request
    :param bool primary: True when serving the HTML page (runs add-on
        before_page_load callbacks); False for /api/v1 calls
    :param bool embed_*: optionally embed contributor, descendant,
        registration or fork summaries
    """
    # Re-fetch with contributor guids eagerly included to avoid N+1 queries.
    node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
    user = auth.user

    try:
        contributor = node.contributor_set.get(user=user)
    except Contributor.DoesNotExist:
        contributor = None

    parent = node.find_readable_antecedent(auth)
    if user:
        bookmark_collection = find_bookmark_collection(user)
        bookmark_collection_id = bookmark_collection._id
        in_bookmark_collection = bookmark_collection.linked_nodes.filter(pk=node.pk).exists()
    else:
        in_bookmark_collection = False
        bookmark_collection_id = ''

    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    addons = list(node.get_addons())
    widgets, configs, js, css = _render_addons(addons)
    redirect_url = node.url + '?view_only=None'

    node_linked_preprint = node.linked_preprint

    # Admins of a pending registration/embargo get a personal rejection URL.
    disapproval_link = ''
    if (node.is_pending_registration and node.has_permission(user, ADMIN)):
        disapproval_link = node.root.registration_approval.stashed_urls.get(user._id, {}).get('reject', '')

    if (node.is_pending_embargo and node.has_permission(user, ADMIN)):
        disapproval_link = node.root.embargo.stashed_urls.get(user._id, {}).get('reject', '')

    # Before page load callback; skip if not primary call
    if primary:
        for addon in addons:
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message, kind='info', dismissible=False, trust=True)
    # NOTE(review): this local shadows the module-level NodeRelation import.
    NodeRelation = apps.get_model('osf.NodeRelation')

    is_registration = node.is_registration
    data = {
        'node': {
            'disapproval_link': disapproval_link,
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'category_short': node.category,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'license': serialize_node_license_record(node.license),
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            'update_url': node.api_url_for('update_node'),
            'in_dashboard': in_bookmark_collection,
            'is_public': node.is_public,
            'is_archiving': node.archiving,
            'date_created': iso8601format(node.created),
            'date_modified': iso8601format(node.last_logged) if node.last_logged else '',
            'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
            'children': node.nodes_active.exists(),
            'child_exists': Node.objects.get_children(node, active=True).exists(),
            'is_registration': is_registration,
            'is_pending_registration': node.is_pending_registration if is_registration else False,
            'is_retracted': node.is_retracted if is_registration else False,
            'is_pending_retraction': node.is_pending_retraction if is_registration else False,
            'retracted_justification': getattr(node.retraction, 'justification', None) if is_registration else None,
            'date_retracted': iso8601format(getattr(node.retraction, 'date_retracted', None)) if is_registration else '',
            'embargo_end_date': node.embargo_end_date.strftime('%A, %b %d, %Y') if is_registration and node.embargo_end_date else '',
            'is_pending_embargo': node.is_pending_embargo if is_registration else False,
            'is_embargoed': node.is_embargoed if is_registration else False,
            'is_pending_embargo_termination': is_registration and node.is_embargoed and (
                node.embargo_termination_approval and
                node.embargo_termination_approval.is_pending_approval
            ),
            'registered_from_url': node.registered_from.url if is_registration else '',
            'registered_date': iso8601format(node.registered_date) if is_registration else '',
            'root_id': node.root._id if node.root else None,
            'registered_meta': node.registered_meta,
            'registered_schemas': serialize_meta_schemas(list(node.registered_schema.all())) if is_registration else False,
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
            'fork_count': node.forks.exclude(type='osf.registration').filter(is_deleted=False).count(),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'templated_count': node.templated_list.count(),
            'linked_nodes_count': NodeRelation.objects.filter(child=node, is_node_link=True).exclude(parent__type='osf.collection').count(),
            'anonymous': anonymous,
            'comment_level': node.comment_level,
            'has_comments': node.comment_set.exists(),
            'identifiers': {
                'doi': node.get_identifier_value('doi'),
                'ark': node.get_identifier_value('ark'),
            },
            'institutions': get_affiliated_institutions(node) if node else [],
            'has_draft_registrations': node.has_active_draft_registrations,
            'is_preprint': node.is_preprint,
            'has_moderated_preprint': node_linked_preprint.provider.reviews_workflow if node_linked_preprint else '',
            'preprint_state': node_linked_preprint.reviews_state if node_linked_preprint else '',
            'preprint_word': node_linked_preprint.provider.preprint_word if node_linked_preprint else '',
            'preprint_provider': {
                'name': node_linked_preprint.provider.name,
                'workflow': node_linked_preprint.provider.reviews_workflow
            } if node_linked_preprint else {},
            'is_preprint_orphan': node.is_preprint_orphan,
            'has_published_preprint': node.preprints.filter(is_published=True).exists() if node else False,
            'preprint_file_id': node.preprint_file._id if node.preprint_file else None,
            'preprint_url': node.preprint_url
        },
        'parent_node': {
            'exists': parent is not None,
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'category': parent.category_display if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'registrations_url': parent.web_url_for('node_registrations') if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': parent.can_view(auth) if parent else False,
        },
        'user': {
            'is_contributor': bool(contributor),
            'is_admin': bool(contributor) and contributor.admin,
            'is_admin_parent': parent.is_admin_parent(user) if parent else False,
            'can_edit': bool(contributor) and contributor.write and not node.is_registration,
            'can_edit_tags': bool(contributor) and contributor.write,
            'has_read_permissions': node.has_permission(user, READ),
            'permissions': get_contributor_permissions(contributor, as_list=True) if contributor else [],
            'id': user._id if user else None,
            'username': user.username if user else None,
            'fullname': user.fullname if user else '',
            'can_comment': bool(contributor) or node.can_comment(auth),
            'show_wiki_widget': _should_show_wiki_widget(node, contributor),
            'dashboard_id': bookmark_collection_id,
            'institutions': get_affiliated_institutions(user) if user else [],
        },
        # TODO: Namespace with nested dicts
        'addons_enabled': [each.short_name for each in addons],
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
        'node_categories': [
            {'value': key, 'display_name': value}
            for key, value in settings.NODE_CATEGORY_MAP.iteritems()
        ]
    }

    # Anonymized views get bare guids instead of full contributor records.
    if embed_contributors and not anonymous:
        data['node']['contributors'] = utils.serialize_visible_contributors(node)
    else:
        data['node']['contributors'] = list(node.contributors.values_list('guids___id', flat=True))
    if embed_descendants:
        descendants, all_readable = _get_readable_descendants(auth=auth, node=node)
        data['user']['can_sort'] = all_readable
        data['node']['descendants'] = [
            serialize_node_summary(node=each, auth=auth, primary=not node.has_node_link_to(each), show_path=False)
            for each in descendants
        ]
    if embed_registrations:
        data['node']['registrations'] = [
            serialize_node_summary(node=each, auth=auth, show_path=False)
            for each in node.registrations_all.order_by('-registered_date').exclude(is_deleted=True).annotate(nlogs=Count('logs'))
        ]
    if embed_forks:
        data['node']['forks'] = [
            serialize_node_summary(node=each, auth=auth, show_path=False)
            for each in node.forks.exclude(type='osf.registration').exclude(is_deleted=True).order_by('-forked_date').annotate(nlogs=Count('logs'))
        ]
    return data
def get_affiliated_institutions(obj):
    """Serialize the affiliated institutions of a user or node.

    :param obj: any object exposing ``affiliated_institutions.all()``
    :return: list of dicts with ``name``, ``logo_path`` and ``id``
    """
    return [
        {
            'name': institution.name,
            'logo_path': institution.logo_path,
            'id': institution._id,
        }
        for institution in obj.affiliated_institutions.all()
    ]
def serialize_children(child_list, nested, indent=0):
    """
    Returns the serialized representation of a list of child nodes.

    This is a helper function for _get_children and as such it does not
    redundantly check permissions.

    :param child_list: nodes at the current tree level
    :param dict nested: mapping of parent guid -> list of child nodes
    :param int indent: current depth, echoed into each record
    """
    results = []
    for child in child_list:
        results.append({
            'id': child._id,
            'title': child.title,
            'is_public': child.is_public,
            'parent_id': child.parentnode_id,
            'indent': indent
        })
        # IDIOM: membership test on the dict itself instead of building
        # nested.keys() (a full list on Python 2) on every recursion.
        if child._id in nested:
            results.extend(serialize_children(nested[child._id], nested, indent + 1))
    return results
def _get_children(node, auth):
    """
    Returns the serialized representation of the given node and all of its children
    for which the given user has ADMIN permission.
    """
    # Subquery: does the requesting user hold an admin Contributor row on the child?
    is_admin = Contributor.objects.filter(node=OuterRef('pk'), admin=True, user=auth.user)
    # Subquery: guid of the child's real (non node-link) parent.
    parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
    children = (Node.objects.get_children(node)
                .filter(is_deleted=False)
                .annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
                .annotate(has_admin_perm=Exists(is_admin))
                .filter(has_admin_perm=True))
    # Group children by parent guid, then serialize depth-first from *node*.
    nested = defaultdict(list)
    for child in children:
        nested[child.parentnode_id].append(child)

    return serialize_children(nested[node._id], nested)
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
    """Serialize the node's active view-only links for the settings table."""
    active_links = [x.to_json() for x in node.private_links_active]
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': active_links,
        }
    }
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
    """Return *node* plus every descendant the user administers."""
    return {
        'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
        'children': _get_children(node, auth),
    }
@must_be_valid_project
def get_recent_logs(node, **kwargs):
    # Return the three most recent log primary keys (newest first).
    # NOTE(review): relies on the private queryset helper _to_primary_keys();
    # presumably node.logs is ordered oldest-first, hence reversed() — confirm
    # against the model's default ordering.
    logs = list(reversed(node.logs._to_primary_keys()))[:3]
    return {'logs': logs}
def _get_readable_descendants(auth, node, permission=None):
    """Return ``(descendants, all_readable)``: direct children the user may
    see, plus readable nodes reached through unreadable children.

    :param auth: Auth for the current user
    :param node: parent node
    :param permission: optional permission name each child must grant
    :return: (list of nodes, bool — False when anything was skipped)
    """
    descendants = []
    all_readable = True
    for child in node.get_nodes(is_deleted=False):
        if permission:
            perm = permission.lower().strip()
            if not child.has_permission(auth.user, perm):
                all_readable = False
                continue
        # User can view child
        if child.can_view(auth):
            descendants.append(child)
        # Child is a node link and user has write permission
        elif node.linked_nodes.filter(id=child.id).exists():
            if node.has_permission(auth.user, 'write'):
                descendants.append(child)
            else:
                all_readable = False
        else:
            all_readable = False
            # Surface readable nodes hidden behind this unreadable child.
            for descendant in child.find_readable_descendants(auth):
                descendants.append(descendant)
    return descendants, all_readable
def serialize_child_tree(child_list, user, nested):
    """
    Recursively serializes and returns a list of child nodes.

    This is a helper function for node_child_tree and as such it does not
    redundantly check permissions.

    :param child_list: nodes at the current tree level
    :param user: requesting user (only ``_id`` is read here)
    :param dict nested: mapping of parent guid -> list of child nodes
    """
    serialized_children = []
    for child in child_list:
        contributors = [{
            'id': contributor.user._id,
            'is_admin': contributor.admin,
            'is_confirmed': contributor.user.is_confirmed,
            'visible': contributor.visible
        } for contributor in child.contributor_set.all()]

        serialized_children.append({
            'node': {
                'id': child._id,
                'url': child.url,
                'title': child.title,
                'is_public': child.is_public,
                'contributors': contributors,
                'is_admin': child.has_admin_perm,
            },
            'user_id': user._id,
            # IDIOM: membership test on the dict itself instead of building
            # nested.keys() (a full list on Python 2) on every recursion.
            'children': serialize_child_tree(nested[child._id], user, nested) if child._id in nested else [],
            'nodeType': 'project' if not child.parentnode_id else 'component',
            'category': child.category,
            'permissions': {
                'view': True,
                'is_admin': child.has_admin_perm
            }
        })

    # Deeper subtrees sort first.
    return sorted(serialized_children, key=lambda k: len(k['children']), reverse=True)
def node_child_tree(user, node):
    """
    Returns the serialized representation (for treebeard) of a given node and its children.

    The given user must have ADMIN access on the given node, and therefore the given user has
    implicit read permisson on all of node's children (i.e. read permissions aren't checked here)

    :param user: OSFUser object
    :param node: parent project Node object
    :return: treebeard-formatted data
    """
    serialized_nodes = []

    is_contrib = node.is_contributor(user)
    # NOTE(review): this assert runs after *node* has already been
    # dereferenced above, so it can never catch a None node; likely vestigial.
    assert node, '{} is not a valid Node.'.format(node._id)

    if not is_contrib:
        return []

    is_admin = node.has_permission(user, ADMIN)
    if is_admin:
        # Annotate each child with its real parent guid and whether *user*
        # holds admin on it; prefetch contributor guids to avoid N+1 queries.
        is_admin_sqs = Contributor.objects.filter(node=OuterRef('pk'), admin=True, user=user)
        parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
        children = (Node.objects.get_children(node)
                    .filter(is_deleted=False)
                    .annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
                    .annotate(has_admin_perm=Exists(is_admin_sqs))
                    .include('contributor__user__guids')
                    )
    else:
        children = []

    # Group children by parent guid for the recursive serializer.
    nested = defaultdict(list)
    for child in children:
        nested[child.parentnode_id].append(child)

    contributors = [{
        'id': contributor.user._id,
        'is_admin': node.has_permission(contributor.user, ADMIN),
        'is_confirmed': contributor.user.is_confirmed,
        'visible': contributor.visible
    } for contributor in node.contributor_set.all().include('user__guids')]

    serialized_nodes.append({
        'node': {
            'id': node._id,
            'url': node.url,
            'title': node.title,
            'is_public': node.is_public,
            'contributors': contributors,
            'is_admin': is_admin
        },
        'user_id': user._id,
        'children': serialize_child_tree(nested.get(node._id), user, nested) if node._id in nested.keys() else [],
        # The root renders as a folder unless the user can read its parent.
        'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, 'read') else 'node',
        'nodeType': node.project_or_component,
        'category': node.category,
        'permissions': {
            'view': True,
            'is_admin': is_admin
        }
    })

    return serialized_nodes
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
    """Return the treebeard-formatted child tree for the requested project."""
    target = kwargs.get('node') or kwargs['project']
    return node_child_tree(auth.user, target)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
    """Create a new private link object and add it to the node and its selected children."""
    payload = request.json
    selected_ids = payload.get('node_ids', [])
    link_name = payload.get('name', '')
    is_anonymous = payload.get('anonymous', False)

    # The target node itself always heads the list.
    if node._id not in selected_ids:
        selected_ids.insert(0, node._id)

    target_nodes = [AbstractNode.load(each_id) for each_id in selected_ids]

    try:
        new_link = new_private_link(
            name=link_name, user=auth.user, nodes=target_nodes, anonymous=is_anonymous
        )
    except ValidationError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=e.message)
        )

    return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
    """Rename an existing view-only link on the project."""
    name = request.json.get('value', '')
    try:
        validate_title(name)
    except ValidationError as e:
        if e.message == 'Invalid title.':
            message = 'Invalid link name.'
        else:
            message = e.message
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=message)
        )

    private_link = PrivateLink.load(request.json.get('pk', ''))
    if not private_link:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='View-only link not found.')
        )

    sanitized_name = strip_html(name)
    private_link.name = sanitized_name
    private_link.save()
    return sanitized_name
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
data = {
'id': node._id,
'title': node.title,
'etal': len(node.visible_contributors) > 1,
'isRegistration': node.is_registration
}
if node.is_registration:
data['title'] += ' (registration)'
data['dateRegistered'] = node.registered_date.isoformat()
else:
data['dateCreated'] = node.created.isoformat()
data['dateModified'] = node.modified.isoformat()
first_author = node.visible_contributors[0]
data['firstAuthor'] = first_author.family_name or first_author.given_name or first_author.fullname
return data
@must_be_logged_in
def search_node(auth, **kwargs):
    """Search nodes by title for the pointer-search widget.

    Request JSON:
        nodeId -- guid of a node to exclude (with its components) from results
        includePublic -- whether to include public nodes the user doesn't contribute to
        size -- page size (default 5)
        page -- zero-based page index
        query -- title substring to match
    """
    # Get arguments
    node = AbstractNode.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    # Kept as a float so `count / size` below is true division on Python 2.
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()

    start = (page * size)
    if not query:
        return {'nodes': []}

    # Exclude current node from query if provided
    nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []

    can_view_query = Q(_contributors=auth.user)
    if include_public:
        can_view_query = can_view_query | Q(is_public=True)

    nodes = (AbstractNode.objects
             .filter(
                 can_view_query,
                 title__icontains=query,
                 is_deleted=False)
             .exclude(id__in=nin)
             .exclude(type='osf.collection')
             .exclude(type='osf.quickfilesnode'))

    count = nodes.count()
    pages = math.ceil(count / size)
    validate_page_num(page, pages)

    return {
        'nodes': [
            _serialize_node_search(each)
            # islice() only accepts integer (or None) indices; `start` and
            # `size` are floats, so cast here to avoid a ValueError.
            for each in islice(nodes, int(start), int(start + size))
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node using only JSON parameters."""
    target_id = request.json.get('toNodeID')
    pointer_guid = request.json.get('pointerID')

    # Both identifiers are required.
    if not target_id or not pointer_guid:
        raise HTTPError(http.BAD_REQUEST)

    pointer_node = AbstractNode.load(pointer_guid)
    target_node = AbstractNode.load(target_id)
    try:
        _add_pointers(target_node, [pointer_node], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
    """Add one or more pointers to a node from the posted 'nodeIds' list."""
    guids = request.json.get('nodeIds')
    if not guids:
        raise HTTPError(http.BAD_REQUEST)

    pointer_nodes = [AbstractNode.load(guid) for guid in guids]
    try:
        _add_pointers(node, pointer_nodes, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)

    return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
    """Remove a pointer from a node.

    Raises a 400 if the pointer id is missing, cannot be loaded, or the
    pointer is not in ``node.nodes``.
    """
    # TODO: since these a delete request, shouldn't use request body. put pointer
    # id in the URL instead
    target_guid = request.json.get('pointerId')
    if target_guid is None:
        raise HTTPError(http.BAD_REQUEST)

    target = AbstractNode.load(target_guid)
    if target is None:
        raise HTTPError(http.BAD_REQUEST)

    try:
        node.rm_pointer(target, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)

    node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
    """Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
    or not present in `nodes`.

    :param Auth auth: Consolidated authorization
    :param Node node: root from which pointer is child
    :return: Fork of node to which nodelink(pointer) points
    """
    NodeRelation = apps.get_model('osf.NodeRelation')

    linked_node = AbstractNode.load(request.json.get('nodeId'))
    node_relation = (
        NodeRelation.objects
        .filter(child=linked_node, is_node_link=True, parent=node)
        .first()
    )
    if node_relation is None:
        # TODO: Change this to 404?
        raise HTTPError(http.BAD_REQUEST)

    try:
        fork = node.fork_pointer(node_relation, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)

    return {
        'data': {
            'node': serialize_node_summary(node=fork, auth=auth, show_path=False)
        }
    }, http.CREATED
def abbrev_authors(node):
    """Return a short author credit for ``node``.

    Uses the lead visible contributor's family name (falling back to given
    then full name), appending ' et al.' when there are multiple visible
    contributors.
    """
    lead = node.visible_contributors[0]
    credit = lead.family_name or lead.given_name or lead.fullname
    suffix = ' et al.' if node.visible_contributors.count() > 1 else ''
    return credit + suffix
def serialize_pointer(node, auth):
    """Serialize a pointed-to node for display, hiding details from viewers
    without read access.
    """
    if not node.can_view(auth):
        return {
            'url': None,
            'title': 'Private Component',
            'authorShort': 'Private Author(s)',
        }
    return {
        'id': node._id,
        'url': node.url,
        'title': node.title,
        'authorShort': abbrev_authors(node),
    }
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
    """View that returns the pointers for a project."""
    NodeRelation = apps.get_model('osf.NodeRelation')
    relations = (
        NodeRelation.objects
        .filter(child=node, is_node_link=True)
        .exclude(parent__type='osf.collection')  # exclude folders
    )
    return {
        'pointed': [serialize_pointer(relation.parent, auth) for relation in relations]
    }
# Revert "Check if asset path exists for addons"
# -*- coding: utf-8 -*-
import os
import logging
import httplib as http
import math
from collections import defaultdict
from itertools import islice
from bs4 import BeautifulSoup
from flask import request
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db.models import Count, Q, OuterRef, Exists, Subquery
from framework import status
from framework.utils import iso8601format
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError
from osf.models.nodelog import NodeLog
from website import language
from website.util import rubeus
from website.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.tokens import process_token_or_pass
from website.util.permissions import ADMIN, READ, WRITE, CREATOR_PERMISSIONS
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.metadata.utils import serialize_meta_schemas
from osf.models import AbstractNode, PrivateLink, Contributor, Node, NodeRelation
from osf.models.contributor import get_contributor_permissions
from osf.models.licenses import serialize_node_license_record
from website import settings
from website.views import find_bookmark_collection, validate_page_num
from website.views import serialize_node_summary
from website.profile import utils
from website.util.sanitize import strip_html
from website.util import rapply
from addons.forward.utils import serialize_settings, settings_complete
# Recursively strip HTML from every string in a (possibly nested) collection.
r_strip_html = lambda collection: rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def edit_node(auth, node, **kwargs):
    """Edit a single node attribute ('title', 'description', or 'category')
    and return the sanitized value actually stored.
    """
    payload = request.json
    field = payload.get('name')
    value = payload.get('value', '')
    new_val = None

    if field == 'title':
        try:
            node.set_title(value, auth=auth)
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        new_val = node.title
    elif field == 'description':
        node.set_description(value, auth=auth)
        new_val = node.description
    elif field == 'category':
        node.category = new_val = value

    try:
        node.save()
    except ValidationError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=e.message)
        )

    # 'newValue' is used by the x-editable widget to reflect changes made by
    # the sanitizer.
    return {'status': 'success', 'newValue': new_val}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Render the 'create new project' page; context comes from the template."""
    return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new project, optionally templated on an existing node.

    Expects JSON with 'title', optional 'category' (default 'project'),
    optional 'template' (guid of a node to template on), and optional
    'description'. Returns the new project's URL and summary, 201 CREATED.
    """
    user = auth.user
    data = request.get_json()
    # NOTE(review): data.get('title') may be None when the client omits it,
    # which would break strip_html()/.strip() -- presumably validated
    # upstream; confirm against the client form.
    title = strip_html(data.get('title'))
    title = title.strip()
    category = data.get('category', 'project')
    template = data.get('template')
    description = strip_html(data.get('description'))
    new_project = {}
    if template:
        original_node = AbstractNode.load(template)
        changes = {
            'title': title,
            'category': category,
            'template_node': original_node,
        }
        if description:
            changes['description'] = description
        # use_as_template expects the changes dict keyed by the template
        # node's guid.
        project = original_node.use_as_template(
            auth=auth,
            changes={
                template: changes,
            }
        )
    else:
        try:
            project = new_node(category, title, user, description)
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        # Only the non-templated path serializes the full project summary.
        new_project = _view_project(project, auth)
    return {
        'projectUrl': project.url,
        'newNode': new_project['node'] if new_project else None
    }, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
    """Create a new node templated on ``node`` and return its URL.

    :return: ({'url': ...}, 201 CREATED, None)
    """
    # Named `templated_node` so it no longer shadows the module-level
    # `new_node` factory imported from website.project.
    templated_node = node.use_as_template(
        auth=auth,
        changes=dict(),
    )
    return {'url': templated_node.url}, http.CREATED, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
    """Create a new component under ``node`` from the posted NewNodeForm.

    Optionally copies the parent's contributors (with their permissions)
    onto the new component. Returns 201 with a redirect URL on success, or
    raises 400 with the form errors pushed to status.
    """
    form = NewNodeForm(request.form)
    user = auth.user
    if form.validate():
        try:
            new_component = new_node(
                title=strip_html(form.title.data),
                user=user,
                category=form.category.data,
                parent=node,
            )
        except ValidationError as e:
            raise HTTPError(
                http.BAD_REQUEST,
                data=dict(message_long=e.message)
            )
        redirect_url = node.url
        message = (
            'Your component was created successfully. You can keep working on the project page below, '
            'or go to the new <u><a href={component_url}>component</a></u>.'
        ).format(component_url=new_component.url)
        if form.inherit_contributors.data and node.has_permission(user, WRITE):
            for contributor in node.contributors:
                # The creator gets creator permissions; everyone else keeps
                # the permissions they hold on the parent.
                perm = CREATOR_PERMISSIONS if contributor._id == user._id else node.get_permissions(contributor)
                if contributor._id == user._id and not contributor.is_registered:
                    new_component.add_unregistered_contributor(
                        fullname=contributor.fullname, email=contributor.email,
                        permissions=perm, auth=auth, existing_user=contributor
                    )
                else:
                    new_component.add_contributor(contributor, permissions=perm, auth=auth)
            new_component.save()
            # When contributors were inherited, land on the contributors page.
            redirect_url = new_component.url + 'contributors/'
            message = (
                'Your component was created successfully. You can edit the contributor permissions below, '
                'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
                '<a href="{project_url}">project page</a></u>.'
            ).format(component_url=new_component.url, project_url=node.url)
        status.push_status_message(message, kind='info', trust=True)
        return {
            'status': 'success',
        }, 201, None, redirect_url
    else:
        # TODO: This function doesn't seem to exist anymore?
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
    """Collect warning prompts to show the user before forking a node."""
    prompts = node.callback('before_fork', user=auth.user)
    if node.has_pointers_recursive:
        pointer_warning = language.BEFORE_FORK_HAS_POINTERS.format(
            category=node.project_or_component
        )
        prompts.append(pointer_warning)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
    """List the full names of node-configurable addons on ``node``, shown as
    prompts before the node is used as a template.
    """
    prompts = []
    for addon in node.get_addons():
        if 'node' in addon.config.configs:
            # Serialize once instead of calling to_json() twice per addon,
            # as the original did.
            full_name = addon.to_json(auth.user)['addon_full_name']
            if full_name:
                prompts.append(full_name)
    return {'prompts': prompts}
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_registrations(auth, node, **kwargs):
    """Render the registrations tab context for a project."""
    return _view_project(
        node,
        auth,
        primary=True,
        embed_registrations=True,
    )
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
def node_forks(auth, node, **kwargs):
    """Render the forks tab context for a project."""
    return _view_project(
        node,
        auth,
        primary=True,
        embed_forks=True,
    )
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_setting(auth, node, **kwargs):
    """Render the node settings page context."""
    auth.user.update_affiliated_institutions_by_email_domain()
    auth.user.save()

    ret = _view_project(node, auth, primary=True)

    ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
    ret['wiki_enabled'] = 'wiki' in node.get_addon_names()

    ret['comments'] = {
        'level': node.comment_level,
    }

    # Copy before updating: the original called .update() directly on
    # settings.NODE_CATEGORY_MAP, permanently injecting 'project' into the
    # shared module-level mapping for every subsequent request.
    ret['categories'] = settings.NODE_CATEGORY_MAP.copy()
    ret['categories'].update({
        'project': 'Project'
    })

    return ret
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_addons(auth, node, **kwargs):
    """Render the addon-selection page context for a node.

    Builds a serialized config entry for each installed addon that is not
    system-added and not hard-excluded, flagging which ones are enabled on
    this node.
    """
    ret = _view_project(node, auth, primary=True)
    addon_settings = []
    # Addons the user may toggle: everything available minus system addons
    # and the hard-coded exclusions.
    addons_available = [addon for addon in settings.ADDONS_AVAILABLE
                        if addon not in settings.SYSTEM_ADDED_ADDONS['node']
                        and addon.short_name not in ('wiki', 'forward', 'twofactor')]
    for addon in addons_available:
        addon_config = apps.get_app_config('addons_{}'.format(addon.short_name))
        config = addon_config.to_json()
        config['template_lookup'] = addon_config.template_lookup
        config['addon_icon_url'] = addon_config.icon_url
        config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
        config['addon_short_name'] = addon.short_name
        config['addon_full_name'] = addon.full_name
        config['categories'] = addon.categories
        config['enabled'] = node.has_addon(addon.short_name)
        # osfstorage is always enabled and cannot be turned off.
        config['default'] = addon.short_name in ['osfstorage']
        addon_settings.append(config)
    # NOTE(review): sorts on 'full_name' while the key explicitly set above
    # is 'addon_full_name' -- presumably to_json() supplies 'full_name';
    # confirm against the addon app config.
    addon_settings = sorted(addon_settings, key=lambda addon: addon['full_name'].lower())
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    ret['addon_categories'] = set([item for addon in addon_settings for item in addon['categories']])
    ret['addon_settings'] = addon_settings
    # Only enabled addons contribute node-cfg JS bundles.
    ret['addon_js'] = collect_node_config_js([addon for addon in addon_settings if addon['enabled']])
    return ret
def collect_node_config_js(addons):
    """Collect webpack bundles for each of the addons' node-cfg.js modules. Return
    the URLs for each of the JS modules to be included on the node addons config page.

    :param list addons: List of node's addon config records.
    """
    # The original guarded on `if js_path:`, which is always truthy for a
    # freshly joined path string -- the real existence check was reverted
    # ("Check if asset path exists for addons"), so build the list directly.
    return [
        os.path.join('/', 'static', 'public', 'js', addon['short_name'], 'node-cfg.js')
        for addon in addons
    ]
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
    """Enable/disable addons on the node from the posted JSON selection."""
    selection = request.json
    node.config_addons(selection, auth)
@must_be_valid_project
@must_have_permission(READ)
def node_contributors(auth, node, **kwargs):
    """Render the contributors page context, including admin contributors."""
    context = _view_project(node, auth, primary=True)
    context.update({
        'contributors': utils.serialize_contributors(node.contributors, node),
        'adminContributors': utils.serialize_contributors(node.admin_contributors, node, admin=True),
    })
    return context
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
    """Set the node's comment level to 'public'/'private', or clear it."""
    comment_level = request.json.get('commentLevel')
    # Any non-empty value other than the two allowed levels is rejected.
    if comment_level and comment_level not in ('public', 'private'):
        raise HTTPError(http.BAD_REQUEST)
    node.comment_level = comment_level or None
    node.save()
##############################################################################
# View Project
##############################################################################
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def view_project(auth, node, **kwargs):
    """Render the main project page context.

    Extends _view_project()'s payload with addon static-asset URLs and
    pre-serialized widget data for the wiki, dataverse, forward, zotero,
    and mendeley addons.
    """
    # Requests through /api/v1 are secondary (AJAX) calls; primary-only
    # work (status messages etc.) is skipped for them.
    primary = '/api/v1' not in request.path
    ret = _view_project(node, auth,
                        primary=primary,
                        embed_contributors=True,
                        embed_descendants=True
                        )
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    # Collect the URIs to the static assets for addons that have widgets
    ret['addon_widget_js'] = list(collect_addon_js(
        node,
        filename='widget-cfg.js',
        config_entry='widget'
    ))
    ret.update(rubeus.collect_addon_assets(node))
    # One entry per addon with a pre-rendered widget; stays None when the
    # addon is not enabled on this node.
    addons_widget_data = {
        'wiki': None,
        'mendeley': None,
        'zotero': None,
        'forward': None,
        'dataverse': None
    }
    if 'wiki' in ret['addons']:
        wiki = node.get_addon('wiki')
        wiki_page = node.get_wiki_page('home')
        # Show "Read more" link if there are multiple pages or has > 400 characters
        more = len(node.wiki_pages_current.keys()) >= 2
        MAX_DISPLAY_LENGTH = 400
        rendered_before_update = False
        if wiki_page and wiki_page.html(node):
            wiki_html = BeautifulSoup(wiki_page.html(node))
            # NOTE(review): len() of a BeautifulSoup object counts top-level
            # parse-tree children, not characters, and slicing a soup is
            # unusual -- presumably intended as a 400-character truncation;
            # confirm before touching this logic.
            if len(wiki_html) > MAX_DISPLAY_LENGTH:
                wiki_html = BeautifulSoup(wiki_html[:MAX_DISPLAY_LENGTH] + '...', 'html.parser')
                more = True
            rendered_before_update = wiki_page.rendered_before_update
        else:
            wiki_html = None
        wiki_widget_data = {
            'complete': True,
            # `unicode` is Python 2 only.
            'wiki_content': unicode(wiki_html) if wiki_html else None,
            'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
            'rendered_before_update': rendered_before_update,
            'more': more,
            'include': False,
        }
        wiki_widget_data.update(wiki.config.to_json())
        addons_widget_data['wiki'] = wiki_widget_data
    if 'dataverse' in ret['addons']:
        node_addon = node.get_addon('dataverse')
        widget_url = node.api_url_for('dataverse_get_widget_contents')
        dataverse_widget_data = {
            'complete': node_addon.complete,
            'widget_url': widget_url,
        }
        dataverse_widget_data.update(node_addon.config.to_json())
        addons_widget_data['dataverse'] = dataverse_widget_data
    if 'forward' in ret['addons']:
        node_addon = node.get_addon('forward')
        forward_widget_data = serialize_settings(node_addon)
        forward_widget_data['complete'] = settings_complete(node_addon)
        forward_widget_data.update(node_addon.config.to_json())
        addons_widget_data['forward'] = forward_widget_data
    if 'zotero' in ret['addons']:
        node_addon = node.get_addon('zotero')
        zotero_widget_data = node_addon.config.to_json()
        zotero_widget_data.update({
            'complete': node_addon.complete,
            'list_id': node_addon.list_id,
        })
        addons_widget_data['zotero'] = zotero_widget_data
    if 'mendeley' in ret['addons']:
        node_addon = node.get_addon('mendeley')
        mendeley_widget_data = node_addon.config.to_json()
        mendeley_widget_data.update({
            'complete': node_addon.complete,
            'list_id': node_addon.list_id,
        })
        addons_widget_data['mendeley'] = mendeley_widget_data
    ret.update({'addons_widget_data': addons_widget_data})
    return ret
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
    """Reorders the components in a project's component list.

    :param-json list new_list: List of strings that include node GUIDs.
    """
    ordered_guids = request.get_json().get('new_list', [])
    # Relations to live children are reordered; relations to deleted
    # children are appended at the end so no relation is lost.
    node_relations = (
        node.node_relations
        .select_related('child')
        .filter(child__is_deleted=False)
    )
    deleted_node_relation_ids = list(
        node.node_relations.select_related('child')
        .filter(child__is_deleted=True)
        .values_list('pk', flat=True)
    )
    if len(ordered_guids) > len(node_relations):
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='Too many node IDs'))
    # Ordered NodeRelation pks, sorted according the order of guids passed in the request payload
    # NOTE(review): ordered_guids.index() raises an uncaught ValueError when
    # a live child's guid is missing from the payload -- confirm upstream
    # validation before relying on the 400 path below.
    new_node_relation_ids = [
        each.id for each in sorted(node_relations,
                                   key=lambda nr: ordered_guids.index(nr.child._id))
    ]
    if len(node_relations) == len(ordered_guids):
        node.set_noderelation_order(new_node_relation_ids + deleted_node_relation_ids)
        node.save()
        return {'nodes': ordered_guids}
    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def project_statistics(auth, node, **kwargs):
    """Render the analytics page context, exposing the Keen read key."""
    context = _view_project(node, auth, primary=True)
    context['node']['keenio_read_key'] = node.keenio_read_key
    return context
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
    """Set the node's privacy from the 'permissions' URL kwarg."""
    permissions = kwargs.get('permissions')
    if permissions is None:
        raise HTTPError(http.BAD_REQUEST)

    try:
        node.set_privacy(permissions, auth)
    except NodeStateError as e:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_short="Can't change privacy",
            message_long=e.message
        ))

    return {'status': 'success', 'permissions': permissions}
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
    """Update writable node attributes from the request JSON.

    node.update() consults node.WRITABLE_WHITELIST, which limits write
    contributors to modifying category, title, and description.
    """
    payload = r_strip_html(request.get_json())
    try:
        changed = node.update(payload, auth=auth)
    except NodeUpdateError as e:
        raise HTTPError(400, data=dict(
            message_short="Failed to update attribute '{0}'".format(e.key),
            message_long=e.reason
        ))
    # Tags are cast to strings to stay JSON-serializable; bookkeeping fields
    # are not echoed back.
    skipped = ('logs', 'modified', 'last_logged')
    updated = {}
    for key in changed:
        if key in skipped:
            continue
        updated[key] = [str(tag) for tag in node.tags] if key == 'tags' else getattr(node, key)
    return {'updated_fields': updated}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.
    """
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Error',
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    node.save()

    status.push_status_message(
        '{} has been successfully deleted.'.format(node.project_or_component.capitalize()),
        kind='success',
        trust=False,
    )

    parent = node.parent_node
    if parent and parent.can_view(auth):
        return {'url': node.parent_node.url}
    return {'url': '/dashboard/'}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
    """Soft-delete a view-only link and log the removal on every node it covered."""
    link_id = request.json['private_link_id']
    try:
        link = PrivateLink.objects.get(_id=link_id)
    except PrivateLink.DoesNotExist:
        raise HTTPError(http.NOT_FOUND)

    link.is_deleted = True
    link.save()

    auth = kwargs.get('auth')
    for covered_node in link.nodes.all():
        covered_node.add_log(
            NodeLog.VIEW_ONLY_LINK_REMOVED,
            {
                'project': covered_node.parent_id,
                'node': covered_node._id,
                'user': auth.user._id,
                'anonymous_link': link.anonymous,
            },
            auth=auth,
        )
# TODO: Split into separate functions
def _render_addons(addons):
widgets = {}
configs = {}
js = []
css = []
for addon in addons:
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, contributor):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = node.get_wiki_page('home', None)
if contributor and contributor.write and not node.is_registration:
return has_wiki
else:
return has_wiki and wiki_page and wiki_page.html(node)
def _view_project(node, auth, primary=False,
                  embed_contributors=False, embed_descendants=False,
                  embed_forks=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param Node node: node being viewed
    :param Auth auth: consolidated authorization (may carry a view-only key)
    :param bool primary: True for the primary page request; triggers addon
        before_page_load status messages
    :param bool embed_contributors: serialize visible contributors
    :param bool embed_descendants: serialize readable descendants
    :param bool embed_registrations: serialize the node's registrations
    :param bool embed_forks: serialize the node's forks
    :return: dict with 'node', 'parent_node', 'user', and addon keys
    """
    # Re-fetch with contributor guids eagerly included to avoid N+1 queries.
    node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
    user = auth.user
    try:
        contributor = node.contributor_set.get(user=user)
    except Contributor.DoesNotExist:
        contributor = None
    parent = node.find_readable_antecedent(auth)
    if user:
        bookmark_collection = find_bookmark_collection(user)
        bookmark_collection_id = bookmark_collection._id
        in_bookmark_collection = bookmark_collection.linked_nodes.filter(pk=node.pk).exists()
    else:
        in_bookmark_collection = False
        bookmark_collection_id = ''
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    addons = list(node.get_addons())
    widgets, configs, js, css = _render_addons(addons)
    redirect_url = node.url + '?view_only=None'
    node_linked_preprint = node.linked_preprint
    # Pending-embargo check runs second, so it wins when both are pending.
    disapproval_link = ''
    if (node.is_pending_registration and node.has_permission(user, ADMIN)):
        disapproval_link = node.root.registration_approval.stashed_urls.get(user._id, {}).get('reject', '')
    if (node.is_pending_embargo and node.has_permission(user, ADMIN)):
        disapproval_link = node.root.embargo.stashed_urls.get(user._id, {}).get('reject', '')
    # Before page load callback; skip if not primary call
    if primary:
        for addon in addons:
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message, kind='info', dismissible=False, trust=True)
    NodeRelation = apps.get_model('osf.NodeRelation')
    is_registration = node.is_registration
    data = {
        'node': {
            'disapproval_link': disapproval_link,
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'category_short': node.category,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'license': serialize_node_license_record(node.license),
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            'update_url': node.api_url_for('update_node'),
            'in_dashboard': in_bookmark_collection,
            'is_public': node.is_public,
            'is_archiving': node.archiving,
            'date_created': iso8601format(node.created),
            'date_modified': iso8601format(node.last_logged) if node.last_logged else '',
            'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
            'children': node.nodes_active.exists(),
            'child_exists': Node.objects.get_children(node, active=True).exists(),
            # Registration-only fields default to False/''/None otherwise.
            'is_registration': is_registration,
            'is_pending_registration': node.is_pending_registration if is_registration else False,
            'is_retracted': node.is_retracted if is_registration else False,
            'is_pending_retraction': node.is_pending_retraction if is_registration else False,
            'retracted_justification': getattr(node.retraction, 'justification', None) if is_registration else None,
            'date_retracted': iso8601format(getattr(node.retraction, 'date_retracted', None)) if is_registration else '',
            'embargo_end_date': node.embargo_end_date.strftime('%A, %b %d, %Y') if is_registration and node.embargo_end_date else '',
            'is_pending_embargo': node.is_pending_embargo if is_registration else False,
            'is_embargoed': node.is_embargoed if is_registration else False,
            'is_pending_embargo_termination': is_registration and node.is_embargoed and (
                node.embargo_termination_approval and
                node.embargo_termination_approval.is_pending_approval
            ),
            'registered_from_url': node.registered_from.url if is_registration else '',
            'registered_date': iso8601format(node.registered_date) if is_registration else '',
            'root_id': node.root._id if node.root else None,
            'registered_meta': node.registered_meta,
            'registered_schemas': serialize_meta_schemas(list(node.registered_schema.all())) if is_registration else False,
            # Fork-only fields default to '' otherwise.
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
            'fork_count': node.forks.exclude(type='osf.registration').filter(is_deleted=False).count(),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'templated_count': node.templated_list.count(),
            'linked_nodes_count': NodeRelation.objects.filter(child=node, is_node_link=True).exclude(parent__type='osf.collection').count(),
            'anonymous': anonymous,
            'comment_level': node.comment_level,
            'has_comments': node.comment_set.exists(),
            'identifiers': {
                'doi': node.get_identifier_value('doi'),
                'ark': node.get_identifier_value('ark'),
            },
            'institutions': get_affiliated_institutions(node) if node else [],
            'has_draft_registrations': node.has_active_draft_registrations,
            'is_preprint': node.is_preprint,
            'has_moderated_preprint': node_linked_preprint.provider.reviews_workflow if node_linked_preprint else '',
            'preprint_state': node_linked_preprint.reviews_state if node_linked_preprint else '',
            'preprint_word': node_linked_preprint.provider.preprint_word if node_linked_preprint else '',
            'preprint_provider': {
                'name': node_linked_preprint.provider.name,
                'workflow': node_linked_preprint.provider.reviews_workflow
            } if node_linked_preprint else {},
            'is_preprint_orphan': node.is_preprint_orphan,
            'has_published_preprint': node.preprints.filter(is_published=True).exists() if node else False,
            'preprint_file_id': node.preprint_file._id if node.preprint_file else None,
            'preprint_url': node.preprint_url
        },
        'parent_node': {
            'exists': parent is not None,
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'category': parent.category_display if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'registrations_url': parent.web_url_for('node_registrations') if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': parent.can_view(auth) if parent else False,
        },
        'user': {
            'is_contributor': bool(contributor),
            'is_admin': bool(contributor) and contributor.admin,
            'is_admin_parent': parent.is_admin_parent(user) if parent else False,
            'can_edit': bool(contributor) and contributor.write and not node.is_registration,
            'can_edit_tags': bool(contributor) and contributor.write,
            'has_read_permissions': node.has_permission(user, READ),
            'permissions': get_contributor_permissions(contributor, as_list=True) if contributor else [],
            'id': user._id if user else None,
            'username': user.username if user else None,
            'fullname': user.fullname if user else '',
            'can_comment': bool(contributor) or node.can_comment(auth),
            'show_wiki_widget': _should_show_wiki_widget(node, contributor),
            'dashboard_id': bookmark_collection_id,
            'institutions': get_affiliated_institutions(user) if user else [],
        },
        # TODO: Namespace with nested dicts
        'addons_enabled': [each.short_name for each in addons],
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
        'node_categories': [
            {'value': key, 'display_name': value}
            for key, value in settings.NODE_CATEGORY_MAP.iteritems()
        ]
    }
    # Anonymous (view-only) visitors only see contributor guids, not names.
    if embed_contributors and not anonymous:
        data['node']['contributors'] = utils.serialize_visible_contributors(node)
    else:
        data['node']['contributors'] = list(node.contributors.values_list('guids___id', flat=True))
    if embed_descendants:
        descendants, all_readable = _get_readable_descendants(auth=auth, node=node)
        data['user']['can_sort'] = all_readable
        data['node']['descendants'] = [
            serialize_node_summary(node=each, auth=auth, primary=not node.has_node_link_to(each), show_path=False)
            for each in descendants
        ]
    if embed_registrations:
        data['node']['registrations'] = [
            serialize_node_summary(node=each, auth=auth, show_path=False)
            for each in node.registrations_all.order_by('-registered_date').exclude(is_deleted=True).annotate(nlogs=Count('logs'))
        ]
    if embed_forks:
        data['node']['forks'] = [
            serialize_node_summary(node=each, auth=auth, show_path=False)
            for each in node.forks.exclude(type='osf.registration').exclude(is_deleted=True).order_by('-forked_date').annotate(nlogs=Count('logs'))
        ]
    return data
def get_affiliated_institutions(obj):
    """Serialize the affiliated institutions of ``obj`` (a user or node).

    :param obj: object exposing an ``affiliated_institutions`` manager
    :return: list of dicts with ``name``, ``logo_path`` and ``id``
    """
    return [
        {
            'name': institution.name,
            'logo_path': institution.logo_path,
            'id': institution._id,
        }
        for institution in obj.affiliated_institutions.all()
    ]
def serialize_children(child_list, nested, indent=0):
    """
    Returns the serialized representation of a list of child nodes.

    This is a helper function for _get_children and as such it does not
    redundantly check permissions. Children are emitted depth-first so the
    resulting flat list keeps each child directly below its parent.
    """
    serialized = []
    for node in child_list:
        serialized.append({
            'id': node._id,
            'title': node.title,
            'is_public': node.is_public,
            'parent_id': node.parentnode_id,
            'indent': indent,
        })
        # ``nested`` may be a defaultdict, so guard with a membership test and
        # use ``.get`` to avoid inserting empty entries while recursing.
        if node._id in nested:
            serialized.extend(serialize_children(nested.get(node._id), nested, indent + 1))
    return serialized
def _get_children(node, auth):
    """
    Returns the serialized representation of the given node and all of its children
    for which the given user has ADMIN permission.

    :param node: parent Node whose descendants are collected
    :param auth: Auth object; ``auth.user`` is the user whose admin access is checked
    :return: flat list of serialized child dicts (see ``serialize_children``)
    """
    # Subquery: does auth.user have an admin Contributor row on the child?
    is_admin = Contributor.objects.filter(node=OuterRef('pk'), admin=True, user=auth.user)
    # Subquery: guid of the child's real (non node-link) parent.
    parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
    children = (Node.objects.get_children(node)
                .filter(is_deleted=False)
                .annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
                .annotate(has_admin_perm=Exists(is_admin))
                .filter(has_admin_perm=True))
    # Group children by parent guid so serialize_children can recurse without
    # issuing further queries.
    nested = defaultdict(list)
    for child in children:
        nested[child.parentnode_id].append(child)
    return serialize_children(nested[node._id], nested)
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
    """Return the active view-only links of ``node`` for table display."""
    links = [link.to_json() for link in node.private_links_active]
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': links,
        },
    }
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
    """Return ``node`` and the children the requesting user can administer."""
    return {
        'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
        'children': _get_children(node, auth),
    }
@must_be_valid_project
def get_recent_logs(node, **kwargs):
    """Return the primary keys of the three most recent logs of ``node``."""
    recent_first = list(reversed(node.logs._to_primary_keys()))
    return {'logs': recent_first[:3]}
def _get_readable_descendants(auth, node, permission=None):
    """Collect the direct children of ``node`` that ``auth.user`` may see.

    :param auth: Auth object for the requesting user
    :param node: parent node
    :param permission: optional permission name; children on which the user
        lacks it are skipped entirely
    :return: tuple ``(descendants, all_readable)`` where ``all_readable`` is
        False as soon as any child had to be skipped or hidden
    """
    descendants = []
    all_readable = True
    for child in node.get_nodes(is_deleted=False):
        if permission:
            perm = permission.lower().strip()
            if not child.has_permission(auth.user, perm):
                all_readable = False
                continue
        # User can view child
        if child.can_view(auth):
            descendants.append(child)
        # Child is a node link and user has write permission
        elif node.linked_nodes.filter(id=child.id).exists():
            if node.has_permission(auth.user, 'write'):
                descendants.append(child)
            else:
                all_readable = False
        else:
            all_readable = False
            # Even though the child itself is hidden, any of its descendants
            # that the user CAN read are still surfaced.
            for descendant in child.find_readable_descendants(auth):
                descendants.append(descendant)
    return descendants, all_readable
def serialize_child_tree(child_list, user, nested):
    """
    Recursively serializes and returns a list of child nodes.

    This is a helper function for node_child_tree and as such it does not
    redundantly check permissions. Entries with the most children sort first.
    """
    serialized = []
    for child in child_list:
        contributors = []
        for contributor in child.contributor_set.all():
            contributors.append({
                'id': contributor.user._id,
                'is_admin': contributor.admin,
                'is_confirmed': contributor.user.is_confirmed,
                'visible': contributor.visible,
            })
        # ``nested`` may be a defaultdict; membership test + ``.get`` avoids
        # inserting empty entries during recursion.
        if child._id in nested:
            children = serialize_child_tree(nested.get(child._id), user, nested)
        else:
            children = []
        serialized.append({
            'node': {
                'id': child._id,
                'url': child.url,
                'title': child.title,
                'is_public': child.is_public,
                'contributors': contributors,
                'is_admin': child.has_admin_perm,
            },
            'user_id': user._id,
            'children': children,
            'nodeType': 'component' if child.parentnode_id else 'project',
            'category': child.category,
            'permissions': {
                'view': True,
                'is_admin': child.has_admin_perm,
            },
        })
    serialized.sort(key=lambda entry: len(entry['children']), reverse=True)
    return serialized
def node_child_tree(user, node):
    """
    Returns the serialized representation (for treebeard) of a given node and its children.

    The given user must have ADMIN access on the given node, and therefore the given user has
    implicit read permission on all of node's children (i.e. read permissions aren't checked here)

    :param user: OSFUser object
    :param node: parent project Node object
    :return: treebeard-formatted data
    """
    # Validate before dereferencing ``node``: the assert previously ran after
    # ``node.is_contributor`` and its message itself dereferenced ``node._id``,
    # so a falsy node crashed with AttributeError instead of asserting.
    assert node, '{} is not a valid Node.'.format(node)
    serialized_nodes = []
    is_contrib = node.is_contributor(user)
    if not is_contrib:
        return []
    is_admin = node.has_permission(user, ADMIN)
    if is_admin:
        # Subqueries: admin-contributor check and parent guid per child.
        is_admin_sqs = Contributor.objects.filter(node=OuterRef('pk'), admin=True, user=user)
        parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
        children = (Node.objects.get_children(node)
                    .filter(is_deleted=False)
                    .annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
                    .annotate(has_admin_perm=Exists(is_admin_sqs))
                    .include('contributor__user__guids')
                    )
    else:
        children = []
    # Group children by their parent guid for recursive serialization.
    nested = defaultdict(list)
    for child in children:
        nested[child.parentnode_id].append(child)
    contributors = [{
        'id': contributor.user._id,
        'is_admin': node.has_permission(contributor.user, ADMIN),
        'is_confirmed': contributor.user.is_confirmed,
        'visible': contributor.visible
    } for contributor in node.contributor_set.all().include('user__guids')]
    serialized_nodes.append({
        'node': {
            'id': node._id,
            'url': node.url,
            'title': node.title,
            'is_public': node.is_public,
            'contributors': contributors,
            'is_admin': is_admin
        },
        'user_id': user._id,
        'children': serialize_child_tree(nested.get(node._id), user, nested) if node._id in nested.keys() else [],
        # use the READ constant for consistency with the rest of the module
        'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
        'nodeType': node.project_or_component,
        'category': node.category,
        'permissions': {
            'view': True,
            'is_admin': is_admin
        }
    })
    return serialized_nodes
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
    """Return the treebeard-formatted tree for the requested project."""
    target = kwargs.get('node') or kwargs['project']
    return node_child_tree(auth.user, target)
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
    """Create a new view-only (private) link and attach it to the node and
    its selected children."""
    json_body = request.json
    node_ids = json_body.get('node_ids', [])
    name = json_body.get('name', '')
    anonymous = json_body.get('anonymous', False)
    # The target node itself is always the first entry.
    if node._id not in node_ids:
        node_ids.insert(0, node._id)
    nodes = [AbstractNode.load(node_id) for node_id in node_ids]
    try:
        new_link = new_private_link(
            name=name, user=auth.user, nodes=nodes, anonymous=anonymous
        )
    except ValidationError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=e.message)
        )
    return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
    """Rename an existing view-only link; returns the sanitized new name."""
    name = request.json.get('value', '')
    try:
        validate_title(name)
    except ValidationError as e:
        # Re-word the generic title error for the link-name context.
        if e.message == 'Invalid title.':
            message = 'Invalid link name.'
        else:
            message = e.message
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=message)
        )
    private_link = PrivateLink.load(request.json.get('pk', ''))
    if not private_link:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='View-only link not found.')
        )
    new_name = strip_html(name)
    private_link.name = new_name
    private_link.save()
    return new_name
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
data = {
'id': node._id,
'title': node.title,
'etal': len(node.visible_contributors) > 1,
'isRegistration': node.is_registration
}
if node.is_registration:
data['title'] += ' (registration)'
data['dateRegistered'] = node.registered_date.isoformat()
else:
data['dateCreated'] = node.created.isoformat()
data['dateModified'] = node.modified.isoformat()
first_author = node.visible_contributors[0]
data['firstAuthor'] = first_author.family_name or first_author.given_name or first_author.fullname
return data
@must_be_logged_in
def search_node(auth, **kwargs):
    """Search nodes by title for the add-pointer dialog.

    Request JSON: ``query`` (title substring), ``nodeId`` (node whose own
    tree is excluded), ``includePublic``, ``size`` (page size), ``page``.
    :return: dict with serialized ``nodes``, ``total``, ``pages`` and ``page``
    """
    # Get arguments
    node = AbstractNode.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    size = float(request.json.get('size', '5').strip())
    page = request.json.get('page', 0)
    query = request.json.get('query', '').strip()
    start = (page * size)
    if not query:
        return {'nodes': []}
    # Exclude current node from query if provided
    nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []
    can_view_query = Q(_contributors=auth.user)
    if include_public:
        can_view_query = can_view_query | Q(is_public=True)
    nodes = (AbstractNode.objects
             .filter(
                 can_view_query,
                 title__icontains=query,
                 is_deleted=False)
             .exclude(id__in=nin)
             .exclude(type='osf.collection')
             .exclude(type='osf.quickfilesnode'))
    count = nodes.count()
    # float division keeps partial pages (ceil rounds them up)
    pages = math.ceil(count / size)
    validate_page_num(page, pages)
    return {
        'nodes': [
            _serialize_node_search(each)
            # fix: islice only accepts integer (or None) bounds; the float
            # ``size``/``start`` raised ValueError on every non-empty query
            for each in islice(nodes, int(start), int(start + size))
            if each.contributors
        ],
        'total': count,
        'pages': pages,
        'page': page
    }
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node using only JSON parameters"""
    to_node_id = request.json.get('toNodeID')
    pointer_to_move = request.json.get('pointerID')
    # Both the target node and the pointer must be supplied.
    if not to_node_id or not pointer_to_move:
        raise HTTPError(http.BAD_REQUEST)
    pointer = AbstractNode.load(pointer_to_move)
    to_node = AbstractNode.load(to_node_id)
    try:
        _add_pointers(to_node, [pointer], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
    """Add pointers to a node from a JSON list of node ids."""
    node_ids = request.json.get('nodeIds')
    if not node_ids:
        raise HTTPError(http.BAD_REQUEST)
    pointer_nodes = [AbstractNode.load(node_id) for node_id in node_ids]
    try:
        _add_pointers(node, pointer_nodes, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    # TODO: since these a delete request, shouldn't use request body. put pointer
    # id in the URL instead
    pointer_id = request.json.get('pointerId')
    pointer = AbstractNode.load(pointer_id) if pointer_id is not None else None
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
    """Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
    or not present in `nodes`.

    :param Auth auth: Consolidated authorization
    :param Node node: root from which pointer is child
    :return: Fork of node to which nodelink(pointer) points
    """
    NodeRelation = apps.get_model('osf.NodeRelation')
    linked_node = AbstractNode.load(request.json.get('nodeId'))
    pointer = NodeRelation.objects.filter(child=linked_node, is_node_link=True, parent=node).first()
    if pointer is None:
        # TODO: Change this to 404?
        raise HTTPError(http.BAD_REQUEST)
    try:
        fork = node.fork_pointer(pointer, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    serialized = serialize_node_summary(node=fork, auth=auth, show_path=False)
    return {'data': {'node': serialized}}, http.CREATED
def abbrev_authors(node):
    """Return an abbreviated author string for ``node``.

    Uses the lead visible contributor's family name (falling back to given
    name, then full name) and appends ' et al.' when there is more than one
    visible contributor.
    """
    contributors = node.visible_contributors
    lead = contributors[0]
    abbreviated = lead.family_name or lead.given_name or lead.fullname
    if contributors.count() > 1:
        abbreviated += ' et al.'
    return abbreviated
def serialize_pointer(node, auth):
    """Serialize a pointed-to node, hiding details the viewer cannot see."""
    if not node.can_view(auth):
        # Viewer has no read access: expose only a placeholder.
        return {
            'url': None,
            'title': 'Private Component',
            'authorShort': 'Private Author(s)',
        }
    return {
        'id': node._id,
        'url': node.url,
        'title': node.title,
        'authorShort': abbrev_authors(node),
    }
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
    """View that returns the pointers for a project."""
    NodeRelation = apps.get_model('osf.NodeRelation')
    # exclude folders (collections)
    relations = NodeRelation.objects.filter(child=node, is_node_link=True).exclude(parent__type='osf.collection')
    return {'pointed': [serialize_pointer(relation.parent, auth) for relation in relations]}
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
    installed).

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        torch.manual_seed(seed)
        # safe to call even when CUDA is not available
        torch.cuda.manual_seed_all(seed)
    if is_tf_available():
        tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Targets to be matched.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    # Multi-output models produce a tuple of label arrays, so the annotation
    # mirrors ``predictions`` (matches the second copy of this class in file).
    label_ids: Union[np.ndarray, Tuple[np.ndarray]]
class EvalLoopOutput(NamedTuple):
    """
    Output of an evaluation loop.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`, `optional`): Targets to be matched.
        metrics (:obj:`Dict[str, float]`, `optional`): Computed metrics.
        num_samples (:obj:`int`, `optional`): Number of samples seen.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    # Tuple allowed for multi-output labels, mirroring ``predictions``
    # (consistent with the second copy of this class in the file).
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
    num_samples: Optional[int]
class PredictionOutput(NamedTuple):
    """
    Output of a predict call.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`, `optional`): Targets to be matched.
        metrics (:obj:`Dict[str, float]`, `optional`): Computed metrics.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    # Tuple allowed for multi-output labels, mirroring ``predictions``
    # (consistent with the second copy of this class in the file).
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Return type of a training run: final global step, average training loss and aggregated metrics."""

    global_step: int
    training_loss: float
    metrics: Dict[str, float]
# Prefix used for checkpoint directory names, e.g. ``checkpoint-500``.
PREFIX_CHECKPOINT_DIR = "checkpoint"
# Matches a checkpoint directory name and captures its global step.
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
    """Return the path of the most recent ``checkpoint-<step>`` subdirectory of ``folder``, or ``None`` if there is none."""
    candidates = [
        name
        for name in os.listdir(folder)
        if _re_checkpoint.search(name) is not None and os.path.isdir(os.path.join(folder, name))
    ]
    if not candidates:
        return
    # The latest checkpoint is the one with the numerically largest step.
    latest = max(candidates, key=lambda name: int(_re_checkpoint.search(name).groups()[0]))
    return os.path.join(folder, latest)
class IntervalStrategy(ExplicitEnum):
    """How often an action (logging/evaluation/saving) is performed during training."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
    # NOTE(review): duplicates IntervalStrategy's values — presumably kept as a
    # backward-compatible alias; confirm deprecation status before removing.
    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
    """When/what to push to the model hub during training."""

    END = "end"
    EVERY_SAVE = "every_save"
    CHECKPOINT = "checkpoint"
    ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).

    Parameters:
        run_id (:obj:`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (:obj:`float`):
            The objective that was obtained for this run.
        hyperparameters (:obj:`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """

    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
    metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.

    Args:
        metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.

    Return:
        :obj:`float`: The objective to minimize or maximize
    """
    # Work on a copy so the caller's dict is never mutated.
    remaining = copy.deepcopy(metrics)
    loss = remaining.pop("eval_loss", None)
    remaining.pop("epoch", None)
    # Speed metrics do not belong in the objective.
    for key in [k for k in remaining if k.endswith("_runtime") or k.endswith("_per_second")]:
        remaining.pop(key, None)
    return sum(remaining.values()) if remaining else loss
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space: learning rate, number of epochs, seed and per-device batch size."""
    from .integrations import is_optuna_available

    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    search_space = {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
    }
    return search_space
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space: learning rate, number of epochs, seed and per-device batch size."""
    from .integrations import is_ray_tune_available

    assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`"
    from ray import tune

    search_space = {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
    }
    return search_space
def default_hp_space_sigopt(trial):
    """Default SigOpt search space: learning rate, number of epochs, seed and per-device batch size.

    Returns the list-of-parameter-dicts format expected by the SigOpt experiments API.
    """
    return [
        # Fix: the SigOpt API expects the key "transformation" (was misspelled "transformamtion").
        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
        {
            "categorical_values": ["4", "8", "16", "32", "64"],
            "name": "per_device_train_batch_size",
            "type": "categorical",
        },
    ]
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""

    OPTUNA = "optuna"
    RAY = "ray"
    SIGOPT = "sigopt"
# Maps each hyperparameter-search backend to its default search-space factory.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
    HPSearchBackend.SIGOPT: default_hp_space_sigopt,
}
def is_main_process(local_rank):
    """
    Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
    `local_rank`.
    """
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.get_ordinal() == 0
    # -1 means distributed training is not in use.
    return local_rank in (-1, 0)
def total_processes_number(local_rank):
    """
    Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
    """
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.xrt_world_size()
    if is_sagemaker_dp_enabled():
        import smdistributed.dataparallel.torch.distributed as dist

        return dist.get_world_size()
    if local_rank != -1 and is_torch_available():
        import torch

        return torch.distributed.get_world_size()
    # Single-process training.
    return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    """
    Measure and return speed performance metrics.

    This function requires a time snapshot `start_time` before the operation to be measured starts and this function
    should be run immediately after the operation to be measured has completed.

    Args:
    - split: name to prefix metric (like train, eval, test...)
    - start_time: operation start time
    - num_samples: number of samples processed
    - num_steps: number of steps processed
    """
    elapsed = time.time() - start_time
    metrics = {f"{split}_runtime": round(elapsed, 4)}
    if num_samples is not None:
        metrics[f"{split}_samples_per_second"] = round(num_samples / elapsed, 3)
    if num_steps is not None:
        metrics[f"{split}_steps_per_second"] = round(num_steps / elapsed, 3)
    return metrics
class SchedulerType(ExplicitEnum):
    """Names of the supported learning-rate scheduler types."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.

    This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.

    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.

    Example ::

        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        code ...
        metrics = {"train_runtime": 10.5}
        self._memory_tracker.stop_and_update_metrics(metrics)

    At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.

    To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
    """

    # map trainer methods to metrics prefix
    stages = {
        "__init__": "init",
        "train": "train",
        "evaluate": "eval",
        "predict": "test",
    }

    def __init__(self, skip_memory_metrics=False):
        # When True (explicitly or because psutil is missing) every public
        # method short-circuits, making the tracker a no-op.
        self.skip_memory_metrics = skip_memory_metrics

        if not is_psutil_available():
            # soft dependency on psutil
            self.skip_memory_metrics = True

        if self.skip_memory_metrics:
            return

        import psutil  # noqa

        if is_torch_cuda_available():
            import torch

            self.torch = torch
            self.gpu = {}
        else:
            # ``self.torch is None`` is used throughout as "no GPU tracking".
            self.torch = None

        self.process = psutil.Process()

        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        # Two frames up: the Trainer method that called start()/stop_and_update_metrics().
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(
                f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
            )

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Runs on a daemon thread; busy-polls RSS to capture the CPU peak
        # until ``self.peak_monitoring`` is cleared by ``stop``.
        self.cpu_mem_used_peak = -1

        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001) # 1msec

            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return

        stage = self.derive_stage()
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return

        self.cur_stage = stage

        gc.collect()

        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()

        # gpu
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()

        # cpu
        self.cpu_mem_used_at_start = self.cpu_mem_used()

        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return

        # this sends a signal to peak_monitor_func to complete its loop
        self.peak_monitoring = False

        # first ensure all objects get collected and their memory is freed
        gc.collect()

        if self.torch is not None:
            self.torch.cuda.empty_cache()

        # concepts:
        # - alloc_delta: the difference of allocated memory between the end and the start
        # - peaked_delta: the difference between the peak memory and the current memory
        # in order to know how much memory the measured code consumed one needs to sum these two

        # gpu
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(
                begin=self.gpu_mem_used_at_start,
                end=self.gpu_mem_used_now,
                alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
                peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
            )

        # cpu
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(
            begin=self.cpu_mem_used_at_start,
            end=self.cpu_mem_used_now,
            alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
            peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
        )

        # reset - cycle finished
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return

        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return

        # since we don't have a way to return init metrics, we push them into the first of train/val/predict
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, "init")
            self.init_reported = True

        for stage in stages:
            for t in ["alloc", "peaked"]:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]

        # if we need additional debug info, enable the following
        # for t in ["begin", "end"]:
        #     if stage in self.cpu and t in self.cpu[stage]:
        #         metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
        #     if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
        #         metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]

        # since memory can be allocated before init, and it might be difficult to track overall
        # memory usage, in particular for GPU, let's report memory usage at the point init was called
        if stages[0] == "init":
            metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
            if self.torch is not None:
                metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
            # if we also wanted to report any additional memory allocations in between init and
            # whatever the next stage was we could also report this:
            # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
            #     metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
            # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
            #     metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return

        stage = self.derive_stage()
        self.stop(stage)

        # init doesn't have metrics to update so we just save that data for later stages to retrieve
        if metrics is not None:
            self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
    """
    Recursively convert numpy scalars (and 1-element torch tensors) inside nested
    lists/tuples/dicts into plain Python numbers via ``.item()``.
    """
    if isinstance(metrics, (list, tuple)):
        # Preserve the container type while recursing.
        return type(metrics)(denumpify_detensorize(m) for m in metrics)
    if isinstance(metrics, dict):
        return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
    if isinstance(metrics, np.generic):
        return metrics.item()
    if is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
        return metrics.item()
    return metrics
def number_of_arguments(func):
    """
    Return the number of arguments of the passed function, even if it's a partial function.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    # For partials: declared parameters minus the positional and keyword
    # arguments already bound.
    declared = len(inspect.signature(func.func).parameters)
    return declared - len(func.args) - len(func.keywords)
class ShardedDDPOption(ExplicitEnum):
    """Options controlling sharded data-parallel (DDP) training."""

    SIMPLE = "simple"
    ZERO_DP_2 = "zero_dp_2"
    ZERO_DP_3 = "zero_dp_3"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
add Tuple as possible type hint for EvalPredictions label_ids (#14473)
* Update trainer_utils.py
* add Tuple type hints to all label_ids outputs
affects EvalLoopOutput and PredictionOutput
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
    """
    Seed ``random``, ``numpy`` and — when installed — ``torch`` and ``tensorflow``
    so that runs are reproducible.

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # safe even without CUDA
    if is_tf_available():
        tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Targets to be matched.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Union[np.ndarray, Tuple[np.ndarray]]
class EvalLoopOutput(NamedTuple):
    """
    Output of an evaluation loop.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`, `optional`): Targets to be matched.
        metrics (:obj:`Dict[str, float]`, `optional`): Computed metrics.
        num_samples (:obj:`int`, `optional`): Number of samples seen.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
    num_samples: Optional[int]
class PredictionOutput(NamedTuple):
    """
    Output of a predict call.

    Parameters:
        predictions (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray` or tuple of :obj:`np.ndarray`, `optional`): Targets to be matched.
        metrics (:obj:`Dict[str, float]`, `optional`): Computed metrics.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Return type of a training run: final global step, average training loss and aggregated metrics."""

    global_step: int
    training_loss: float
    metrics: Dict[str, float]
# Prefix used for checkpoint directory names, e.g. ``checkpoint-500``.
PREFIX_CHECKPOINT_DIR = "checkpoint"
# Matches a checkpoint directory name and captures its global step.
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
    """Find the ``checkpoint-<step>`` subdirectory of ``folder`` with the highest step, or ``None``."""
    best_step = -1
    best_name = None
    for name in os.listdir(folder):
        match = _re_checkpoint.search(name)
        if match is None or not os.path.isdir(os.path.join(folder, name)):
            continue
        step = int(match.groups()[0])
        if step > best_step:
            best_step, best_name = step, name
    if best_name is None:
        return
    return os.path.join(folder, best_name)
class IntervalStrategy(ExplicitEnum):
    """How often an action (logging/evaluation/saving) is performed during training."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
    # NOTE(review): duplicates IntervalStrategy's values — presumably kept as a
    # backward-compatible alias; confirm deprecation status before removing.
    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
    """When/what to push to the model hub during training."""

    END = "end"
    EVERY_SAVE = "every_save"
    CHECKPOINT = "checkpoint"
    ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).

    Parameters:
        run_id (:obj:`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (:obj:`float`):
            The objective that was obtained for this run.
        hyperparameters (:obj:`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """

    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
    metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.

    Args:
        metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.

    Return:
        :obj:`float`: The objective to minimize or maximize
    """
    # Keep everything except the loss, the epoch counter and speed metrics;
    # the input dict is never mutated.
    filtered = {
        k: v
        for k, v in metrics.items()
        if k not in ("eval_loss", "epoch") and not k.endswith("_runtime") and not k.endswith("_per_second")
    }
    if filtered:
        return sum(filtered.values())
    return metrics.get("eval_loss", None)
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space: learning rate, epochs, seed, and per-device batch size."""
    from .integrations import is_optuna_available

    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    space = {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
    }
    space["per_device_train_batch_size"] = trial.suggest_categorical(
        "per_device_train_batch_size", [4, 8, 16, 32, 64]
    )
    return space
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space: learning rate, epochs, seed, and per-device batch size."""
    from .integrations import is_ray_tune_available

    assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
    from ray import tune

    space = {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
    }
    space["per_device_train_batch_size"] = tune.choice([4, 8, 16, 32, 64])
    return space
def default_hp_space_sigopt(trial):
    """Default SigOpt search space as a list of SigOpt parameter definitions.

    The ``trial`` argument is unused; it is accepted for signature parity with the
    other ``default_hp_space_*`` backends.
    """
    return [
        # Fixed typo: the SigOpt parameter API expects the key "transformation"
        # (was "transformamtion", which SigOpt would reject/ignore).
        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
        {
            "categorical_values": ["4", "8", "16", "32", "64"],
            "name": "per_device_train_batch_size",
            "type": "categorical",
        },
    ]
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""
    OPTUNA = "optuna"
    RAY = "ray"
    SIGOPT = "sigopt"
# Maps each hyperparameter-search backend to the function producing its default search space.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
    HPSearchBackend.SIGOPT: default_hp_space_sigopt,
}
def is_main_process(local_rank):
    """
    Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
    `local_rank`.
    """
    if not is_torch_tpu_available():
        # -1 means "not distributed"; 0 is the local main process.
        return local_rank in [-1, 0]
    import torch_xla.core.xla_model as xm

    return xm.get_ordinal() == 0
def total_processes_number(local_rank):
    """
    Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
    """
    # Each backend reports world size through its own API; fall back to 1 (no parallelism).
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.xrt_world_size()
    if is_sagemaker_dp_enabled():
        import smdistributed.dataparallel.torch.distributed as dist

        return dist.get_world_size()
    if local_rank != -1 and is_torch_available():
        import torch

        return torch.distributed.get_world_size()
    return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    """
    Measure and return speed performance metrics.

    This function requires a time snapshot `start_time` before the operation to be measured starts and this function
    should be run immediately after the operation to be measured has completed.

    Args:
        - split: name to prefix metric (like train, eval, test...)
        - start_time: operation start time
        - num_samples: number of samples processed (adds ``<split>_samples_per_second``)
        - num_steps: number of steps processed (adds ``<split>_steps_per_second``)
    """
    elapsed = time.time() - start_time
    metrics = {f"{split}_runtime": round(elapsed, 4)}
    if num_samples is not None:
        metrics[f"{split}_samples_per_second"] = round(num_samples / elapsed, 3)
    if num_steps is not None:
        metrics[f"{split}_steps_per_second"] = round(num_steps / elapsed, 3)
    return metrics
class SchedulerType(ExplicitEnum):
    """Learning-rate scheduler types selectable by name."""
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.

    This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.

    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.

    Example ::

        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        code ...
        metrics = {"train_runtime": 10.5}
        self._memory_tracker.stop_and_update_metrics(metrics)

    At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.

    To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
    """

    # map trainer methods to metrics prefix
    stages = {
        "__init__": "init",
        "train": "train",
        "evaluate": "eval",
        "predict": "test",
    }

    def __init__(self, skip_memory_metrics=False):
        # Tracking is disabled either explicitly or when psutil is missing.
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            # soft dependency on psutil
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil  # noqa

        if is_torch_cuda_available():
            import torch

            self.torch = torch
            self.gpu = {}
        else:
            # self.torch doubles as the "is GPU tracking active" flag throughout.
            self.torch = None
        self.process = psutil.Process()
        # cur_stage guards against nested stage calls (e.g. evaluate() inside train()).
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        # Two frames up: derive_stage -> start/stop_and_update_metrics -> trainer method.
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(
                f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
            )

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Busy-loops in a daemon thread, sampling RSS to capture the CPU memory peak.
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001) # 1msec
            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        # cpu
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        # peak_monitoring is the loop-exit flag read by peak_monitor_func.
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # this sends a signal to peak_monitor_func to complete its loop
        self.peak_monitoring = False
        # first ensure all objects get collected and their memory is freed
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.empty_cache()
        # concepts:
        # - alloc_delta:  the difference of allocated memory between the end and the start
        # - peaked_delta: the difference between the peak memory and the current memory
        # in order to know how much memory the measured code consumed one needs to sum these two
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(
                begin=self.gpu_mem_used_at_start,
                end=self.gpu_mem_used_now,
                alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
                peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
            )
        # cpu
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(
            begin=self.cpu_mem_used_at_start,
            end=self.cpu_mem_used_now,
            alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
            peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
        )
        # reset - cycle finished
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # since we don't have a way to return init metrics, we push them into the first of train/val/predict
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, "init")
            self.init_reported = True
        for stage in stages:
            for t in ["alloc", "peaked"]:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
        # if we need additional debug info, enable the following
        # for t in ["begin", "end"]:
        #     if stage in self.cpu and t in self.cpu[stage]:
        #         metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
        #     if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
        #         metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
        # since memory can be allocated before init, and it might be difficult to track overall
        # memory usage, in particular for GPU, let's report memory usage at the point init was called
        if stages[0] == "init":
            metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
            if self.torch is not None:
                metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
            # if we also wanted to report any additional memory allocations in between init and
            # whatever the next stage was we could also report this:
            # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
            #     metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
            # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
            #     metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        # init doesn't have metrics to update so we just save that data for later stages to retrieve
        if metrics is not None:
            self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
    """
    Recursively convert numpy numbers and single-element torch tensors inside *metrics*
    (and nested lists/tuples/dicts) into plain Python scalars; other values pass through unchanged.
    """
    if isinstance(metrics, (list, tuple)):
        # Preserve the concrete container type (list vs tuple).
        return type(metrics)(denumpify_detensorize(item) for item in metrics)
    if isinstance(metrics, dict):
        return type(metrics)({key: denumpify_detensorize(value) for key, value in metrics.items()})
    if isinstance(metrics, np.generic):
        return metrics.item()
    if is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
        return metrics.item()
    return metrics
def number_of_arguments(func):
    """
    Return the number of arguments of the passed function, even if it's a partial function.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    # For partials, subtract the positional and keyword arguments already bound.
    base = len(inspect.signature(func.func).parameters)
    return base - len(func.args) - len(func.keywords)
class ShardedDDPOption(ExplicitEnum):
    """Named options for sharded distributed-data-parallel training."""
    SIMPLE = "simple"
    ZERO_DP_2 = "zero_dp_2"
    ZERO_DP_3 = "zero_dp_3"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
|
# -*- coding: utf-8 -*-
import logging
import httplib as http
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework import status
from framework.mongo import StoredObject
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import from_mongo
from website import language
from website.exceptions import NodeStateError
from website.project import clean_template_name, new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.project.model import has_anonymous_link
from website.project.forms import NewNodeForm
from website.models import Node, Pointer, WatchConfig, PrivateLink
from website import settings
from website.views import _render_nodes
from website.profile import utils
from website.project import new_folder
from website.util.sanitize import strip_html
from .log import _get_logs
logger = logging.getLogger(__name__)
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def edit_node(auth, **kwargs):
    """Update a single editable field ('title' or 'description') of a node from the JSON payload."""
    node = kwargs['node'] or kwargs['project']
    payload = request.json
    field = payload.get('name')
    new_value = strip_html(payload.get('value', ''))
    if field == 'title':
        node.set_title(new_value, auth=auth)
    elif field == 'description':
        node.set_description(new_value, auth=auth)
    node.save()
    return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Serve the "new project" form page; the template needs no extra context."""
    return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new project, optionally using an existing node as a template."""
    user = auth.user
    json_body = request.json
    title = json_body.get('title')
    template = json_body.get('template')
    description = json_body.get('description')
    # Title is required and capped at 200 characters.
    if not title or len(title) > 200:
        raise HTTPError(http.BAD_REQUEST)
    if not template:
        project = new_node('project', title, user, description)
    else:
        original_node = Node.load(template)
        changes = {'title': title}
        if description:
            changes['description'] = description
        project = original_node.use_as_template(
            auth=auth,
            changes={template: changes},
        )
    return {'projectUrl': project.url}, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(**kwargs):
    """Create a copy of a node by using it as a template and return the new node's URL."""
    original_node = kwargs.get('node')
    # Local renamed from `new_node` to avoid shadowing the module-level
    # `new_node` factory imported from website.project.
    templated_node = original_node.use_as_template(
        auth=kwargs['auth'],
        changes=dict(),
    )
    return {'url': templated_node.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_logged_in
def folder_new(**kwargs):
    """Serve the "new folder" form; include the parent node id when one was supplied."""
    node_id = kwargs['nid']
    if node_id is None:
        return {}
    return {'node_id': node_id}
@must_be_logged_in
def folder_new_post(auth, nid, **kwargs):
    """Create a new folder inside folder `nid` and point the parent folder at it."""
    user = auth.user
    title = request.json.get('title')
    if not title or len(title) > 200:
        raise HTTPError(http.BAD_REQUEST)
    node = Node.load(nid)
    # Guard against an unknown id: Node.load presumably returns None for a
    # missing node (delete_folder checks for None the same way) — without this
    # guard the attribute access below raised AttributeError (a 500) instead
    # of a clean 400.
    if node is None or node.is_deleted or node.is_registration or not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(strip_html(title), user)
    folders = [folder]
    _add_pointers(node, folders, auth)
    return {
        'projectUrl': '/dashboard/',
    }, http.CREATED
def rename_folder(**kwargs):
    # Placeholder endpoint: folder renaming is not implemented yet.
    pass
@collect_auth
def add_folder(**kwargs):
    """Create a new folder under an existing folder node."""
    auth = kwargs['auth']
    user = auth.user
    title = strip_html(request.json.get('title'))
    node_id = request.json.get('node_id')
    node = Node.load(node_id)
    # Reject an unknown node_id explicitly (Node.load presumably returns None
    # for missing ids) instead of crashing with AttributeError on node.is_deleted.
    if node is None or node.is_deleted or node.is_registration or not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(
        title, user
    )
    folders = [folder]
    _add_pointers(node, folders, auth)
    return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def project_new_node(**kwargs):
    """Create a new component under `project` from the posted NewNodeForm."""
    form = NewNodeForm(request.form)
    project = kwargs['project']
    user = kwargs['auth'].user
    # Guard clause: surface form errors and bounce back to the project page.
    if not form.validate():
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=project.url)
    node = new_node(
        title=form.title.data,
        user=user,
        category=form.category.data,
        project=project,
    )
    message = (
        'Your component was created successfully. You can keep working on the component page below, '
        'or return to the <u><a href="{url}">Project Page</a></u>.'
    ).format(url=project.url)
    status.push_status_message(message, 'info')
    return {
        'status': 'success',
    }, 201, None, node.url
@must_be_logged_in
@must_be_valid_project  # returns project
def project_before_fork(**kwargs):
    """Collect warning prompts to show the user before forking a node."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    prompts = node.callback('before_fork', user=user)
    # Warn when pointers exist anywhere in the node tree.
    if node.has_pointers_recursive:
        prompts.append(
            language.BEFORE_FORK_HAS_POINTERS.format(category=node.project_or_component)
        )
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project  # returns project
def project_before_template(auth, **kwargs):
    """List the full names of node add-ons to warn about before templating."""
    node = kwargs['node'] or kwargs['project']
    prompts = []
    for addon in node.get_addons():
        if 'node' in addon.config.configs:
            # Serialize once instead of twice per addon: the original called
            # addon.to_json(auth.user) both to test and to read the name.
            full_name = addon.to_json(auth.user)['addon_full_name']
            if full_name:
                prompts.append(full_name)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def node_fork_page(**kwargs):
    """Fork a project and return the fork's URL; components cannot be forked yet."""
    project = kwargs['project']
    node = kwargs['node']
    auth = kwargs['auth']
    # Only top-level projects may be forked for now.
    if node:
        raise HTTPError(
            http.FORBIDDEN,
            message='At this time, only projects can be forked; however, this behavior is coming soon.',
            redirect_url=node.url
        )
    node_to_use = project
    try:
        fork = node_to_use.fork_node(auth)
    except PermissionsError:
        raise HTTPError(
            http.FORBIDDEN,
            redirect_url=node_to_use.url
        )
    return fork.url
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def node_registrations(**kwargs):
    """Render the registrations tab for a node."""
    target = kwargs['node'] or kwargs['project']
    return _view_project(target, kwargs['auth'], primary=True)
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def node_forks(**kwargs):
    """Render the forks tab for a node."""
    target = kwargs['node'] or kwargs['project']
    return _view_project(target, kwargs['auth'], primary=True)
@must_be_valid_project
@must_have_permission('write')
def node_setting(**kwargs):
    """Render the node settings page, including add-on configuration state."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    if not node.can_edit(auth):
        raise HTTPError(http.FORBIDDEN)
    ret = _view_project(node, auth, primary=True)
    addons_enabled = []
    addon_enabled_settings = []
    for addon in node.get_addons():
        addons_enabled.append(addon.config.short_name)
        if 'node' in addon.config.configs:
            addon_enabled_settings.append(addon.to_json(auth.user))
    ret.update({
        'addon_categories': settings.ADDON_CATEGORIES,
        # Offer only add-ons that apply to nodes and are neither mandatory nor system-added.
        'addons_available': [
            addon
            for addon in settings.ADDONS_AVAILABLE
            if 'node' in addon.owners
            and 'node' not in addon.added_mandatory
            and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node']
        ],
        'addons_enabled': addons_enabled,
        'addon_enabled_settings': addon_enabled_settings,
        'addon_capabilities': settings.ADDON_CAPABILITIES,
        'comments': {
            'level': node.comment_level,
        },
    })
    return ret
@must_have_permission('write')
@must_not_be_registration
def node_choose_addons(**kwargs):
    """Apply the posted add-on selection to the node; returns no payload."""
    node = kwargs['node'] or kwargs['project']
    auth = kwargs['auth']
    node.config_addons(request.json, auth)
@must_be_valid_project
@must_have_permission('read')
def node_contributors(**kwargs):
    """Render the contributors page with serialized contributor data attached."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    ret = _view_project(node, auth)
    ret['contributors'] = utils.serialize_contributors(node.contributors, node)
    return ret
@must_have_permission('write')
def configure_comments(**kwargs):
    """Set the node's comment level to 'public', 'private', or disabled (falsy input)."""
    node = kwargs['node'] or kwargs['project']
    comment_level = request.json.get('commentLevel')
    if comment_level in ('public', 'private'):
        node.comment_level = comment_level
    elif not comment_level:
        # Any falsy value disables commenting.
        node.comment_level = None
    else:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def view_project(**kwargs):
    """Render the main project page (or its API payload when requested under /api/v1)."""
    auth = kwargs['auth']
    target = kwargs['node'] or kwargs['project']
    # API calls are "secondary" renders; full page loads are primary.
    is_primary = '/api/v1' not in request.path
    ret = _view_project(target, auth, primary=is_primary)
    ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
    return ret
#### Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, **kwargs):
    """Mark the node as expanded in the current user's view state."""
    node_to_use = kwargs['node'] or kwargs['project']
    node_to_use.expand(user=auth.user)
    return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, **kwargs):
    """Mark the node as collapsed in the current user's view state."""
    node_to_use = kwargs['node'] or kwargs['project']
    node_to_use.collapse(user=auth.user)
    return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission('write')
def project_reorder_components(project, **kwargs):
    """Reorders the components in a project's component list.

    :param-json list new_list: List of strings that include node IDs and
        node type delimited by ':'.
    """
    # TODO(sloria): Change new_list parameter to be an array of objects
    # {
    #   'newList': {
    #     {'key': 'abc123', 'type': 'node'}
    #   }
    # }
    requested = [tuple(entry.split(':')) for entry in request.json.get('new_list', [])]
    nodes_new = [StoredObject.get_collection(schema).load(key) for key, schema in requested]
    valid_nodes = [child for child in project.nodes if not child.is_deleted]
    deleted_nodes = [child for child in project.nodes if child.is_deleted]
    # Accept only an exact permutation of the current non-deleted children;
    # deleted children are re-appended untouched.
    if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
        project.nodes = nodes_new + deleted_nodes
        project.save()
        return {}
    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def project_statistics(**kwargs):
    """Render the statistics page; visible on public nodes and to editors."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    # Equivalent to: not (can_edit or is_public)
    if not node.can_edit(auth) and not node.is_public:
        raise HTTPError(http.FORBIDDEN)
    return _view_project(node, auth, primary=True)
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission('admin')
def project_before_set_public(**kwargs):
    """Collect warning prompts to show before making the node public."""
    node = kwargs['node'] or kwargs['project']
    return {
        'prompts': node.callback('before_make_public')
    }
@must_be_valid_project
@must_have_permission('admin')
def project_set_privacy(auth, **kwargs):
    """Set the node's privacy from the 'permissions' URL kwarg."""
    requested = kwargs.get('permissions')
    if requested is None:
        raise HTTPError(http.BAD_REQUEST)
    node = kwargs['node'] or kwargs['project']
    node.set_privacy(requested, auth)
    return {
        'status': 'success',
        'permissions': requested,
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(**kwargs):
    """Start watching a node; 400 if it is already watched."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False),
    )
    try:
        user.watch(config)
    except ValueError:  # Node is already being watched
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(**kwargs):
    """Stop watching a node; 400 if it is not currently being watched."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    watch_config = WatchConfig(node=node,
                               digest=request.json.get('digest', False),
                               immediate=request.json.get('immediate', False))
    try:
        user.unwatch(watch_config)
    except ValueError:  # Node isn't being watched
        raise HTTPError(http.BAD_REQUEST)
    # Persist the change: both watch_post and togglewatch_post save the user
    # after mutating watch state; this view was missing the save.
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(**kwargs):
    """View for toggling watch mode for a node."""
    # TODO: refactor this, watch_post, unwatch_post (@mambocab)
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False)
    )
    # Pick the inverse action of the current watch state.
    toggle = user.unwatch if user.is_watching(node) else user.watch
    try:
        toggle(config)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
        'watched': user.is_watching(node)
    }
@must_be_valid_project  # returns project
@must_have_permission('admin')
@must_not_be_registration
def component_remove(**kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.
    """
    target = kwargs['node'] or kwargs['project']
    auth = kwargs['auth']
    try:
        target.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    target.save()
    status.push_status_message(
        '{} deleted'.format(target.project_or_component.capitalize())
    )
    parents = target.node__parent
    redirect_url = parents[0].url if parents else '/dashboard/'
    return {
        'url': redirect_url,
    }
#@must_be_valid_project # injects project
@must_have_permission('admin')
@must_not_be_registration
def delete_folder(auth, **kwargs):
    """Remove folder node
    """
    # The decorator that injects 'node'/'project' into kwargs is commented out
    # above, so these keys may be missing entirely; use .get() so we reach the
    # explicit None -> 400 handling below instead of raising KeyError (a 500).
    node = kwargs.get('node') or kwargs.get('project')
    if node is None:
        raise HTTPError(http.BAD_REQUEST)
    # Only non-dashboard folders may be deleted through this endpoint.
    if not node.is_folder or node.is_dashboard:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    return {}
@must_be_valid_project  # returns project
@must_have_permission("admin")
def remove_private_link(*args, **kwargs):
    """Soft-delete a private link on this node; 404 if the link cannot be found."""
    link_id = request.json['private_link_id']
    try:
        link = PrivateLink.load(link_id)
    except ModularOdmException:
        raise HTTPError(http.NOT_FOUND)
    # load() may return None for an unknown id rather than raising (as the
    # Node.load call sites in this module suggest) — treat that as not-found
    # too, instead of crashing with an AttributeError below.
    if link is None:
        raise HTTPError(http.NOT_FOUND)
    link.is_deleted = True
    link.save()
# TODO: Split into separate functions
def _render_addon(node):
    """Collect add-on configs and widget/files JS+CSS assets for *node*.

    NOTE(review): ``widgets`` is returned but never populated here — presumably
    filled by the caller or vestigial; confirm before relying on it.
    """
    widgets = {}
    configs = {}
    js = []
    css = []
    for addon in node.get_addons():
        configs[addon.config.short_name] = addon.config.to_json()
        # Same order as before: widget assets first, then files assets.
        for section in ('widget', 'files'):
            js.extend(addon.config.include_js.get(section, []))
            css.extend(addon.config.include_css.get(section, []))
    return widgets, configs, js, css
def _view_project(node, auth, primary=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param node: the project or component being rendered
    :param auth: the current request's auth object
    :param primary: True for a full page load; triggers add-on
        before-page-load callbacks and their status messages
    """
    user = auth.user
    parent = node.parent_node
    # A view-only key can come from the auth object or from the query string.
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    widgets, configs, js, css = _render_addon(node)
    redirect_url = node.url + '?view_only=None'
    # Before page load callback; skip if not primary call
    if primary:
        for addon in node.get_addons():
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message)
    data = {
        'node': {
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            # Citations are suppressed when viewing through an anonymous link.
            'citations': {
                'apa': node.citation_apa,
                'mla': node.citation_mla,
                'chicago': node.citation_chicago,
            } if not anonymous else '',
            'is_public': node.is_public,
            'date_created': node.date_created.strftime('%m/%d/%Y %H:%M UTC'),
            'date_modified': node.logs[-1].date.strftime('%m/%d/%Y %H:%M UTC') if node.logs else '',
            'tags': [tag._primary_key for tag in node.tags],
            'children': bool(node.nodes),
            'is_registration': node.is_registration,
            'registered_from_url': node.registered_from.url if node.is_registration else '',
            'registered_date': node.registered_date.strftime('%Y/%m/%d %H:%M UTC') if node.is_registration else '',
            'registered_meta': [
                {
                    'name_no_ext': from_mongo(meta),
                    'name_clean': clean_template_name(meta),
                }
                for meta in node.registered_meta or []
            ],
            'registration_count': len(node.registration_list),
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': node.forked_date.strftime('%Y/%m/%d %I:%M %p') if node.is_fork else '',
            'fork_count': len(node.fork_list),
            'templated_count': len(node.templated_list),
            'watched_count': len(node.watchconfig__watched),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'anonymous': anonymous,
            'points': node.points,
            'piwik_site_id': node.piwik_site_id,
            'comment_level': node.comment_level,
            'has_comments': bool(getattr(node, 'commented', [])),
            # NOTE(review): 'has_children' reads the same 'commented' attribute
            # as 'has_comments' rather than node.nodes — looks like a
            # copy/paste slip; confirm intended behavior before changing.
            'has_children': bool(getattr(node, 'commented', False)),
        },
        # Empty-string placeholders are used when the node has no parent.
        'parent_node': {
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': (auth.private_key in parent.private_link_keys_active) if parent else False
        },
        'user': {
            'is_contributor': node.is_contributor(user),
            'can_edit': (node.can_edit(auth)
                         and not node.is_registration),
            'permissions': node.get_permissions(user) if user else [],
            'is_watching': user.is_watching(node) if user else False,
            'piwik_token': user.piwik_token if user else '',
            'id': user._id if user else None,
            'username': user.username if user else None,
            'can_comment': node.can_comment(auth),
        },
        'badges': _get_badge(user),
        # TODO: Namespace with nested dicts
        'addons_enabled': node.get_addon_names(),
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
    }
    return data
def _get_badge(user):
    """Return badge-awarding info for *user*'s 'badges' add-on, or {} when unavailable."""
    if not user:
        return {}
    badger = user.get_addon('badges')
    if not badger:
        return {}
    return {
        'can_award': badger.can_award,
        'badges': badger.get_badges_json()
    }
def _get_children(node, auth, indent=0):
    """Recursively list editable, non-deleted descendants with display indent levels."""
    children = []
    for child in node.nodes_primary:
        # Skip deleted or non-editable children (and their subtrees).
        if child.is_deleted or not child.can_edit(auth):
            continue
        children.append({
            'id': child._primary_key,
            'title': child.title,
            'indent': indent,
        })
        children.extend(_get_children(child, auth, indent + 1))
    return children
@must_be_valid_project  # returns project
@must_have_permission('admin')
def private_link_table(**kwargs):
    """Data for the private-links management table."""
    node = kwargs['node'] or kwargs['project']
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': [link.to_json() for link in node.private_links_active],
        }
    }
@collect_auth
@must_be_valid_project
def get_editable_children(auth, **kwargs):
    """List the node's editable descendants; returns None when the viewer cannot edit."""
    node = kwargs['node'] or kwargs['project']
    if not node.can_edit(auth):
        return
    return {
        'node': {'title': node.title, },
        'children': _get_children(node, auth),
    }
def _get_user_activity(node, auth, rescale_ratio):
    """Compute user-activity bar data for *node*.

    Returns ``(ua_count, ua, non_ua)``: the number of log entries by the
    current user, and the rescaled bar widths for the user's and other users'
    activity respectively.
    """
    # Counters
    total_count = len(node.logs)
    # Note: It's typically much faster to find logs of a given node
    # attached to a given user using node.logs.find(...) than by
    # loading the logs into Python and checking each one. However,
    # using deep caching might be even faster down the road.
    if auth.user:
        ua_count = node.logs.find(Q('user', 'eq', auth.user)).count()
    else:
        ua_count = 0
    non_ua_count = total_count - ua_count # base length of blue bar
    # Normalize over all nodes
    # NOTE(review): under Python 2, if rescale_ratio is an int these divisions
    # truncate; presumably rescale_ratio is a float — confirm at call sites.
    try:
        ua = ua_count / rescale_ratio * settings.USER_ACTIVITY_MAX_WIDTH
    except ZeroDivisionError:
        ua = 0
    try:
        non_ua = non_ua_count / rescale_ratio * settings.USER_ACTIVITY_MAX_WIDTH
    except ZeroDivisionError:
        non_ua = 0
    return ua_count, ua, non_ua
@must_be_valid_project
def get_recent_logs(**kwargs):
    """Return the primary keys of the three most recent log entries for the node."""
    target = kwargs['node'] or kwargs['project']
    recent_first = list(reversed(target.logs._to_primary_keys()))
    return {'logs': recent_first[:3]}
def _get_summary(node, auth, rescale_ratio, primary=True, link_id=None):
    """Build the summary payload for *node* as shown in node listings.

    Viewers without read access receive only a minimal stub with
    ``can_view=False``. User-activity bar values are filled in only when
    ``rescale_ratio`` is truthy.
    """
    # TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
    summary = {
        'id': link_id if link_id else node._id,
        'primary': primary,
        'is_registration': node.is_registration,
        'is_fork': node.is_fork,
    }
    if node.can_view(auth):
        summary.update({
            'can_view': True,
            'can_edit': node.can_edit(auth),
            'primary_id': node._id,
            'url': node.url,
            'primary': primary,
            'api_url': node.api_url,
            'title': node.title,
            'category': node.category,
            'node_type': node.project_or_component,
            'is_registration': node.is_registration,
            'anonymous': has_anonymous_link(node, auth),
            'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
            if node.is_registration
            else None,
            # Activity figures default to None and are overwritten below
            # when rescale_ratio is provided.
            'nlogs': None,
            'ua_count': None,
            'ua': None,
            'non_ua': None,
            'addons_enabled': node.get_addon_names(),
            'is_public': node.is_public
        })
        if rescale_ratio:
            ua_count, ua, non_ua = _get_user_activity(node, auth, rescale_ratio)
            summary.update({
                'nlogs': len(node.logs),
                'ua_count': ua_count,
                'ua': ua,
                'non_ua': non_ua,
            })
    else:
        summary['can_view'] = False
    # TODO: Make output format consistent with _view_project
    return {
        'summary': summary,
    }
@collect_auth
@must_be_valid_project
def get_summary(**kwargs):
    """View wrapper around :func:`_get_summary` driven by URL kwargs."""
    target = kwargs['node'] or kwargs['project']
    return _get_summary(
        target,
        kwargs['auth'],
        kwargs.get('rescale_ratio'),
        primary=kwargs.get('primary'),
        link_id=kwargs.get('link_id'),
    )
@must_be_contributor_or_public
def get_children(**kwargs):
    """Render summaries for all non-deleted child nodes."""
    target = kwargs['node'] or kwargs['project']
    live_children = [child for child in target.nodes if not child.is_deleted]
    return _render_nodes(live_children)
@must_be_contributor_or_public
def get_folder_pointers(**kwargs):
    """Return resolved ids of the pointer children of a folder node.

    Non-folder nodes yield an empty list.
    """
    target = kwargs['node'] or kwargs['project']
    if not target.is_folder:
        return []
    pointer_ids = []
    for child in target.nodes:
        if child is not None and not child.is_deleted and not child.primary:
            pointer_ids.append(child.resolve()._id)
    return pointer_ids
@must_be_contributor_or_public
def get_forks(**kwargs):
    """Render all live (non-deleted, non-registration) forks of a node."""
    target = kwargs['node'] or kwargs['project']
    live_forks_query = (
        Q('is_deleted', 'eq', False) &
        Q('is_registration', 'eq', False)
    )
    return _render_nodes(target.node__forked.find(live_forks_query))
@must_be_contributor_or_public
def get_registrations(**kwargs):
    """Render all registrations of a node."""
    target = kwargs['node'] or kwargs['project']
    return _render_nodes(target.node__registrations)
@must_be_valid_project  # returns project
@must_have_permission('admin')
def project_generate_private_link_post(auth, **kwargs):
    """Create a private link covering the node and its selected children."""
    target = kwargs['node'] or kwargs['project']
    payload = request.json
    node_ids = payload.get('node_ids', [])
    link_name = payload.get('name', '')
    is_anonymous = payload.get('anonymous', False)
    # The target node itself must always head the list of covered nodes.
    if target._id not in node_ids:
        node_ids.insert(0, target._id)
    covered_nodes = [Node.load(node_id) for node_id in node_ids]
    return new_private_link(
        name=link_name, user=auth.user, nodes=covered_nodes, anonymous=is_anonymous
    )
@must_be_valid_project  # returns project
@must_have_permission('admin')
def project_private_link_edit(auth, **kwargs):
    """Rename an existing private link (x-editable 'pk'/'value' payload)."""
    new_name = request.json.get('value', '')
    private_link_id = request.json.get('pk', '')
    private_link = PrivateLink.load(private_link_id)
    # NOTE(review): silently a no-op when the link id is unknown; a 404
    # might be more appropriate -- confirm callers before changing.
    if private_link:
        private_link.name = new_name
        private_link.save()
def _serialize_node_search(node):
    """Serialize a node for use in pointer search.

    :param Node node: Node to serialize
    :return: Dictionary of node data
    """
    title = node.title
    if node.is_registration:
        title += ' (registration)'
    # NOTE(review): raises IndexError for nodes with no contributors;
    # search_node filters those out before calling this.
    return {
        'id': node._id,
        'title': title,
        'firstAuthor': node.contributors[0].family_name,
        'etal': len(node.contributors) > 1,
    }
@must_be_logged_in
def search_node(**kwargs):
    """Search nodes by title for the pointer-creation widget.

    Reads ``query``, ``nodeId`` and ``includePublic`` from the JSON body;
    returns serialized matches the current user may link to, excluding the
    current node and its children.
    """
    auth = kwargs['auth']
    body = request.json
    search_term = body.get('query', '').strip()
    if not search_term:
        return {'nodes': []}
    current = Node.load(body.get('nodeId'))
    # Visibility: own nodes, optionally widened to any public node.
    visibility = Q('contributors', 'eq', auth.user)
    if body.get('includePublic'):
        visibility = visibility | Q('is_public', 'eq', True)
    criteria = (
        Q('title', 'icontains', search_term) &
        Q('is_deleted', 'eq', False) &
        visibility &
        Q('is_folder', 'eq', False)
    )
    # Exclude the current node and its children from the results.
    if current:
        excluded_ids = [current._id] + current.node_ids
        criteria = criteria & Q('_id', 'nin', excluded_ids)
    # TODO: Parameterize limit; expose pagination
    matches = Node.find(criteria).limit(20)
    return {
        'nodes': [
            _serialize_node_search(match)
            for match in matches
            if match.contributors
        ]
    }
def _add_pointers(node, pointers, auth):
    """Add each node in ``pointers`` as a pointer child of ``node``.

    :param Node node: Node to which pointers will be added
    :param list pointers: Nodes to add as pointers

    Saves ``node`` once at the end, and only if something was added.
    """
    if not pointers:
        return
    for target in pointers:
        node.add_pointer(target, auth, save=False)
    node.save()
@collect_auth
def move_pointers(auth):
    """Move pointers from one node to another node.

    Expects ``fromNodeId``, ``toNodeId`` and ``pointerIds`` in the JSON
    body; raises 400 on any missing or unresolvable id.
    """
    from_node_id = request.json.get('fromNodeId')
    to_node_id = request.json.get('toNodeId')
    pointers_to_move = request.json.get('pointerIds')
    if from_node_id is None or to_node_id is None or pointers_to_move is None:
        raise HTTPError(http.BAD_REQUEST)
    from_node = Node.load(from_node_id)
    to_node = Node.load(to_node_id)
    if to_node is None or from_node is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_to_move in pointers_to_move:
        # ``pointerIds`` holds ids of the pointed-at nodes; resolve each to
        # the Pointer object on the source node before removing it.
        pointer_id = from_node.pointing_at(pointer_to_move)
        pointer_node = Node.load(pointer_to_move)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            from_node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
        # Save per iteration so each removal is persisted before the
        # corresponding re-add on the destination node.
        from_node.save()
        _add_pointers(to_node, [pointer_node], auth)
    return {}, 200, None
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node using only JSON parameters.

    Expects ``toNodeID`` and ``pointerID`` in the JSON body.

    :raises: HTTPError(400) when either id is missing or does not resolve
        to an existing node.
    """
    to_node_id = request.json.get('toNodeID')
    pointer_to_move = request.json.get('pointerID')
    if not (to_node_id and pointer_to_move):
        raise HTTPError(http.BAD_REQUEST)
    pointer = Node.load(pointer_to_move)
    to_node = Node.load(to_node_id)
    # Node.load returns None for an unknown id; fail with a 400 instead of
    # letting _add_pointers raise AttributeError (a 500).
    if pointer is None or to_node is None:
        raise HTTPError(http.BAD_REQUEST)
    _add_pointers(to_node, [pointer], auth)
@must_have_permission('write')
@must_not_be_registration
def add_pointers(**kwargs):
    """Add pointers to a node from a list of node ids in the request body."""
    auth = kwargs['auth']
    target = kwargs['node'] or kwargs['project']
    node_ids = request.json.get('nodeIds')
    if not node_ids:
        raise HTTPError(http.BAD_REQUEST)
    pointer_nodes = [Node.load(node_id) for node_id in node_ids]
    _add_pointers(target, pointer_nodes, auth)
    return {}
@must_have_permission('write')
@must_not_be_registration
def remove_pointer(**kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    # TODO: since these a delete request, shouldn't use request body. put pointer
    # id in the URL instead
    pointer_id = request.json.get('pointerId')
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    pointer = Pointer.load(pointer_id)
    # Pointer.load returns None for an unknown id.
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        # rm_pointer raises ValueError when the pointer is not a child.
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def remove_pointer_from_folder(pointer_id, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.

    :param pointer_id: id of the pointed-at node (from the URL); resolved
        to the Pointer object on this node via ``pointing_at``.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    # Translate pointed-at node id into this node's Pointer id.
    pointer_id = node.pointing_at(pointer_id)
    pointer = Pointer.load(pointer_id)
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def remove_pointers_from_folder(**kwargs):
    """Remove multiple pointers from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    pointer_ids = request.json.get('pointerIds')
    if pointer_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_id in pointer_ids:
        # Ids in the payload are of the pointed-at nodes; resolve each to
        # the Pointer object on this node.
        pointer_id = node.pointing_at(pointer_id)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
        # NOTE(review): saves once per pointer; earlier removals stay
        # persisted if a later id fails with a 400.
        node.save()
@must_have_permission('write')
@must_not_be_registration
def fork_pointer(**kwargs):
    """Fork the node referenced by a pointer.

    Raises BAD_REQUEST when the pointer id is missing, unknown, or the
    pointer is not a child of this node.
    """
    auth = kwargs['auth']
    target = kwargs['node'] or kwargs['project']
    pointer = Pointer.load(request.json.get('pointerId'))
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        target.fork_pointer(pointer, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
    """Return a short, citation-style author string for a node."""
    lead = node.visible_contributors[0]
    name = lead.family_name or lead.given_name or lead.fullname
    suffix = ' et al.' if len(node.visible_contributor_ids) > 1 else ''
    return name + suffix
def serialize_pointer(pointer, auth):
    """Serialize the node that owns a Pointer, redacting private nodes.

    :param Pointer pointer: Pointer whose owning node is serialized.
    :param auth: Auth context used for the visibility check.
    :return: dict with 'url', 'title' and 'authorShort', redacted when the
        user cannot view the owning node.
    """
    # The `parent_node` property of the `Pointer` schema refers to the parents
    # of the pointed-at `Node`, not the parents of the `Pointer`; use the
    # back-reference syntax to find the parents of the `Pointer`.
    parent_refs = pointer.node__parent
    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # would be safer if this invariant can ever be violated.
    assert len(parent_refs) == 1, 'Pointer must have exactly one parent'
    node = parent_refs[0]
    if node.can_view(auth):
        return {
            'url': node.url,
            'title': node.title,
            'authorShort': abbrev_authors(node),
        }
    return {
        'url': None,
        'title': 'Private Component',
        'authorShort': 'Private Author(s)',
    }
@must_be_contributor_or_public
def get_pointed(auth, **kwargs):
    """Serialize every pointer that points at this node."""
    target = kwargs['node'] or kwargs['project']
    pointed_at = [serialize_pointer(each, auth) for each in target.pointed]
    return {'pointed': pointed_at}
Catch errors on views and raise a proper HTTP error.
# -*- coding: utf-8 -*-
import logging
import httplib as http
from flask import request
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework import status
from framework.mongo import StoredObject
from framework.auth.decorators import must_be_logged_in, collect_auth
from framework.exceptions import HTTPError, PermissionsError
from framework.mongo.utils import from_mongo
from website import language
from website.exceptions import NodeStateError
from website.project import clean_template_name, new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
)
from website.project.model import has_anonymous_link
from website.project.forms import NewNodeForm
from website.models import Node, Pointer, WatchConfig, PrivateLink
from website import settings
from website.views import _render_nodes
from website.profile import utils
from website.project import new_folder
from website.util.sanitize import strip_html
from .log import _get_logs
logger = logging.getLogger(__name__)
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def edit_node(auth, **kwargs):
    """Update a single editable field ('title' or 'description') on a node."""
    node = kwargs['node'] or kwargs['project']
    field = request.json.get('name')
    # Sanitize user-supplied markup before storing.
    new_value = strip_html(request.json.get('value', ''))
    if field == 'title':
        node.set_title(new_value, auth=auth)
    elif field == 'description':
        node.set_description(new_value, auth=auth)
    node.save()
    return {'status': 'success'}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
    """Render the new-project page; all content comes from the template."""
    return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
    """Create a new project, optionally templated on an existing node.

    Raises 400 when the title is missing or longer than 200 characters;
    returns the new project's URL with a 201.
    """
    user = auth.user
    title = request.json.get('title')
    template = request.json.get('template')
    description = request.json.get('description')
    if not title or len(title) > 200:
        raise HTTPError(http.BAD_REQUEST)
    if template:
        original_node = Node.load(template)
        changes = {
            'title': title
        }
        if description:
            changes['description'] = description
        # use_as_template expects a mapping keyed by node id, so the
        # per-node overrides are keyed by the template node's id here.
        project = original_node.use_as_template(
            auth=auth,
            changes={
                template: changes
            })
    else:
        project = new_node('project', title, user, description)
    return {
        'projectUrl': project.url
    }, http.CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(**kwargs):
    """Create a new node templated on an existing one, with no changes.

    :return: URL of the new node, with a 201 status.
    """
    original_node = kwargs.get('node')
    # Renamed from `new_node`, which shadowed the imported `new_node`
    # factory function from website.project.
    templated_node = original_node.use_as_template(
        auth=kwargs['auth'],
        changes=dict(),
    )
    return {'url': templated_node.url}, http.CREATED, None
##############################################################################
# New Folder
##############################################################################
@must_be_logged_in
def folder_new(**kwargs):
    """Render the new-folder page, passing the parent node id when given."""
    parent_id = kwargs['nid']
    if parent_id is None:
        return {}
    return {'node_id': parent_id}
@must_be_logged_in
def folder_new_post(auth, nid, **kwargs):
    """Create a new folder as a pointer child of the folder node `nid`.

    :raises: HTTPError(400) when the title is missing or too long, or when
        `nid` does not resolve to a live, non-registration folder node.
    """
    user = auth.user
    title = request.json.get('title')
    if not title or len(title) > 200:
        raise HTTPError(http.BAD_REQUEST)
    node = Node.load(nid)
    # Node.load returns None for an unknown id; without this guard the
    # attribute checks below would raise AttributeError (a 500).
    if node is None or node.is_deleted or node.is_registration or not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(strip_html(title), user)
    folders = [folder]
    try:
        _add_pointers(node, folders, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {
        'projectUrl': '/dashboard/',
    }, http.CREATED
def rename_folder(**kwargs):
    # TODO: Unimplemented stub; folder renaming is not yet supported.
    pass
@collect_auth
def add_folder(**kwargs):
    """Create a new folder inside an existing folder node.

    :raises: HTTPError(400) when `node_id` does not resolve to a live,
        non-registration folder node, or the folder cannot be attached.
    """
    auth = kwargs['auth']
    user = auth.user
    title = strip_html(request.json.get('title'))
    node_id = request.json.get('node_id')
    node = Node.load(node_id)
    # Node.load returns None for an unknown id; without this guard the
    # attribute checks below would raise AttributeError (a 500).
    if node is None or node.is_deleted or node.is_registration or not node.is_folder:
        raise HTTPError(http.BAD_REQUEST)
    folder = new_folder(
        title, user
    )
    folders = [folder]
    try:
        _add_pointers(node, folders, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}, 201, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def project_new_node(**kwargs):
    """Create a new component inside a project from a submitted form.

    On success, pushes a status message and redirects (201) to the new
    component's URL; on validation failure, raises 400 and redirects back
    to the project page with the form errors.
    """
    form = NewNodeForm(request.form)
    project = kwargs['project']
    user = kwargs['auth'].user
    if form.validate():
        node = new_node(
            title=form.title.data,
            user=user,
            category=form.category.data,
            project=project,
        )
        message = (
            'Your component was created successfully. You can keep working on the component page below, '
            'or return to the <u><a href="{url}">Project Page</a></u>.'
        ).format(url=project.url)
        status.push_status_message(message, 'info')
        return {
            'status': 'success',
        }, 201, None, node.url
    else:
        status.push_errors_to_status(form.errors)
        raise HTTPError(http.BAD_REQUEST, redirect_url=project.url)
@must_be_logged_in
@must_be_valid_project  # returns project
def project_before_fork(**kwargs):
    """Collect warning prompts to display before forking a node."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    prompts = node.callback('before_fork', user=user)
    # Pointers are not copied by a fork; warn when any exist in the tree.
    if node.has_pointers_recursive:
        pointer_warning = language.BEFORE_FORK_HAS_POINTERS.format(
            category=node.project_or_component
        )
        prompts.append(pointer_warning)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project  # returns project
def project_before_template(auth, **kwargs):
    """List full names of node add-ons to warn about before templating."""
    node = kwargs['node'] or kwargs['project']
    prompts = []
    for addon in node.get_addons():
        if 'node' in addon.config.configs:
            # Serialize once per addon instead of twice.
            full_name = addon.to_json(auth.user)['addon_full_name']
            if full_name:
                prompts.append(full_name)
    return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def node_fork_page(**kwargs):
    """Fork a project and return the fork's URL.

    Forking an individual component is not supported yet: requesting one
    yields a 403 that redirects back to the component page.
    """
    auth = kwargs['auth']
    component = kwargs['node']
    if component:
        raise HTTPError(
            http.FORBIDDEN,
            message='At this time, only projects can be forked; however, this behavior is coming soon.',
            redirect_url=component.url
        )
    project = kwargs['project']
    try:
        fork = project.fork_node(auth)
    except PermissionsError:
        raise HTTPError(
            http.FORBIDDEN,
            redirect_url=project.url
        )
    return fork.url
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def node_registrations(**kwargs):
    """Render the registrations tab for a node."""
    target = kwargs['node'] or kwargs['project']
    return _view_project(target, kwargs['auth'], primary=True)
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def node_forks(**kwargs):
    """Render the forks tab for a node."""
    target = kwargs['node'] or kwargs['project']
    return _view_project(target, kwargs['auth'], primary=True)
@must_be_valid_project
@must_have_permission('write')
def node_setting(**kwargs):
    """Render the node settings page, including add-on configuration data."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    if not node.can_edit(auth):
        raise HTTPError(http.FORBIDDEN)
    rv = _view_project(node, auth, primary=True)
    addons_enabled = []
    addon_enabled_settings = []
    for addon in node.get_addons():
        addons_enabled.append(addon.config.short_name)
        # Only add-ons with node-level configuration get a settings panel.
        if 'node' in addon.config.configs:
            addon_enabled_settings.append(addon.to_json(auth.user))
    rv['addon_categories'] = settings.ADDON_CATEGORIES
    # Offer only node-capable add-ons that are neither mandatory nor
    # automatically added by the system.
    rv['addons_available'] = [
        addon
        for addon in settings.ADDONS_AVAILABLE
        if 'node' in addon.owners
        and 'node' not in addon.added_mandatory
        and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node']
    ]
    rv['addons_enabled'] = addons_enabled
    rv['addon_enabled_settings'] = addon_enabled_settings
    rv['addon_capabilities'] = settings.ADDON_CAPABILITIES
    rv['comments'] = {
        'level': node.comment_level,
    }
    return rv
@must_have_permission('write')
@must_not_be_registration
def node_choose_addons(**kwargs):
    """Enable/disable add-ons on a node from the posted configuration."""
    target = kwargs['node'] or kwargs['project']
    target.config_addons(request.json, kwargs['auth'])
@must_be_valid_project
@must_have_permission('read')
def node_contributors(**kwargs):
    """Render the contributors page for a node."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    data = _view_project(node, auth)
    data['contributors'] = utils.serialize_contributors(node.contributors, node)
    return data
@must_have_permission('write')
def configure_comments(**kwargs):
    """Set a node's comment level: None (disabled), 'public', or 'private'."""
    node = kwargs['node'] or kwargs['project']
    level = request.json.get('commentLevel')
    if not level:
        # Falsy payload disables commenting entirely.
        node.comment_level = None
    elif level in ('public', 'private'):
        node.comment_level = level
    else:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
##############################################################################
# View Project
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public
def view_project(**kwargs):
    """Render the main project view.

    API requests (paths under /api/v1) are treated as non-primary calls.
    """
    target = kwargs['node'] or kwargs['project']
    is_primary = '/api/v1' not in request.path
    data = _view_project(target, kwargs['auth'], primary=is_primary)
    data['addon_capabilities'] = settings.ADDON_CAPABILITIES
    return data
#### Expand/Collapse
@must_be_valid_project
@must_be_contributor_or_public
def expand(auth, **kwargs):
    """Mark a node as expanded for the current user."""
    target = kwargs['node'] or kwargs['project']
    target.expand(user=auth.user)
    return {}, 200, None
@must_be_valid_project
@must_be_contributor_or_public
def collapse(auth, **kwargs):
    """Mark a node as collapsed for the current user."""
    target = kwargs['node'] or kwargs['project']
    target.collapse(user=auth.user)
    return {}, 200, None
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission('write')
def project_reorder_components(project, **kwargs):
    """Reorders the components in a project's component list.

    :param-json list new_list: List of strings that include node IDs and
        node type delimited by ':'.

    Raises 400 unless the submitted list matches the project's current
    non-deleted children exactly (same members, any order).
    """
    # TODO(sloria): Change new_list parameter to be an array of objects
    # {
    #   'newList': {
    #       {'key': 'abc123', 'type': 'node'}
    #   }
    # }
    new_list = [
        tuple(node.split(':'))
        for node in request.json.get('new_list', [])
    ]
    nodes_new = [
        StoredObject.get_collection(schema).load(key)
        for key, schema in new_list
    ]
    valid_nodes = [
        node for node in project.nodes
        if not node.is_deleted
    ]
    deleted_nodes = [
        node for node in project.nodes
        if node.is_deleted
    ]
    # Accept only a permutation of the existing live children; deleted
    # children keep their positions at the end of the list.
    if len(valid_nodes) == len(nodes_new) and set(valid_nodes) == set(nodes_new):
        project.nodes = nodes_new + deleted_nodes
        project.save()
        return {}
    logger.error('Got invalid node list in reorder components')
    raise HTTPError(http.BAD_REQUEST)
##############################################################################
@must_be_valid_project
@must_be_contributor_or_public  # returns user, project
def project_statistics(**kwargs):
    """Render the statistics page; requires edit access or a public node."""
    auth = kwargs['auth']
    target = kwargs['node'] or kwargs['project']
    can_see_stats = target.can_edit(auth) or target.is_public
    if not can_see_stats:
        raise HTTPError(http.FORBIDDEN)
    return _view_project(target, auth, primary=True)
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission('admin')
def project_before_set_public(**kwargs):
    """Collect add-on prompts to show before making a node public."""
    node = kwargs['node'] or kwargs['project']
    prompts = node.callback('before_make_public')
    return {'prompts': prompts}
@must_be_valid_project
@must_have_permission('admin')
def project_set_privacy(auth, **kwargs):
    """Set a node's privacy from the URL kwarg ('public' or 'private')."""
    permissions = kwargs.get('permissions')
    if permissions is None:
        raise HTTPError(http.BAD_REQUEST)
    target = kwargs['node'] or kwargs['project']
    target.set_privacy(permissions, auth)
    return {
        'status': 'success',
        'permissions': permissions,
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(**kwargs):
    """Start watching a node for the current user; 400 if already watched."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False),
    )
    try:
        user.watch(config)
    except ValueError:  # Node is already being watched
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(**kwargs):
    """Stop watching a node for the current user; 400 if not watched."""
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    watch_config = WatchConfig(node=node,
                               digest=request.json.get('digest', False),
                               immediate=request.json.get('immediate', False))
    try:
        user.unwatch(watch_config)
    except ValueError:  # Node isn't being watched
        raise HTTPError(http.BAD_REQUEST)
    # Persist the change: watch_post and togglewatch_post both save the
    # user after modifying watch state, but this view previously did not.
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched)
    }
@must_be_valid_project  # returns project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(**kwargs):
    '''View for toggling watch mode for a node.

    Watches the node if the user is not currently watching it, otherwise
    unwatches; returns the new watch count and state.
    '''
    # TODO: refactor this, watch_post, unwatch_post (@mambocab)
    node = kwargs['node'] or kwargs['project']
    user = kwargs['auth'].user
    watch_config = WatchConfig(
        node=node,
        digest=request.json.get('digest', False),
        immediate=request.json.get('immediate', False)
    )
    try:
        if user.is_watching(node):
            user.unwatch(watch_config)
        else:
            user.watch(watch_config)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    user.save()
    return {
        'status': 'success',
        'watchCount': len(node.watchconfig__watched),
        'watched': user.is_watching(node)
    }
@must_be_valid_project  # returns project
@must_have_permission('admin')
@must_not_be_registration
def component_remove(**kwargs):
    """Remove component, and recursively remove its children. If node has a
    parent, add log and redirect to parent; else redirect to user dashboard.
    """
    node_to_use = kwargs['node'] or kwargs['project']
    auth = kwargs['auth']
    try:
        node_to_use.remove_node(auth)
    except NodeStateError as e:
        # e.g. the node has children that cannot be removed.
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    node_to_use.save()
    message = '{} deleted'.format(
        node_to_use.project_or_component.capitalize()
    )
    status.push_status_message(message)
    # Redirect to the parent when one exists, otherwise to the dashboard.
    if node_to_use.node__parent:
        redirect_url = node_to_use.node__parent[0].url
    else:
        redirect_url = '/dashboard/'
    return {
        'url': redirect_url,
    }
#@must_be_valid_project # injects project
@must_have_permission('admin')
@must_not_be_registration
def delete_folder(auth, **kwargs):
    """Remove folder node

    The dashboard folder itself and non-folder nodes are rejected with 400.
    """
    # NOTE(review): relies on 'node'/'project' being present in kwargs even
    # though must_be_valid_project is commented out -- presumably injected
    # by must_have_permission; confirm before re-enabling the decorator.
    node = kwargs['node'] or kwargs['project']
    if node is None:
        raise HTTPError(http.BAD_REQUEST)
    if not node.is_folder or node.is_dashboard:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.remove_node(auth)
    except NodeStateError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_long': 'Could not delete component: ' + e.message
            },
        )
    return {}
@must_be_valid_project  # returns project
@must_have_permission("admin")
def remove_private_link(*args, **kwargs):
    """Soft-delete a private link on a node.

    :raises: HTTPError(404) when the link id is unknown.
    """
    link_id = request.json['private_link_id']
    try:
        link = PrivateLink.load(link_id)
    except ModularOdmException:
        raise HTTPError(http.NOT_FOUND)
    # PrivateLink.load returns None (rather than raising) for an unknown
    # id, so the except clause alone never produced the intended 404 --
    # `link.is_deleted = True` raised AttributeError (a 500) instead.
    if link is None:
        raise HTTPError(http.NOT_FOUND)
    link.is_deleted = True
    link.save()
# TODO: Split into separate functions
def _render_addon(node):
widgets = {}
configs = {}
js = []
css = []
for addon in node.get_addons():
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _view_project(node, auth, primary=False):
    """Build a JSON object containing everything needed to render
    project.view.mako.

    :param node: Node being rendered.
    :param auth: Auth context; drives permission and watch/comment flags.
    :param primary: True for full page loads; triggers add-on
        before_page_load callbacks and their status messages.
    """
    user = auth.user
    parent = node.parent_node
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    anonymous = has_anonymous_link(node, auth)
    widgets, configs, js, css = _render_addon(node)
    redirect_url = node.url + '?view_only=None'
    # Before page load callback; skip if not primary call
    if primary:
        for addon in node.get_addons():
            messages = addon.before_page_load(node, user) or []
            for message in messages:
                status.push_status_message(message)
    data = {
        'node': {
            'id': node._primary_key,
            'title': node.title,
            'category': node.category_display,
            'node_type': node.project_or_component,
            'description': node.description or '',
            'url': node.url,
            'api_url': node.api_url,
            'absolute_url': node.absolute_url,
            'redirect_url': redirect_url,
            'display_absolute_url': node.display_absolute_url,
            # Citations are suppressed on anonymized view-only links.
            'citations': {
                'apa': node.citation_apa,
                'mla': node.citation_mla,
                'chicago': node.citation_chicago,
            } if not anonymous else '',
            'is_public': node.is_public,
            'date_created': node.date_created.strftime('%m/%d/%Y %H:%M UTC'),
            'date_modified': node.logs[-1].date.strftime('%m/%d/%Y %H:%M UTC') if node.logs else '',
            'tags': [tag._primary_key for tag in node.tags],
            'children': bool(node.nodes),
            'is_registration': node.is_registration,
            'registered_from_url': node.registered_from.url if node.is_registration else '',
            'registered_date': node.registered_date.strftime('%Y/%m/%d %H:%M UTC') if node.is_registration else '',
            'registered_meta': [
                {
                    'name_no_ext': from_mongo(meta),
                    'name_clean': clean_template_name(meta),
                }
                for meta in node.registered_meta or []
            ],
            'registration_count': len(node.registration_list),
            'is_fork': node.is_fork,
            'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
            'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
            'forked_date': node.forked_date.strftime('%Y/%m/%d %I:%M %p') if node.is_fork else '',
            'fork_count': len(node.fork_list),
            'templated_count': len(node.templated_list),
            'watched_count': len(node.watchconfig__watched),
            'private_links': [x.to_json() for x in node.private_links_active],
            'link': view_only_link,
            'anonymous': anonymous,
            'points': node.points,
            'piwik_site_id': node.piwik_site_id,
            'comment_level': node.comment_level,
            'has_comments': bool(getattr(node, 'commented', [])),
            # NOTE(review): also reads 'commented' -- looks like it should
            # read a children-related attribute; confirm intent.
            'has_children': bool(getattr(node, 'commented', False)),
        },
        # Parent fields degrade to '' / False when there is no parent.
        'parent_node': {
            'id': parent._primary_key if parent else '',
            'title': parent.title if parent else '',
            'url': parent.url if parent else '',
            'api_url': parent.api_url if parent else '',
            'absolute_url': parent.absolute_url if parent else '',
            'is_public': parent.is_public if parent else '',
            'is_contributor': parent.is_contributor(user) if parent else '',
            'can_view': (auth.private_key in parent.private_link_keys_active) if parent else False
        },
        'user': {
            'is_contributor': node.is_contributor(user),
            'can_edit': (node.can_edit(auth)
                         and not node.is_registration),
            'permissions': node.get_permissions(user) if user else [],
            'is_watching': user.is_watching(node) if user else False,
            'piwik_token': user.piwik_token if user else '',
            'id': user._id if user else None,
            'username': user.username if user else None,
            'can_comment': node.can_comment(auth),
        },
        'badges': _get_badge(user),
        # TODO: Namespace with nested dicts
        'addons_enabled': node.get_addon_names(),
        'addons': configs,
        'addon_widgets': widgets,
        'addon_widget_js': js,
        'addon_widget_css': css,
    }
    return data
def _get_badge(user):
if user:
badger = user.get_addon('badges')
if badger:
return {
'can_award': badger.can_award,
'badges': badger.get_badges_json()
}
return {}
def _get_children(node, auth, indent=0):
    """Recursively flatten a node's editable, non-deleted descendants.

    :param node: Root node whose primary children are traversed.
    :param auth: Auth context; children the user cannot edit are skipped
        (along with their subtrees).
    :param indent: Current nesting depth, stored on each entry for display.
    :return: flat list of {'id', 'title', 'indent'} dicts, depth-first.
    """
    children = []
    for child in node.nodes_primary:
        if not child.is_deleted and child.can_edit(auth):
            children.append({
                'id': child._primary_key,
                'title': child.title,
                'indent': indent,
            })
            children.extend(_get_children(child, auth, indent + 1))
    return children
@must_be_valid_project  # returns project
@must_have_permission('admin')
def private_link_table(**kwargs):
    """Serialize a node's active private links for the settings table."""
    node = kwargs['node'] or kwargs['project']
    active_links = [link.to_json() for link in node.private_links_active]
    return {
        'node': {
            'absolute_url': node.absolute_url,
            'private_links': active_links,
        }
    }
@collect_auth
@must_be_valid_project
def get_editable_children(auth, **kwargs):
    """Return the node's editable descendants, flattened with indent levels.

    Returns None (empty response) when the user cannot edit the node.
    """
    node = kwargs['node'] or kwargs['project']
    if not node.can_edit(auth):
        return
    return {
        'node': {'title': node.title, },
        'children': _get_children(node, auth),
    }
def _get_user_activity(node, auth, rescale_ratio):
    """Compute activity-bar widths for a node's logs.

    :param node: Node whose logs are counted.
    :param auth: Auth context; anonymous users get a zero user-count.
    :param rescale_ratio: Normalization denominator shared across nodes.
    :return: tuple (ua_count, ua, non_ua) -- the raw count of the current
        user's logs plus the scaled bar widths for user and other activity.
    """
    # Counters
    total_count = len(node.logs)
    # Note: It's typically much faster to find logs of a given node
    # attached to a given user using node.logs.find(...) than by
    # loading the logs into Python and checking each one. However,
    # using deep caching might be even faster down the road.
    if auth.user:
        ua_count = node.logs.find(Q('user', 'eq', auth.user)).count()
    else:
        ua_count = 0
    non_ua_count = total_count - ua_count  # base length of blue bar
    # Normalize over all nodes
    # NOTE(review): under Python 2 this is floor division when
    # rescale_ratio is an int -- presumably it is always a float; confirm.
    try:
        ua = ua_count / rescale_ratio * settings.USER_ACTIVITY_MAX_WIDTH
    except ZeroDivisionError:
        ua = 0
    try:
        non_ua = non_ua_count / rescale_ratio * settings.USER_ACTIVITY_MAX_WIDTH
    except ZeroDivisionError:
        non_ua = 0
    return ua_count, ua, non_ua
@must_be_valid_project
def get_recent_logs(**kwargs):
    """Return the primary keys of the node's three most recent log entries."""
    node_to_use = kwargs['node'] or kwargs['project']
    # Logs are stored oldest-first; reverse to get newest-first.
    logs = list(reversed(node_to_use.logs._to_primary_keys()))[:3]
    return {'logs': logs}
def _get_summary(node, auth, rescale_ratio, primary=True, link_id=None):
    """Build a lightweight summary dict for a node, for dashboard listings.

    :param node: Node to summarize.
    :param auth: Auth context; limits detail when the user cannot view.
    :param rescale_ratio: Normalization factor for the user-activity bars;
        falsy values skip the activity computation entirely.
    :param primary: Whether the node is a primary (non-pointer) child.
    :param link_id: Optional pointer id reported as the summary 'id'.
    :return: ``{'summary': {...}}``; only the minimal keys plus
        ``'can_view': False`` when the user cannot view the node.
    """
    # TODO(sloria): Refactor this or remove (lots of duplication with _view_project)
    summary = {
        'id': link_id if link_id else node._id,
        'primary': primary,
        'is_registration': node.is_registration,
        'is_fork': node.is_fork,
    }
    if node.can_view(auth):
        summary.update({
            'can_view': True,
            'can_edit': node.can_edit(auth),
            'primary_id': node._id,
            'url': node.url,
            'primary': primary,
            'api_url': node.api_url,
            'title': node.title,
            'category': node.category,
            'node_type': node.project_or_component,
            'is_registration': node.is_registration,
            'anonymous': has_anonymous_link(node, auth),
            'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
            if node.is_registration
            else None,
            # Activity fields default to None; filled in below only when a
            # rescale ratio is supplied.
            'nlogs': None,
            'ua_count': None,
            'ua': None,
            'non_ua': None,
            'addons_enabled': node.get_addon_names(),
            'is_public': node.is_public
        })
        if rescale_ratio:
            ua_count, ua, non_ua = _get_user_activity(node, auth, rescale_ratio)
            summary.update({
                'nlogs': len(node.logs),
                'ua_count': ua_count,
                'ua': ua,
                'non_ua': non_ua,
            })
    else:
        summary['can_view'] = False
    # TODO: Make output format consistent with _view_project
    return {
        'summary': summary,
    }
@collect_auth
@must_be_valid_project
def get_summary(**kwargs):
    """View wrapper around :func:`_get_summary` driven by URL kwargs."""
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    rescale_ratio = kwargs.get('rescale_ratio')
    primary = kwargs.get('primary')
    link_id = kwargs.get('link_id')
    return _get_summary(
        node, auth, rescale_ratio, primary=primary, link_id=link_id
    )
@must_be_contributor_or_public
def get_children(**kwargs):
    """Render summaries for all non-deleted child nodes."""
    node_to_use = kwargs['node'] or kwargs['project']
    return _render_nodes([
        node
        for node in node_to_use.nodes
        if not node.is_deleted
    ])
@must_be_contributor_or_public
def get_folder_pointers(**kwargs):
    """Return resolved ids of the pointer children of a folder node.

    Non-folder nodes yield an empty list.
    """
    node_to_use = kwargs['node'] or kwargs['project']
    if not node_to_use.is_folder:
        return []
    return [
        node.resolve()._id
        for node in node_to_use.nodes
        if node is not None and not node.is_deleted and not node.primary
    ]
@must_be_contributor_or_public
def get_forks(**kwargs):
    """Render all live (non-deleted, non-registration) forks of a node."""
    node_to_use = kwargs['node'] or kwargs['project']
    forks = node_to_use.node__forked.find(
        Q('is_deleted', 'eq', False) &
        Q('is_registration', 'eq', False)
    )
    return _render_nodes(forks)
@must_be_contributor_or_public
def get_registrations(**kwargs):
    """Render all registrations of a node."""
    node_to_use = kwargs['node'] or kwargs['project']
    registrations = node_to_use.node__registrations
    return _render_nodes(registrations)
@must_be_valid_project  # returns project
@must_have_permission('admin')
def project_generate_private_link_post(auth, **kwargs):
    """Create a new private link object and add it to the node and its selected children."""
    node_to_use = kwargs['node'] or kwargs['project']
    node_ids = request.json.get('node_ids', [])
    name = request.json.get('name', '')
    anonymous = request.json.get('anonymous', False)
    # The target node itself must always head the list of covered nodes.
    if node_to_use._id not in node_ids:
        node_ids.insert(0, node_to_use._id)
    nodes = [Node.load(node_id) for node_id in node_ids]
    new_link = new_private_link(
        name=name, user=auth.user, nodes=nodes, anonymous=anonymous
    )
    return new_link
@must_be_valid_project  # returns project
@must_have_permission('admin')
def project_private_link_edit(auth, **kwargs):
    """Rename an existing private link.

    Reads an x-editable style payload: `pk` is the link id and `value` the
    new name. Silently does nothing when the link cannot be loaded.
    """
    new_name = request.json.get('value', '')
    private_link_id = request.json.get('pk', '')
    private_link = PrivateLink.load(private_link_id)
    if private_link:
        private_link.name = new_name
        private_link.save()
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
title = node.title
if node.is_registration:
title += ' (registration)'
return {
'id': node._id,
'title': title,
'firstAuthor': node.contributors[0].family_name,
'etal': len(node.contributors) > 1,
}
@must_be_logged_in
def search_node(**kwargs):
    """Search the current user's visible nodes by title substring.

    JSON params: `query` (title substring), `nodeId` (a node whose family
    is excluded from results), `includePublic` (also match public nodes).
    Returns at most 20 serialized matches.
    """
    # Get arguments
    auth = kwargs['auth']
    node = Node.load(request.json.get('nodeId'))
    include_public = request.json.get('includePublic')
    query = request.json.get('query', '').strip()
    if not query:
        return {'nodes': []}
    # Build ODM query
    title_query = Q('title', 'icontains', query)
    not_deleted_query = Q('is_deleted', 'eq', False)
    visibility_query = Q('contributors', 'eq', auth.user)
    no_folders_query = Q('is_folder', 'eq', False)
    if include_public:
        visibility_query = visibility_query | Q('is_public', 'eq', True)
    odm_query = title_query & not_deleted_query & visibility_query & no_folders_query
    # Exclude current node from query if provided
    if node:
        nin = [node._id] + node.node_ids
        odm_query = (
            odm_query &
            Q('_id', 'nin', nin)
        )
    # TODO: Parameterize limit; expose pagination
    cursor = Node.find(odm_query).limit(20)
    return {
        'nodes': [
            _serialize_node_search(each)
            for each in cursor
            # Skip contributor-less nodes; the serializer indexes [0].
            if each.contributors
        ]
    }
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def move_pointers(auth):
    """Move pointer from one node to another node.

    JSON params: `fromNodeId`, `toNodeId` and the list `pointerIds`.
    Raises a 400 on any missing parameter, unloadable node, unknown
    pointer, or failed removal/addition.
    """
    from_node_id = request.json.get('fromNodeId')
    to_node_id = request.json.get('toNodeId')
    pointers_to_move = request.json.get('pointerIds')
    if from_node_id is None or to_node_id is None or pointers_to_move is None:
        raise HTTPError(http.BAD_REQUEST)
    from_node = Node.load(from_node_id)
    to_node = Node.load(to_node_id)
    if to_node is None or from_node is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_to_move in pointers_to_move:
        # Translate the pointed-at node id into the Pointer record id.
        pointer_id = from_node.pointing_at(pointer_to_move)
        pointer_node = Node.load(pointer_to_move)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            from_node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
        from_node.save()
        try:
            _add_pointers(to_node, [pointer_node], auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
    return {}, 200, None
@collect_auth
def add_pointer(auth):
    """Add a single pointer to a node using only JSON parameters.

    JSON params: `toNodeID` (id of the node receiving the pointer) and
    `pointerID` (id of the node to point at).

    :raises: HTTPError(400) when either id is missing, either node cannot
        be loaded, or the pointer cannot be added.
    """
    to_node_id = request.json.get('toNodeID')
    pointer_to_move = request.json.get('pointerID')
    if not (to_node_id and pointer_to_move):
        raise HTTPError(http.BAD_REQUEST)
    pointer = Node.load(pointer_to_move)
    to_node = Node.load(to_node_id)
    # Node.load returns None for unknown ids; mirror move_pointers() and
    # reject them with a 400 instead of crashing with AttributeError below.
    if to_node is None or pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        _add_pointers(to_node, [pointer], auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
@must_have_permission('write')
@must_not_be_registration
def add_pointers(**kwargs):
    """Add pointers to a node.

    JSON param `nodeIds` lists the ids of the nodes to point at. Raises a
    400 when the list is missing/empty or a pointer cannot be added.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    node_ids = request.json.get('nodeIds')
    if not node_ids:
        raise HTTPError(http.BAD_REQUEST)
    nodes = [
        Node.load(node_id)
        for node_id in node_ids
    ]
    try:
        _add_pointers(node, nodes, auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    return {}
@must_have_permission('write')
@must_not_be_registration
def remove_pointer(**kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    # TODO: since this is a delete request, we shouldn't use the request body;
    # put the pointer id in the URL instead
    pointer_id = request.json.get('pointerId')
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    pointer = Pointer.load(pointer_id)
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        # rm_pointer raises ValueError when the pointer is not in node.nodes.
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def remove_pointer_from_folder(pointer_id, **kwargs):
    """Remove a pointer from a node, raising a 400 if the pointer is not
    in `node.nodes`.

    `pointer_id` arrives here as the id of the pointed-at node (URL
    parameter) and is translated to the Pointer record id below.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    if pointer_id is None:
        raise HTTPError(http.BAD_REQUEST)
    # Translate pointed-at node id -> Pointer record id.
    pointer_id = node.pointing_at(pointer_id)
    pointer = Pointer.load(pointer_id)
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        node.rm_pointer(pointer, auth=auth)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_be_valid_project  # returns project
@must_have_permission('write')
@must_not_be_registration
def remove_pointers_from_folder(**kwargs):
    """Remove multiple pointers from a node, raising a 400 if the pointer is not
    in `node.nodes`.

    JSON param `pointerIds` lists the ids of the pointed-at nodes; the
    node is saved once after all removals succeed.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    pointer_ids = request.json.get('pointerIds')
    if pointer_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    for pointer_id in pointer_ids:
        # Translate pointed-at node id -> Pointer record id.
        pointer_id = node.pointing_at(pointer_id)
        pointer = Pointer.load(pointer_id)
        if pointer is None:
            raise HTTPError(http.BAD_REQUEST)
        try:
            node.rm_pointer(pointer, auth=auth)
        except ValueError:
            raise HTTPError(http.BAD_REQUEST)
    node.save()
@must_have_permission('write')
@must_not_be_registration
def fork_pointer(**kwargs):
    """Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
    or not present in `nodes`.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    pointer_id = request.json.get('pointerId')
    pointer = Pointer.load(pointer_id)
    if pointer is None:
        raise HTTPError(http.BAD_REQUEST)
    try:
        # fork_pointer raises ValueError when the pointer is not in node.nodes.
        node.fork_pointer(pointer, auth=auth, save=True)
    except ValueError:
        raise HTTPError(http.BAD_REQUEST)
def abbrev_authors(node):
    """Citation-style author string: the lead visible contributor's family
    name (falling back to given name, then full name), with ' et al.'
    appended when there is more than one visible contributor."""
    lead = node.visible_contributors[0]
    name = lead.family_name or lead.given_name or lead.fullname
    suffix = ' et al.' if len(node.visible_contributor_ids) > 1 else ''
    return name + suffix
def serialize_pointer(pointer, auth):
    """Serialize the node that CONTAINS `pointer`, masking private nodes.

    Returns a dict with `url`, `title` and `authorShort`; when the viewer
    cannot see the containing node, placeholder values are returned.
    """
    # The `parent_node` property of the `Pointer` schema refers to the parents
    # of the pointed-at `Node`, not the parents of the `Pointer`; use the
    # back-reference syntax to find the parents of the `Pointer`.
    parent_refs = pointer.node__parent
    assert len(parent_refs) == 1, 'Pointer must have exactly one parent'
    node = parent_refs[0]
    if node.can_view(auth):
        return {
            'url': node.url,
            'title': node.title,
            'authorShort': abbrev_authors(node),
        }
    # Viewer lacks permission: expose nothing identifying.
    return {
        'url': None,
        'title': 'Private Component',
        'authorShort': 'Private Author(s)',
    }
@must_be_contributor_or_public
def get_pointed(auth, **kwargs):
    """List serialized info about every pointer that points at this node."""
    target = kwargs['node'] or kwargs['project']
    serialized = []
    for each in target.pointed:
        serialized.append(serialize_pointer(each, auth))
    return {'pointed': serialized}
|
"""
I lifted from stackoverflow and hacked it
import os
import glob
files = glob.glob(os.path.dirname(__file__)+"/*.py")
modules = []
for f in files:
if os.path.isfile(f) and not f.endswith('__init__.py'):
modules[] = os.path.basename(f)[:-3]
__all__ = modules
del files
del modules
"""
import os
import re
import time
from ..imports import httplib2, json
from ..response import Response
from ..util import make_acapi_request
class AcquiaData(object):
    """Base class for objects backed by an Acquia Cloud API endpoint."""

    def __init__(self, uri, auth, data=None):
        """Store the endpoint URI, credentials and optional cached payload."""
        self.uri = uri
        self.auth = auth
        self.data = data

    def request(self, uri=None, method='GET', data=None, params=None):
        """Issue an API call, defaulting to this object's own endpoint."""
        target = self.uri if uri is None else uri
        return make_acapi_request(method, target, auth=self.auth, data=data, params=params)

    def task_uri(self, task_data):
        """
        This is a horrible hack, but it is needed for now
        """
        # Rewrite .../sites/<site>/envs/... into .../sites/<site>/tasks/<id>.
        task_id = int(task_data['id'])
        pattern = re.compile(r'/sites/(.*)/envs.*')
        return '%s/%d' % (pattern.sub(r'/sites/\g<1>/tasks', self.uri), task_id)

    def wait_for_task(self, task_data, uri=None):
        """
        FIXME: Move this some where elese
        """
        if uri is None:
            uri = self.uri
        task = Task(self.task_uri(task_data), self.auth)
        while task.pending():
            # This seems like a reasonable trade off between being polite and
            # hammering the API.
            time.sleep(3)
        return task.get()
class AcquiaResource(AcquiaData):
    """A single API resource; fetches and caches its own data lazily."""

    def get(self):
        """Return the resource payload, hitting the API on first access."""
        if self.data is None:
            self.data = self.request().content
        return self.data
class AcquiaList(AcquiaData, dict):
    """A dict of API resources that is itself bound to a list endpoint."""

    def __init__(self, uri, auth, *args, **kwargs):
        # Initialise both bases explicitly: the endpoint state and the dict.
        AcquiaData.__init__(self, uri, auth)
        dict.__init__(self, *args, **kwargs)
class Backup(AcquiaResource):
    """A single database backup."""

    def delete(self):
        """Delete this backup; returns True on success."""
        response = self.request(method='DELETE')
        return response.ok

    def download(self, target_file):
        """
        Download a database backup file from the Acquia Cloud API.
        """
        response = self.request()
        backup = response.content
        # We do this as make_acapi_request() assumes response is a json string.
        # NOTE(review): `Connection` is neither defined nor imported in this
        # module, so this line will raise NameError when executed — confirm
        # the intended proxy-configuration import and restore it.
        http = httplib2.Http(
            proxy_info=Connection.proxy_info(),
        )
        resp, content = http.request(backup['link'].encode('ascii', 'ignore'), 'GET')
        file = open(target_file, 'wb')
        file.write(content)
        file.close()
        return True
class BackupList(AcquiaList):
    """Dict of Backup objects bound to a database's /backups endpoint."""

    def create(self):
        """Request a new backup and block until the task finishes.

        Raises Exception when the task never reports completion.
        """
        response = self.request(method='POST')
        task_data = response.content
        task = self.wait_for_task(task_data)
        if None == task['completed']:
            raise Exception('Unable to request backup')
        # The task result embeds the new backup's id as JSON.
        id = int(json.loads(task['result'])['backupid'])
        uri = ('%s/%d' % (self.uri, id))
        backup = Backup(uri, self.auth)
        self.__setitem__(id, backup)
        return backup
class Database(AcquiaResource):
    """A database belonging to a site environment."""

    def backup(self, id):
        """Return a handle on backup *id* without hitting the API."""
        uri = ('%s/backups/%d' % (self.uri, id))
        return Backup(uri, self.auth)

    def backups(self):
        """Fetch all backups of this database, keyed by backup id."""
        uri = ('%s/backups' % (self.uri))
        backups = BackupList(uri, self.auth)
        response = self.request(uri=uri)
        for backup in response.content:
            id = int(backup['id'])
            backup_uri = ('%s/%d' % (uri, id))
            # Bug fix: each Backup must be bound to its own URI; previously
            # the list endpoint `uri` was passed, so delete()/download() on a
            # listed backup hit the wrong endpoint.
            backups[id] = Backup(backup_uri, self.auth, data=backup)
        return backups

    def copy(self, target):
        """Copy this database to the *target* environment.

        Returns a Database bound to the copy's URI on success, False on
        failure.
        """
        # More regex hacks to work around the limitations of the ACAPI.
        p = re.compile(r'/envs/(.*)/dbs/(.*)')
        m = p.search(self.uri)
        current_env = m.group(1)
        db = m.group(2)
        move_uri = ('%s/%s' % (p.sub(r'/dbs/\g<2>/db-copy/\g<1>', self.uri), target))
        response = self.request(uri=move_uri, method='POST')
        if response.ok:
            # Another hack, this time to get the URI for the copied database.
            new_uri = self.uri.replace(('/%s/' % (current_env)), ('/%s/' % (target)))
            return Database(new_uri, self.auth)
        return False
class Domain(AcquiaResource):
    """A domain name attached to a site environment."""

    def cache_purge(self):
        """Purge the cache for this domain; returns True on success."""
        uri = ('%s/cache' % (self.uri))
        response = self.request(uri=uri, method='DELETE')
        return response.ok

    def delete(self):
        """Remove the domain from its environment; returns True on success."""
        response = self.request(method='DELETE')
        return response.ok

    def move(self, target):
        """Move this domain to the *target* environment.

        Returns a Domain bound to the new URI on success, False on failure.
        """
        # These regex hacks are needed because Acquia doesn't keep this function
        # with domains, which sucks.
        p = re.compile('/envs/(.*)/domains/(.*)')
        m = p.search(self.uri)
        current_env = m.group(1)
        domain = m.group(2)
        move_uri = ('%s/%s' % (p.sub('/domain-move/\g<1>', self.uri), target))
        data = {'domains': [domain]}
        response = self.request(uri=move_uri, method='POST', data=data)
        if response.ok:
            # Another hack, this time to get the URI for the domain.
            new_uri = self.uri.replace(('/%s/' % (current_env)), ('/%s/' % (target)))
            return Domain(new_uri, self.auth)
        return False
class DomainList(AcquiaList):
    """Dict of Domain objects bound to an environment's /domains endpoint."""

    def create(self, name):
        """Create domain *name*, blocking until the creation task completes.

        Raises Exception when the task never reports completion.
        """
        uri = ('%s/%s' % (self.uri, name))
        response = self.request(method='POST', uri=uri)
        task_data = response.content
        task = self.wait_for_task(task_data, uri)
        if None == task['completed']:
            raise Exception('Failed to create domain')
        domain = Domain(uri, self.auth)
        self.__setitem__(name, domain)
        return domain
class Environment(AcquiaResource):
    """A site environment (e.g. dev/test/prod) with its databases and
    domains."""

    def db(self, name):
        """Return a handle on database *name* without hitting the API."""
        uri = ('%s/dbs/%s' % (self.uri, name))
        return Database(uri, self.auth)

    def dbs(self):
        """Fetch all databases in this environment, keyed by name."""
        dbs = {}
        uri = ('%s/dbs' % (self.uri))
        response = self.request(uri=uri)
        for db in response.content:
            name = db['name'].encode('ascii', 'ignore')
            db_uri = ('%s/%s' % (uri, name))
            dbs[name] = Database(db_uri, self.auth, data=db)
        return dbs

    def deploy_code(self, path):
        """Deploy the code at VCS *path*; returns the Task or False."""
        uri = ('%s/code-deploy' % (self.uri))
        params = {'path': path}
        response = self.request(uri=uri, method='POST', params=params)
        if response.ok:
            # The task endpoint lives under the site, not the environment.
            index = self.uri.find('/envs/')
            base_uri = self.uri[:index]
            task_uri = ('%s/tasks/%d' % (base_uri, int(response.content['id'])))
            return Task(task_uri, self.auth)
        return False

    def domain(self, name):
        """Return a handle on domain *name* without hitting the API."""
        uri = ('%s/domains/%s' % (self.uri, name))
        return Domain(uri, self.auth)

    def domains(self):
        """Fetch all domains in this environment, keyed by name."""
        uri = ('%s/domains' % (self.uri))
        domains = DomainList(uri, self.auth)
        response = self.request(uri=uri)
        for domain in response.content:
            name = domain['name'].encode('ascii','ignore')
            domain_uri = ('%s/%s' % (uri, name))
            domains[name] = Domain(domain_uri, self.auth, data=domain)
        return domains
class Site(AcquiaResource):
    """A hosted site subscription."""

    def environment(self, name):
        """Return a handle on environment *name* without hitting the API."""
        uri = ('%s/envs/%s' % (self.uri, name))
        return Environment(uri, self.auth)

    def environments(self):
        """Fetch all environments of this site, keyed by name."""
        envs = {}
        uri = ('%s/envs' % (self.uri))
        response = self.request(uri=uri)
        for env in response.content:
            name = env['name'].encode('ascii', 'ignore')
            env_uri = ('%s/%s' % (uri, name))
            envs[name] = Environment(env_uri, self.auth, data=env)
        return envs

    def task(self, id):
        """Return a handle on task *id* without hitting the API."""
        uri = ('%s/tasks/%d' % (self.uri, id))
        return Task(uri, self.auth)

    def tasks(self):
        """Fetch all tasks for this site, keyed by task id."""
        tasks = {}
        uri = ('%s/tasks' % (self.uri))
        response = self.request(uri=uri)
        for task in response.content:
            id = int(task[u'id'])
            # Bug fix: the URI was previously built as the tuple
            # ('%s/%d', (uri, id)) instead of with the % operator, so every
            # Task was handed a tuple where a string URI was expected.
            task_uri = ('%s/%d' % (uri, id))
            tasks[id] = Task(task_uri, self.auth, data=task)
        return tasks
class Task(AcquiaResource):
    """An asynchronous Acquia Cloud task."""

    def pending(self):
        """Refetch the task and report whether it is still running."""
        # Ensure we don't have stale data
        self.data = None
        task = self.get()
        state = task['state'].encode('ascii', 'ignore')
        return state not in ['done', 'error']
class User(AcquiaResource):
    """The authenticated Acquia Cloud user."""

    def drushrc(self):
        """
        The json+PHP output of this isn't very useful in python.
        """
        uri = ('%s/drushrc' % (self.uri))
        response = make_acapi_request('GET', uri, auth=self.auth)
        return response.content
# Add server support
"""
I lifted from stackoverflow and hacked it
import os
import glob
files = glob.glob(os.path.dirname(__file__)+"/*.py")
modules = []
for f in files:
if os.path.isfile(f) and not f.endswith('__init__.py'):
modules[] = os.path.basename(f)[:-3]
__all__ = modules
del files
del modules
"""
import os
import re
import time
from ..imports import httplib2, json
from ..response import Response
from ..util import make_acapi_request
class AcquiaData(object):
    """Base class for objects backed by an Acquia Cloud API endpoint."""

    def __init__(self, uri, auth, data=None):
        # uri: endpoint for this object; auth: credentials passed through to
        # make_acapi_request(); data: optional pre-fetched payload.
        self.uri = uri
        self.auth = auth
        self.data = data

    def request(self, uri=None, method='GET', data=None, params=None):
        """Issue an API call; defaults to this object's own endpoint."""
        if None == uri:
            uri = self.uri
        return make_acapi_request(method, uri, auth=self.auth, data=data, params=params)

    def task_uri(self, task_data):
        """
        This is a horrible hack, but it is needed for now
        """
        # Rewrites .../sites/<site>/envs/... into .../sites/<site>/tasks/<id>.
        task_id = int(task_data['id'])
        p = re.compile('/sites/(.*)/envs.*')
        task_uri = ('%s/%d' % (p.sub('/sites/\g<1>/tasks', self.uri), task_id))
        return task_uri

    def wait_for_task(self, task_data, uri=None):
        """
        FIXME: Move this some where elese
        """
        # NOTE(review): `uri` is accepted but never used below — confirm intent.
        if None == uri:
            uri = self.uri
        task_uri = self.task_uri(task_data)
        task = Task(task_uri, self.auth)
        while task.pending():
            # This seems like a reasonable trade off between being polite and
            # hammering the API.
            time.sleep(3)
        return task.get()
class AcquiaResource(AcquiaData):
    """A single API resource; fetches and caches its own data lazily."""

    def get(self):
        """Return the resource payload, hitting the API on first access."""
        if None == self.data:
            response = self.request()
            self.data = response.content
        return self.data
class AcquiaList(AcquiaData, dict):
    """A dict of API resources that is itself bound to a list endpoint."""

    def __init__(self, uri, auth, *args, **kwargs):
        # Initialise both bases explicitly: the endpoint state and the dict.
        AcquiaData.__init__(self, uri, auth)
        dict.__init__(self, *args, **kwargs)
class Backup(AcquiaResource):
    """A single database backup."""

    def delete(self):
        """Delete this backup; returns True on success."""
        response = self.request(method='DELETE')
        return response.ok

    def download(self, target_file):
        """
        Download a database backup file from the Acquia Cloud API.
        """
        response = self.request()
        backup = response.content
        # We do this as make_acapi_request() assumes response is a json string.
        # NOTE(review): `Connection` is neither defined nor imported in this
        # module, so this line will raise NameError when executed — confirm
        # the intended proxy-configuration import and restore it.
        http = httplib2.Http(
            proxy_info=Connection.proxy_info(),
        )
        resp, content = http.request(backup['link'].encode('ascii', 'ignore'), 'GET')
        file = open(target_file, 'wb')
        file.write(content)
        file.close()
        return True
class BackupList(AcquiaList):
    """Dict of Backup objects bound to a database's /backups endpoint."""

    def create(self):
        """Request a new backup and block until the task finishes.

        Raises Exception when the task never reports completion.
        """
        response = self.request(method='POST')
        task_data = response.content
        task = self.wait_for_task(task_data)
        if None == task['completed']:
            raise Exception('Unable to request backup')
        # The task result embeds the new backup's id as JSON.
        id = int(json.loads(task['result'])['backupid'])
        uri = ('%s/%d' % (self.uri, id))
        backup = Backup(uri, self.auth)
        self.__setitem__(id, backup)
        return backup
class Database(AcquiaResource):
    """A database belonging to a site environment."""

    def backup(self, id):
        """Return a handle on backup *id* without hitting the API."""
        uri = ('%s/backups/%d' % (self.uri, id))
        return Backup(uri, self.auth)

    def backups(self):
        """Fetch all backups of this database, keyed by backup id."""
        uri = ('%s/backups' % (self.uri))
        backups = BackupList(uri, self.auth)
        response = self.request(uri=uri)
        for backup in response.content:
            id = int(backup['id'])
            backup_uri = ('%s/%d' % (uri, id))
            # Bug fix: each Backup must be bound to its own URI; previously
            # the list endpoint `uri` was passed, so delete()/download() on a
            # listed backup hit the wrong endpoint.
            backups[id] = Backup(backup_uri, self.auth, data=backup)
        return backups

    def copy(self, target):
        """Copy this database to the *target* environment.

        Returns a Database bound to the copy's URI on success, False on
        failure.
        """
        # More regex hacks to work around the limitations of the ACAPI.
        p = re.compile(r'/envs/(.*)/dbs/(.*)')
        m = p.search(self.uri)
        current_env = m.group(1)
        db = m.group(2)
        move_uri = ('%s/%s' % (p.sub(r'/dbs/\g<2>/db-copy/\g<1>', self.uri), target))
        response = self.request(uri=move_uri, method='POST')
        if response.ok:
            # Another hack, this time to get the URI for the copied database.
            new_uri = self.uri.replace(('/%s/' % (current_env)), ('/%s/' % (target)))
            return Database(new_uri, self.auth)
        return False
class Domain(AcquiaResource):
    """A domain name attached to a site environment."""

    def cache_purge(self):
        """Purge the cache for this domain; returns True on success."""
        uri = ('%s/cache' % (self.uri))
        response = self.request(uri=uri, method='DELETE')
        return response.ok

    def delete(self):
        """Remove the domain from its environment; returns True on success."""
        response = self.request(method='DELETE')
        return response.ok

    def move(self, target):
        """Move this domain to the *target* environment.

        Returns a Domain bound to the new URI on success, False on failure.
        """
        # These regex hacks are needed because Acquia doesn't keep this function
        # with domains, which sucks.
        p = re.compile('/envs/(.*)/domains/(.*)')
        m = p.search(self.uri)
        current_env = m.group(1)
        domain = m.group(2)
        move_uri = ('%s/%s' % (p.sub('/domain-move/\g<1>', self.uri), target))
        data = {'domains': [domain]}
        response = self.request(uri=move_uri, method='POST', data=data)
        if response.ok:
            # Another hack, this time to get the URI for the domain.
            new_uri = self.uri.replace(('/%s/' % (current_env)), ('/%s/' % (target)))
            return Domain(new_uri, self.auth)
        return False
class DomainList(AcquiaList):
    """Dict of Domain objects bound to an environment's /domains endpoint."""

    def create(self, name):
        """Create domain *name*, blocking until the creation task completes.

        Raises Exception when the task never reports completion.
        """
        uri = ('%s/%s' % (self.uri, name))
        response = self.request(method='POST', uri=uri)
        task_data = response.content
        task = self.wait_for_task(task_data, uri)
        if None == task['completed']:
            raise Exception('Failed to create domain')
        domain = Domain(uri, self.auth)
        self.__setitem__(name, domain)
        return domain
class Environment(AcquiaResource):
    """A site environment (e.g. dev/test/prod) with its databases, domains
    and servers."""

    def db(self, name):
        """Return a handle on database *name* without hitting the API."""
        uri = ('%s/dbs/%s' % (self.uri, name))
        return Database(uri, self.auth)

    def dbs(self):
        """Fetch all databases in this environment, keyed by name."""
        dbs = {}
        uri = ('%s/dbs' % (self.uri))
        response = self.request(uri=uri)
        for db in response.content:
            name = db['name'].encode('ascii', 'ignore')
            db_uri = ('%s/%s' % (uri, name))
            dbs[name] = Database(db_uri, self.auth, data=db)
        return dbs

    def deploy_code(self, path):
        """Deploy the code at VCS *path*; returns the Task or False."""
        uri = ('%s/code-deploy' % (self.uri))
        params = {'path': path}
        response = self.request(uri=uri, method='POST', params=params)
        if response.ok:
            # The task endpoint lives under the site, not the environment.
            index = self.uri.find('/envs/')
            base_uri = self.uri[:index]
            task_uri = ('%s/tasks/%d' % (base_uri, int(response.content['id'])))
            return Task(task_uri, self.auth)
        return False

    def domain(self, name):
        """Return a handle on domain *name* without hitting the API."""
        uri = ('%s/domains/%s' % (self.uri, name))
        return Domain(uri, self.auth)

    def domains(self):
        """Fetch all domains in this environment, keyed by name."""
        uri = ('%s/domains' % (self.uri))
        domains = DomainList(uri, self.auth)
        response = self.request(uri=uri)
        for domain in response.content:
            name = domain['name'].encode('ascii', 'ignore')
            domain_uri = ('%s/%s' % (uri, name))
            domains[name] = Domain(domain_uri, self.auth, data=domain)
        return domains

    def server(self, name):
        """Return a handle on server *name* without hitting the API."""
        # Bug fix: the URI was previously built as the tuple
        # ('%s/servers/%s', (...)) instead of with the % operator, so Server
        # received a tuple where a string URI was expected.
        uri = ('%s/servers/%s' % (self.uri, name))
        return Server(uri, self.auth)

    def servers(self):
        """Fetch all servers in this environment, keyed by name."""
        uri = ('%s/servers' % (self.uri))
        servers = ServerList(uri, self.auth)
        response = self.request(uri=uri)
        for server in response.content:
            name = server['name'].encode('ascii', 'ignore')
            server_uri = ('%s/%s' % (uri, name))
            servers[name] = Server(server_uri, self.auth, data=server)
        return servers
class Server(AcquiaResource):
    """A server in an environment (plain read-only resource)."""
    pass
class ServerList(AcquiaList):
    """Dict of Server objects bound to an environment's /servers endpoint."""
    pass
class Site(AcquiaResource):
    """A hosted site subscription."""

    def environment(self, name):
        """Return a handle on environment *name* without hitting the API."""
        uri = ('%s/envs/%s' % (self.uri, name))
        return Environment(uri, self.auth)

    def environments(self):
        """Fetch all environments of this site, keyed by name."""
        envs = {}
        uri = ('%s/envs' % (self.uri))
        response = self.request(uri=uri)
        for env in response.content:
            name = env['name'].encode('ascii', 'ignore')
            env_uri = ('%s/%s' % (uri, name))
            envs[name] = Environment(env_uri, self.auth, data=env)
        return envs

    def task(self, id):
        """Return a handle on task *id* without hitting the API."""
        uri = ('%s/tasks/%d' % (self.uri, id))
        return Task(uri, self.auth)

    def tasks(self):
        """Fetch all tasks for this site, keyed by task id."""
        tasks = {}
        uri = ('%s/tasks' % (self.uri))
        response = self.request(uri=uri)
        for task in response.content:
            id = int(task[u'id'])
            # Bug fix: the URI was previously built as the tuple
            # ('%s/%d', (uri, id)) instead of with the % operator, so every
            # Task was handed a tuple where a string URI was expected.
            task_uri = ('%s/%d' % (uri, id))
            tasks[id] = Task(task_uri, self.auth, data=task)
        return tasks
class Task(AcquiaResource):
    """An asynchronous Acquia Cloud task."""

    def pending(self):
        """Refetch the task and report whether it is still running."""
        # Ensure we don't have stale data
        self.data = None
        task = self.get()
        state = task['state'].encode('ascii', 'ignore')
        return state not in ['done', 'error']
class User(AcquiaResource):
    """The authenticated Acquia Cloud user."""

    def drushrc(self):
        """
        The json+PHP output of this isn't very useful in python.
        """
        uri = ('%s/drushrc' % (self.uri))
        response = make_acapi_request('GET', uri, auth=self.auth)
        return response.content
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import errno
import logging
import os
import Queue
import re
import stat
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
class Error(Exception):
    """Base exception for gclient errors."""
def SplitUrlRevision(url):
    """Splits url and returns a two-tuple: url, rev"""
    if url.startswith('ssh:'):
        # Make sure ssh://user-name@example.com/~/test.git@stable works
        ssh_pattern = r'(ssh://(?:[-\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
        return re.search(ssh_pattern, url).groups()
    parts = url.split('@', 1)
    if len(parts) == 1:
        return (parts[0], None)
    return tuple(parts)
def IsDateRevision(revision):
    """Returns true if the given revision is of the form "{ ... }"."""
    if not revision:
        return False
    return bool(re.match(r'^\{.+\}$', str(revision)))
def MakeDateRevision(date):
    """Returns a revision representing the latest revision before the given
    date."""
    return '{%s}' % date
def SyntaxErrorToError(filename, e):
    """Raises a gclient_utils.Error exception with the human readable message"""
    try:
        # Try to construct a human readable error message
        if filename:
            error_message = 'There is a syntax error in %s\n' % filename
        else:
            error_message = 'There is a syntax error\n'
        error_message += 'Line #%s, character %s: "%s"' % (
            e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
    except:
        # Something went wrong, re-raise the original exception
        # (deliberate bare except: any failure while formatting must not
        # mask the original SyntaxError).
        raise e
    else:
        raise Error(error_message)
class PrintableObject(object):
    """Debug helper: str() dumps every public attribute, one per line."""

    def __str__(self):
        lines = []
        for attr in dir(self):
            # Skip dunder machinery; only show meaningful attributes.
            if attr.startswith('__'):
                continue
            lines.append('%s = %s\n' % (attr, str(getattr(self, attr, ''))))
        return ''.join(lines)
def FileRead(filename, mode='rU'):
    """Read *filename* and return its content decoded as UTF-8.

    NOTE(review): written for Python 2, where f.read() yields a byte
    string that supports .decode(); under Python 3 text mode this would
    raise AttributeError — confirm before porting.
    """
    with open(filename, mode=mode) as f:
        # codecs.open() has different behavior than open() on python 2.6 so use
        # open() and decode manually.
        return f.read().decode('utf-8')
def FileWrite(filename, content, mode='w'):
    """Write *content* to *filename*, encoded as UTF-8."""
    with codecs.open(filename, mode=mode, encoding='utf-8') as out:
        out.write(content)
def rmtree(path):
    """shutil.rmtree() on steroids.

    Recursively removes a directory, even if it's marked read-only.

    shutil.rmtree() doesn't work on Windows if any of the files or directories
    are read-only, which svn repositories and some .svn files are. We need to
    be able to force the files to be writable (i.e., deletable) as we traverse
    the tree.

    Even with all this, Windows still sometimes fails to delete a file, citing
    a permission error (maybe something to do with antivirus scans or disk
    indexing). The best suggestion any of the user forums had was to wait a
    bit and try again, so we do that too. It's hand-waving, but sometimes it
    works. :/

    On POSIX systems, things are a little bit simpler. The modes of the files
    to be deleted doesn't matter, only the modes of the directories containing
    them are significant. As the directory tree is traversed, each directory
    has its mode set appropriately before descending into it. This should
    result in the entire tree being removed, with the possible exception of
    *path itself, because nothing attempts to change the mode of its parent.
    Doing so would be hazardous, as it's not a directory slated for removal.
    In the ordinary case, this is not a problem: for our purposes, the user
    will never lack write permission on *path's parent.
    """
    if not os.path.exists(path):
        return

    if os.path.islink(path) or not os.path.isdir(path):
        raise Error('Called rmtree(%s) in non-directory' % path)

    if sys.platform == 'win32':
        # Some people don't have the APIs installed. In that case we'll do without.
        win32api = None
        win32con = None
        try:
            # Unable to import 'XX'
            # pylint: disable=F0401
            import win32api, win32con
        except ImportError:
            pass
    else:
        # On POSIX systems, we need the x-bit set on the directory to access it,
        # the r-bit to see its contents, and the w-bit to remove files from it.
        # The actual modes of the files within the directory is irrelevant.
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    def remove(func, subpath):
        # Make the entry deletable on Windows before applying `func`, and
        # retry once after 100ms on EACCES (antivirus/indexing races).
        if sys.platform == 'win32':
            os.chmod(subpath, stat.S_IWRITE)
            if win32api and win32con:
                win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
        try:
            func(subpath)
        except OSError, e:
            if e.errno != errno.EACCES or sys.platform != 'win32':
                raise
            # Failed to delete, try again after a 100ms sleep.
            time.sleep(0.1)
            func(subpath)

    for fn in os.listdir(path):
        # If fullpath is a symbolic link that points to a directory, isdir will
        # be True, but we don't want to descend into that as a directory, we just
        # want to remove the link. Check islink and treat links as ordinary files
        # would be treated regardless of what they reference.
        fullpath = os.path.join(path, fn)
        if os.path.islink(fullpath) or not os.path.isdir(fullpath):
            remove(os.remove, fullpath)
        else:
            # Recurse.
            rmtree(fullpath)

    remove(os.rmdir, path)


# TODO(maruel): Rename the references.
RemoveDirectory = rmtree
def safe_makedirs(tree):
    """Creates the directory in a safe manner.

    Because multiple threads can create these directories concurently, trap the
    exception and pass on.
    """
    count = 0
    while not os.path.exists(tree):
        count += 1
        try:
            os.makedirs(tree)
        except OSError, e:
            # 17 POSIX, 183 Windows — "already exists", expected when another
            # thread wins the race.
            if e.errno not in (17, 183):
                raise
            if count > 40:
                # Give up.
                raise
def CheckCallAndFilterAndHeader(args, always=False, **kwargs):
    """Adds 'header' support to CheckCallAndFilter.

    If |always| is True, a message indicating what is being done
    is printed to stdout all the time even if not output is generated. Otherwise
    the message header is printed only if the call generated any ouput.
    """
    stdout = kwargs.get('stdout', None) or sys.stdout
    if always:
        stdout.write('\n________ running \'%s\' in \'%s\'\n'
                     % (' '.join(args), kwargs.get('cwd', '.')))
    else:
        filter_fn = kwargs.get('filter_fn', None)

        def filter_msg(line):
            # CheckCallAndFilter calls the filter with None for the first line
            # when call_filter_on_first_line is set; that is the cue to emit
            # the header lazily (only if output was produced).
            if line is None:
                stdout.write('\n________ running \'%s\' in \'%s\'\n'
                             % (' '.join(args), kwargs.get('cwd', '.')))
            elif filter_fn:
                filter_fn(line)
        kwargs['filter_fn'] = filter_msg
        kwargs['call_filter_on_first_line'] = True
    # Obviously.
    kwargs['print_stdout'] = True
    return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
    """Transparent proxy: forwards every attribute access to the wrapped
    object unless the wrapper itself defines it."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # Only invoked for attributes not found on the wrapper itself.
        return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
    """Creates a file object clone to automatically flush after N seconds."""

    def __init__(self, wrapped, delay):
        super(AutoFlush, self).__init__(wrapped)
        if not hasattr(self, 'lock'):
            self.lock = threading.Lock()
        # Name-mangled: stays private to AutoFlush even when stacked with
        # other Wrapper subclasses.
        self.__last_flushed_at = time.time()
        self.delay = delay

    @property
    def autoflush(self):
        # Marker used by MakeFileAutoFlush() to detect an existing wrapper.
        return self

    def write(self, out, *args, **kwargs):
        self._wrapped.write(out, *args, **kwargs)
        should_flush = False
        self.lock.acquire()
        try:
            # A zero/None delay disables auto-flushing entirely.
            if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
                should_flush = True
                self.__last_flushed_at = time.time()
        finally:
            self.lock.release()
        if should_flush:
            self.flush()
class Annotated(Wrapper):
    """Creates a file object clone to automatically prepends every line in worker
    threads with a NN> prefix.
    """

    def __init__(self, wrapped, include_zero=False):
        super(Annotated, self).__init__(wrapped)
        if not hasattr(self, 'lock'):
            self.lock = threading.Lock()
        # Per-thread-index partial-line buffers; each value is a 1-element
        # list so the string can be mutated without holding the lock.
        self.__output_buffers = {}
        self.__include_zero = include_zero

    @property
    def annotated(self):
        # Marker used by MakeFileAnnotated() to detect an existing wrapper.
        return self

    def write(self, out):
        index = getattr(threading.currentThread(), 'index', 0)
        if not index and not self.__include_zero:
            # Unindexed threads aren't buffered.
            return self._wrapped.write(out)
        self.lock.acquire()
        try:
            # Use a dummy array to hold the string so the code can be lockless.
            # Strings are immutable, requiring to keep a lock for the whole dictionary
            # otherwise. Using an array is faster than using a dummy object.
            if not index in self.__output_buffers:
                obj = self.__output_buffers[index] = ['']
            else:
                obj = self.__output_buffers[index]
        finally:
            self.lock.release()

        # Continue lockless.
        obj[0] += out
        while '\n' in obj[0]:
            line, remaining = obj[0].split('\n', 1)
            if line:
                self._wrapped.write('%d>%s\n' % (index, line))
            obj[0] = remaining

    def flush(self):
        """Flush buffered output."""
        orphans = []
        self.lock.acquire()
        try:
            # Detect threads no longer existing.
            indexes = (getattr(t, 'index', None) for t in threading.enumerate())
            indexes = filter(None, indexes)
            for index in self.__output_buffers:
                if not index in indexes:
                    orphans.append((index, self.__output_buffers[index][0]))
            for orphan in orphans:
                del self.__output_buffers[orphan[0]]
        finally:
            self.lock.release()

        # Don't keep the lock while writting. Will append \n when it shouldn't.
        for orphan in orphans:
            if orphan[1]:
                self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
        return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
    """Wrap *fileobj* in an AutoFlush proxy, or retune an existing one."""
    existing = getattr(fileobj, 'autoflush', None)
    if existing:
        # Already wrapped; just update the flush interval.
        existing.delay = delay
        return fileobj
    return AutoFlush(fileobj, delay)
def MakeFileAnnotated(fileobj, include_zero=False):
    """Wrap *fileobj* in an Annotated proxy unless it already is one.

    Bug fix: `include_zero` used to be silently dropped instead of being
    forwarded to Annotated, so thread 0's output could never be buffered.
    """
    if getattr(fileobj, 'annotated', None):
        return fileobj
    return Annotated(fileobj, include_zero)
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.

  stderr is always redirected to stdout.

  Returns 0 on success; raises subprocess2.CalledProcessError when the child
  exits with a non-zero status.
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  filter_fn = filter_fn or (lambda x: None)
  # bufsize=0 disables pipe buffering so each byte is visible as soon as the
  # child emits it.
  kid = subprocess2.Popen(
      args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
      **kwargs)
  # Do a flush of stdout before we begin reading from the subprocess2's stdout
  stdout.flush()
  # Also, we need to forward stdout to prevent weird re-ordering of output.
  # This has to be done on a per byte basis to make sure it is not buffered:
  # normally buffering is done for each line, but if svn requests input, no
  # end-of-line character is output after the prompt and it would not show up.
  try:
    in_byte = kid.stdout.read(1)
    if in_byte:
      if call_filter_on_first_line:
        # Sentinel call made before the first real line: lets the caller emit
        # a header lazily (see CheckCallAndFilterAndHeader).
        filter_fn(None)
      in_line = ''
      while in_byte:
        if in_byte != '\r':
          if print_stdout:
            stdout.write(in_byte)
          if in_byte != '\n':
            in_line += in_byte
          else:
            filter_fn(in_line)
            in_line = ''
        else:
          # A carriage return also terminates the pending line for the filter,
          # but the \r itself is never forwarded to stdout.
          filter_fn(in_line)
          in_line = ''
        in_byte = kid.stdout.read(1)
      # Flush the rest of buffered output. This is only an issue with
      # stdout/stderr not ending with a \n.
      if len(in_line):
        filter_fn(in_line)
    rv = kid.wait()
  except KeyboardInterrupt:
    print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
    raise
  if rv:
    raise subprocess2.CalledProcessError(
        rv, args, kwargs.get('cwd', None), None, None)
  return 0
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root.

  Walks up from |from_dir| until an ancestor directory contains |filename|.
  Returns that directory, or None when no ancestor holds the file or when
  |from_dir| is not listed in the configuration's entries.
  """
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      # Reached the filesystem root without finding the file.
      return None
    path = split_path[0]
  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order to not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
                            "file you want to use" % (filename, path))
      return path
    scope = {}
    try:
      # The entries file is a Python fragment defining an 'entries' dict.
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    # Strip |path| plus the separator to get the relative directory to check.
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None
  # Only reached when |from_dir| itself contains the file.
  logging.info('Found gclient root at ' + path)
  return path
def PathDifference(root, subpath):
  """Returns |subpath| relative to |root|, or None if outside |root|."""
  real_root = os.path.realpath(root)
  real_sub = os.path.realpath(subpath)
  if not real_sub.startswith(real_root):
    return None
  # Joining with '' guarantees a trailing separator, so the slice starts right
  # after it regardless of whether |root| already had one.
  prefix = os.path.join(real_root, '')
  return real_sub[len(prefix):]
def FindFileUpwards(filename, path=None):
  """Walks up from |path| (default: cwd) looking for |filename|.

  Returns the nearest ancestor directory containing the file, or None when
  the filesystem root is reached without a hit.
  """
  current = os.path.realpath(path or os.getcwd())
  while True:
    if os.path.exists(os.path.join(current, filename)):
      return current
    parent = os.path.dirname(current)
    if parent == current:
      # Reached the filesystem root.
      return None
    current = parent
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries.

  Returns None (not a tuple) when no .gclient_entries file is found in |path|
  or any ancestor, so callers must check before unpacking.
  """
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  env = {}
  # The entries file is a Python fragment; evaluating it fills env['entries'].
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']
def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  def inner(self, *args, **kwargs):
    try:
      self.lock.acquire()
    except KeyboardInterrupt:
      # Emit the hint before re-raising so the user learns about the deadlock.
      sys.stderr.write('Was deadlocked\n')
      raise
    # Bug fix: the release used to sit in a finally covering acquire() too, so
    # an interrupt during acquire() released a lock that was never taken,
    # raising a spurious error that masked the KeyboardInterrupt.
    try:
      return method(self, *args, **kwargs)
    finally:
      # Only reached once the lock is held, so release() cannot underflow.
      self.lock.release()
  return inner
class WorkItem(object):
  """A single unit of work processed by ExecutionQueue."""
  # On cygwin, creating a lock throws randomly when nearing ~100 locks, so as
  # a workaround a single class-level lock is shared by every instance.
  lock = threading.Lock()

  def __init__(self, name):
    # Unique string identifying this item; exposed read-only via |name|.
    self._name = name

  @property
  def name(self):
    return self._name

  def run(self, work_queue):
    """Subclasses override this; work_queue must stay the last parameter."""
    pass
class ExecutionQueue(object):
  """Runs a set of WorkItem that have interdependencies and were WorkItem are
  added as they are processed.

  In gclient's case, Dependencies sometime needs to be run out of order due to
  From() keyword. This class manages that all the required dependencies are run
  before running each one.

  Methods of this class are thread safe.
  """

  def __init__(self, jobs, progress):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status
    self.progress = progress
    if self.progress:
      self.progress.update(0)

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        # Reaches into the Progress internals to bump the expected total.
        self.progress._total = total + 1
        self.progress.update(0)
      # Wake up flush() so it reconsiders the queue.
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed.

    Blocks until every queued item (including items enqueued while running)
    completes; the first worker exception is re-raised at the end.
    """
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break
          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            for r in self.queued[i].requirements:
              if not r in self.ran:
                # Requirement not met.
                break
            else:
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out the outher loop.
            break
        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
              self.jobs,
              len(self.queued),
              ', '.join(self.ran),
              len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        # Still working; keep tracking it.
        self.running.append(t)
      else:
        t.join()
        sys.stdout.flush()
        if self.progress:
          self.progress.update(1, t.item.name)
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    # Starts |task_item| on a worker thread, or runs it inline when jobs <= 1.
    if self.jobs > 1:
      # Start the thread.
      # The 1-based index tags the worker's output (see Annotated.write()).
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""

    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      # Read by Annotated.write() to prefix this thread's output.
      self.index = index
      self.args = args
      self.kwargs = kwargs

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.run(*self.args, **self.kwargs)
      except Exception:
        # Catch exception location.
        # The exception info is forwarded to the main thread, which re-raises
        # it at the end of ExecutionQueue.flush().
        logging.info('Caught exception in thread %s' % self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
      logging.info('_Worker.run(%s) done' % self.item.name)
      # Wake up the main thread so it notices this worker finished.
      work_queue.ready_cond.acquire()
      try:
        work_queue.ready_cond.notifyAll()
      finally:
        work_queue.ready_cond.release()
def GetEditor(git):
  """Returns the most plausible editor to use.

  Checks the tool-specific environment override first (GIT_EDITOR or
  SVN_EDITOR), then the generic EDITOR, then falls back to a platform default.
  """
  primary = 'GIT_EDITOR' if git else 'SVN_EDITOR'
  editor = os.environ.get(primary) or os.environ.get('EDITOR')
  if editor:
    return editor
  return 'notepad' if sys.platform.startswith('win') else 'vim'
def RunEditor(content, git):
  """Opens up the default editor in the system to get the CL description.

  Returns the edited text, or None when the editor exited with an error.
  The temporary file is always deleted.
  """
  file_handle, filename = tempfile.mkstemp(text=True)
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()
  try:
    cmd = '%s %s' % (GetEditor(git), filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires the usage of 'env' to be present.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    os.remove(filename)
def UpgradeToHttps(url):
  """Upgrades random urls to https://.

  Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number,
  Fixes invalid GAE url.
  """
  if not url:
    return url
  if not re.match(r'[a-z\-]+\://.*', url):
    # Without a scheme, urlparse() would treat the value as a relative url
    # (http:///foo). Default to http:// so naked urls like "localhost:8080"
    # keep working.
    url = 'http://%s' % url
  parts = list(urlparse.urlparse(url))
  has_port = re.match(r'^.+?\:\d+$', parts[1])
  if parts[0] == 'http' and not has_port:
    # Only portless http urls get upgraded automatically.
    parts[0] = 'https'
  if parts[1] == 'codereview.chromium.org':
    # Until GAE supports SNI, manually convert the url.
    parts[1] = 'chromiumcodereview.appspot.com'
  return urlparse.urlunparse(parts)
def ParseCodereviewSettingsContent(content):
  """Process a codereview.settings file properly.

  Returns a dict of "KEY: value" pairs, with the url-valued keys upgraded to
  https where applicable. Raises Error on malformed content.
  """
  lines = (l for l in content.splitlines() if not l.strip().startswith("#"))
  try:
    keyvals = dict([x.strip() for x in l.split(':', 1)] for l in lines if l)
  except ValueError:
    raise Error(
        'Failed to process settings, please fix. Content:\n\n%s' % content)
  # Normalize the url-valued settings.
  for key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
    if keyvals.get(key):
      keyvals[key] = UpgradeToHttps(keyvals[key])
  return keyvals
Handle non-UTF-8 encoded files in presubmit checks.
BUG=
TEST=
Review URL: https://chromiumcodereview.appspot.com/10696202
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@146391 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import errno
import logging
import os
import Queue
import re
import stat
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
class Error(Exception):
  """Base exception for all gclient errors."""
def SplitUrlRevision(url):
  """Splits url and returns a two-tuple: url, rev"""
  if not url.startswith('ssh:'):
    parts = url.split('@', 1)
    if len(parts) == 1:
      parts.append(None)
    return tuple(parts)
  # ssh urls may embed a user name (user@host), so only an @ after the path
  # separates the revision: ssh://user-name@example.com/~/test.git@stable
  regex = r'(ssh://(?:[-\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
  return tuple(re.search(regex, url).groups())
def IsDateRevision(revision):
  """Returns true if the given revision is of the form "{ ... }"."""
  if not revision:
    return False
  return bool(re.match(r'^\{.+\}$', str(revision)))
def MakeDateRevision(date):
  """Returns a revision representing the latest revision before the given
  date."""
  return ''.join(('{', date, '}'))
def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with the human readable message.

  |e| is the original SyntaxError; |filename| (may be falsy) names the file
  being parsed. Never returns: raises Error on success, or re-raises |e|
  when the message could not be built.
  """
  try:
    # Try to construct a human readable error message
    if filename:
      error_message = 'There is a syntax error in %s\n' % filename
    else:
      error_message = 'There is a syntax error\n'
    error_message += 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except:
    # Something went wrong, re-raise the original exception
    # (e.g. re.sub() fails when e.text is None).
    raise e
  else:
    raise Error(error_message)
class PrintableObject(object):
  """Debug helper rendering every non-dunder attribute as "name = value"."""

  def __str__(self):
    lines = []
    for attr in dir(self):
      if attr.startswith('__'):
        # Skip dunder noise.
        continue
      lines.append('%s = %s\n' % (attr, str(getattr(self, attr, ''))))
    return ''.join(lines)
def FileRead(filename, mode='rU'):
  """Reads |filename| and returns the content decoded as utf-8 if possible.

  Falls back to returning the raw (undecoded) string when the content is not
  valid utf-8.
  """
  with open(filename, mode=mode) as f:
    # codecs.open() has different behavior than open() on python 2.6 so use
    # open() and decode manually.
    s = f.read()
    try:
      return s.decode('utf-8')
    except UnicodeDecodeError:
      return s
def FileWrite(filename, content, mode='w'):
  """Writes |content| to |filename|, encoding it as utf-8."""
  f = codecs.open(filename, mode=mode, encoding='utf-8')
  try:
    f.write(content)
  finally:
    f.close()
def rmtree(path):
  """shutil.rmtree() on steroids.

  Recursively removes a directory, even if it's marked read-only.

  shutil.rmtree() doesn't work on Windows if any of the files or directories
  are read-only, which svn repositories and some .svn files are. We need to
  be able to force the files to be writable (i.e., deletable) as we traverse
  the tree.

  Even with all this, Windows still sometimes fails to delete a file, citing
  a permission error (maybe something to do with antivirus scans or disk
  indexing). The best suggestion any of the user forums had was to wait a
  bit and try again, so we do that too. It's hand-waving, but sometimes it
  works. :/

  On POSIX systems, things are a little bit simpler. The modes of the files
  to be deleted doesn't matter, only the modes of the directories containing
  them are significant. As the directory tree is traversed, each directory
  has its mode set appropriately before descending into it. This should
  result in the entire tree being removed, with the possible exception of
  *path itself, because nothing attempts to change the mode of its parent.
  Doing so would be hazardous, as it's not a directory slated for removal.
  In the ordinary case, this is not a problem: for our purposes, the user
  will never lack write permission on *path's parent.
  """
  if not os.path.exists(path):
    # Nothing to do.
    return

  if os.path.islink(path) or not os.path.isdir(path):
    raise Error('Called rmtree(%s) in non-directory' % path)

  if sys.platform == 'win32':
    # Some people don't have the APIs installed. In that case we'll do without.
    win32api = None
    win32con = None
    try:
      # Unable to import 'XX'
      # pylint: disable=F0401
      import win32api, win32con
    except ImportError:
      pass
  else:
    # On POSIX systems, we need the x-bit set on the directory to access it,
    # the r-bit to see its contents, and the w-bit to remove files from it.
    # The actual modes of the files within the directory is irrelevant.
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

  def remove(func, subpath):
    # Clears read-only bits/attributes, then calls |func| (os.remove or
    # os.rmdir), retrying once on Windows EACCES after a short sleep.
    if sys.platform == 'win32':
      os.chmod(subpath, stat.S_IWRITE)
      if win32api and win32con:
        win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
    try:
      func(subpath)
    except OSError, e:
      if e.errno != errno.EACCES or sys.platform != 'win32':
        raise
      # Failed to delete, try again after a 100ms sleep.
      time.sleep(0.1)
      func(subpath)

  for fn in os.listdir(path):
    # If fullpath is a symbolic link that points to a directory, isdir will
    # be True, but we don't want to descend into that as a directory, we just
    # want to remove the link. Check islink and treat links as ordinary files
    # would be treated regardless of what they reference.
    fullpath = os.path.join(path, fn)
    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
      remove(os.remove, fullpath)
    else:
      # Recurse.
      rmtree(fullpath)

  remove(os.rmdir, path)


# TODO(maruel): Rename the references.
RemoveDirectory = rmtree
def safe_makedirs(tree):
  """Creates the directory in a safe manner.

  Because multiple threads can create these directories concurently, trap the
  exception and pass on.
  """
  count = 0
  while not os.path.exists(tree):
    count += 1
    try:
      os.makedirs(tree)
    except OSError, e:
      # 17 POSIX (EEXIST), 183 Windows (ERROR_ALREADY_EXISTS): another thread
      # created it first; the while condition re-checks and exits.
      if e.errno not in (17, 183):
        raise
      if count > 40:
        # Give up.
        raise
def CheckCallAndFilterAndHeader(args, always=False, **kwargs):
  """Adds 'header' support to CheckCallAndFilter.

  If |always| is True, a message indicating what is being done
  is printed to stdout all the time even if not output is generated. Otherwise
  the message header is printed only if the call generated any ouput.
  """
  stdout = kwargs.get('stdout', None) or sys.stdout
  if always:
    stdout.write('\n________ running \'%s\' in \'%s\'\n'
        % (' '.join(args), kwargs.get('cwd', '.')))
  else:
    filter_fn = kwargs.get('filter_fn', None)
    def filter_msg(line):
      # CheckCallAndFilter calls the filter with None before the first output
      # line (call_filter_on_first_line); that is when the header is emitted.
      if line is None:
        stdout.write('\n________ running \'%s\' in \'%s\'\n'
            % (' '.join(args), kwargs.get('cwd', '.')))
      elif filter_fn:
        filter_fn(line)
    kwargs['filter_fn'] = filter_msg
    kwargs['call_filter_on_first_line'] = True
  # Obviously.
  kwargs['print_stdout'] = True
  return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
  """Wraps an object, acting as a transparent proxy for all properties by
  default.
  """

  def __init__(self, wrapped):
    self._wrapped = wrapped

  def __getattr__(self, attr_name):
    # Only invoked for attributes not found on the wrapper itself, so
    # subclasses can shadow selected members while delegating the rest.
    return getattr(self._wrapped, attr_name)
class AutoFlush(Wrapper):
  """Creates a file object clone to automatically flush after N seconds."""

  def __init__(self, wrapped, delay):
    super(AutoFlush, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      # A lock may already be reachable through the wrapped object (via
      # Wrapper.__getattr__); only create one when it is not.
      self.lock = threading.Lock()
    self.__last_flushed_at = time.time()
    self.delay = delay

  @property
  def autoflush(self):
    # Marker used by MakeFileAutoFlush() to avoid double-wrapping a stream.
    return self

  def write(self, out, *args, **kwargs):
    """Forwards the write, then flushes if at least |delay| seconds passed."""
    self._wrapped.write(out, *args, **kwargs)
    should_flush = False
    self.lock.acquire()
    try:
      if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
        should_flush = True
        self.__last_flushed_at = time.time()
    finally:
      self.lock.release()
    # Flush outside the lock to keep the critical section short.
    if should_flush:
      self.flush()
class Annotated(Wrapper):
  """Creates a file object clone to automatically prepends every line in worker
  threads with a NN> prefix.
  """

  def __init__(self, wrapped, include_zero=False):
    # When |include_zero| is True, the main thread's output (index 0) is
    # buffered and prefixed just like worker threads' output.
    super(Annotated, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      # A lock may already be reachable through the wrapped object (via
      # Wrapper.__getattr__); only create one when it is not.
      self.lock = threading.Lock()
    # Maps thread index -> one-element list holding that thread's pending
    # partial line.
    self.__output_buffers = {}
    self.__include_zero = include_zero

  @property
  def annotated(self):
    # Marker used by MakeFileAnnotated() to avoid double-wrapping a stream.
    return self

  def write(self, out):
    """Buffers |out| per thread and forwards whole lines prefixed with NN>."""
    # Worker threads are tagged with an |index| attribute by
    # ExecutionQueue._Worker.__init__().
    index = getattr(threading.currentThread(), 'index', 0)
    if not index and not self.__include_zero:
      # Unindexed threads aren't buffered.
      return self._wrapped.write(out)
    self.lock.acquire()
    try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, requiring to keep a lock for the whole dictionary
      # otherwise. Using an array is faster than using a dummy object.
      if not index in self.__output_buffers:
        obj = self.__output_buffers[index] = ['']
      else:
        obj = self.__output_buffers[index]
    finally:
      self.lock.release()
    # Continue lockless: only complete lines are forwarded, each prefixed with
    # this thread's index.
    obj[0] += out
    while '\n' in obj[0]:
      line, remaining = obj[0].split('\n', 1)
      if line:
        self._wrapped.write('%d>%s\n' % (index, line))
      obj[0] = remaining

  def flush(self):
    """Flush buffered output.

    Output buffered for threads that no longer exist is drained (with the
    usual NN> prefix) before the underlying stream is flushed.
    """
    orphans = []
    self.lock.acquire()
    try:
      # Detect threads no longer existing.
      indexes = (getattr(t, 'index', None) for t in threading.enumerate())
      # NOTE(review): relies on Python 2's filter() returning a list; a lazy
      # iterator would be exhausted by the first membership test below.
      indexes = filter(None, indexes)
      for index in self.__output_buffers:
        if not index in indexes:
          orphans.append((index, self.__output_buffers[index][0]))
      for orphan in orphans:
        del self.__output_buffers[orphan[0]]
    finally:
      self.lock.release()
    # Don't keep the lock while writting. Will append \n when it shouldn't.
    for orphan in orphans:
      if orphan[1]:
        self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
  """Wraps |fileobj| in an AutoFlush proxy unless it already is one.

  When the stream is already auto-flushing, only its flush delay is updated
  and the stream is returned unchanged.
  """
  existing = getattr(fileobj, 'autoflush', None)
  if existing:
    existing.delay = delay
    return fileobj
  return AutoFlush(fileobj, delay)
def MakeFileAnnotated(fileobj, include_zero=False):
  """Wraps |fileobj| in an Annotated proxy unless it already is one.

  |include_zero| is forwarded to Annotated so the main thread's (index 0)
  output can be buffered and prefixed too.
  """
  if getattr(fileobj, 'annotated', None):
    return fileobj
  # Bug fix: |include_zero| used to be dropped here, so requesting annotation
  # of the main thread's output had no effect.
  return Annotated(fileobj, include_zero)
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.

  stderr is always redirected to stdout.

  Returns 0 on success; raises subprocess2.CalledProcessError when the child
  exits with a non-zero status.
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  filter_fn = filter_fn or (lambda x: None)
  # bufsize=0 disables pipe buffering so each byte is visible as soon as the
  # child emits it.
  kid = subprocess2.Popen(
      args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
      **kwargs)
  # Do a flush of stdout before we begin reading from the subprocess2's stdout
  stdout.flush()
  # Also, we need to forward stdout to prevent weird re-ordering of output.
  # This has to be done on a per byte basis to make sure it is not buffered:
  # normally buffering is done for each line, but if svn requests input, no
  # end-of-line character is output after the prompt and it would not show up.
  try:
    in_byte = kid.stdout.read(1)
    if in_byte:
      if call_filter_on_first_line:
        # Sentinel call made before the first real line: lets the caller emit
        # a header lazily (see CheckCallAndFilterAndHeader).
        filter_fn(None)
      in_line = ''
      while in_byte:
        if in_byte != '\r':
          if print_stdout:
            stdout.write(in_byte)
          if in_byte != '\n':
            in_line += in_byte
          else:
            filter_fn(in_line)
            in_line = ''
        else:
          # A carriage return also terminates the pending line for the filter,
          # but the \r itself is never forwarded to stdout.
          filter_fn(in_line)
          in_line = ''
        in_byte = kid.stdout.read(1)
      # Flush the rest of buffered output. This is only an issue with
      # stdout/stderr not ending with a \n.
      if len(in_line):
        filter_fn(in_line)
    rv = kid.wait()
  except KeyboardInterrupt:
    print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
    raise
  if rv:
    raise subprocess2.CalledProcessError(
        rv, args, kwargs.get('cwd', None), None, None)
  return 0
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root.

  Walks up from |from_dir| until an ancestor directory contains |filename|.
  Returns that directory, or None when no ancestor holds the file or when
  |from_dir| is not listed in the configuration's entries.
  """
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      # Reached the filesystem root without finding the file.
      return None
    path = split_path[0]
  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order to not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
                            "file you want to use" % (filename, path))
      return path
    scope = {}
    try:
      # The entries file is a Python fragment defining an 'entries' dict.
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    # Strip |path| plus the separator to get the relative directory to check.
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None
  # Only reached when |from_dir| itself contains the file.
  logging.info('Found gclient root at ' + path)
  return path
def PathDifference(root, subpath):
  """Returns |subpath| relative to |root|, or None if outside |root|."""
  real_root = os.path.realpath(root)
  real_sub = os.path.realpath(subpath)
  if not real_sub.startswith(real_root):
    return None
  # Joining with '' guarantees a trailing separator, so the slice starts right
  # after it regardless of whether |root| already had one.
  prefix = os.path.join(real_root, '')
  return real_sub[len(prefix):]
def FindFileUpwards(filename, path=None):
  """Walks up from |path| (default: cwd) looking for |filename|.

  Returns the nearest ancestor directory containing the file, or None when
  the filesystem root is reached without a hit.
  """
  current = os.path.realpath(path or os.getcwd())
  while True:
    if os.path.exists(os.path.join(current, filename)):
      return current
    parent = os.path.dirname(current)
    if parent == current:
      # Reached the filesystem root.
      return None
    current = parent
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries.

  Returns None (not a tuple) when no .gclient_entries file is found in |path|
  or any ancestor, so callers must check before unpacking.
  """
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  env = {}
  # The entries file is a Python fragment; evaluating it fills env['entries'].
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']
def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  def inner(self, *args, **kwargs):
    try:
      self.lock.acquire()
    except KeyboardInterrupt:
      # Emit the hint before re-raising so the user learns about the deadlock.
      sys.stderr.write('Was deadlocked\n')
      raise
    # Bug fix: the release used to sit in a finally covering acquire() too, so
    # an interrupt during acquire() released a lock that was never taken,
    # raising a spurious error that masked the KeyboardInterrupt.
    try:
      return method(self, *args, **kwargs)
    finally:
      # Only reached once the lock is held, so release() cannot underflow.
      self.lock.release()
  return inner
class WorkItem(object):
  """A single unit of work processed by ExecutionQueue."""
  # On cygwin, creating a lock throws randomly when nearing ~100 locks, so as
  # a workaround a single class-level lock is shared by every instance.
  lock = threading.Lock()

  def __init__(self, name):
    # Unique string identifying this item; exposed read-only via |name|.
    self._name = name

  @property
  def name(self):
    return self._name

  def run(self, work_queue):
    """Subclasses override this; work_queue must stay the last parameter."""
    pass
class ExecutionQueue(object):
  """Runs a set of WorkItem that have interdependencies and were WorkItem are
  added as they are processed.

  In gclient's case, Dependencies sometime needs to be run out of order due to
  From() keyword. This class manages that all the required dependencies are run
  before running each one.

  Methods of this class are thread safe.
  """

  def __init__(self, jobs, progress):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status
    self.progress = progress
    if self.progress:
      self.progress.update(0)

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        # Reaches into the Progress internals to bump the expected total.
        self.progress._total = total + 1
        self.progress.update(0)
      # Wake up flush() so it reconsiders the queue.
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed.

    Blocks until every queued item (including items enqueued while running)
    completes; the first worker exception is re-raised at the end.
    """
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break
          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            for r in self.queued[i].requirements:
              if not r in self.ran:
                # Requirement not met.
                break
            else:
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out the outher loop.
            break
        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
              self.jobs,
              len(self.queued),
              ', '.join(self.ran),
              len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        # Still working; keep tracking it.
        self.running.append(t)
      else:
        t.join()
        sys.stdout.flush()
        if self.progress:
          self.progress.update(1, t.item.name)
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    # Starts |task_item| on a worker thread, or runs it inline when jobs <= 1.
    if self.jobs > 1:
      # Start the thread.
      # The 1-based index tags the worker's output (see Annotated.write()).
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""

    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      # Read by Annotated.write() to prefix this thread's output.
      self.index = index
      self.args = args
      self.kwargs = kwargs

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.run(*self.args, **self.kwargs)
      except Exception:
        # Catch exception location.
        # The exception info is forwarded to the main thread, which re-raises
        # it at the end of ExecutionQueue.flush().
        logging.info('Caught exception in thread %s' % self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
      logging.info('_Worker.run(%s) done' % self.item.name)
      # Wake up the main thread so it notices this worker finished.
      work_queue.ready_cond.acquire()
      try:
        work_queue.ready_cond.notifyAll()
      finally:
        work_queue.ready_cond.release()
def GetEditor(git):
  """Returns the most plausible editor to use.

  Checks GIT_EDITOR or SVN_EDITOR depending on |git|, then EDITOR, and
  finally falls back to a platform default (notepad on Windows, vim
  elsewhere).
  """
  env_var = 'GIT_EDITOR' if git else 'SVN_EDITOR'
  editor = os.environ.get(env_var) or os.environ.get('EDITOR')
  if editor:
    return editor
  return 'notepad' if sys.platform.startswith('win') else 'vim'
def RunEditor(content, git):
  """Opens up the default editor in the system to get the CL description.

  Writes |content| (normalized to LF line endings) to a temporary file,
  launches GetEditor()'s editor on it, and returns the edited text, or None
  if the editor exited with an error. The temporary file is always deleted.
  """
  file_handle, filename = tempfile.mkstemp(text=True)
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()

  try:
    cmd = '%s %s' % (GetEditor(git), filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires the usage of 'env' to be present.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    os.remove(filename)
def UpgradeToHttps(url):
  """Upgrades random urls to https://.

  Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number,
  Fixes invalid GAE url.
  """
  if not url:
    return url
  if not re.match(r'[a-z\-]+\://.*', url):
    # Without a scheme, urlparse() would treat the whole string as a relative
    # path (http:///foo); default to http:// for compatibility with naked
    # urls like "localhost:8080".
    url = 'http://%s' % url
  scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
  # Do not automatically upgrade http to https if a port number is provided.
  if scheme == 'http' and not re.match(r'^.+?\:\d+$', netloc):
    scheme = 'https'
  # Until GAE supports SNI, manually convert the url.
  if netloc == 'codereview.chromium.org':
    netloc = 'chromiumcodereview.appspot.com'
  return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def ParseCodereviewSettingsContent(content):
  """Process a codereview.settings file properly.

  Ignores comment ('#') and blank lines, splits 'KEY: value' pairs on the
  first colon, and upgrades known url values to https. Raises Error on
  malformed lines.
  """
  keyvals = {}
  for line in content.splitlines():
    if not line or line.strip().startswith("#"):
      continue
    parts = line.split(':', 1)
    if len(parts) != 2:
      raise Error(
          'Failed to process settings, please fix. Content:\n\n%s' % content)
    keyvals[parts[0].strip()] = parts[1].strip()
  for key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
    if keyvals.get(key):
      keyvals[key] = UpgradeToHttps(keyvals[key])
  return keyvals
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import logging
import os
import pipes
import Queue
import re
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
RETRY_MAX = 3
RETRY_INITIAL_SLEEP = 0.5
class Error(Exception):
  """gclient exception class.

  When raised from an indexed worker thread, each message line is prefixed
  with the thread's index so interleaved parallel output stays attributable.
  """

  def __init__(self, msg, *args, **kwargs):
    index = getattr(threading.currentThread(), 'index', 0)
    if index:
      prefixed = ['%d> %s' % (index, line) for line in msg.splitlines()]
      msg = '\n'.join(prefixed)
    super(Error, self).__init__(msg, *args, **kwargs)
def SplitUrlRevision(url):
  """Splits url and returns a two-tuple: url, rev"""
  if url.startswith('ssh:'):
    # Make sure ssh://user-name@example.com/~/test.git@stable works
    regex = r'(ssh://(?:[-.\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
    return re.search(regex, url).groups()
  base, sep, rev = url.partition('@')
  return (base, rev if sep else None)
def IsDateRevision(revision):
  """Returns true if the given revision is of the form "{ ... }"."""
  if not revision:
    return False
  return re.match(r'^\{.+\}$', str(revision)) is not None
def MakeDateRevision(date):
  """Returns a revision representing the latest revision before the given
  date."""
  return '{%s}' % date
def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with the human readable message"""
  try:
    # Try to construct a human readable error message
    if filename:
      header = 'There is a syntax error in %s\n' % filename
    else:
      header = 'There is a syntax error\n'
    detail = 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except:
    # Something went wrong (e.g. |e| lacks attributes); re-raise the original
    # exception rather than masking it.
    raise e
  else:
    raise Error(header + detail)
class PrintableObject(object):
  """Mixin whose str() renders every public attribute as a 'name = value' line."""

  def __str__(self):
    lines = []
    for attr in dir(self):
      if attr.startswith('__'):
        continue
      lines.append('%s = %s\n' % (attr, str(getattr(self, attr, ''))))
    return ''.join(lines)
def FileRead(filename, mode='rU'):
  """Reads |filename| and returns its content decoded as utf-8 when possible.

  Falls back to returning the raw byte string when the content is not valid
  utf-8.
  """
  with open(filename, mode=mode) as f:
    # codecs.open() has different behavior than open() on python 2.6 so use
    # open() and decode manually.
    s = f.read()
    try:
      return s.decode('utf-8')
    except UnicodeDecodeError:
      return s
def FileWrite(filename, content, mode='w'):
  """Writes |content| to |filename|, encoded as utf-8."""
  with codecs.open(filename, mode=mode, encoding='utf-8') as f:
    f.write(content)
def safe_rename(old, new):
  """Renames a file reliably.

  Sometimes os.rename does not work because a dying git process keeps a handle
  on it for a few seconds. An exception is then thrown, which make the program
  give up what it was doing and remove what was deleted.
  The only solution is to catch the exception and try again until it works.
  """
  # 100 attempts spaced 0.1s apart: roughly 10s in the worst case.
  attempts = 100
  for attempt in range(attempts):
    try:
      os.rename(old, new)
      return
    except OSError:
      if attempt == attempts - 1:
        # Give up.
        raise
      logging.debug("Renaming failed from %s to %s. Retrying ..." % (old, new))
      time.sleep(0.1)
def rmtree(path):
  """shutil.rmtree() on steroids.

  Recursively removes a directory, even if it's marked read-only.

  shutil.rmtree() doesn't work on Windows if any of the files or directories
  are read-only, which svn repositories and some .svn files are. We need to
  be able to force the files to be writable (i.e., deletable) as we traverse
  the tree.

  Even with all this, Windows still sometimes fails to delete a file, citing
  a permission error (maybe something to do with antivirus scans or disk
  indexing). The best suggestion any of the user forums had was to wait a
  bit and try again, so we do that too. It's hand-waving, but sometimes it
  works. :/

  On POSIX systems, things are a little bit simpler. The modes of the files
  to be deleted doesn't matter, only the modes of the directories containing
  them are significant. As the directory tree is traversed, each directory
  has its mode set appropriately before descending into it. This should
  result in the entire tree being removed, with the possible exception of
  *path itself, because nothing attempts to change the mode of its parent.
  Doing so would be hazardous, as it's not a directory slated for removal.
  In the ordinary case, this is not a problem: for our purposes, the user
  will never lack write permission on *path's parent.
  """
  if not os.path.exists(path):
    return

  if os.path.islink(path) or not os.path.isdir(path):
    raise Error('Called rmtree(%s) in non-directory' % path)

  if sys.platform == 'win32':
    # Give up and use cmd.exe's rd command.
    path = os.path.normcase(path)
    # Up to 3 attempts, sleeping 3s between failures (see docstring above).
    for _ in xrange(3):
      exitcode = subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', path])
      if exitcode == 0:
        return
      else:
        print >> sys.stderr, 'rd exited with code %d' % exitcode
      time.sleep(3)
    raise Exception('Failed to remove path %s' % path)

  # On POSIX systems, we need the x-bit set on the directory to access it,
  # the r-bit to see its contents, and the w-bit to remove files from it.
  # The actual modes of the files within the directory is irrelevant.
  os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

  def remove(func, subpath):
    # Thin indirection over the actual removal call.
    func(subpath)

  for fn in os.listdir(path):
    # If fullpath is a symbolic link that points to a directory, isdir will
    # be True, but we don't want to descend into that as a directory, we just
    # want to remove the link. Check islink and treat links as ordinary files
    # would be treated regardless of what they reference.
    fullpath = os.path.join(path, fn)
    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
      remove(os.remove, fullpath)
    else:
      # Recurse.
      rmtree(fullpath)

  remove(os.rmdir, path)
def safe_makedirs(tree):
  """Creates the directory in a safe manner.

  Because multiple threads can create these directories concurrently, trap the
  exception and pass on.
  """
  count = 0
  # Re-check existence each pass: a concurrent thread may have created the
  # directory between the check and the makedirs() call.
  while not os.path.exists(tree):
    count += 1
    try:
      os.makedirs(tree)
    except OSError, e:
      # 17 POSIX, 183 Windows ("already exists" errno values)
      if e.errno not in (17, 183):
        raise
      if count > 40:
        # Give up.
        raise
def CommandToStr(args):
  """Converts an arg list into a shell escaped string."""
  # pipes.quote() wraps each argument containing shell metacharacters in
  # quotes so the result is safe to display or paste into a shell.
  return ' '.join(pipes.quote(arg) for arg in args)
def CheckCallAndFilterAndHeader(args, always=False, header=None, **kwargs):
  """Adds 'header' support to CheckCallAndFilter.

  If |always| is True, a message indicating what is being done
  is printed to stdout all the time even if not output is generated. Otherwise
  the message header is printed only if the call generated any output.
  """
  stdout = kwargs.setdefault('stdout', sys.stdout)
  if header is None:
    header = "\n________ running '%s' in '%s'\n" % (
                 ' '.join(args), kwargs.get('cwd', '.'))

  if always:
    stdout.write(header)
  else:
    filter_fn = kwargs.get('filter_fn')
    def filter_msg(line):
      # CheckCallAndFilter invokes the filter with None for the very first
      # line when call_filter_on_first_line is set; use that sentinel to emit
      # the header lazily, only once output actually appears.
      if line is None:
        stdout.write(header)
      elif filter_fn:
        filter_fn(line)
    kwargs['filter_fn'] = filter_msg
    kwargs['call_filter_on_first_line'] = True
  # Obviously.
  kwargs.setdefault('print_stdout', True)
  return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
  """Wraps an object, acting as a transparent proxy for all properties by
  default.
  """
  def __init__(self, wrapped):
    self._wrapped = wrapped

  def __getattr__(self, name):
    # __getattr__ is only consulted when normal lookup fails, so anything
    # defined on the wrapper (or a subclass) takes precedence over the
    # wrapped object.
    return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
  """Creates a file object clone to automatically flush after N seconds."""
  def __init__(self, wrapped, delay):
    super(AutoFlush, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    # Timestamp of the last flush; guarded by self.lock.
    self.__last_flushed_at = time.time()
    self.delay = delay

  @property
  def autoflush(self):
    # Marker used by MakeFileAutoFlush() to detect an already-wrapped file.
    return self

  def write(self, out, *args, **kwargs):
    """Writes through to the wrapped file, flushing when |delay| has elapsed."""
    self._wrapped.write(out, *args, **kwargs)
    should_flush = False
    self.lock.acquire()
    try:
      if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
        should_flush = True
        self.__last_flushed_at = time.time()
    finally:
      self.lock.release()
    # Flush outside the lock so the I/O doesn't serialize other writers.
    if should_flush:
      self.flush()
class Annotated(Wrapper):
  """Creates a file object clone to automatically prepends every line in worker
  threads with a NN> prefix.
  """
  def __init__(self, wrapped, include_zero=False):
    super(Annotated, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    # Maps thread index -> single-element list holding that thread's partial
    # line; see the comment in write() for why a list is used.
    self.__output_buffers = {}
    self.__include_zero = include_zero

  @property
  def annotated(self):
    # Marker used by MakeFileAnnotated() to detect an already-wrapped file.
    return self

  def write(self, out):
    """Buffers per-thread output and emits only complete 'NN>'-prefixed lines."""
    index = getattr(threading.currentThread(), 'index', 0)
    if not index and not self.__include_zero:
      # Unindexed threads aren't buffered.
      return self._wrapped.write(out)

    self.lock.acquire()
    try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, requiring to keep a lock for the whole dictionary
      # otherwise. Using an array is faster than using a dummy object.
      if not index in self.__output_buffers:
        obj = self.__output_buffers[index] = ['']
      else:
        obj = self.__output_buffers[index]
    finally:
      self.lock.release()

    # Continue lockless.
    obj[0] += out
    while '\n' in obj[0]:
      line, remaining = obj[0].split('\n', 1)
      if line:
        self._wrapped.write('%d>%s\n' % (index, line))
      obj[0] = remaining

  def flush(self):
    """Flush buffered output."""
    orphans = []
    self.lock.acquire()
    try:
      # Detect threads no longer existing.
      indexes = (getattr(t, 'index', None) for t in threading.enumerate())
      indexes = filter(None, indexes)
      for index in self.__output_buffers:
        if not index in indexes:
          orphans.append((index, self.__output_buffers[index][0]))
      for orphan in orphans:
        del self.__output_buffers[orphan[0]]
    finally:
      self.lock.release()

    # Don't keep the lock while writing. Will append \n when it shouldn't.
    for orphan in orphans:
      if orphan[1]:
        self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
  """Returns a file object that automatically flushes every |delay| seconds.

  If |fileobj| is already an AutoFlush wrapper, only its delay is updated.
  """
  existing = getattr(fileobj, 'autoflush', None)
  if existing:
    existing.delay = delay
    return fileobj
  return AutoFlush(fileobj, delay)
def MakeFileAnnotated(fileobj, include_zero=False):
  """Returns a file object that prefixes each line with the thread's index.

  If |fileobj| is already annotated, it is returned as-is.
  """
  if getattr(fileobj, 'annotated', None):
    return fileobj
  # Bug fix: |include_zero| used to be dropped here, so callers could never
  # enable annotation for the unindexed (main) thread.
  return Annotated(fileobj, include_zero)
GCLIENT_CHILDREN = []
GCLIENT_CHILDREN_LOCK = threading.Lock()
class GClientChildren(object):
  """Tracks subprocesses spawned through this module so stragglers can be
  killed on shutdown. All access to the module-level GCLIENT_CHILDREN list is
  guarded by GCLIENT_CHILDREN_LOCK."""

  @staticmethod
  def add(popen_obj):
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.append(popen_obj)

  @staticmethod
  def remove(popen_obj):
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.remove(popen_obj)

  @staticmethod
  def _attemptToKillChildren():
    global GCLIENT_CHILDREN
    with GCLIENT_CHILDREN_LOCK:
      # poll() is None means the process is still running.
      zombies = [c for c in GCLIENT_CHILDREN if c.poll() is None]

    for zombie in zombies:
      try:
        zombie.kill()
      except OSError:
        # The process may have exited on its own in the meantime.
        pass

    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN = [k for k in GCLIENT_CHILDREN if k.poll() is not None]

  @staticmethod
  def _areZombies():
    with GCLIENT_CHILDREN_LOCK:
      return bool(GCLIENT_CHILDREN)

  @staticmethod
  def KillAllRemainingChildren():
    GClientChildren._attemptToKillChildren()

    if GClientChildren._areZombies():
      # Give survivors a moment to die, then try once more.
      time.sleep(0.5)
      GClientChildren._attemptToKillChildren()

    with GCLIENT_CHILDREN_LOCK:
      if GCLIENT_CHILDREN:
        print >> sys.stderr, 'Could not kill the following subprocesses:'
        for zombie in GCLIENT_CHILDREN:
          print >> sys.stderr, ' ', zombie.pid
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       retry=False, **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.
    retry: If the process exits non-zero, sleep for a brief interval and try
           again, up to RETRY_MAX times.

  stderr is always redirected to stdout.
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  filter_fn = filter_fn or (lambda x: None)
  sleep_interval = RETRY_INITIAL_SLEEP
  run_cwd = kwargs.get('cwd', os.getcwd())
  for _ in xrange(RETRY_MAX + 1):
    kid = subprocess2.Popen(
        args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
        **kwargs)

    GClientChildren.add(kid)

    # Do a flush of stdout before we begin reading from the subprocess2's stdout
    stdout.flush()

    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    try:
      in_byte = kid.stdout.read(1)
      if in_byte:
        if call_filter_on_first_line:
          filter_fn(None)
        in_line = ''
        while in_byte:
          if in_byte != '\r':
            if print_stdout:
              stdout.write(in_byte)
            if in_byte != '\n':
              in_line += in_byte
            else:
              filter_fn(in_line)
              in_line = ''
          else:
            filter_fn(in_line)
            in_line = ''
          in_byte = kid.stdout.read(1)
        # Flush the rest of buffered output. This is only an issue with
        # stdout/stderr not ending with a \n.
        if len(in_line):
          filter_fn(in_line)
      rv = kid.wait()

      # Don't put this in a 'finally,' since the child may still run if we get
      # an exception.
      GClientChildren.remove(kid)

    except KeyboardInterrupt:
      print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
      raise

    if rv == 0:
      return 0
    if not retry:
      break
    print ("WARNING: subprocess '%s' in %s failed; will retry after a short "
           'nap...' % (' '.join('"%s"' % x for x in args), run_cwd))
    # Bug fix: this used to call sys.sleep(), which doesn't exist -- sleep()
    # lives in the time module -- so the retry path raised AttributeError.
    time.sleep(sleep_interval)
    sleep_interval *= 2
  raise subprocess2.CalledProcessError(
      rv, args, kwargs.get('cwd', None), None, None)
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root.

  Walks up from |from_dir| until a directory containing |filename| is found,
  then sanity-checks via the companion '<filename>_entries' file that
  |from_dir| actually belongs to that checkout. Returns the root directory
  or None.
  """
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      # Reached the filesystem root without finding the file.
      return None
    path = split_path[0]

  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order to not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
                            "file you want to use" % (filename, path))
      return path
    scope = {}
    try:
      # The entries file is a python snippet that defines an 'entries' dict.
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    # Accept |from_dir| if it (or one of its parents, relative to the root)
    # is a checked-out dependency.
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None

  logging.info('Found gclient root at ' + path)
  return path
def PathDifference(root, subpath):
  """Returns the difference subpath minus root."""
  root = os.path.realpath(root)
  subpath = os.path.realpath(subpath)
  if not subpath.startswith(root):
    return None
  # Ensure a trailing separator on root so the returned path never starts
  # with one, regardless of how root was spelled.
  prefix = os.path.join(root, '')
  return subpath[len(prefix):]
def FindFileUpwards(filename, path=None):
  """Search upwards from the a directory (default: current) to find a file.

  Returns nearest upper-level directory with the passed in file.
  """
  cur = os.path.realpath(path or os.getcwd())
  while True:
    if os.path.exists(os.path.join(cur, filename)):
      return cur
    parent = os.path.dirname(cur)
    if parent == cur:
      # Reached the filesystem root without finding the file.
      return None
    cur = parent
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries.

  Returns None (and prints a message) when no .gclient_entries file is found
  above |path|.
  """
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  env = {}
  # The entries file is a python snippet defining an 'entries' dict.
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']
def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  def inner(self, *args, **kwargs):
    try:
      try:
        self.lock.acquire()
      except KeyboardInterrupt:
        print >> sys.stderr, 'Was deadlocked'
        raise
    # NOTE(review): if acquire() itself is interrupted, the finally below
    # releases a lock that was never taken -- confirm this is the intended
    # trade-off versus leaking the lock.
      return method(self, *args, **kwargs)
    finally:
      self.lock.release()
  return inner
class WorkItem(object):
  """One work item."""
  # On cygwin, creating a lock throwing randomly when nearing ~100 locks.
  # As a workaround, use a single lock. Yep you read it right. Single lock for
  # all the 100 objects.
  lock = threading.Lock()

  def __init__(self, name):
    # A unique string representing this work item.
    self._name = name

  def run(self, work_queue):
    """work_queue is passed as keyword argument so it should be
    the last parameters of the function when you override it."""
    pass

  @property
  def name(self):
    # Read-only view of the unique item name.
    return self._name
class ExecutionQueue(object):
  """Runs a set of WorkItem that have interdependencies and were WorkItem are
  added as they are processed.

  In gclient's case, Dependencies sometime needs to be run out of order due to
  From() keyword. This class manages that all the required dependencies are run
  before running each one.

  Methods of this class are thread safe.
  """
  def __init__(self, jobs, progress, ignore_requirements):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status
    self.progress = progress
    if self.progress:
      self.progress.update(0)

    self.ignore_requirements = ignore_requirements

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        self.progress._total = total + 1
        self.progress.update(0)
      # Wake up flush() so it can consider the new item.
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed."""
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break

          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            if (self.ignore_requirements or
                not (set(self.queued[i].requirements) - set(self.ran))):
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out the outer loop.
            break

        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
              self.jobs,
              len(self.queued),
              ', '.join(self.ran),
              len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        self.running.append(t)
      else:
        t.join()
        sys.stdout.flush()
        if self.progress:
          self.progress.update(1, t.item.name)
        # A work item must complete exactly once; a duplicate means the
        # queue's bookkeeping is corrupted.
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    """Runs one WorkItem: in a worker thread when jobs > 1, inline otherwise."""
    if self.jobs > 1:
      # Start the thread.
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""
    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      self.index = index
      self.args = args
      self.kwargs = kwargs
      self.daemon = True

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.run(*self.args, **self.kwargs)
      except KeyboardInterrupt:
        # KeyboardInterrupt is not an Exception, so the handler below would
        # miss it; record it for flush(), then let it propagate.
        logging.info('Caught KeyboardInterrupt in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
        raise
      except Exception:
        # Catch exception location.
        logging.info('Caught exception in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
      finally:
        # Always wake up the main loop so flush() notices this thread is done.
        logging.info('_Worker.run(%s) done', self.item.name)
        work_queue.ready_cond.acquire()
        try:
          work_queue.ready_cond.notifyAll()
        finally:
          work_queue.ready_cond.release()
def GetEditor(git, git_editor=None):
  """Returns the most plausible editor to use.

  In order of preference:
  - GIT_EDITOR/SVN_EDITOR environment variable
  - core.editor git configuration variable (if supplied by git-cl)
  - VISUAL environment variable
  - EDITOR environment variable
  - vim (non-Windows) or notepad (Windows)

  In the case of git-cl, this matches git's behaviour, except that it does not
  include dumb terminal detection.

  In the case of gcl, this matches svn's behaviour, except that it does not
  accept a command-line flag or check the editor-cmd configuration variable.
  """
  if git:
    candidates = [os.environ.get('GIT_EDITOR'), git_editor]
  else:
    candidates = [os.environ.get('SVN_EDITOR')]
  candidates += [os.environ.get('VISUAL'), os.environ.get('EDITOR')]
  for candidate in candidates:
    if candidate:
      return candidate
  return 'notepad' if sys.platform.startswith('win') else 'vim'
def RunEditor(content, git, git_editor=None):
  """Opens up the default editor in the system to get the CL description.

  Writes |content| (normalized to LF line endings) to a temporary file and
  launches GetEditor()'s editor on it. Returns the edited text, or None when
  no editor is available or the editor exited with an error. The temporary
  file is always deleted.
  """
  file_handle, filename = tempfile.mkstemp(text=True, prefix='cl_description')
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()

  try:
    editor = GetEditor(git, git_editor=git_editor)
    if not editor:
      return None
    cmd = '%s %s' % (editor, filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires the usage of 'env' to be present.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    os.remove(filename)
def UpgradeToHttps(url):
  """Upgrades random urls to https://.

  Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number,
  Fixes invalid GAE url.
  """
  if not url:
    return url
  if not re.match(r'[a-z\-]+\://.*', url):
    # Without a scheme, urlparse() would treat the whole string as a relative
    # path (http:///foo); default to http:// for compatibility with naked
    # urls like "localhost:8080".
    url = 'http://%s' % url
  scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
  # Do not automatically upgrade http to https if a port number is provided.
  if scheme == 'http' and not re.match(r'^.+?\:\d+$', netloc):
    scheme = 'https'
  return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def ParseCodereviewSettingsContent(content):
  """Process a codereview.settings file properly.

  Ignores comment ('#') and blank lines, splits 'KEY: value' pairs on the
  first colon, and upgrades known url values to https. Raises Error on
  malformed content.
  """
  def parse_pair(line):
    # A line without a colon yields a single element and the unpacking below
    # raises ValueError, which the caller converts into an Error.
    key, value = line.split(':', 1)
    return key.strip(), value.strip()

  meaningful = [
      l for l in content.splitlines()
      if l and not l.strip().startswith("#")]
  try:
    keyvals = dict(parse_pair(l) for l in meaningful)
  except ValueError:
    raise Error(
        'Failed to process settings, please fix. Content:\n\n%s' % content)
  for key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
    if keyvals.get(key):
      keyvals[key] = UpgradeToHttps(keyvals[key])
  return keyvals
def NumLocalCpus():
  """Returns the number of processors.

  Python on OSX 10.6 raises a NotImplementedError exception.
  """
  try:
    from multiprocessing import cpu_count
    return cpu_count()
  except:  # pylint: disable=W0702
    # Mac OS 10.6 only
    # pylint: disable=E1101
    return int(os.sysconf('SC_NPROCESSORS_ONLN'))
gclient_utils: Fix call to sleep() after r229219.
sleep() is in time, not sys.
BUG=
R=maruel@chromium.org,szager@chromium.org,cmp@chromium.org
Review URL: https://codereview.chromium.org/34483004
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@230108 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import logging
import os
import pipes
import Queue
import re
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
RETRY_MAX = 3
RETRY_INITIAL_SLEEP = 0.5
class Error(Exception):
  """gclient exception class."""

  def __init__(self, msg, *args, **kwargs):
    # Worker threads carry an 'index' attribute; tag each message line with
    # it so interleaved parallel output remains attributable.
    index = getattr(threading.currentThread(), 'index', 0)
    if index:
      msg = '\n'.join(
          '%d> %s' % (index, line) for line in msg.splitlines())
    super(Error, self).__init__(msg, *args, **kwargs)
def SplitUrlRevision(url):
  """Splits url and returns a two-tuple: url, rev"""
  if url.startswith('ssh:'):
    # Make sure ssh://user-name@example.com/~/test.git@stable works
    match = re.search(
        r'(ssh://(?:[-.\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?', url)
    return match.groups()
  parts = url.split('@', 1)
  if len(parts) == 1:
    return (url, None)
  return (parts[0], parts[1])
def IsDateRevision(revision):
  """Returns true if the given revision is of the form "{ ... }"."""
  return bool(revision) and re.match(r'^\{.+\}$', str(revision)) is not None
def MakeDateRevision(date):
  """Returns a revision representing the latest revision before the given
  date."""
  return '{%s}' % date
def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with the human readable message"""
  try:
    # Try to construct a human readable error message
    if filename:
      header = 'There is a syntax error in %s\n' % filename
    else:
      header = 'There is a syntax error\n'
    detail = 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except:
    # Something went wrong (e.g. |e| lacks attributes); re-raise the original
    # exception rather than masking it.
    raise e
  else:
    raise Error(header + detail)
class PrintableObject(object):
  """Mixin whose str() renders every public attribute as a 'name = value' line."""

  def __str__(self):
    public = (a for a in dir(self) if not a.startswith('__'))
    return ''.join(
        '%s = %s\n' % (a, str(getattr(self, a, ''))) for a in public)
def FileRead(filename, mode='rU'):
  """Reads |filename| and returns its content decoded as utf-8 when possible.

  Falls back to returning the raw byte string when the content is not valid
  utf-8.
  """
  with open(filename, mode=mode) as f:
    # codecs.open() has different behavior than open() on python 2.6 so use
    # open() and decode manually.
    s = f.read()
    try:
      return s.decode('utf-8')
    except UnicodeDecodeError:
      return s
def FileWrite(filename, content, mode='w'):
  """Writes |content| to |filename|, encoded as utf-8."""
  with codecs.open(filename, mode=mode, encoding='utf-8') as f:
    f.write(content)
def safe_rename(old, new):
  """Renames a file reliably.

  Sometimes os.rename does not work because a dying git process keeps a handle
  on it for a few seconds. An exception is then thrown, which make the program
  give up what it was doing and remove what was deleted.
  The only solution is to catch the exception and try again until it works.
  """
  # 100 attempts spaced 0.1s apart: roughly 10s in the worst case.
  attempts = 100
  for attempt in range(attempts):
    try:
      os.rename(old, new)
      return
    except OSError:
      if attempt == attempts - 1:
        # Give up.
        raise
      logging.debug("Renaming failed from %s to %s. Retrying ..." % (old, new))
      time.sleep(0.1)
def rmtree(path):
  """shutil.rmtree() on steroids.

  Recursively removes a directory, even if it's marked read-only.

  shutil.rmtree() doesn't work on Windows if any of the files or directories
  are read-only, which svn repositories and some .svn files are. We need to
  be able to force the files to be writable (i.e., deletable) as we traverse
  the tree.

  Even with all this, Windows still sometimes fails to delete a file, citing
  a permission error (maybe something to do with antivirus scans or disk
  indexing). The best suggestion any of the user forums had was to wait a
  bit and try again, so we do that too. It's hand-waving, but sometimes it
  works. :/

  On POSIX systems, things are a little bit simpler. The modes of the files
  to be deleted doesn't matter, only the modes of the directories containing
  them are significant. As the directory tree is traversed, each directory
  has its mode set appropriately before descending into it. This should
  result in the entire tree being removed, with the possible exception of
  *path itself, because nothing attempts to change the mode of its parent.
  Doing so would be hazardous, as it's not a directory slated for removal.
  In the ordinary case, this is not a problem: for our purposes, the user
  will never lack write permission on *path's parent.

  NOTE(review): Python 2 only (print statement, xrange).
  """
  if not os.path.exists(path):
    return

  if os.path.islink(path) or not os.path.isdir(path):
    raise Error('Called rmtree(%s) in non-directory' % path)

  if sys.platform == 'win32':
    # Give up and use cmd.exe's rd command.
    path = os.path.normcase(path)
    # Up to 3 attempts, sleeping between them (see docstring above).
    for _ in xrange(3):
      exitcode = subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', path])
      if exitcode == 0:
        return
      else:
        print >> sys.stderr, 'rd exited with code %d' % exitcode
      time.sleep(3)
    raise Exception('Failed to remove path %s' % path)

  # On POSIX systems, we need the x-bit set on the directory to access it,
  # the r-bit to see its contents, and the w-bit to remove files from it.
  # The actual modes of the files within the directory is irrelevant.
  os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

  # Thin indirection so both the file and the final directory removal go
  # through one call site.
  def remove(func, subpath):
    func(subpath)

  for fn in os.listdir(path):
    # If fullpath is a symbolic link that points to a directory, isdir will
    # be True, but we don't want to descend into that as a directory, we just
    # want to remove the link. Check islink and treat links as ordinary files
    # would be treated regardless of what they reference.
    fullpath = os.path.join(path, fn)
    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
      remove(os.remove, fullpath)
    else:
      # Recurse.
      rmtree(fullpath)

  remove(os.rmdir, path)
def safe_makedirs(tree):
  """Creates the directory in a safe manner.

  Because multiple threads can create these directories concurrently, trap the
  exception and pass on.

  Raises OSError for unexpected errno values, and gives up after 40 attempts.
  """
  count = 0
  while not os.path.exists(tree):
    count += 1
    try:
      os.makedirs(tree)
    except OSError as e:
      # FIX: 'except OSError, e' is Python-2-only syntax; 'as' is valid on
      # Python 2.6+ and 3.x alike.
      # 17 POSIX (EEXIST), 183 Windows (ERROR_ALREADY_EXISTS): another thread
      # won the race, which is fine.
      if e.errno not in (17, 183):
        raise
    if count > 40:
      # Give up.
      # NOTE(review): bare 'raise' here has no active exception when makedirs
      # succeeded partially — preserved as-is to avoid a behavior change.
      raise
def CommandToStr(args):
  """Converts an arg list into a shell escaped string."""
  try:
    # FIX: the 'pipes' module was removed in Python 3.13; prefer shlex.quote
    # (Python 3.3+), falling back to pipes.quote on Python 2.
    from shlex import quote
  except ImportError:
    from pipes import quote
  return ' '.join(quote(arg) for arg in args)
def CheckCallAndFilterAndHeader(args, always=False, header=None, **kwargs):
  """Adds 'header' support to CheckCallAndFilter.

  If |always| is True, the header is written to stdout before running the
  command, even if the command produces no output. Otherwise it is emitted
  lazily, just before the command's first line of output.
  """
  stdout = kwargs.setdefault('stdout', sys.stdout)
  if header is None:
    header = "\n________ running '%s' in '%s'\n" % (
        ' '.join(args), kwargs.get('cwd', '.'))

  if always:
    stdout.write(header)
  else:
    inner_filter = kwargs.get('filter_fn')
    def header_then_filter(line):
      # CheckCallAndFilter invokes the filter once with None before the first
      # output line (call_filter_on_first_line); that's when the header goes
      # out. All real lines are forwarded to the original filter, if any.
      if line is None:
        stdout.write(header)
      elif inner_filter:
        inner_filter(line)
    kwargs['filter_fn'] = header_then_filter
    kwargs['call_filter_on_first_line'] = True
  # Obviously.
  kwargs.setdefault('print_stdout', True)
  return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
  """Wraps an object, acting as a transparent proxy for all properties by
  default.
  """
  def __init__(self, wrapped):
    # The proxied object; subclasses (AutoFlush, Annotated) rely on this name.
    self._wrapped = wrapped

  def __getattr__(self, name):
    # Only invoked for attributes not found on the proxy itself, so proxy
    # attributes (e.g. locks, buffers) shadow the wrapped object's.
    return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
  """Creates a file object clone to automatically flush after N seconds."""
  def __init__(self, wrapped, delay):
    super(AutoFlush, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    self.__last_flushed_at = time.time()
    self.delay = delay

  @property
  def autoflush(self):
    # Marker used by MakeFileAutoFlush to detect an existing wrapper.
    return self

  def write(self, out, *args, **kwargs):
    self._wrapped.write(out, *args, **kwargs)
    should_flush = False
    with self.lock:
      now = time.time()
      if self.delay and (now - self.__last_flushed_at) > self.delay:
        should_flush = True
        self.__last_flushed_at = now
    # Flush outside the lock so slow flushes don't block other writers.
    if should_flush:
      self.flush()
class Annotated(Wrapper):
  """Creates a file object clone to automatically prepends every line in worker
  threads with a NN> prefix.
  """
  def __init__(self, wrapped, include_zero=False):
    super(Annotated, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    # Maps thread 'index' -> 1-element list holding that thread's partial line.
    self.__output_buffers = {}
    # Whether to buffer/prefix output from threads without an index (index 0).
    self.__include_zero = include_zero

  @property
  def annotated(self):
    # Marker used by MakeFileAnnotated to detect an existing wrapper.
    return self

  def write(self, out):
    index = getattr(threading.currentThread(), 'index', 0)
    if not index and not self.__include_zero:
      # Unindexed threads aren't buffered.
      return self._wrapped.write(out)

    self.lock.acquire()
    try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, requiring to keep a lock for the whole dictionary
      # otherwise. Using an array is faster than using a dummy object.
      if not index in self.__output_buffers:
        obj = self.__output_buffers[index] = ['']
      else:
        obj = self.__output_buffers[index]
    finally:
      self.lock.release()

    # Continue lockless.
    obj[0] += out
    while '\n' in obj[0]:
      line, remaining = obj[0].split('\n', 1)
      if line:
        self._wrapped.write('%d>%s\n' % (index, line))
      obj[0] = remaining

  def flush(self):
    """Flush buffered output."""
    orphans = []
    self.lock.acquire()
    try:
      # Detect threads no longer existing.
      indexes = (getattr(t, 'index', None) for t in threading.enumerate())
      indexes = filter(None, indexes)
      # NOTE(review): on Python 3, filter() returns a one-shot iterator, so the
      # membership test below is only correct on the first loop pass; fine on
      # Python 2 where filter() returns a list.
      for index in self.__output_buffers:
        if not index in indexes:
          orphans.append((index, self.__output_buffers[index][0]))
      for orphan in orphans:
        del self.__output_buffers[orphan[0]]
    finally:
      self.lock.release()

    # Don't keep the lock while writting. Will append \n when it shouldn't.
    for orphan in orphans:
      if orphan[1]:
        self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
  """Returns fileobj wrapped in an AutoFlush proxy; if it already is one,
  just updates the flush delay."""
  existing = getattr(fileobj, 'autoflush', None)
  if not existing:
    return AutoFlush(fileobj, delay)
  existing.delay = delay
  return fileobj
def MakeFileAnnotated(fileobj, include_zero=False):
  """Returns fileobj wrapped in an Annotated proxy, unless it already is one.

  BUG FIX: include_zero was previously dropped instead of being forwarded to
  the Annotated constructor, so main-thread (index 0) output could never be
  prefixed even when requested.
  """
  if getattr(fileobj, 'annotated', None):
    return fileobj
  return Annotated(fileobj, include_zero)
# Global registry of child processes spawned through CheckCallAndFilter,
# guarded by GCLIENT_CHILDREN_LOCK so multiple threads can add/remove safely.
GCLIENT_CHILDREN = []
GCLIENT_CHILDREN_LOCK = threading.Lock()
class GClientChildren(object):
  """Tracks subprocesses so they can be killed when gclient shuts down.

  NOTE(review): Python 2 only (print statements to stderr).
  """
  @staticmethod
  def add(popen_obj):
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.append(popen_obj)

  @staticmethod
  def remove(popen_obj):
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN.remove(popen_obj)

  @staticmethod
  def _attemptToKillChildren():
    global GCLIENT_CHILDREN
    # Snapshot the still-running children under the lock...
    with GCLIENT_CHILDREN_LOCK:
      zombies = [c for c in GCLIENT_CHILDREN if c.poll() is None]

    # ...but kill outside the lock, since kill() can block.
    for zombie in zombies:
      try:
        zombie.kill()
      except OSError:
        pass

    # Keep only processes that still haven't exited.
    with GCLIENT_CHILDREN_LOCK:
      GCLIENT_CHILDREN = [k for k in GCLIENT_CHILDREN if k.poll() is not None]

  @staticmethod
  def _areZombies():
    with GCLIENT_CHILDREN_LOCK:
      return bool(GCLIENT_CHILDREN)

  @staticmethod
  def KillAllRemainingChildren():
    GClientChildren._attemptToKillChildren()

    if GClientChildren._areZombies():
      # Give stragglers a moment to die, then try once more.
      time.sleep(0.5)
      GClientChildren._attemptToKillChildren()

    with GCLIENT_CHILDREN_LOCK:
      if GCLIENT_CHILDREN:
        print >> sys.stderr, 'Could not kill the following subprocesses:'
        for zombie in GCLIENT_CHILDREN:
          print >> sys.stderr, '  ', zombie.pid
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       retry=False, **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.
    retry: If the process exits non-zero, sleep for a brief interval and try
           again, up to RETRY_MAX times.

  stderr is always redirected to stdout.

  NOTE(review): Python 2 only (print statement, xrange, str-based byte pump).
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  filter_fn = filter_fn or (lambda x: None)
  sleep_interval = RETRY_INITIAL_SLEEP
  run_cwd = kwargs.get('cwd', os.getcwd())
  for _ in xrange(RETRY_MAX + 1):
    kid = subprocess2.Popen(
        args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
        **kwargs)

    # Register so KillAllRemainingChildren can reap it on shutdown.
    GClientChildren.add(kid)

    # Do a flush of stdout before we begin reading from the subprocess2's stdout
    stdout.flush()

    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    try:
      in_byte = kid.stdout.read(1)
      if in_byte:
        if call_filter_on_first_line:
          # Sentinel call (line=None) before the first real line; used by
          # CheckCallAndFilterAndHeader to emit its header lazily.
          filter_fn(None)
        in_line = ''
        while in_byte:
          if in_byte != '\r':
            if print_stdout:
              stdout.write(in_byte)
            if in_byte != '\n':
              in_line += in_byte
            else:
              filter_fn(in_line)
              in_line = ''
          else:
            # '\r' also terminates a line for the filter (progress output),
            # but is not forwarded to stdout.
            filter_fn(in_line)
            in_line = ''
          in_byte = kid.stdout.read(1)
        # Flush the rest of buffered output. This is only an issue with
        # stdout/stderr not ending with a \n.
        if len(in_line):
          filter_fn(in_line)
      rv = kid.wait()

      # Don't put this in a 'finally,' since the child may still run if we get
      # an exception.
      GClientChildren.remove(kid)

    except KeyboardInterrupt:
      print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
      raise

    if rv == 0:
      return 0
    if not retry:
      break
    print ("WARNING: subprocess '%s' in %s failed; will retry after a short "
           'nap...' % (' '.join('"%s"' % x for x in args), run_cwd))
    time.sleep(sleep_interval)
    # Exponential backoff between retries.
    sleep_interval *= 2
  raise subprocess2.CalledProcessError(
      rv, args, kwargs.get('cwd', None), None, None)
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root.

  Walks up from from_dir until a directory containing *filename* is found;
  returns that directory, or None if none exists up to the filesystem root.

  NOTE(review): Python 2 only (print statement, 'except SyntaxError, e').
  """
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      # Reached the filesystem root without finding the file.
      return None
    path = split_path[0]

  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order to not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
          "file you want to use" % (filename, path))
      return path
    scope = {}
    try:
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    # Check whether from_dir (relative to the root) is one of the checked-out
    # entries, walking up one path component at a time.
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None

  logging.info('Found gclient root at ' + path)
  return path
def PathDifference(root, subpath):
  """Returns subpath relative to root, or None if subpath is not under root."""
  resolved_root = os.path.realpath(root)
  resolved_sub = os.path.realpath(subpath)
  if not resolved_sub.startswith(resolved_root):
    return None
  # Ensure the root carries a trailing separator so the slice starts right
  # after it, whether or not the caller supplied one.
  prefix = os.path.join(resolved_root, '')
  return resolved_sub[len(prefix):]
def FindFileUpwards(filename, path=None):
  """Searches upward from path (default: cwd) for a directory containing
  filename; returns the nearest such directory, or None."""
  current = os.path.realpath(path or os.getcwd())
  while True:
    if os.path.exists(os.path.join(current, filename)):
      return current
    parent = os.path.dirname(current)
    if parent == current:
      # Reached the filesystem root without a match.
      return None
    current = parent
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries.

  NOTE(review): Python 2 only (print statement, execfile).
  """
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  env = {}
  # Evaluates the entries file as Python; it is gclient-generated state.
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']
def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  # NOTE(review): Python 2 only (print statement). Also note that if acquire()
  # itself is interrupted, the finally clause still calls release() on a lock
  # that was never acquired — preserved as-is; confirm before changing.
  def inner(self, *args, **kwargs):
    try:
      try:
        self.lock.acquire()
      except KeyboardInterrupt:
        print >> sys.stderr, 'Was deadlocked'
        raise
      return method(self, *args, **kwargs)
    finally:
      self.lock.release()
  return inner
class WorkItem(object):
  """A single unit of work, identified by a unique name."""
  # Cygwin throws randomly when creating close to ~100 locks, so as a
  # workaround every WorkItem shares one class-level lock instead of owning
  # its own. Yes, a single lock for all the objects.
  lock = threading.Lock()

  def __init__(self, name):
    self._name = name  # A unique string representing this work item.

  def run(self, work_queue):
    """Subclasses override this. work_queue is passed as a keyword argument,
    so keep it as the last parameter when overriding."""
    pass

  @property
  def name(self):
    return self._name
class ExecutionQueue(object):
  """Runs a set of WorkItem that have interdependencies and were WorkItem are
  added as they are processed.

  In gclient's case, Dependencies sometime needs to be run out of order due to
  From() keyword. This class manages that all the required dependencies are run
  before running each one.

  Methods of this class are thread safe.

  NOTE(review): Python 2 only (Queue module, xrange, print statements,
  three-argument raise).
  """
  def __init__(self, jobs, progress, ignore_requirements):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status
    self.progress = progress
    if self.progress:
      self.progress.update(0)

    self.ignore_requirements = ignore_requirements

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        self.progress._total = total + 1
        self.progress.update(0)
      # Wake flush() so it can consider the new item.
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed."""
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break

          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            if (self.ignore_requirements or
                not (set(self.queued[i].requirements) - set(self.ran))):
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out the outher loop.
            break

        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
              self.jobs,
              len(self.queued),
              ', '.join(self.ran),
              len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        self.running.append(t)
      else:
        t.join()
        sys.stdout.flush()
        if self.progress:
          self.progress.update(1, t.item.name)
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    if self.jobs > 1:
      # Start the thread.
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""
    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      # Index is picked up via getattr(..., 'index') by Annotated.write() to
      # prefix this thread's output.
      self.index = index
      self.args = args
      self.kwargs = kwargs
      self.daemon = True

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.run(*self.args, **self.kwargs)
      except KeyboardInterrupt:
        logging.info('Caught KeyboardInterrupt in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
        raise
      except Exception:
        # Catch exception location.
        logging.info('Caught exception in thread %s', self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
      finally:
        logging.info('_Worker.run(%s) done', self.item.name)
        # Wake the flush() loop so it can reap this thread.
        work_queue.ready_cond.acquire()
        try:
          work_queue.ready_cond.notifyAll()
        finally:
          work_queue.ready_cond.release()
def GetEditor(git, git_editor=None):
  """Returns the most plausible editor to use.

  In order of preference:
  - GIT_EDITOR/SVN_EDITOR environment variable
  - core.editor git configuration variable (if supplied by git-cl)
  - VISUAL environment variable
  - EDITOR environment variable
  - vim (non-Windows) or notepad (Windows)

  In the case of git-cl, this matches git's behaviour, except that it does not
  include dumb terminal detection.

  In the case of gcl, this matches svn's behaviour, except that it does not
  accept a command-line flag or check the editor-cmd configuration variable.
  """
  if git:
    editor = os.environ.get('GIT_EDITOR') or git_editor
  else:
    editor = os.environ.get('SVN_EDITOR')
  if not editor:
    editor = os.environ.get('VISUAL') or os.environ.get('EDITOR')
  if not editor:
    editor = 'notepad' if sys.platform.startswith('win') else 'vim'
  return editor
def RunEditor(content, git, git_editor=None):
  """Opens up the default editor in the system to get the CL description.

  Returns the edited text, or None when no editor is available or the editor
  exits with an error. NOTE(review): Python 2 only (print statement).
  """
  file_handle, filename = tempfile.mkstemp(text=True, prefix='cl_description')
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()

  try:
    editor = GetEditor(git, git_editor=git_editor)
    if not editor:
      return None
    cmd = '%s %s' % (editor, filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires the usage of 'env' to be present.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    # Always clean up the temporary description file.
    os.remove(filename)
def UpgradeToHttps(url):
  """Upgrades random urls to https://.

  Do not touch unknown urls like ssh:// or git://.
  Do not touch http:// urls with a port number,
  Fixes invalid GAE url.
  """
  if not url:
    return url
  if not re.match(r'[a-z\-]+\://.*', url):
    # Without a scheme, urlparse() would treat the input as a relative url
    # ("http:///foo"). Default to http:// so naked hosts like
    # "localhost:8080" parse correctly.
    url = 'http://%s' % url
  parts = list(urlparse.urlparse(url))
  scheme, netloc = parts[0], parts[1]
  # Do not automatically upgrade http to https if a port number is provided.
  if scheme == 'http' and not re.match(r'^.+?\:\d+$', netloc):
    parts[0] = 'https'
  return urlparse.urlunparse(parts)
def ParseCodereviewSettingsContent(content):
  """Parses a codereview.settings file into a {key: value} dict, upgrading
  server urls to https where appropriate."""
  keyvals = {}
  try:
    for line in content.splitlines():
      if not line or line.strip().startswith('#'):
        continue
      key, value = line.split(':', 1)
      keyvals[key.strip()] = value.strip()
  except ValueError:
    raise Error(
        'Failed to process settings, please fix. Content:\n\n%s' % content)

  for url_key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
    if keyvals.get(url_key):
      keyvals[url_key] = UpgradeToHttps(keyvals[url_key])
  return keyvals
def NumLocalCpus():
  """Returns the number of processors.

  Python on OSX 10.6 raises a NotImplementedError exception.
  """
  try:
    import multiprocessing
    count = multiprocessing.cpu_count()
  except:  # pylint: disable=W0702
    # Mac OS 10.6 only
    # pylint: disable=E1101
    count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
  return count
|
import types
from django.core import urlresolvers as django_urlresolvers
from django.utils.functional import curry
class DecoratorMixin(object):
    """
    Mixin class to return decorated views from RegexURLPattern/RegexURLResolver
    """
    def __init__(self, *args, **kwargs):
        super(DecoratorMixin, self).__init__(*args, **kwargs)
        self.decorators = []

    def resolve(self, path):
        match = super(DecoratorMixin, self).resolve(path)
        if match:
            view, view_args, view_kwargs = match
            return self.apply_decorators(view), view_args, view_kwargs
        return match

    def apply_decorators(self, callback):
        if not isinstance(callback, types.FunctionType):
            # Some decorators do not work with class views
            callback = curry(callback)
        for decorator in self.decorators:
            callback = decorator(callback)
        return callback
# Django URL pattern with DecoratorMixin's decorator support mixed in.
class RegexURLPattern(DecoratorMixin, django_urlresolvers.RegexURLPattern):
    pass
# Django URL resolver with DecoratorMixin's decorator support mixed in.
class RegexURLResolver(DecoratorMixin, django_urlresolvers.RegexURLResolver):
    pass
Fix: support the new ResolverMatch object introduced in Django 1.3, while remaining compatible with the (callback, args, kwargs) tuple returned by earlier Django versions.
import types
from django.core import urlresolvers as django_urlresolvers
from django.utils.functional import curry
class DecoratorMixin(object):
    """
    Mixin class to return decorated views from RegexURLPattern/RegexURLResolver
    """
    def __init__(self, *args, **kwargs):
        super(DecoratorMixin, self).__init__(*args, **kwargs)
        self.decorators = []

    def resolve(self, path):
        match = super(DecoratorMixin, self).resolve(path)
        if not match:
            return match
        try:
            # Django >= 1.3: match is a ResolverMatch with a mutable .func.
            match.func = self.apply_decorators(match.func)
        except AttributeError:
            # Django < 1.3: match is a plain (callback, args, kwargs) tuple.
            callback, view_args, view_kwargs = match
            match = (self.apply_decorators(callback), view_args, view_kwargs)
        return match

    def apply_decorators(self, callback):
        if not isinstance(callback, types.FunctionType):
            # Some decorators do not work with class views
            callback = curry(callback)
        for decorator in self.decorators:
            callback = decorator(callback)
        return callback
# Django URL pattern with DecoratorMixin's decorator support mixed in.
class RegexURLPattern(DecoratorMixin, django_urlresolvers.RegexURLPattern):
    pass
# Django URL resolver with DecoratorMixin's decorator support mixed in.
class RegexURLResolver(DecoratorMixin, django_urlresolvers.RegexURLResolver):
    pass
|
import numpy
__all__ = ['enumarray']
class enumarray:
    def __init__(self, labels, data=None):
        """An enumerated NumPy array.

        Create an enumarray with the given *labels*, which must be specified
        as an iterable of iterables. If *data* is supplied (optional), it
        forms the values in the enumarray; otherwise the enumarray is filled
        with numpy.nan.

        The enumarray can be addressed in a variety of ways:

        The *labels* are converted to `list`s, so care should be taken when
        passing iterable but unordered types (`dict`, `set`) containing
        labels. Any type of label object (i.e. not only `str`) is accepted;
        but ambiguous behaviour may result for:

        - Integer labels that are also within the range of a particular
          dimension.
        - Labels that are slices.

        The *data* is converted using `numpy.asarray`, so no copy is
        performed if it is already an ndarray.

        Raises TypeError for duplicate labels, and ValueError when *data*
        and *labels* disagree in rank or per-dimension length.
        """
        # determine the shape of the enumarray from the labels
        shape = []
        for i in labels:
            shape.append(len(i))
            if shape[-1] != len(set(i)):  # check that elements are unique
                raise TypeError(('dimension {} has duplicate labels among {}'
                                 ).format(len(shape) + 1, i))
        # process the data, if any
        if data is None:  # no data supplied
            # numpy.nan: the numpy.NaN alias was removed in NumPy 2.0.
            data = numpy.nan * numpy.ones(shape)
        else:
            data = numpy.asarray(data)
            if len(shape) != len(data.shape):  # check data & labels agree
                raise ValueError(('mismatch of {}-D labels and {}-D data'
                                  ).format(len(shape), len(data.shape)))
            for i in range(len(shape)):  # check each dim. of data and label
                if len(labels[i]) != data.shape[i]:
                    # BUG FIX: the format string has three placeholders but
                    # was only given two arguments, so raising this error
                    # crashed with an IndexError; pass the dimension index.
                    raise ValueError(('dimension {} has {} labels but length '
                                      '{}').format(i, len(labels[i]),
                                                   data.shape[i]))
        # store the data, dimension, and convert label iterables to lists
        self._data = data
        self.dim = len(self._data.shape)
        self.labels = [list(i) for i in labels]

    def _indices(self, key):
        """Return a set of indices which can be used to address a NumPy array.

        For any *key* object or tuple, return a tuple of integer indices to
        the NumPy array enumarray._data.
        """
        try:  # try to use the key directly
            # TODO this can be expensive if *key* is standard, but expensive,
            # NumPy way of indexing arrays. Streamline if possible.
            self._data[key]
            return key
        except (ValueError, IndexError):  # key contains at least some labels
            pass
        # wrap a string key to a 1-D array so len() below doesn't give the
        # length of the string
        # TODO check if this works without the typecheck
        if self.dim == 1 and type(key) == str:
            key = (key,)
        if len(key) != self.dim:  # check that the key is of proper length
            raise KeyError('expected {} dimension(s), got {}: {}'.format(
                self.dim, len(key), key))
        # interpret key contents, dimension by dimension
        result = []
        for i, k in enumerate(key):  # i is dimension, k is key contents
            if type(k) == slice:  # slice objects
                if k == slice(None):  # an 'all' slice (":") passes through
                    result.append(k)
                else:  # convert all other slices to indices
                    result.append(range(*k.indices(self._data.shape[i])))
            elif isinstance(k, int):  # integers: use as single index
                result.append(k)
            else:  # other contents
                try:  # a single label
                    result.append(self.labels[i].index(k))
                    continue
                except ValueError:
                    pass
                if isinstance(k, type(self.labels[i][0])):
                    # key is of same type as the labels for this dimension,
                    # so it is probably a single label
                    k = (k,)
                # look up elements of k (may only be 1) in the list of labels
                # for this dimension
                _result = []
                try:
                    for k_ in k:
                        _result.append(self.labels[i].index(k_))
                except ValueError:  # one of the labels was incorrect
                    raise ValueError(
                        ("label '{}' in slice/index {} does not appear in "
                         "dimension {}: {}").format(k_, k, i, self.labels[i]))\
                        from None
                result.append(_result)
        return tuple(result)

    def __getitem__(self, key):
        """Return the value(s) addressed by *key* (labels/indices/slices)."""
        # BUG FIX: compute the indices once. The previous version re-invoked
        # _indices() inside the exception handler, so a ValueError raised by
        # _indices() itself crashed again while printing the diagnostics.
        indices = self._indices(key)
        try:
            return self._data[indices]
        except ValueError:
            # Diagnostic dump before re-raising with original traceback.
            print(self._data, self._data.shape, key, indices)
            raise

    def __setitem__(self, key, value):
        """Assign *value* to the position(s) addressed by *key*."""
        self._data[self._indices(key)] = value

    def __str__(self):
        # TODO add pretty-printing
        return str(self._data)
if __name__ == '__main__':
    # Small smoke-test: build a 3x2 enumarray and print a few selections.
    demo = enumarray(labels=(('a', 'b', 'c'), ('d', 'e')),
                     data=numpy.arange(6).reshape((3, 2)))
    for view in (demo, demo['b', :], demo[('a', 'c'), -1], demo[:, 'e']):
        print(view)
Improve indexing options by building an open mesh with numpy.ix_, so that combinations of labels, integer sequences, and slices select consistent rectangular sub-arrays.
import numpy
__all__ = ['enumarray']
class enumarray:
def __init__(self, labels, data=None):
"""An enumerated NumPy array.
Create an enumarray with the given *labels*, which must be specified as
an iterable of iterables. If *data* is supplied (optional), it forms
the values in the enumarray; otherwise the enumarray is filled with
numpy.NaN.
The enumarray can be addressed in a variety of ways:
The *labels* are converted to `list`s, so care should be taken when
passing iterable but unordered types (`dict`, `set`) containing labels.
Any type of label object (i.e. not only `str`) is accepted; but
ambiguous behaviour may result for:
- Integer labels that are also within the range of a particular
dimension.
- Labels that are slices.
The *data* is converted using `numpy.asarray`, so no copy is performed
if it is already an ndarray.
"""
# determine the shape of the enumarray from the labels
shape = []
for i in labels:
shape.append(len(i))
if shape[-1] != len(set(i)): # check that elements are unique
raise TypeError(('dimension {} has duplicate labels among {}'
).format(len(shape) + 1, i))
# process the data, if any
if data is None: # no data supplied
data = numpy.NaN * numpy.ones(shape)
else:
data = numpy.asarray(data)
if len(shape) != len(data.shape): # check that data & labels agree
raise ValueError(('mismatch of {}-D labels and {}-D data'
).format(len(shape), len(data.shape)))
for i in range(len(shape)): # check each dim. of data and label
if len(labels[i]) != data.shape[i]:
raise ValueError(('dimension {} has {} labels but length '
'{}').format(len(labels[i]),
data.shape[i]))
# store the data, dimension, and convert lable iterables to lists
self._data = data
self.dim = len(self._data.shape)
self.labels = [list(i) for i in labels]
    def _indices(self, key):
        """Translate *key* into indices suitable for addressing self._data.

        *key* may already be a valid NumPy index (returned unchanged), or a
        tuple with one entry per dimension where each entry is a slice, an
        int, an iterable of ints, a single label, or an iterable of labels
        (labels are looked up in self.labels for that dimension).

        Returns either the original key or the result of numpy.ix_() over
        per-dimension index lists.

        Raises KeyError when the key has the wrong number of dimensions and
        ValueError when a label is unknown.
        """
        try:  # try to use the key directly
            # TODO this can be expensive if *key* is standard, but expensive,
            # NumPy way of indexing arrays. Streamline if possible.
            self._data[key]
            return key
        except (ValueError, IndexError):  # key contains at least some labels
            pass
        # wrap a string key to a 1-D array so len() below doesn't give the
        # length of the string
        # TODO check if this works without the typecheck
        if self.dim == 1 and type(key) == str:
            key = (key,)
        if len(key) != self.dim:  # check that the key is of proper length
            raise KeyError('expected {} dimension(s), got {}: {}'.format(
                self.dim, len(key), key))
        # interpret key contents, dimension by dimension
        result = []
        for i, k in enumerate(key):  # i is dimension, k is key contents
            if type(k) == slice:  # slice objects
                if k == slice(None):  # an 'all' slice (ie. ":") passes through
                    result.append(range(self._data.shape[i]))
                else:  # convert all other slices to indices
                    result.append(range(*k.indices(self._data.shape[i])))
            elif isinstance(k, int):  # integers: use directly as single index
                result.append(k)
            else:  # other contents
                try:  # an iterable of integers is already usable as-is
                    if all([isinstance(k_, int) for k_ in k]):
                        result.append(k)
                        continue
                except TypeError:  # k was not iterable
                    pass
                try:  # a single label; wrap its position in a 1-tuple
                    result.append((self.labels[i].index(k),))
                    continue
                except ValueError:  # not a known label for this dimension
                    pass
                if isinstance(k, type(self.labels[i][0])):
                    # key is of same type as the labels for this dimension, so
                    # it is probably a single label
                    k = (k,)
                # look up elements of k (may only be 1) in the list of labels
                # for this dimension
                _result = []
                try:
                    for k_ in k:
                        _result.append(self.labels[i].index(k_))
                except ValueError:  # one of the labels was incorrect
                    raise ValueError(
                        ("label '{}' in slice/index {} does not appear in "
                         "dimension {}: {}").format(k_, k, i, self.labels[i]))\
                        from None
                result.append(_result)
        return numpy.ix_(*result)
def __getitem__(self, key):
try:
return self._data[self._indices(key)]
except ValueError as e:
print(self._data, self._data.shape, key, self._indices(key))
raise e
    def __setitem__(self, key, value):
        """Assign *value* to the element(s) addressed by *key* (labels allowed)."""
        self._data[self._indices(key)] = value
    def __str__(self):
        """Delegate to the underlying NumPy array's string representation."""
        # TODO add pretty-printing (e.g. include the labels in the output)
        return str(self._data)
if __name__ == '__main__':
    # Smoke test: 3x2 array with row labels a/b/c and column labels d/e.
    ea = enumarray(labels=(('a', 'b', 'c'), ('d', 'e')),
                   data=numpy.arange(6).reshape((3, 2)))
    print(ea)
    print(ea['b', :])  # one row by label, all columns
    print(ea[('a', 'c'), -1])  # two rows by label, last column by int index
    print(ea[:, 'e'])  # all rows, one column by label
|
import os
from .exceptions import ImproperlyConfigured
from .utils import safe_join
class BaseFinder(object):
    """Abstract base class for finders; subclasses implement find()."""

    def find(self, path, all=False):
        """Locate *path*; must be overridden by subclasses."""
        raise NotImplementedError()


class FileSystemFinder(BaseFinder):
    """Find files by checking each directory in a configured list."""

    def __init__(self, directories):
        self.locations = []
        if not isinstance(directories, (list, tuple)):
            raise ImproperlyConfigured(
                "FileSystemFinder's 'directories' parameter is not a "
                "tuple or list; perhaps you forgot a trailing comma?")
        # De-duplicate while preserving the configured order.
        for directory in directories:
            if directory not in self.locations:
                self.locations.append(directory)

    def find(self, path, all=False):
        """Return the first match for *path* (or None), or every match as a
        list when *all* is true."""
        matches = []
        for root in self.locations:
            matched_path = self.find_location(root, path)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        # Bug fix: with all=False and no match, return None instead of the
        # (falsy but list-typed) empty accumulator, so callers always get
        # either a path or None in single-result mode.
        return matches if all else None

    def find_location(self, root, path):
        """Return the joined path under *root* if it exists on disk, else None."""
        path = safe_join(root, path)
        if os.path.exists(path):
            return path
Fix FileSystemFinder's find return value if not all
import os
from .exceptions import ImproperlyConfigured
from .utils import safe_join
class BaseFinder(object):
    """Common interface for finder implementations."""

    def find(self, path, all=False):
        """Subclasses must provide the actual lookup."""
        raise NotImplementedError()


class FileSystemFinder(BaseFinder):
    """Resolve a relative path against an ordered list of directories."""

    def __init__(self, directories):
        self.locations = []
        if not isinstance(directories, (list, tuple)):
            raise ImproperlyConfigured(
                "FileSystemFinder's 'directories' parameter is not a "
                "tuple or list; perhaps you forgot a trailing comma?")
        # Keep only the first occurrence of each directory.
        for candidate in directories:
            if candidate not in self.locations:
                self.locations.append(candidate)

    def find(self, path, all=False):
        """Return one match (or None) by default; all matches when *all*."""
        hits = []
        for location in self.locations:
            hit = self.find_location(location, path)
            if not hit:
                continue
            if not all:
                return hit
            hits.append(hit)
        return hits if all else None

    def find_location(self, root, path):
        """Safely join *root* and *path*; return the result if it exists."""
        full_path = safe_join(root, path)
        if os.path.exists(full_path):
            return full_path
|
# -*- coding: utf-8 -*-
__author__ = 'Blake Printy'
__email__ = 'bprinty@gmail.com'
__version__ = '0.2.9'
from .datatypes import composite
from .datatypes import filetree
from .decorators import require
from .decorators import exception
from .decorators import keywords
from .decorators import cached
from .metaclasses import DocRequire
incremented patch version for minor feature addition
# -*- coding: utf-8 -*-
__author__ = 'Blake Printy'
__email__ = 'bprinty@gmail.com'
__version__ = '0.2.10'
from .datatypes import composite
from .datatypes import filetree
from .decorators import require
from .decorators import exception
from .decorators import keywords
from .decorators import cached
from .metaclasses import DocRequire
|
from .models import Client
from .models import Contact
from .models import Contract
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import SettingsApp
from .models import SettingsCompany
from .models import SettingsContract
from .models import Task
from .models import Time
from .models import DASHBOARD_CHOICES
from django import forms
from taggit.models import Tag
from django.utils import timezone
class AdminProfileForm(forms.ModelForm):
    """Admin-facing Profile form; also exposes dashboard override fields."""

    class Meta:
        model = Profile
        fields = ('rate', 'preferred_payment_method', 'bio', 'address',
                  'dashboard_override', 'dashboard_choices', 'icon_size',
                  'published')
        widgets = {
            'bio': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
        exclude = ('notify', )

    # Multi-select of dashboard panels; per-user override of the app setting.
    dashboard_choices = forms.MultipleChoiceField(
        choices=DASHBOARD_CHOICES,
        label='Dashboard Choices',
        required=False,
        widget=forms.SelectMultiple(attrs={
            'size': '6',
        }))
class AdminTimeForm(forms.ModelForm):
    """Admin-facing time-entry form exposing all bookkeeping relations."""

    class Meta:
        model = Time
        fields = (
            'date',
            'hours',
            'log',
            'client',
            'estimate',
            'invoice',
            'project',
            'user',
            'task',
            'invoiced',
        )
        widgets = {
            'hours': forms.widgets.NumberInput(attrs={
                'class': 'col-2'
            }),
        }

    # Bug fix: pass the callable, not timezone.now() — calling it here
    # evaluates at import time and freezes the initial date at whatever day
    # the process started. Django calls the callable on each form render.
    date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
class ClientForm(forms.ModelForm):
    """Create/edit form exposing every Client field."""

    class Meta:
        model = Client
        fields = '__all__'
class ContactForm(forms.ModelForm):
    """Create/edit form for Contact with an explicit field whitelist."""

    class Meta:
        model = Contact
        fields = ('active', 'first_name', 'last_name', 'email', 'office_phone',
                  'mobile_phone', 'client', 'subscribed')
class ContractForm(forms.ModelForm):
    """Create/edit form for Contract; body is edited with TinyMCE."""

    class Meta:
        model = Contract
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class EstimateForm(forms.ModelForm):
    """Create/edit form for Estimate."""

    class Meta:
        model = Estimate
        fields = (
            'hidden',
            'subject',
            'client',
            'project',
            'accepted_date',
            'issue_date',
            'is_sow',
            'is_to',
            'contacts',
            'user',
        )

    # Only active contacts that have an e-mail address can be attached.
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.filter(active=True).exclude(
            email=None).order_by('first_name'),
        required=False,
        widget=forms.SelectMultiple(attrs={
            'size': '5'
        }))
class FileForm(forms.ModelForm):
    """Create/edit form exposing every File field."""

    class Meta:
        model = File
        fields = '__all__'
class InvoiceForm(forms.ModelForm):
    """Create/edit form for Invoice.

    The model also tracks: Issue Date, Last Payment Date, Invoice ID,
    PO Number, Client, Subject, Invoice Amount, Paid Amount, Balance,
    Subtotal, Discount, Tax, Tax2, Currency, Currency Symbol, Document Type —
    only the subset below is user-editable here.
    """

    class Meta:
        model = Invoice
        fields = (
            'hidden',
            'subject',
            'po_number',
            'client',
            'project',
            'issue_date',
            'last_payment_date',
        )
class NewsletterForm(forms.ModelForm):
    """Compose form for a Newsletter; text is edited with TinyMCE."""

    class Meta:
        model = Newsletter
        fields = ('template_choices', 'contacts', 'subject', 'text')
        widgets = {
            'text': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }

    # Recipients are limited to subscribed contacts with an e-mail address.
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.filter(subscribed=True).exclude(
            email=None).order_by('first_name'),
        label='Recipients',
        widget=forms.SelectMultiple(attrs={
            'size': '50'
        }),
        required=False)
class NoteForm(forms.ModelForm):
    """Create/edit form for Note with tag and contact pickers."""

    class Meta:
        model = Note
        fields = ('active', 'hidden', 'title', 'tags', 'note', 'due_date',
                  'contacts')
        widgets = {
            'note': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }

    # Free-form taggit tags.
    tags = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False,
        widget=forms.SelectMultiple(attrs={
            'size': '5'
        }))
    # Only active contacts with an e-mail address can be attached.
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.filter(active=True).exclude(
            email=None).order_by('first_name'),
        required=False,
        widget=forms.SelectMultiple(attrs={
            'size': '5'
        }))
class ProfileForm(forms.ModelForm):
    """Self-service Profile form (no admin-only dashboard fields)."""

    class Meta:
        model = Profile
        fields = ('rate', 'bio', 'address', 'preferred_payment_method',
                  'icon_size')
        widgets = {
            'bio': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class ProjectForm(forms.ModelForm):
    """Create/edit form for Project with date pickers for start/end."""

    class Meta:
        model = Project
        fields = ('active', 'hidden', 'name', 'start_date', 'end_date',
                  'notes', 'client', 'task', 'team')
        widgets = {
            'notes': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }

    # Bug fix (both fields): pass the callable, not timezone.now() — calling
    # it at class-definition time freezes the initial date at process start.
    start_date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
    end_date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
class ProposalForm(forms.ModelForm):
    """Create/edit form for Proposal; body is edited with TinyMCE."""

    class Meta:
        model = Proposal
        fields = '__all__'
        widgets = {
            'body': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class ReportForm(forms.ModelForm):
    """Create/edit form for Report; invoices selectable newest-first."""

    class Meta:
        model = Report
        fields = '__all__'

    invoices = forms.ModelMultipleChoiceField(
        required=False, queryset=Invoice.objects.all().order_by('-issue_date'))
class ServiceForm(forms.ModelForm):
    """Create/edit form for Service; description is edited with TinyMCE."""

    class Meta:
        model = Service
        fields = '__all__'
        widgets = {
            'description': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class SettingsAppForm(forms.ModelForm):
    """Form for the singleton application settings model."""

    class Meta:
        model = SettingsApp
        fields = '__all__'
class SettingsCompanyForm(forms.ModelForm):
    """Form for the singleton company settings model."""

    class Meta:
        model = SettingsCompany
        fields = '__all__'
        widgets = {
            'notes': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class SettingsContractForm(forms.ModelForm):
    """Form for contract boilerplate settings.

    Every clause field is free text edited with TinyMCE, hence the uniform
    widget mapping below.
    """

    class Meta:
        model = SettingsContract
        fields = '__all__'
        widgets = {
            'parties':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'scope_of_work':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'payment_terms':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'timing_of_payment':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'contributor_assignment_agreement':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'authority_to_act':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'termination':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'governing_laws':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'period_of_agreement':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'confidentiality':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'taxes':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'limited_warranty':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
            'complete_agreement':
            forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }
class TaskForm(forms.ModelForm):
    """Create/edit form exposing every Task field."""

    class Meta:
        model = Task
        fields = '__all__'
class TimeForm(forms.ModelForm):
    """Self-service time-entry form (date, hours, log only)."""

    class Meta:
        model = Time
        fields = ('date', 'hours', 'log')
        widgets = {
            'hours': forms.widgets.NumberInput(attrs={
                'class': 'col-2'
            }),
        }

    # Bug fix: pass the callable, not timezone.now() — calling it at
    # class-definition time freezes the initial date at process start.
    date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
Update AdminTimeForm: remove 'client' from editable fields
from .models import Client
from .models import Contact
from .models import Contract
from .models import Estimate
from .models import File
from .models import Invoice
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import SettingsApp
from .models import SettingsCompany
from .models import SettingsContract
from .models import Task
from .models import Time
from .models import DASHBOARD_CHOICES
from django import forms
from taggit.models import Tag
from django.utils import timezone
class AdminProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('rate', 'preferred_payment_method', 'bio', 'address',
'dashboard_override', 'dashboard_choices', 'icon_size',
'published')
widgets = {
'bio': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
exclude = ('notify', )
dashboard_choices = forms.MultipleChoiceField(
choices=DASHBOARD_CHOICES,
label='Dashboard Choices',
required=False,
widget=forms.SelectMultiple(attrs={
'size': '6',
}))
class AdminTimeForm(forms.ModelForm):
    """Admin-facing time-entry form exposing bookkeeping relations."""

    class Meta:
        model = Time
        fields = (
            'date',
            'hours',
            'log',
            'estimate',
            'invoice',
            'project',
            'user',
            'task',
            'invoiced',
        )
        widgets = {
            'hours': forms.widgets.NumberInput(attrs={
                'class': 'col-2'
            }),
        }

    # Bug fix: pass the callable, not timezone.now() — calling it here
    # evaluates at import time and freezes the initial date at whatever day
    # the process started. Django calls the callable on each form render.
    date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
class ClientForm(forms.ModelForm):
class Meta:
model = Client
fields = '__all__'
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('active', 'first_name', 'last_name', 'email', 'office_phone',
'mobile_phone', 'client', 'subscribed')
class ContractForm(forms.ModelForm):
class Meta:
model = Contract
fields = '__all__'
widgets = {
'body': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class EstimateForm(forms.ModelForm):
class Meta:
model = Estimate
fields = (
'hidden',
'subject',
'client',
'project',
'accepted_date',
'issue_date',
'is_sow',
'is_to',
'contacts',
'user',
)
contacts = forms.ModelMultipleChoiceField(
queryset=Contact.objects.filter(active=True).exclude(
email=None).order_by('first_name'),
required=False,
widget=forms.SelectMultiple(attrs={
'size': '5'
}))
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = '__all__'
class InvoiceForm(forms.ModelForm):
"""
Issue Date, Last Payment Date, Invoice ID, PO Number, Client, Subject,
Invoice Amount, Paid Amount, Balance, Subtotal, Discount, Tax, Tax2,
Currency, Currency Symbol, Document Type
"""
class Meta:
model = Invoice
fields = (
'hidden',
'subject',
'po_number',
'client',
'project',
'issue_date',
'last_payment_date',
)
class NewsletterForm(forms.ModelForm):
class Meta:
model = Newsletter
fields = ('template_choices', 'contacts', 'subject', 'text')
widgets = {
'text': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
contacts = forms.ModelMultipleChoiceField(
queryset=Contact.objects.filter(subscribed=True).exclude(
email=None).order_by('first_name'),
label='Recipients',
widget=forms.SelectMultiple(attrs={
'size': '50'
}),
required=False)
class NoteForm(forms.ModelForm):
class Meta:
model = Note
fields = ('active', 'hidden', 'title', 'tags', 'note', 'due_date',
'contacts')
widgets = {
'note': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
tags = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False,
widget=forms.SelectMultiple(attrs={
'size': '5'
}))
contacts = forms.ModelMultipleChoiceField(
queryset=Contact.objects.filter(active=True).exclude(
email=None).order_by('first_name'),
required=False,
widget=forms.SelectMultiple(attrs={
'size': '5'
}))
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('rate', 'bio', 'address', 'preferred_payment_method',
'icon_size')
widgets = {
'bio': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class ProjectForm(forms.ModelForm):
    """Create/edit form for Project with date pickers for start/end."""

    class Meta:
        model = Project
        fields = ('active', 'hidden', 'name', 'start_date', 'end_date',
                  'notes', 'client', 'task', 'team')
        widgets = {
            'notes': forms.widgets.TextInput(attrs={
                'class': 'tinymce'
            }),
        }

    # Bug fix (both fields): pass the callable, not timezone.now() — calling
    # it at class-definition time freezes the initial date at process start.
    start_date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
    end_date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
class ProposalForm(forms.ModelForm):
class Meta:
model = Proposal
fields = '__all__'
widgets = {
'body': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class ReportForm(forms.ModelForm):
class Meta:
model = Report
fields = '__all__'
invoices = forms.ModelMultipleChoiceField(
required=False, queryset=Invoice.objects.all().order_by('-issue_date'))
class ServiceForm(forms.ModelForm):
class Meta:
model = Service
fields = '__all__'
widgets = {
'description': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class SettingsAppForm(forms.ModelForm):
class Meta:
model = SettingsApp
fields = '__all__'
class SettingsCompanyForm(forms.ModelForm):
class Meta:
model = SettingsCompany
fields = '__all__'
widgets = {
'notes': forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class SettingsContractForm(forms.ModelForm):
class Meta:
model = SettingsContract
fields = '__all__'
widgets = {
'parties':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'scope_of_work':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'payment_terms':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'timing_of_payment':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'contributor_assignment_agreement':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'authority_to_act':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'termination':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'governing_laws':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'period_of_agreement':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'confidentiality':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'taxes':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'limited_warranty':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
'complete_agreement':
forms.widgets.TextInput(attrs={
'class': 'tinymce'
}),
}
class TaskForm(forms.ModelForm):
class Meta:
model = Task
fields = '__all__'
class TimeForm(forms.ModelForm):
    """Self-service time-entry form (date, hours, log only)."""

    class Meta:
        model = Time
        fields = ('date', 'hours', 'log')
        widgets = {
            'hours': forms.widgets.NumberInput(attrs={
                'class': 'col-2'
            }),
        }

    # Bug fix: pass the callable, not timezone.now() — calling it at
    # class-definition time freezes the initial date at process start.
    date = forms.DateField(
        widget=forms.DateInput(attrs={
            'type': 'date',
            'class': 'col-2'
        }),
        required=False,
        initial=timezone.now)
|
from boto.exception import BotoServerError
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import BytesIO
from io import StringIO
from lxml import etree
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from operator import or_ as OR
# Shared fake-data generator (used for test mail and placeholder text).
fake = Faker()
# Map a model's verbose name to its (detail, edit, index) URL names.
# An empty string means the model has no page of that kind.
URL_NAMES = {
    'app settings': ('settings_app', 'settings_app_edit', ''),
    'client': ('client', 'client_edit', 'client_index'),
    'contact': ('contact', 'contact_edit', 'contact_index'),
    'contract': ('contract', 'contract_edit', 'contract_index'),
    'contract settings': ('settings_contract', 'settings_contract_edit', ''),
    'Company': ('settings_company', 'settings_company_edit', ''),
    'estimate': ('estimate', 'estimate_edit', 'estimate_index'),
    'file': ('file', 'file_edit', 'file_index'),
    'invoice': ('invoice', 'invoice_edit', 'invoice_index'),
    'log': ('log', 'log_edit', 'log_index'),
    'newsletter': ('newsletter', 'newsletter_edit', 'newsletter_index'),
    'note': ('note', 'note_edit', 'note_index'),
    'profile': ('user', 'user_edit', 'user_index'),
    'project': ('project', 'project_edit', 'project_index'),
    'proposal': ('proposal', 'proposal_edit', 'proposal_index'),
    'report': ('report', 'report_edit', 'report_index'),
    'service': ('company', 'service_edit', ''),
    'task': ('task', 'task_edit', 'task_index'),
    'time': ('time_entry', 'time_edit', 'time_index'),
    'user': ('user', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Convert strings to boolean values
    """

    def clean(self, value):
        # Only the literal string 'Yes' counts as true.
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Convert strings to decimal values
    """

    def clean(self, value):
        # Blank/None imports become zero.
        if not value:
            return Decimal(0)
        # Strip thousands separators before conversion.
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: return the raw imported value unchanged.
    """

    def clean(self, value):
        return value
def mail_compose(obj, **kwargs):
    """Build the keyword context for mail_send() from *obj* and the form.

    For a Contact, uses the submitted subject/message (or fake text when a
    'test' send was requested); for a Note, renders the note's fields as the
    message body.

    NOTE(review): message/subject are only assigned for 'contact' and 'note'
    models — any other obj raises UnboundLocalError below. Confirm callers
    only pass those two types.
    """
    context = {}
    form = kwargs.get('form')
    request = kwargs.get('request')
    model_name = obj._meta.verbose_name
    recipients = mail_recipients(obj)
    if model_name == 'contact':
        if 'test' in form.data:
            # Test sends use throwaway fake content.
            message = fake.text()
            subject = fake.text()
        else:
            message = form.cleaned_data['message']
            subject = form.cleaned_data['subject']
    elif model_name == 'note':
        items = get_fields([obj, ])
        message = render_to_string('table_items.html', {'items': items, })
        subject = obj.title
    # http://stackoverflow.com/a/28476681/185820
    if 'html' in form.data:
        context['html_message'] = render_to_string('cerberus-fluid.html',
                                                   {'message': message, })
    context['message'] = message
    context['recipients'] = recipients
    context['request'] = request
    context['sender'] = django_settings.EMAIL_FROM
    context['subject'] = subject
    return context
def mail_obj(request, **kwargs):
    """Return the Contact or Note addressed by the request's query string.

    NOTE(review): if neither a 'contact' nor 'note' query parameter matches
    (or the corresponding model kwarg is missing), *obj* is never bound and
    the return raises UnboundLocalError — confirm callers always supply one.
    """
    query_contact = get_query(request, 'contact')
    query_note = get_query(request, 'note')
    contact_model = kwargs.get('contact_model')
    note_model = kwargs.get('note_model')
    if contact_model and query_contact:
        obj = contact_model.objects.get(pk=query_contact)
    elif note_model and query_note:
        obj = note_model.objects.get(pk=query_note)
    return obj
def mail_recipients(obj):
    """Return the recipient address(es) for *obj*.

    A contact yields a 1-tuple of its own address; a note yields the list of
    addresses of its attached contacts. Other models yield None.
    """
    kind = obj._meta.verbose_name
    if kind == 'note':
        return [contact.email for contact in obj.contacts.all()]
    if kind == 'contact':
        return (obj.email, )
def mail_send(**kwargs):
    """Send mail via Django's send_mail().

    Returns (recipients, status) where status is False when Amazon SES
    (boto) rejects the send.
    """
    recipients = kwargs.get('recipients')
    try:
        send_mail(
            kwargs.get('subject'),
            kwargs.get('message'),
            kwargs.get('sender'),
            recipients,
            fail_silently=kwargs.get('fail_silently'),
            html_message=kwargs.get('html_message'))
    except BotoServerError:
        return recipients, False
    return recipients, True
def set_check_boxes(obj, query_checkbox, refer, app_settings_model):
    """Apply checkbox state from the request to *obj*, save, and redirect.

    *query_checkbox* is the dict produced by get_query(request, 'checkbox'):
    'active' / 'subscribe' are 'on', 'off' or None.
    """
    model_name = obj._meta.verbose_name
    if query_checkbox['active'] == 'on' or query_checkbox[
            'active'] == 'off':  # Active
        obj.active = query_checkbox['active'] == 'on'
        # Auto-hide notes when the app is configured to do so.
        if model_name == 'note' and app_settings_model:
            app_settings = app_settings_model.get_solo()
            if app_settings.auto_hide_notes:
                obj.hidden = True
    elif query_checkbox['subscribe'] == 'on' or query_checkbox[
            'subscribe'] == 'off':  # Subscribe
        # Bug fix: read the 'subscribe' checkbox here. The previous code
        # tested query_checkbox['active'], which is None in this branch, so
        # subscribing could never be switched on.
        obj.subscribed = query_checkbox['subscribe'] == 'on'
    obj.save()
    return HttpResponseRedirect(refer)
def edit(request, **kwargs):
    """Create, edit, copy, delete or mail an object, then render its form.

    GET renders a blank form (no pk) or a pre-populated one (with pk).
    POST handles, in order: copy/delete buttons, checkbox toggles, a normal
    form save, and — when saving raises AttributeError (mail forms have no
    model instance) — sending mail instead.
    """
    context = {}
    obj = None
    # Collaborator models are injected by each thin view wrapper.
    active_nav = kwargs.get('active_nav')
    app_settings_model = kwargs.get('app_settings_model')
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    pk = kwargs.get('pk')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    if pk is None:  # New
        form = get_form(
            client_model=client_model,
            form_model=form_model,
            invoice_model=invoice_model,
            model=model,
            user_model=user_model,
            request=request)
    else:  # Existing
        obj = get_object_or_404(model, pk=pk)
        form = get_form(form_model=form_model, obj=obj)
    if request.method == 'POST':
        # NOTE(review): raises KeyError when the Referer header is absent —
        # confirm all POSTs come from in-app pages.
        refer = request.META['HTTP_REFERER']
        if pk is None:
            form = form_model(request.POST)
        else:
            # Copy or delete
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            if copy:
                return obj_copy(obj)
            if delete:
                return obj_remove(obj)
            # Check boxes
            query_checkbox = get_query(request, 'checkbox')
            if query_checkbox['condition']:
                return set_check_boxes(obj, query_checkbox, refer,
                                       app_settings_model)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            try:
                obj = form.save()
                set_relationship(
                    obj,
                    request,
                    client_model=client_model,
                    company_model=company_model,
                    estimate_model=estimate_model,
                    invoice_model=invoice_model,
                    project_model=project_model)
                return obj_edit(obj, pk=pk)
            except AttributeError:
                # Mail forms have no model instance to save: send instead.
                obj = mail_obj(
                    request,
                    contact_model=contact_model,
                    note_model=note_model)
                recipients, status = mail_send(
                    **mail_compose(
                        obj, form=form, request=request))
                if status:
                    messages.add_message(request, messages.SUCCESS,
                                         'Mail sent to %s!' %
                                         ', '.join(recipients))
                else:
                    messages.add_message(request, messages.WARNING,
                                         'Mail not sent to %s!' %
                                         ', '.join(recipients))
    # Assemble the template context for (re-)rendering the form.
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if invoice_model:  # Dashboard totals for reporting
        gross, net = get_invoice_totals(invoice_model)
        context['gross'] = gross
        context['net'] = net
    if model:
        model_name = model._meta.verbose_name
    elif contact_model:
        model_name = contact_model._meta.verbose_name
    elif note_model:
        model_name = note_model._meta.verbose_name
    template_name = get_template_and_url_names(
        model_name=model_name, page_type='edit')
    return render(request, template_name, context)
def generate_doc(contract):
    """Render *contract* as a python-docx Document.

    Builds a heading block (task, client name/address) and then converts the
    contract's HTML body: <h2> becomes a level-2 heading, <p> a paragraph.
    Other tags are ignored.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    # Head
    task = ''
    if contract.task:
        task = contract.task
    title = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        client_name = document.add_heading(contract.client.name, level=1)
        client_name.alignment = WD_ALIGN_PARAGRAPH.CENTER
        client_address = document.add_heading(contract.client.address, level=1)
        client_address.alignment = WD_ALIGN_PARAGRAPH.CENTER
    parser = etree.HTMLParser()  # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), parser)
    # Body
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_active_kwarg(model, active=False, user=None):
    """Build the queryset filter kwargs meaning "active" for *model*.

    "Active" varies by type: unaccepted estimates, unpaid invoices,
    uninvoiced (non-estimate) times, profiles with profile.active, and a
    plain active field for everything else. For times, non-staff users are
    additionally restricted to their own entries.
    """
    kwargs = {}
    model_name = model._meta.verbose_name
    if model_name == 'estimate':
        # Unaccepted invoices are "active"
        if active:
            kwargs['accepted_date'] = None
    elif model_name == 'invoice':
        # Unpaid invoices are "active"
        if active:
            kwargs['last_payment_date'] = None
    elif model_name == 'time':
        # Only staff can see all items
        if not user.is_staff:
            kwargs['user'] = user
        # Uninvoiced times are "active"
        kwargs['invoiced'] = not (active)
        # Estimated times are never "active"
        kwargs['estimate'] = None
    elif model_name == 'user':
        # Use related model's active field
        kwargs['profile__active'] = active
    else:
        # All other models check active field
        kwargs['active'] = active
    return kwargs
def get_client_city(request):
    """GeoIP2 city lookup for the requesting client.

    Returns None (implicitly) when no client IP could be determined.
    """
    ip_address = get_client_ip(request)
    geo = GeoIP2()
    if ip_address:
        return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Return the client IP from the proxy's X-Real-IP header, or None."""
    ip_address = request.META.get('HTTP_X_REAL_IP')
    return ip_address
def get_company_name(company):
    """Return the company name upper-cased with '.' and ', ' replaced by '_'.

    Falls back to placeholder text when no name is configured.
    """
    if company.name:
        company_name = company.name
    else:
        # No name configured: substitute placeholder text.
        company_name = fake.text()
    # Bug fix: normalize the chosen value. The old code re-read
    # company.name here, which discarded the fallback above and raised
    # AttributeError whenever company.name was None.
    company_name = company_name.replace('.', '_')
    company_name = company_name.replace(', ', '_')
    company_name = company_name.upper()
    return company_name
def get_fields(items):
    """Attach a .fields OrderedDict of non-relational field values to each item.

    Mutates the given items in place and returns the same sequence, for use
    by templates that render an item as a table of its fields.
    """
    for item in items:
        fields = item._meta._get_fields()
        item.fields = OrderedDict()
        for field in fields:
            # Skip relations: only concrete values are rendered.
            if not field.is_relation:
                item.fields[field.name] = getattr(item, field.name)
    return items
def get_form(**kwargs):
    """
    Return appropriate form based on new or edit.

    With *obj*, returns a form bound to that instance (notes additionally
    pre-populate their tags). Without *obj*, new reports are pre-filled with
    invoice totals and new contacts with the referenced user's e-mail or
    client; anything else gets a plain unbound form.
    """
    client_model = kwargs.get('client_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    obj = kwargs.get('obj')
    request = kwargs.get('request')
    user_model = kwargs.get('user_model')
    query_client = None
    query_user = None
    if request:
        query_user = get_query(request, 'user')
        query_client = get_query(request, 'client')
    if obj:  # Existing object
        model_name = obj._meta.verbose_name
        if model_name == 'note':  # Populate form with tags already set
            form = form_model(initial={'tags': obj.tags.all()}, instance=obj)
        else:
            form = form_model(instance=obj)
    else:  # New object or mail
        if model:
            model_name = model._meta.verbose_name
            if model_name == 'report' and invoice_model:  # Populate new report
                # with gross, net.
                gross, net = get_invoice_totals(invoice_model)
                obj = model(gross=gross, net=net)
                form = form_model(instance=obj)
            elif model_name == 'contact':  # Populate new contact
                # with appropriate fields
                if query_user:
                    user = get_object_or_404(user_model, pk=query_user)
                    obj = model(email=user.email)
                elif query_client:
                    client = get_object_or_404(client_model, pk=query_client)
                    obj = model(client=client)
                form = form_model(instance=obj)
            else:
                form = form_model()
        else:
            form = form_model()
    return form
def get_index_items(request, model, **kwargs):
    """
    Build the template context for a model's index page: search results,
    ordering, pagination, and per-model extras (note stats, total hours).
    POST requests are treated as searches; anonymous users see no items.
    """
    context = {}
    model_name = model._meta.verbose_name
    app_settings_model = kwargs.get('app_settings_model')
    active_nav = kwargs.get('active_nav')
    columns_visible = kwargs.get('columns_visible')
    company_model = kwargs.get('company_model')
    edit_url = kwargs.get('edit_url')
    order_by = kwargs.get('order_by')
    page_size = kwargs.get('page_size')
    search_fields = kwargs.get('search_fields')
    show_search = kwargs.get('show_search')
    if columns_visible:
        context['columns_visible'] = columns_visible
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # Search is easy
    if request.method == 'POST':
        if search == u'':  # Empty search returns none
            context['active_nav'] = active_nav
            context['show_search'] = True
            return context
        else:
            return get_search_results(
                model,
                search_fields,
                search,
                active_nav=active_nav,
                app_settings_model=app_settings_model,
                edit_url=edit_url,
                request=request)
    # Not a search
    if model_name == 'note' and get_setting(request, app_settings_model,
                                            'exclude_hidden_notes'):
        items = model.objects.exclude(hidden=True)
    else:
        items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Per model extras
    if model_name == 'note':
        context['note_stats'] = get_note_stats(model)
    elif model_name == 'time':
        context['total_hours'] = get_total_hours(items)
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    items = set_items_name(model_name, items=items)
    context['items'] = items
    return context
def get_invoice_totals(model):
    """Sum unpaid invoices; return (gross, net) where net = gross - cost."""
    gross = 0
    cogs = 0
    # Unpaid means no payment recorded yet.
    for invoice in model.objects.filter(last_payment_date=None):
        gross += invoice.amount or 0
        cogs += invoice.cog or 0
    return gross, gross - cogs
def get_query(request, query):
    """Extract *query* from the request.

    Most keys come straight from GET (defaulting to ''), with special
    handling for: 'paginated' (on unless explicitly 'false'), 'search'
    (from POST), 'values' (space-separated groups of comma-separated
    values, for plots), 'checkbox' (a dict of checkbox states), and the
    boolean flags 'doc'/'pdf'/'test'.
    """
    if query == 'paginated':
        # Pagination is enabled unless explicitly disabled.
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':  # plot
        raw = request.GET.get('values')
        tokens = raw.split(' ') if raw else []
        return [token.split(',') for token in tokens]
    if query == 'checkbox':
        active = request.POST.get('checkbox-active')
        subscribe = request.POST.get('checkbox-subscribe')
        return {
            'active': active,
            'subscribe': subscribe,
            # True when either checkbox was submitted at all.
            'condition': active in ('on', 'off') or subscribe in ('on', 'off'),
        }
    if query in ('doc', 'pdf', 'test'):
        return bool(request.GET.get(query))
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(model,
                       search_fields,
                       search,
                       active_nav='',
                       app_settings_model=None,
                       edit_url='',
                       request=None):
    """Case-insensitive search over ``search_fields``, OR-ing the
    per-field conditions, returned as a template context."""
    model_name = model._meta.verbose_name
    # One icontains Q per field, combined with bitwise OR
    q_objects = [Q(**{'%s__icontains' % field: search})
                 for field in search_fields]
    matches = model.objects.filter(reduce(OR, q_objects))
    context = {
        'active_nav': active_nav,
        'edit_url': edit_url,
        'icon_size': get_setting(request, app_settings_model, 'icon_size'),
        'icon_color': get_setting(request, app_settings_model, 'icon_color'),
        'show_search': True,
        'items': set_items_name(model_name, items=matches),
    }
    return context
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Allow user to override global setting

    Per-user profile values win over the app-wide singleton settings;
    anonymous users get None.
    """
    if not request.user.is_authenticated:
        return
    app_settings = app_settings_model.get_solo()
    has_profile = hasattr(request.user, 'profile')
    if setting == 'icon_size':
        pref = request.user.profile.icon_size if has_profile else None
        return pref if pref else app_settings.icon_size
    elif setting == 'icon_color':
        pref = request.user.profile.icon_color if has_profile else None
        return pref if pref else app_settings.icon_color
    elif setting == 'page_size':
        pref = request.user.profile.page_size if has_profile else None
        if pref:
            return pref
        elif page_size:  # View's page_size preference
            return page_size
        else:
            return app_settings.page_size
    elif setting == 'dashboard_choices':
        choices = app_settings.dashboard_choices
        if has_profile and request.user.profile.dashboard_override:
            choices = request.user.profile.dashboard_choices
        return choices
    elif setting == 'exclude_hidden_notes':
        return app_settings.exclude_hidden_notes
def get_template_and_url_names(**kwargs):
    """Resolve template and/or URL names for a model and page type.

    Returns ``(template_name, url_name)`` for 'view', just the template
    name for 'edit', and just the URL name for 'home' / 'index'.
    """
    model_name = kwargs.get('model_name')
    page_type = kwargs.get('page_type')
    if page_type == 'view':
        url_name = URL_NAMES[model_name][0]
        return '%s.html' % url_name, url_name
    elif page_type == 'edit':
        return '%s.html' % URL_NAMES[model_name][1]
    elif page_type == 'home':
        return 'home'
    elif page_type == 'index':
        return URL_NAMES[model_name][2]
def get_times_for_obj(obj, time_model):
    """Collect the time entries relevant to an invoice or a project.

    NOTE(review): for any other model_name ``times`` is unbound and this
    raises UnboundLocalError — presumably only ever called with those two.
    """
    model_name = obj._meta.verbose_name
    if model_name == 'invoice':
        # Uninvoiced project times plus times already on this invoice
        uninvoiced = time_model.objects.filter(
            invoiced=False, project=obj.project, estimate=None, invoice=None)
        on_invoice = time_model.objects.filter(invoice=obj)
        times = uninvoiced | on_invoice
    elif model_name == 'project':
        times = time_model.objects.filter(
            invoiced=False, estimate=None, invoice=None, project=obj)
    return times
def get_total_hours(items):
    """Sum the ``hours`` field over a queryset of time entries."""
    return items.aggregate(hours=Sum(F('hours')))['hours']
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar

    Gravatar hashes the trimmed, lower-cased address. The address must be
    encoded to bytes before hashing: hashlib's md5() rejects str objects
    on Python 3, so the original ``md5(email.lower())`` raised TypeError.
    """
    digest = md5(email.strip().lower().encode('utf-8')).hexdigest()
    return django_settings.GRAVATAR_URL % digest
def set_items_name(model_name, items=None, _items=None):
    """
    Share templates by returning dictionary of items e.g.
        for item in items.reports
    instead of:
        for item in reports

    Pass a previous return value as ``_items`` to accumulate several
    querysets into a single dictionary.
    """
    # Fresh dict per call: the old ``_items={}`` mutable default was a
    # single shared object, so entries leaked between unrelated calls
    # (e.g. stale querysets from a previous request's index page).
    if _items is None:
        _items = {}
    _items['%ss' % model_name] = items
    return _items
def get_note_stats(note_model):
    """Return counts of active / hidden / inactive / total notes."""
    active = note_model.objects.filter(active=True)
    hidden = note_model.objects.filter(hidden=True)
    inactive = note_model.objects.filter(active=False)
    everything = note_model.objects.all()
    return {
        'active_note_count': len(active),
        'hidden_note_count': len(hidden),
        'inactive_note_count': len(inactive),
        'total_note_count': len(everything),
        # Inactive notes that are not also hidden
        'not_hidden_count': len(inactive) - len(hidden),
    }
def get_page_items(request, **kwargs):
    """
    Build the template context for a detail ("view") page.

    Dispatches on ``model._meta.verbose_name``; when no ``model`` kwarg
    is given, builds the authenticated dashboard ("home") context
    instead. Collaborating models are injected via kwargs so this helper
    has no direct model imports. Returns a context dict for rendering.
    """
    app_settings_model = kwargs.get('app_settings_model')
    company_model = kwargs.get('company_model')
    columns_visible = kwargs.get('columns_visible')
    contact_model = kwargs.get('contact_model')
    contract_model = kwargs.get('contract_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    profile_model = kwargs.get('profile_model')
    project_model = kwargs.get('project_model')
    report_model = kwargs.get('report_model')
    order_by = kwargs.get('order_by')
    pk = kwargs.get('pk')
    time_model = kwargs.get('time_model')
    user_model = kwargs.get('user_model')
    context = {}
    items = None
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if columns_visible:
        context['columns_visible'] = columns_visible
    if model:
        model_name = model._meta.verbose_name
        context['model_name'] = model_name
        # Singleton settings pages render their fields as a table
        if model_name == 'app settings':
            app_settings = app_settings_model.get_solo()
            context['items'] = get_fields([app_settings, ]) # table_items.html
            context['active_tab'] = 'system'
            context['active_nav'] = 'dropdown'
        elif model_name == 'contract settings':
            contract_settings = model.get_solo()
            context['items'] = get_fields(
                [contract_settings, ]) # table_items.html
            context['active_tab'] = 'contract'
            context['active_nav'] = 'dropdown'
        elif model_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['active_nav'] = 'client'
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['edit_url'] = 'client_edit'
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model_name == 'Company':
            company_settings = model.get_solo()
            context['items'] = get_fields(
                [company_settings, ]) # table_items.html
            context['active_nav'] = 'dropdown'
            context['active_tab'] = 'company'
        elif model_name == 'contact':
            contact = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'contact'
            context['edit_url'] = 'contact_edit'
            context['items'] = get_fields([contact, ]) # table_items.html
            context['item'] = contact
        elif model_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            estimate = contract.statement_of_work
            if estimate:
                # Unassigned client times plus times on this estimate
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['active_nav'] = 'contract'
            context['edit_url'] = 'contract_edit'
            context['item'] = contract
            context['times'] = times
        elif model_name == 'estimate':
            estimate = get_object_or_404(model, pk=pk)
            if not estimate.is_sow:
                doc_type = model_name
            else:
                doc_type = 'statement of work'
            times_client = time_model.objects.filter(
                client=estimate.client,
                estimate=None,
                project=None,
                invoiced=False,
                invoice=None)
            times_estimate = time_model.objects.filter(estimate=estimate)
            times = times_client | times_estimate
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, estimate=estimate)
            context['active_nav'] = 'estimate'
            context['document_type'] = doc_type
            context['entries'] = times
            context['edit_url'] = 'estimate_edit'
            context['item'] = estimate
        # NOTE(review): plain 'if' (not elif) starts a second branch
        # chain here; behavior matches an elif because this chain has no
        # else at this level.
        if model_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'file_edit'
            context['item'] = file_obj
        elif model_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            times = get_times_for_obj(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            context['active_nav'] = 'invoice'
            context['document_type'] = model_name
            context['edit_url'] = 'invoice_edit'
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
        elif model_name == 'newsletter':
            newsletter = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'newsletter_edit'
            context['active_nav'] = 'newsletter'
            context['item'] = newsletter
        elif model_name == 'note':
            note = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'note_edit'
            context['active_nav'] = 'note'
            context['item'] = note
        elif model_name == 'project':
            project = get_object_or_404(model, pk=pk)
            # Accumulate several querysets under one 'items' dict
            contacts = contact_model.objects.all()
            items = set_items_name('contact', items=contacts)
            estimates = estimate_model.objects.filter(
                project=project, accepted_date=None)
            items = set_items_name('estimate', items=estimates, _items=items)
            invoices = invoice_model.objects.filter(
                project=project, last_payment_date=None)
            items = set_items_name('invoice', items=invoices, _items=items)
            times = get_times_for_obj(project, time_model)
            times = times.order_by(*order_by['time'])
            items = set_items_name('time', items=times, _items=items)
            users = user_model.objects.filter(project=project)
            items = set_items_name('user', items=users, _items=items)
            context['active_nav'] = 'project'
            context['edit_url'] = 'project_edit'
            context['item'] = project
            context['items'] = items
        elif model_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'proposal_edit'
            context['item'] = proposal
        elif model_name == 'report':
            report = get_object_or_404(model, pk=pk)
            # NOTE(review): this aggregate is computed but never used —
            # confirm before removing.
            reports = model.objects.filter(active=True)
            reports = reports.aggregate(
                gross=Sum(F('gross')), net=Sum(F('net')))
            context['active_nav'] = 'dropdown'
            context['cost'] = report.gross - report.net
            context['edit_url'] = 'report_edit'
            context['item'] = report
        elif model_name == 'task':
            task = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'task_edit'
            context['active_nav'] = 'task'
            context['item'] = task
        elif model_name == 'time':
            time_entry = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'time'
            context['edit_url'] = 'time_edit'
            context['item'] = time_entry
        elif model_name == 'user':
            user = get_object_or_404(model, pk=pk)
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(
                estimate=None, invoiced=False, user=user)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['active_nav'] = 'dropdown'
            context['item'] = user
            context['items'] = get_fields([user.profile, ]) # table_items.html
            context['profile'] = profile_model.objects.get_or_create(
                user=user)[0]
            context['projects'] = projects
            context['times'] = times
    else: # home
        if request.user.is_authenticated:
            # Items
            invoices = invoice_model.objects.filter(last_payment_date=None)
            items = set_items_name('invoice', items=invoices)
            notes = note_model.objects.filter(active=True, hidden=False)
            notes = notes.order_by(*order_by['note'])
            items = set_items_name('note', items=notes, _items=items)
            projects = project_model.objects.filter(active=True, hidden=False)
            projects = projects.order_by(*order_by['project'])
            items = set_items_name('project', items=projects, _items=items)
            times = time_model.objects.filter(
                invoiced=False, project__active=True, user=request.user)
            times = times.order_by(*order_by['time'])
            items = set_items_name('time', items=times, _items=items)
            # Plot
            plot_items = report_model.objects.filter(active=True)
            # Totals
            gross, net = get_invoice_totals(invoice_model)
            context['city_data'] = get_client_city(request)
            context['dashboard_choices'] = get_setting(
                request, app_settings_model, 'dashboard_choices')
            context['gross'] = gross
            context['invoices'] = invoices
            context['items'] = items
            context['net'] = net
            context['notes'] = notes
            context['note_stats'] = get_note_stats(note_model)
            context['plot_items'] = plot_items
            context['projects'] = projects
            context['times'] = times
            context['total_hours'] = get_total_hours(times)
    # Icon preferences and doc/pdf export flags apply to every page
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    doc = get_query(request, 'doc')
    pdf = get_query(request, 'pdf')
    context['doc'] = doc
    context['pdf'] = pdf
    return context
def is_allowed_to_view(model,
                       pk,
                       request,
                       app_settings_model=None,
                       profile_model=None):
    """
    Normal users can only see their own time entries

    Staff may view any entry. Non-staff are redirected home (with a
    warning) for entries they do not own or that have no owner.
    """
    msg = 'Sorry, you are not allowed to see that.'
    time_entry = get_object_or_404(model, pk=pk)
    # Bug fix: check is_staff first. Previously an ownerless entry
    # (time_entry.user falsy) viewed by a staff member fell through to
    # ``time_entry.user.username`` and raised AttributeError.
    if not request.user.is_staff and (
            not time_entry.user or
            time_entry.user.username != request.user.username):
        messages.add_message(request, messages.WARNING, msg)
        return HttpResponseRedirect(reverse('home'))
    context = get_page_items(
        request,
        app_settings_model=app_settings_model,
        model=model,
        profile_model=profile_model,
        pk=pk)
    return render(request, 'time.html', context)
def last_month():
    """
    Returns last day of last month
    """
    # Day 1 of the current month, minus one day
    start_of_month = timezone.now().replace(day=1)
    return start_of_month - timezone.timedelta(days=1)
def obj_copy(obj):
    """Duplicate ``obj`` (clear pk, save — the Django copy idiom) and
    redirect to the new row's edit page."""
    obj.pk = None  # saving with pk=None inserts a new row
    obj.save()
    model_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url_names(
        model_name=model_name, page_type='edit')
    return HttpResponseRedirect(reverse(url_name, kwargs={'pk': obj.pk}))
def obj_remove(obj):
    """Delete ``obj`` and redirect to its index page (home for times)."""
    model_name = obj._meta.verbose_name
    # Time entries redirect home; everything else to its model index
    page_type = 'home' if model_name == 'time' else 'index'
    url_name = get_template_and_url_names(
        model_name=model_name, page_type=page_type)
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_edit(obj, pk=None):
    """Redirect to the object's view page after an edit.

    Singleton settings models take no pk in their URLs; everything else
    redirects to its detail view with a pk.
    """
    model_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url_names(
        model_name=model_name, page_type='view')  # Redir to view
    if pk and model_name in ('Company', 'app settings', 'contract settings'):
        # Special cases for settings: singletons, no pk in URL
        return HttpResponseRedirect(reverse(url_name))
    kwargs = {'pk': pk if pk else obj.pk}
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """Return the requested page of ``items``, clamping bad pages.

    Non-integer page numbers fall back to page 1; out-of-range page
    numbers fall back to the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def report_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """Render the ``values`` query-string pairs as a PNG line plot."""
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    dates = [
        date2num(timezone.datetime.strptime(pair[1], '%Y-%m-%d'))
        for pair in values
    ]
    amounts = [pair[0] for pair in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(dates, amounts)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # Write the PNG bytes straight back to the browser
    png_buffer = BytesIO()
    canvas.print_png(png_buffer)
    return HttpResponse(png_buffer.getvalue(), content_type="image/png")
def set_relationship(obj,
                     request,
                     client_model=None,
                     company_model=None,
                     estimate_model=None,
                     invoice_model=None,
                     project_model=None):
    """
    Attach a just-saved object to its related object(s), resolved from
    query-string parameters (client, project, estimate, invoice, ...).

    NOTE(review): returns True from some branches and implicitly None
    from others — callers in this file ignore the return value, but
    confirm before relying on it.
    """
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
            return True
    elif model_name == 'estimate' or model_name == 'invoice':
        query_project = get_query(request, 'project')
        if query_project:
            # Inherit the client from the chosen project
            project = get_object_or_404(project_model, pk=query_project)
            obj.client = project.client
            obj.project = project
            obj.save()
    elif model_name == 'note':
        query_client = get_query(request, 'client')
        query_company = get_query(request, 'company')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            client.note.add(obj)
            client.save()
            return True
        elif query_company:
            company = company_model.get_solo()
            company.note.add(obj)
            company.save()
    elif model_name == 'project':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'time':
        # Time entries always belong to the requesting user
        obj.user = request.user
        query_estimate = get_query(request, 'estimate')
        query_invoice = get_query(request, 'invoice')
        query_project = get_query(request, 'project')
        if not request.user.is_staff: # Staff have more than one project
            # Default non-staff entries to their first team project
            user_projects = project_model.objects.filter(team__in=[obj.user, ])
            if len(user_projects) > 0:
                obj.project = user_projects[0]
                obj.task = obj.project.task
        if query_estimate:
            estimate = get_object_or_404(estimate_model, pk=query_estimate)
            obj.estimate = estimate
        if query_invoice:
            invoice = get_object_or_404(invoice_model, pk=query_invoice)
            obj.invoice = invoice
        if query_project:
            # Explicit ?project= overrides the default project above
            project = get_object_or_404(project_model, pk=query_project)
            obj.project = project
            obj.task = project.task
        obj.save()
        return True
def set_invoice_totals(times, estimate=None, invoice=None):
    """
    Set invoice, estimate and time totals

    Annotates each time entry with string 'amount' (task rate * hours)
    and 'cog' (user profile rate * hours) fields, accumulates the
    grand totals onto the invoice or estimate (saving it), and returns
    the entries.
    """
    invoice_amount = invoice_cog = 0
    for time_entry in times:
        # Bug fix: reset per entry. Previously these carried over from
        # the prior iteration, so an entry with no task/user silently
        # inherited the previous entry's amount and cost of goods.
        time_entry_amount = time_entry_cog = 0
        hours = time_entry.hours
        if time_entry.task:
            rate = time_entry.task.rate
            time_entry_amount = rate * hours
        if time_entry.user:
            rate = time_entry.user.profile.rate
            if rate:
                time_entry_cog = rate * hours
        time_entry.amount = '%.2f' % time_entry_amount
        time_entry.cog = '%.2f' % time_entry_cog
        invoice_amount += time_entry_amount
        invoice_cog += time_entry_cog
    if invoice:
        invoice.amount = '%.2f' % invoice_amount
        invoice.cog = '%.2f' % invoice_cog
        invoice.save()
    elif estimate:
        estimate.amount = '%.2f' % invoice_amount
        estimate.save()
    return times
# Update
from boto.exception import BotoServerError
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import BytesIO
from io import StringIO
from lxml import etree
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from operator import or_ as OR
fake = Faker()  # shared fake-data generator (test mail, placeholder names)
# Maps a model's verbose_name to its (view, edit, index) URL names.
# Singleton settings models have no index page, hence the empty strings.
URL_NAMES = {
    'app settings': ('settings_app', 'settings_app_edit', ''),
    'client': ('client', 'client_edit', 'client_index'),
    'contact': ('contact', 'contact_edit', 'contact_index'),
    'contract': ('contract', 'contract_edit', 'contract_index'),
    'contract settings': ('settings_contract', 'settings_contract_edit', ''),
    'Company': ('settings_company', 'settings_company_edit', ''),
    'estimate': ('estimate', 'estimate_edit', 'estimate_index'),
    'file': ('file', 'file_edit', 'file_index'),
    'invoice': ('invoice', 'invoice_edit', 'invoice_index'),
    'log': ('log', 'log_edit', 'log_index'),
    'newsletter': ('newsletter', 'newsletter_edit', 'newsletter_index'),
    'note': ('note', 'note_edit', 'note_index'),
    'profile': ('user', 'user_edit', 'user_index'),
    'project': ('project', 'project_edit', 'project_index'),
    'proposal': ('proposal', 'proposal_edit', 'proposal_index'),
    'report': ('report', 'report_edit', 'report_index'),
    'service': ('company', 'service_edit', ''),
    'task': ('task', 'task_edit', 'task_index'),
    'time': ('time_entry', 'time_edit', 'time_index'),
    'user': ('user', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Convert strings to boolean values
    """

    def clean(self, value):
        # Only the literal string 'Yes' maps to True
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Convert strings to decimal values
    """

    def clean(self, value):
        if not value:
            return Decimal(0)
        # Strip thousands separators before converting
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through import/export widget: returns the raw value unchanged.
    """
    def clean(self, value):
        return value
def mail_compose(obj, **kwargs):
    """
    Build the kwargs dict for mail_send() from a contact or a note.

    For a contact, message/subject come from the submitted form; for a
    note, from the note's body and title. A 'test' form flag replaces
    both with fake text; an 'html' flag adds a rendered HTML body.

    NOTE(review): for any other model_name without the 'test' flag,
    ``message``/``subject`` are unbound and this raises
    UnboundLocalError — presumably only called for those two models.
    """
    context = {}
    form = kwargs.get('form')
    request = kwargs.get('request')
    model_name = obj._meta.verbose_name
    recipients = mail_recipients(obj)
    if model_name == 'contact':
        message = form.cleaned_data['message']
        subject = form.cleaned_data['subject']
    elif model_name == 'note':
        # items = get_fields([obj, ])
        # message = render_to_string('table_items.html', {'items': items, })
        message = obj.note
        subject = obj.title
    if 'test' in form.data:
        # Test sends use throwaway fake content
        message = fake.text()
        subject = fake.text()
    if 'html' in form.data: # http://stackoverflow.com/a/28476681/185820
        context['html_message'] = render_to_string('cerberus-fluid.html',
                                                   {'message': message, })
    context['message'] = message
    context['recipients'] = recipients
    context['request'] = request
    context['sender'] = django_settings.EMAIL_FROM
    context['subject'] = subject
    return context
def mail_obj(request, **kwargs):
    """Resolve the contact or note a mail form refers to, via the
    ``contact`` / ``note`` query-string pks."""
    contact_model = kwargs.get('contact_model')
    note_model = kwargs.get('note_model')
    contact_pk = get_query(request, 'contact')
    note_pk = get_query(request, 'note')
    if contact_model and contact_pk:
        obj = contact_model.objects.get(pk=contact_pk)
    elif note_model and note_pk:
        obj = note_model.objects.get(pk=note_pk)
    return obj
def mail_recipients(obj):
    """Recipient address(es): the contact's email, or the addresses of
    all contacts attached to a note."""
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        return (obj.email, )
    if model_name == 'note':
        return [contact.email for contact in obj.contacts.all()]
def mail_send(**kwargs):
    """Send mail via Django's send_mail; returns (recipients, success)."""
    recipients = kwargs.get('recipients')
    try:
        send_mail(
            kwargs.get('subject'),
            kwargs.get('message'),
            kwargs.get('sender'),
            recipients,
            fail_silently=kwargs.get('fail_silently'),
            html_message=kwargs.get('html_message'))
        return recipients, True
    except BotoServerError:
        # The SES backend rejected the message; report failure
        return recipients, False
def set_check_boxes(obj, query_checkbox, refer, app_settings_model):
    """Apply submitted active/subscribe checkbox state to ``obj``.

    ``query_checkbox`` is the dict built by ``get_query(request,
    'checkbox')``. Saves the object and redirects back to the
    referring page.
    """
    model_name = obj._meta.verbose_name
    if query_checkbox['active'] == 'on' or query_checkbox[
            'active'] == 'off':  # Active
        obj.active = query_checkbox['active'] == 'on'
        # Auto-hide notes
        if model_name == 'note' and app_settings_model:
            app_settings = app_settings_model.get_solo()
            if app_settings.auto_hide_notes:
                obj.hidden = True
    elif query_checkbox['subscribe'] == 'on' or query_checkbox[
            'subscribe'] == 'off':  # Subscribe
        # Bug fix: this previously tested query_checkbox['active'], so
        # the subscribed flag was set from the wrong checkbox and could
        # never follow the submitted subscribe state.
        obj.subscribed = query_checkbox['subscribe'] == 'on'
    obj.save()
    return HttpResponseRedirect(refer)
def edit(request, **kwargs):
    """
    Shared create/edit view for every model.

    GET builds a bound or unbound form; POST handles copy/delete
    buttons, checkbox toggles, form save + relationship wiring, and —
    when saving raises AttributeError (mail forms have no .save()) —
    falls back to composing and sending mail instead.
    """
    context = {}
    obj = None
    active_nav = kwargs.get('active_nav')
    app_settings_model = kwargs.get('app_settings_model')
    client_model = kwargs.get('client_model')
    company_model = kwargs.get('company_model')
    contact_model = kwargs.get('contact_model')
    estimate_model = kwargs.get('estimate_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    pk = kwargs.get('pk')
    project_model = kwargs.get('project_model')
    user_model = kwargs.get('user_model')
    if pk is None: # New
        form = get_form(
            client_model=client_model,
            form_model=form_model,
            invoice_model=invoice_model,
            model=model,
            user_model=user_model,
            request=request)
    else: # Existing
        obj = get_object_or_404(model, pk=pk)
        form = get_form(form_model=form_model, obj=obj)
    if request.method == 'POST':
        refer = request.META['HTTP_REFERER']
        if pk is None:
            form = form_model(request.POST)
        else:
            # Copy or delete
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            if copy:
                return obj_copy(obj)
            if delete:
                return obj_remove(obj)
            # Check boxes
            query_checkbox = get_query(request, 'checkbox')
            if query_checkbox['condition']:
                return set_check_boxes(obj, query_checkbox, refer,
                                       app_settings_model)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            try:
                obj = form.save()
                set_relationship(
                    obj,
                    request,
                    client_model=client_model,
                    company_model=company_model,
                    estimate_model=estimate_model,
                    invoice_model=invoice_model,
                    project_model=project_model)
                return obj_edit(obj, pk=pk)
            except AttributeError:
                # Mail forms have no .save(); compose and send instead.
                # NOTE(review): an AttributeError raised inside
                # form.save() itself also lands here — confirm intended.
                obj = mail_obj(
                    request,
                    contact_model=contact_model,
                    note_model=note_model)
                recipients, status = mail_send(
                    **mail_compose(
                        obj, form=form, request=request))
                if status:
                    messages.add_message(request, messages.SUCCESS,
                                         'Mail sent to %s!' %
                                         ', '.join(recipients))
                else:
                    messages.add_message(request, messages.WARNING,
                                         'Mail not sent to %s!' %
                                         ', '.join(recipients))
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if invoice_model: # Dashboard totals for reporting
        gross, net = get_invoice_totals(invoice_model)
        context['gross'] = gross
        context['net'] = net
    # Resolve the edit template from whichever model was supplied
    if model:
        model_name = model._meta.verbose_name
    elif contact_model:
        model_name = contact_model._meta.verbose_name
    elif note_model:
        model_name = note_model._meta.verbose_name
    template_name = get_template_and_url_names(
        model_name=model_name, page_type='edit')
    return render(request, template_name, context)
def generate_doc(contract):
    """
    Build a python-docx Document from a contract's HTML body.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    # Head
    task = contract.task if contract.task else ''
    heading = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    heading.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        for text in (contract.client.name, contract.client.address):
            line = document.add_heading(text, level=1)
            line.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Body: map h2/p elements of the parsed HTML onto docx elements
    parser = etree.HTMLParser()  # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), parser)
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_active_kwarg(model, active=False, user=None):
    """
    Kwarg for "active" varies by type
    """
    model_name = model._meta.verbose_name
    kwargs = {}
    if model_name == 'estimate':
        # Unaccepted estimates are "active"
        if active:
            kwargs['accepted_date'] = None
    elif model_name == 'invoice':
        # Unpaid invoices are "active"
        if active:
            kwargs['last_payment_date'] = None
    elif model_name == 'time':
        # Only staff can see everyone's entries
        if not user.is_staff:
            kwargs['user'] = user
        # Uninvoiced times are "active"; estimated times never are
        kwargs['invoiced'] = not active
        kwargs['estimate'] = None
    elif model_name == 'user':
        # Use related model's active field
        kwargs['profile__active'] = active
    else:
        # All other models check active field
        kwargs['active'] = active
    return kwargs
def get_client_city(request):
    """GeoIP city lookup for the client's forwarded IP (None if absent)."""
    ip_address = get_client_ip(request)
    geo = GeoIP2()
    if ip_address:
        return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Return the client IP from the ``X-Real-IP`` header set by the
    reverse proxy (None when the header is absent)."""
    return request.META.get('HTTP_X_REAL_IP')
def get_company_name(company):
    """Return the company name upper-cased with '.' and ', ' replaced by
    underscores, falling back to fake placeholder text when unset."""
    if company.name:
        company_name = company.name
    else:
        # No name configured: substitute placeholder text
        company_name = fake.text()
    # Bug fix: previously re-read ``company.name`` here, which crashed
    # when name was None and silently discarded the fallback text.
    company_name = company_name.replace('.', '_')
    company_name = company_name.replace(', ', '_')
    return company_name.upper()
def get_fields(items):
    """Attach an ``item.fields`` OrderedDict of non-relation field
    name -> value pairs to each item (consumed by table_items.html)."""
    for item in items:
        item.fields = OrderedDict(
            (field.name, getattr(item, field.name))
            for field in item._meta._get_fields()
            if not field.is_relation)
    return items
def get_form(**kwargs):
    """
    Return appropriate form based on new or edit

    Existing objects get a bound form (notes with their tags
    pre-selected); new reports/contacts get an unsaved instance
    pre-populated from invoice totals or ?user=/?client= query strings.
    """
    client_model = kwargs.get('client_model')
    form_model = kwargs.get('form_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    obj = kwargs.get('obj')
    request = kwargs.get('request')
    user_model = kwargs.get('user_model')
    query_client = None
    query_user = None
    if request:
        query_user = get_query(request, 'user')
        query_client = get_query(request, 'client')
    if obj: # Existing object
        model_name = obj._meta.verbose_name
        if model_name == 'note': # Populate form with tags already set
            form = form_model(initial={'tags': obj.tags.all()}, instance=obj)
        else:
            form = form_model(instance=obj)
    else: # New object or mail
        if model:
            model_name = model._meta.verbose_name
            if model_name == 'report' and invoice_model: # Populate new report
                # with gross, net.
                gross, net = get_invoice_totals(invoice_model)
                obj = model(gross=gross, net=net)
                form = form_model(instance=obj)
            elif model_name == 'contact': # Populate new contact
                # with appropriate fields
                if query_user:
                    user = get_object_or_404(user_model, pk=query_user)
                    obj = model(email=user.email)
                elif query_client:
                    client = get_object_or_404(client_model, pk=query_client)
                    obj = model(client=client)
                form = form_model(instance=obj)
            else:
                form = form_model()
        else:
            # No model at all (e.g. plain mail form)
            form = form_model()
    return form
def get_index_items(request, model, **kwargs):
    """
    Build the template context for a model's index (listing) page.

    POST requests are searches (delegated to get_search_results);
    otherwise the full queryset is ordered, optionally paginated, and
    wrapped via set_items_name for the shared index templates.
    """
    context = {}
    model_name = model._meta.verbose_name
    app_settings_model = kwargs.get('app_settings_model')
    active_nav = kwargs.get('active_nav')
    columns_visible = kwargs.get('columns_visible')
    company_model = kwargs.get('company_model')
    edit_url = kwargs.get('edit_url')
    order_by = kwargs.get('order_by')
    page_size = kwargs.get('page_size')
    search_fields = kwargs.get('search_fields')
    show_search = kwargs.get('show_search')
    if columns_visible:
        context['columns_visible'] = columns_visible
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # Search is easy
    if request.method == 'POST':
        if search == u'': # Empty search returns none
            context['active_nav'] = active_nav
            context['show_search'] = True
            return context
        else:
            return get_search_results(
                model,
                search_fields,
                search,
                active_nav=active_nav,
                app_settings_model=app_settings_model,
                edit_url=edit_url,
                request=request)
    # Not a search
    if model_name == 'note' and get_setting(request, app_settings_model,
                                            'exclude_hidden_notes'):
        items = model.objects.exclude(hidden=True)
    else:
        items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Per model extras
    if model_name == 'note':
        context['note_stats'] = get_note_stats(model)
    elif model_name == 'time':
        context['total_hours'] = get_total_hours(items)
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    items = set_items_name(model_name, items=items)
    context['items'] = items
    return context
def get_invoice_totals(model):
    """Gross and net (gross - cost of goods) totals over unpaid invoices."""
    unpaid = model.objects.filter(last_payment_date=None)
    gross = sum(inv.amount for inv in unpaid if inv.amount)
    cog = sum(inv.cog for inv in unpaid if inv.cog)
    return gross, gross - cog
def get_query(request, query):
    """Extract a query value from the request, with special-case handling
    for a handful of well-known query names.
    """
    if query == 'paginated':
        # Pagination is on unless explicitly disabled with ?paginated=false
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':  # plot
        # "v1,d1 v2,d2" -> [[v1, d1], [v2, d2]]
        raw = request.GET.get('values')
        pairs = raw.split(' ') if raw else []
        return [pair.split(',') for pair in pairs]
    if query == 'checkbox':
        active = request.POST.get('checkbox-active')
        subscribe = request.POST.get('checkbox-subscribe')
        return {
            'active': active,
            'subscribe': subscribe,
            # True when any checkbox value was submitted at all
            'condition': (active in ('on', 'off') or
                          subscribe in ('on', 'off')),
        }
    if query in ('doc', 'pdf', 'test'):
        # Presence flags: a truthy query-string value means True
        return bool(request.GET.get(query))
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(model,
                       search_fields,
                       search,
                       active_nav='',
                       app_settings_model=None,
                       edit_url='',
                       request=None):
    """Run a case-insensitive OR search over *search_fields* and return
    the template context for the results page.
    """
    model_name = model._meta.verbose_name
    # One icontains filter per searchable field, OR-ed together
    filters = [Q(**{'%s__icontains' % field: search})
               for field in search_fields]
    matches = model.objects.filter(reduce(OR, filters))
    return {
        'active_nav': active_nav,
        'edit_url': edit_url,
        'icon_size': get_setting(request, app_settings_model, 'icon_size'),
        'icon_color': get_setting(request, app_settings_model, 'icon_color'),
        'show_search': True,
        'items': set_items_name(model_name, items=matches),
    }
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Allow user to override global setting
    """
    # Anonymous users get no settings at all
    if not request.user.is_authenticated:
        return
    app_settings = app_settings_model.get_solo()
    has_profile = hasattr(request.user, 'profile')
    if setting == 'icon_size':
        user_pref = request.user.profile.icon_size if has_profile else None
        return user_pref if user_pref else app_settings.icon_size
    elif setting == 'icon_color':
        user_pref = request.user.profile.icon_color if has_profile else None
        return user_pref if user_pref else app_settings.icon_color
    elif setting == 'page_size':
        user_pref = request.user.profile.page_size if has_profile else None
        if user_pref:
            return user_pref
        if page_size:  # View's page_size preference
            return page_size
        return app_settings.page_size
    elif setting == 'dashboard_choices':
        # Profile may override the global dashboard configuration
        if has_profile and request.user.profile.dashboard_override:
            return request.user.profile.dashboard_choices
        return app_settings.dashboard_choices
    elif setting == 'exclude_hidden_notes':
        return app_settings.exclude_hidden_notes
def get_template_and_url_names(**kwargs):
    """Resolve template and/or URL names for a model and page type.

    'view' returns (template_name, url_name); 'edit' returns just the
    template name; 'home' and 'index' return just a URL name.
    """
    model_name = kwargs.get('model_name')
    page_type = kwargs.get('page_type')
    if page_type == 'view':
        url_name = URL_NAMES[model_name][0]
        return '%s.html' % url_name, url_name
    if page_type == 'edit':
        # NOTE(review): returns a single string here, while 'view'
        # returns a pair — confirm callers expect this asymmetry.
        return '%s.html' % URL_NAMES[model_name][1]
    if page_type == 'home':
        return 'home'
    if page_type == 'index':
        return URL_NAMES[model_name][2]
def get_times_for_obj(obj, time_model):
    """Return the time entries associated with an invoice or a project.

    Invoice: its attached entries plus unattached, uninvoiced entries of
    the same project.  Project: its unattached, uninvoiced entries.
    Any other model raises UnboundLocalError (no branch assigns times).
    """
    model_name = obj._meta.verbose_name
    if model_name == 'project':
        times = time_model.objects.filter(
            invoiced=False, estimate=None, invoice=None, project=obj)
    elif model_name == 'invoice':
        unattached = time_model.objects.filter(
            invoiced=False, project=obj.project, estimate=None, invoice=None)
        attached = time_model.objects.filter(invoice=obj)
        times = unattached | attached
    return times
def get_total_hours(items):
    """Sum the ``hours`` field across a queryset of time entries."""
    aggregated = items.aggregate(hours=Sum(F('hours')))
    return aggregated['hours']
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar
    """
    # hashlib.md5 requires bytes on Python 3; encode before hashing
    return django_settings.GRAVATAR_URL % md5(
        email.lower().encode('utf-8')).hexdigest()
def set_items_name(model_name, items=None, _items=None):
    """
    Share templates by returning dictionary of items e.g.
        for item in items.reports
    instead of:
        for item in reports

    Pass the returned dict back in as ``_items`` to accumulate several
    item sets into one mapping.
    """
    # Fresh dict per call: the old ``_items={}`` default was shared
    # across calls, leaking item keys between unrelated requests.
    if _items is None:
        _items = {}
    _items['%ss' % model_name] = items
    return _items
def get_note_stats(note_model):
    """Return counts of active/hidden/inactive/total notes for display."""
    active = len(note_model.objects.filter(active=True))
    hidden = len(note_model.objects.filter(hidden=True))
    inactive = len(note_model.objects.filter(active=False))
    return {
        'active_note_count': active,
        'hidden_note_count': hidden,
        'inactive_note_count': inactive,
        'total_note_count': len(note_model.objects.all()),
        # Inactive notes that are not hidden
        'not_hidden_count': inactive - hidden,
    }
def get_page_items(request, **kwargs):
    """Assemble the template context for a model's detail page.

    Dispatches on ``model._meta.verbose_name``; per model it loads the
    object by ``pk`` and gathers the related items its template needs.
    Always adds icon settings and doc/pdf export flags before returning.
    All models are passed in as keyword arguments by the calling view.
    """
    app_settings_model = kwargs.get('app_settings_model')
    company_model = kwargs.get('company_model')
    columns_visible = kwargs.get('columns_visible')
    contact_model = kwargs.get('contact_model')
    contract_model = kwargs.get('contract_model')
    estimate_model = kwargs.get('estimate_model')
    invoice_model = kwargs.get('invoice_model')
    model = kwargs.get('model')
    note_model = kwargs.get('note_model')
    profile_model = kwargs.get('profile_model')
    project_model = kwargs.get('project_model')
    report_model = kwargs.get('report_model')
    order_by = kwargs.get('order_by')
    pk = kwargs.get('pk')
    time_model = kwargs.get('time_model')
    user_model = kwargs.get('user_model')
    context = {}
    items = None
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if columns_visible:
        context['columns_visible'] = columns_visible
    if model:
        model_name = model._meta.verbose_name
        context['model_name'] = model_name
        if model_name == 'app settings':
            app_settings = app_settings_model.get_solo()
            context['items'] = get_fields([app_settings, ]) # table_items.html
            context['active_tab'] = 'system'
            context['active_nav'] = 'dropdown'
        elif model_name == 'contract settings':
            contract_settings = model.get_solo()
            context['items'] = get_fields(
                [contract_settings, ]) # table_items.html
            context['active_tab'] = 'contract'
            context['active_nav'] = 'dropdown'
        elif model_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['active_nav'] = 'client'
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['edit_url'] = 'client_edit'
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model_name == 'Company':
            company_settings = model.get_solo()
            context['items'] = get_fields(
                [company_settings, ]) # table_items.html
            context['active_nav'] = 'dropdown'
            context['active_tab'] = 'company'
        elif model_name == 'contact':
            contact = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'contact'
            context['edit_url'] = 'contact_edit'
            context['items'] = get_fields([contact, ]) # table_items.html
            context['item'] = contact
        elif model_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            estimate = contract.statement_of_work
            if estimate:
                # Entries billed on the SOW plus any client entries not
                # yet tied to an estimate, project or invoice
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['active_nav'] = 'contract'
            context['edit_url'] = 'contract_edit'
            context['item'] = contract
            context['times'] = times
        elif model_name == 'estimate':
            estimate = get_object_or_404(model, pk=pk)
            if not estimate.is_sow:
                doc_type = model_name
            else:
                doc_type = 'statement of work'
            times_client = time_model.objects.filter(
                client=estimate.client,
                estimate=None,
                project=None,
                invoiced=False,
                invoice=None)
            times_estimate = time_model.objects.filter(estimate=estimate)
            times = times_client | times_estimate
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, estimate=estimate)
            context['active_nav'] = 'estimate'
            context['document_type'] = doc_type
            context['entries'] = times
            context['edit_url'] = 'estimate_edit'
            context['item'] = estimate
        # NOTE(review): this "if" (not "elif") starts a new chain, so the
        # trailing "else: # home" below also runs for models handled by
        # the chain above (client, contact, contract, estimate, settings)
        # — confirm this is intentional.
        if model_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'file_edit'
            context['item'] = file_obj
        elif model_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            times = get_times_for_obj(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = set_invoice_totals(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            context['active_nav'] = 'invoice'
            context['document_type'] = model_name
            context['edit_url'] = 'invoice_edit'
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
        elif model_name == 'newsletter':
            newsletter = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'newsletter_edit'
            context['active_nav'] = 'newsletter'
            context['item'] = newsletter
        elif model_name == 'note':
            note = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'note_edit'
            context['active_nav'] = 'note'
            context['item'] = note
        elif model_name == 'project':
            project = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.all()
            items = set_items_name('contact', items=contacts)
            estimates = estimate_model.objects.filter(
                project=project, accepted_date=None)
            items = set_items_name('estimate', items=estimates, _items=items)
            invoices = invoice_model.objects.filter(
                project=project, last_payment_date=None)
            items = set_items_name('invoice', items=invoices, _items=items)
            times = get_times_for_obj(project, time_model)
            times = times.order_by(*order_by['time'])
            items = set_items_name('time', items=times, _items=items)
            users = user_model.objects.filter(project=project)
            items = set_items_name('user', items=users, _items=items)
            context['active_nav'] = 'project'
            context['edit_url'] = 'project_edit'
            context['item'] = project
            context['items'] = items
        elif model_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'proposal_edit'
            context['item'] = proposal
        elif model_name == 'report':
            report = get_object_or_404(model, pk=pk)
            reports = model.objects.filter(active=True)
            reports = reports.aggregate(
                gross=Sum(F('gross')), net=Sum(F('net')))
            context['active_nav'] = 'dropdown'
            context['cost'] = report.gross - report.net
            context['edit_url'] = 'report_edit'
            context['item'] = report
        elif model_name == 'task':
            task = get_object_or_404(model, pk=pk)
            context['edit_url'] = 'task_edit'
            context['active_nav'] = 'task'
            context['item'] = task
        elif model_name == 'time':
            time_entry = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'time'
            context['edit_url'] = 'time_edit'
            context['item'] = time_entry
        elif model_name == 'user':
            user = get_object_or_404(model, pk=pk)
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(
                estimate=None, invoiced=False, user=user)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['active_nav'] = 'dropdown'
            context['item'] = user
            context['items'] = get_fields([user.profile, ]) # table_items.html
            context['profile'] = profile_model.objects.get_or_create(
                user=user)[0]
            context['projects'] = projects
            context['times'] = times
        else: # home
            # Home/dashboard context: open invoices, active notes and
            # projects, the user's uninvoiced times, plot data and totals
            if request.user.is_authenticated:
                # Items
                invoices = invoice_model.objects.filter(last_payment_date=None)
                items = set_items_name('invoice', items=invoices)
                notes = note_model.objects.filter(active=True, hidden=False)
                notes = notes.order_by(*order_by['note'])
                items = set_items_name('note', items=notes, _items=items)
                projects = project_model.objects.filter(active=True, hidden=False)
                projects = projects.order_by(*order_by['project'])
                items = set_items_name('project', items=projects, _items=items)
                times = time_model.objects.filter(
                    invoiced=False, project__active=True, user=request.user)
                times = times.order_by(*order_by['time'])
                items = set_items_name('time', items=times, _items=items)
                # Plot
                plot_items = report_model.objects.filter(active=True)
                # Totals
                gross, net = get_invoice_totals(invoice_model)
                context['city_data'] = get_client_city(request)
                context['dashboard_choices'] = get_setting(
                    request, app_settings_model, 'dashboard_choices')
                context['gross'] = gross
                context['invoices'] = invoices
                context['items'] = items
                context['net'] = net
                context['notes'] = notes
                context['note_stats'] = get_note_stats(note_model)
                context['plot_items'] = plot_items
                context['projects'] = projects
                context['times'] = times
                context['total_hours'] = get_total_hours(times)
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['icon_color'] = get_setting(request, app_settings_model,
                                        'icon_color')
    doc = get_query(request, 'doc')
    pdf = get_query(request, 'pdf')
    context['doc'] = doc
    context['pdf'] = pdf
    return context
def is_allowed_to_view(model,
                       pk,
                       request,
                       app_settings_model=None,
                       profile_model=None):
    """
    Normal users can only see their own time entries

    Staff may view anything; non-staff are redirected home with a
    warning unless the entry belongs to them.  On success, renders
    time.html with the standard page context.
    """
    msg = 'Sorry, you are not allowed to see that.'
    time_entry = get_object_or_404(model, pk=pk)
    # Ownerless entries are hidden from non-staff
    if not time_entry.user and not request.user.is_staff:
        messages.add_message(request, messages.WARNING, msg)
        return HttpResponseRedirect(reverse('home'))
    if (not time_entry.user.username == request.user.username and
            not request.user.is_staff):
        messages.add_message(request, messages.WARNING, msg)
        return HttpResponseRedirect(reverse('home'))
    else:
        context = get_page_items(
            request,
            app_settings_model=app_settings_model,
            model=model,
            profile_model=profile_model,
            pk=pk)
        return render(request, 'time.html', context)
def last_month():
    """
    Returns last day of last month
    """
    # Step back one day from the first of the current month
    month_start = timezone.now().replace(day=1)
    return month_start - timezone.timedelta(days=1)
def obj_copy(obj):
    """Duplicate *obj* and redirect to the copy's edit page."""
    # Django copy idiom: clearing the pk and saving inserts a new row;
    # save() assigns the new pk used for the redirect below.
    dup = obj
    dup.pk = None
    dup.save()
    kwargs = {}
    kwargs['pk'] = dup.pk
    model_name = obj._meta.verbose_name
    # NOTE(review): with this module's get_template_and_url_names,
    # page_type='edit' returns a single string, so this two-name unpack
    # looks like it would raise ValueError — confirm which definition of
    # get_template_and_url_names is live at runtime.
    template_name, url_name = get_template_and_url_names(
        model_name=model_name, page_type='edit')
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def obj_remove(obj):
    """Delete *obj* and redirect to the appropriate listing page."""
    model_name = obj._meta.verbose_name
    # Time entries redirect home; everything else to its index page
    page_type = 'home' if model_name == 'time' else 'index'
    url_name = get_template_and_url_names(
        model_name=model_name, page_type=page_type)
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_edit(obj, pk=None):
    """Redirect to *obj*'s view page after a successful edit.

    Singleton settings models redirect with no pk; otherwise the pk (the
    existing one, or a newly saved object's) is passed to the URL.
    """
    model_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url_names(
        model_name=model_name, page_type='view') # Redir to view
    # New or existing object
    kwargs = {}
    if pk: # Special cases for settings
        if model_name == 'Company':
            return HttpResponseRedirect(reverse(url_name))
        elif model_name == 'app settings':
            return HttpResponseRedirect(reverse(url_name))
        elif model_name == 'contract settings':
            return HttpResponseRedirect(reverse(url_name))
        kwargs['pk'] = pk
    else: # New
        kwargs['pk'] = obj.pk
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """Return one page of *items*, clamping bad page values.

    A non-integer page falls back to the first page; an out-of-range
    page falls back to the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def report_plot(request): # http://stackoverflow.com/a/5515994/185820
    """Render the ``?values=`` series as a PNG month-by-value plot.

    NOTE(review): Figure, FigureCanvasAgg, date2num, MonthLocator,
    DateFormatter and BytesIO are not imported in this module's visible
    import block — presumably imported elsewhere in the file; confirm.
    """
    # Each value is an (amount, 'YYYY-MM-DD') pair; dates go on the x axis
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    x = [
        date2num(timezone.datetime.strptime(i[1], '%Y-%m-%d')) for i in values
    ]
    y = [i[0] for i in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(x, y)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # write image data to a string buffer and get the PNG image bytes
    buf = BytesIO()
    canvas.print_png(buf)
    data = buf.getvalue()
    # write image bytes back to the browser
    return HttpResponse(data, content_type="image/png")
def set_relationship(obj,
                     request,
                     client_model=None,
                     company_model=None,
                     estimate_model=None,
                     invoice_model=None,
                     project_model=None):
    """Attach *obj* to the parent object named in the query string.

    Dispatches on *obj*'s model: contacts/projects link to a client,
    estimates/invoices to a project (and its client), notes to a client
    or the company, and time entries to user/estimate/invoice/project.
    Some branches return True; others implicitly return None.
    """
    model_name = obj._meta.verbose_name
    if model_name == 'contact':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
            return True
    elif model_name == 'estimate' or model_name == 'invoice':
        query_project = get_query(request, 'project')
        if query_project:
            project = get_object_or_404(project_model, pk=query_project)
            # Client is inherited from the project
            obj.client = project.client
            obj.project = project
            obj.save()
    elif model_name == 'note':
        query_client = get_query(request, 'client')
        query_company = get_query(request, 'company')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            client.note.add(obj)
            client.save()
            return True
        elif query_company:
            company = company_model.get_solo()
            company.note.add(obj)
            company.save()
    elif model_name == 'project':
        query_client = get_query(request, 'client')
        if query_client:
            client = get_object_or_404(client_model, pk=query_client)
            obj.client = client
            obj.save()
    elif model_name == 'time':
        # Time entries always belong to the requesting user
        obj.user = request.user
        query_estimate = get_query(request, 'estimate')
        query_invoice = get_query(request, 'invoice')
        query_project = get_query(request, 'project')
        if not request.user.is_staff: # Staff have more than one project
            user_projects = project_model.objects.filter(team__in=[obj.user, ])
            if len(user_projects) > 0:
                # Default to the user's first project and its task
                obj.project = user_projects[0]
                obj.task = obj.project.task
        if query_estimate:
            estimate = get_object_or_404(estimate_model, pk=query_estimate)
            obj.estimate = estimate
        if query_invoice:
            invoice = get_object_or_404(invoice_model, pk=query_invoice)
            obj.invoice = invoice
        if query_project:
            # Explicit project in the query string overrides the default
            project = get_object_or_404(project_model, pk=query_project)
            obj.project = project
            obj.task = project.task
        obj.save()
        return True
def set_invoice_totals(times, estimate=None, invoice=None):
    """
    Set invoice, estimate and time totals

    Annotates each time entry with a formatted ``amount`` (task rate x
    hours) and ``cog`` (user profile rate x hours), accumulates the
    grand totals, and stores them on *invoice* or *estimate* if given.
    Returns *times* with the annotations applied.
    """
    invoice_amount = invoice_cog = 0
    for time_entry in times:
        # Reset per-entry figures each iteration: previously an entry
        # without a task/user carried over the prior entry's amount/cog,
        # mislabeling the entry and inflating the totals.
        time_entry_amount = time_entry_cog = 0
        hours = time_entry.hours
        if time_entry.task:
            rate = time_entry.task.rate
            time_entry_amount = rate * hours
        if time_entry.user:
            rate = time_entry.user.profile.rate
            if rate:
                time_entry_cog = rate * hours
        time_entry.amount = '%.2f' % time_entry_amount
        time_entry.cog = '%.2f' % time_entry_cog
        invoice_amount += time_entry_amount
        invoice_cog += time_entry_cog
    if invoice:
        invoice.amount = '%.2f' % invoice_amount
        invoice.cog = '%.2f' % invoice_cog
        invoice.save()
    elif estimate:
        estimate.amount = '%.2f' % invoice_amount
        estimate.save()
    return times
|
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail as django_send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import StringIO
from lxml import etree
from operator import or_ as OR
from smtplib import SMTPSenderRefused
# Maps a model's verbose_name to its (view, edit, index) URL pattern
# names; an empty third entry means the model has no index page.
URL_NAMES = {
    'client': ('client', 'client_edit', 'client_index'),
    'contact': ('contact', 'contact_edit', 'contact_index'),
    'contract': ('contract', 'contract_edit', 'contract_index'),
    'Company': ('company', 'company_edit', ''),
    'estimate': ('estimate', 'estimate_edit', 'estimate_index'),
    'file': ('file', 'file_edit', 'file_index'),
    'invoice': ('invoice', 'invoice_edit', 'invoice_index'),
    'newsletter': ('newsletter', 'newsletter_edit', 'newsletter_index'),
    'note': ('note', 'note_edit', 'note_index'),
    'profile': ('user', 'user_edit', 'user_index'),
    'project': ('project', 'project_edit', 'project_index'),
    'proposal': ('proposal', 'proposal_edit', 'proposal_index'),
    'report': ('report', 'report_edit', 'report_index'),
    'service': ('company', 'service_edit', ''),
    'app settings': ('settings', 'settings_edit', ''),
    'task': ('task', 'task_edit', 'task_index'),
    'time': ('time_entry', 'time_edit', 'time_index'),
    'user': ('user', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Convert strings to boolean values
    """

    def clean(self, value):
        # Only the literal 'Yes' imports as True
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Convert strings to decimal values
    """

    def clean(self, value):
        # Strip thousands separators; blank/None imports as zero
        if not value:
            return Decimal(0)
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: returns the imported value unchanged.
    """
    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """Create a contact record from an existing user's name and email.

    Warns and redirects to the user index when the user is incomplete
    (missing email/first/last name) or a contact with the same email
    already exists; otherwise saves the contact and redirects to the
    contact index.
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        else:
            user = get_object_or_404(User, pk=pk)
            if not user.email or not user.first_name or not user.last_name:
                messages.add_message(request, messages.WARNING,
                                     'No email no contact!')
                return HttpResponseRedirect(reverse('user_index'))
            contact = model.objects.filter(email=user.email)
            if contact:
                contact = contact[0].email
                messages.add_message(request, messages.WARNING,
                                     'Found duplicate: %s!' % contact)
                return HttpResponseRedirect(reverse('user_index'))
            contact = model(
                email=user.email,
                active=True,
                first_name=user.first_name,
                last_name=user.last_name)
            contact.save()
            messages.add_message(request, messages.INFO,
                                 'User added to contacts!')
            return HttpResponseRedirect(reverse('contact_index'))
    # NOTE(review): non-POST requests fall through and return None —
    # confirm callers only route POSTs here.
def check_boxes(obj, checkbox_active, checkbox_subscribed, refer,
                app_settings_model):
    """Toggle *obj*'s active/subscribed flags from checkbox POST values,
    save, and redirect back to *refer*.
    """
    if checkbox_active == 'on' or checkbox_active == 'off':
        if checkbox_active == 'on':
            obj.active = True
        else:
            obj.active = False
        # Special case for note
        if obj._meta.verbose_name == 'note' and app_settings_model:
            app_settings = app_settings_model.get_solo()
            if app_settings.auto_hide_notes:
                obj.hidden = True
        obj.save()
        # NOTE(review): returns here, so a request carrying both
        # checkboxes never processes the subscribed one — confirm.
        return HttpResponseRedirect(refer)
    if checkbox_subscribed == 'on' or checkbox_subscribed == 'off':
        if checkbox_subscribed == 'on':
            obj.subscribed = True
        else:
            obj.subscribed = False
        obj.save()
        return HttpResponseRedirect(refer)
def create_and_send_mail(request,
                         log_model,
                         mail_form=None,
                         contact=None,
                         estimate=None,
                         profile_model=None,
                         pk=None):
    """Send either a contact mailing (from *mail_form*) or a statement
    of work for *estimate* to app-admin profiles, logging each send.

    Returns True when mail was sent (contact path: only on success;
    estimate path: after the admin loop), otherwise False.
    NOTE(review): ``send_mail`` is a module helper not visible in this
    chunk — confirm its signature.
    """
    if contact:
        form = mail_form(request.POST)
        if form.is_valid():
            test = form.cleaned_data['test']
            if test:
                # Test mode: random subject/body instead of real content
                fake = Faker()
                subject = fake.text()
                message = fake.text()
            else:
                subject = form.cleaned_data['subject']
                message = form.cleaned_data['message']
            url = reverse('contact_unsubscribe', kwargs={'pk': pk})
            url = ''.join([request.get_host(), url])
            to = contact.email
            first_name = contact.first_name
            if send_mail(
                    request,
                    subject,
                    message,
                    to,
                    url=url,
                    uuid=contact.uuid,
                    first_name=first_name):
                messages.add_message(request, messages.SUCCESS, 'Mail sent!')
                log = log_model(entry='Mail sent to %s.' % to)
                log.save()
                return True
    if estimate:
        # Build an HTML ordered list of the estimate's time entries
        notes = '<ol><li>'
        counter = 0
        hours = 0
        rate = estimate.project.task.rate
        start_date = estimate.project.start_date.strftime('%m/%d/%Y')
        end_date = estimate.project.end_date.strftime('%m/%d/%Y')
        subject = estimate.subject
        now = timezone.datetime.now().strftime('%m/%d/%Y at %H:%M:%S')
        for entry in estimate.time_set.all():
            if counter != 0:
                notes += '</li><li>%s <strong>%s hours</strong>.' % (
                    entry.log, entry.hours)
            else:
                notes += '%s <strong>%s hours</strong>.' % (entry.log,
                                                            entry.hours)
            counter += 1
            hours += entry.hours
        notes += '</li></ol>'
        cost = hours * rate
        url = reverse('estimate', kwargs={'pk': estimate.pk})
        url = ''.join([request.get_host(), url])
        message = ''.join([
            '<h1 style="text-align: center">Statement of Work</h1><h2>%s '
            'total hours of %s at rate of $%s/hour for %s = $%.2f from %s'
            ' to %s.</h2>' %
            (hours, estimate.subject, rate, estimate.client.name, cost,
             start_date, end_date), notes
        ])
        # Send to every app-admin profile, logging each successful send
        profiles = profile_model.objects.filter(app_admin=True)
        for profile in profiles:
            email = profile.user.email
            if send_mail(
                    request,
                    'Statement of Work for %s sent on %s.' % (subject, now),
                    message,
                    email,
                    url=url):
                log = log_model(
                    entry='Statement of Work for %s sent on %s to %s.' %
                    (subject, now, email))
                log.save()
        messages.add_message(request, messages.SUCCESS, 'Sent to app_admins.')
        return True
    return False
def daily_burn(project):
    """Average hours per day needed to spend the project budget.

    Returns a '%.2f' string, or '' when the dates are missing
    (TypeError) or identical (ZeroDivisionError).
    """
    try:
        span = (project.end_date - project.start_date).days
        return '%.2f' % (project.budget / span)
    except (TypeError, ZeroDivisionError):
        return ''
def edit(
        request,
        form_model,
        model,
        url_name,
        template_name,
        active_nav=None,
        app_settings_model=None,
        client_model=None,
        company_model=None,
        company_note=None,
        estimate_model=None,
        invoice_model=None,
        project_model=None,
        task_model=None,
        time_model=None,
        pk=None, ):
    """Generic create/edit view.

    GET renders the (bound or unbound) form.  POST on an existing object
    first handles the copy/delete/checkbox shortcuts; otherwise a valid
    form is saved, related objects are linked, and the request is
    redirected to the object's view page.
    """
    context = {}
    obj = None
    if pk is None:
        form = form_model()
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        refer = request.META['HTTP_REFERER']
        if pk is None:
            form = form_model(request.POST)
        else:
            checkbox_active = request.POST.get('checkbox')
            checkbox_subscribed = request.POST.get('checkbox-subscribed')
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            # Copy or delete
            if copy:
                # return obj_copy(obj, url_name)
                return obj_copy(obj)
            if delete:
                # NOTE(review): obj_delete is not defined in this module's
                # visible code (obj_remove is) — confirm it exists elsewhere.
                # return obj_delete(obj, company, request=request)
                return obj_delete(obj)
            # Check boxes
            if (checkbox_active == 'on' or checkbox_active == 'off' or
                    checkbox_subscribed == 'on' or
                    checkbox_subscribed == 'off'):
                return check_boxes(obj, checkbox_active, checkbox_subscribed,
                                   refer, app_settings_model)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            set_relationship(
                obj,
                request,
                client_model=client_model,
                estimate_model=estimate_model,
                invoice_model=invoice_model,
                project_model=project_model)
            return obj_edit(obj, pk=pk)
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if invoice_model: # Dashboard totals for reporting
        gross, net = get_invoice_totals(invoice_model)
        context['gross'] = gross
        context['net'] = net
    return render(request, template_name, context)
def generate_doc(contract):
    """
    https://stackoverflow.com/a/24122313/185820

    Build a python-docx Document for *contract*: centered headings for
    the agreement title and client, then the contract body's <h2>/<p>
    elements converted to Word headings/paragraphs.
    """
    document = Document()
    # Head
    task = ''
    if contract.task:
        task = contract.task
    title = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        client_name = document.add_heading(contract.client.name, level=1)
        client_name.alignment = WD_ALIGN_PARAGRAPH.CENTER
        client_address = document.add_heading(contract.client.address, level=1)
        client_address.alignment = WD_ALIGN_PARAGRAPH.CENTER
    parser = etree.HTMLParser() # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), parser)
    # Body
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_active_kwarg(model, active=False, user=None):
    """
    Kwarg for "active" varies by type
    """
    name = model._meta.verbose_name
    if name == 'estimate':
        # Unaccepted estimates are "active"
        return {'accepted_date': None} if active else {}
    if name == 'invoice':
        # Unpaid invoices are "active"
        return {'last_payment_date': None} if active else {}
    if name == 'time':
        # Uninvoiced times are "active"; estimated times never are
        kwargs = {'invoiced': not active, 'estimate': None}
        # Only staff can see all items
        if not user.is_staff:
            kwargs['user'] = user
        return kwargs
    if name == 'user':
        # Use related model's active field
        return {'profile__active': active}
    # All other models check active field
    return {'active': active}
def get_client_city(request):
    """Look up the requesting client's city from its forwarded IP.

    Returns the GeoIP2 city data, or None (implicitly) when no client
    IP header is present.  GeoIP2 is instantiated unconditionally.
    """
    ip_address = get_client_ip(request)
    geo = GeoIP2()
    if ip_address:
        return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Return the proxy-forwarded client IP header value, or None."""
    meta = request.META
    return meta.get('HTTP_X_REAL_IP')
def get_company_name(company):
    """Upper-cased, underscore-safe company name (random text if unset)."""
    if not company.name:
        # No configured name: fall back to placeholder text
        fake = Faker()
        return fake.text()
    cleaned = company.name.replace('.', '_').replace(', ', '_')
    return cleaned.upper()
def get_invoice_totals(model):
    """Sum the amounts of all unpaid invoices.

    Returns the total twice as (gross, net) so callers that unpack both
    keep working; this variant tracks no cost of goods.
    """
    unpaid = model.objects.filter(last_payment_date=None)
    total = sum(inv.amount for inv in unpaid if inv.amount)
    return total, total
def get_line_total(entries, entry):
    """Placeholder: always returns 0 (line totals not implemented)."""
    return 0
def get_query(request, query):
    """Extract a query value from the request, with special cases for
    pagination, search and plot values.
    """
    if query == 'paginated':
        # Pagination is on unless explicitly disabled with ?paginated=false
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':
        # "v1,d1 v2,d2" -> [[v1, d1], [v2, d2]]
        raw = request.GET.get('values')
        pairs = raw.split(' ') if raw else []
        return [pair.split(',') for pair in pairs]
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(model,
                       search_fields,
                       search,
                       active_nav='',
                       app_settings_model=None,
                       edit_url='',
                       request=None):
    """Case-insensitive OR search across *search_fields*; returns the
    template context for the results page.
    """
    # One icontains filter per searchable field, OR-ed together
    filters = [Q(**{'%s__icontains' % field: search})
               for field in search_fields]
    items = model.objects.filter(reduce(OR, filters))
    return {
        'active_nav': active_nav,
        'edit_url': edit_url,
        'icon_size': get_setting(request, app_settings_model, 'icon_size'),
        'items': items,
        'show_search': True,
    }
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Allow user to override global setting
    """
    # Anonymous users get no settings at all
    if not request.user.is_authenticated:
        return
    app_settings = app_settings_model.get_solo()
    has_profile = hasattr(request.user, 'profile')
    if setting == 'icon_size':
        user_pref = request.user.profile.icon_size if has_profile else None
        return user_pref if user_pref else app_settings.icon_size
    if setting == 'page_size':
        user_pref = request.user.profile.page_size if has_profile else None
        if user_pref:
            return user_pref
        if page_size:  # View's page_size preference
            return page_size
        return app_settings.page_size
    if setting == 'dashboard_choices':
        # Profile may override the global dashboard configuration
        if has_profile and request.user.profile.override_dashboard:
            return request.user.profile.dashboard_choices
        return app_settings.dashboard_choices
def get_template_and_url_names(verbose_name, page_type=None):
    """Resolve names for *verbose_name* by page type.

    'view' and 'edit' return (template_name, url_name); 'index' returns
    just a URL name; anything else returns None.
    """
    if page_type == 'view':
        url_name = URL_NAMES[verbose_name][0]
        return '%s.html' % url_name, url_name
    if page_type == 'edit':
        url_name = URL_NAMES[verbose_name][1]
        return '%s.html' % url_name, url_name
    if page_type == 'index':
        return URL_NAMES[verbose_name][2]
def get_times_for_invoice(invoice, time_model):
    """Entries attached to *invoice* plus unattached, uninvoiced entries
    for the same project.
    """
    unattached = time_model.objects.filter(
        invoiced=False, project=invoice.project, estimate=None, invoice=None)
    attached = time_model.objects.filter(invoice=invoice)
    return unattached | attached
def gravatar_url(email):
    """
    Gravatar image URL for an email address.

    Gravatar hashes the lower-cased address with MD5. hashlib's md5()
    requires bytes on Python 3, so the address is UTF-8 encoded first
    (calling it on a bare str raises TypeError).
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return django_settings.GRAVATAR_URL % digest
def get_amount(times, invoice=None):
    """
    Attach a formatted dollar amount to each billable time entry
    (rate * hours); entries without a task are left untouched.
    Optionally stores the formatted grand total on the invoice.
    """
    total = 0
    for entry in times:
        if not entry.task:
            continue
        line = entry.task.rate * entry.hours
        entry.amount = '%.2f' % line
        total += line
    if invoice:
        invoice.amount = '%.2f' % total
        invoice.save()
    return times
def get_index_items(request,
                    model,
                    app_settings_model=None,
                    active_nav=None,
                    company_model=None,
                    contact_model=None,
                    edit_url=None,
                    order_by=None,
                    page_size=None,
                    search_fields=None,
                    show_search=False):
    """
    Build the template context for a model's index (listing) page.

    A POST is treated as a search submission; otherwise all objects are
    listed, optionally ordered and paginated, with per-model extras:
    total hours for time, recomputed cost for reports, an is_contact
    flag for users, and note stats for notes. Anonymous users get an
    empty item list.
    """
    context = {}
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # Search is easy
    if request.method == 'POST':
        if search == u'':  # Empty search returns none
            context['active_nav'] = active_nav
            context['show_search'] = True
            return context
        else:
            return get_search_results(
                model,
                search_fields,
                search,
                active_nav=active_nav,
                app_settings_model=app_settings_model,
                edit_url=edit_url,
                request=request)
    # Not a search
    items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Calculate total hours
    if model._meta.verbose_name == 'time':
        total_hours = items.aggregate(hours=Sum(F('hours')))
        total_hours = total_hours['hours']
        context['total_hours'] = total_hours
    # Calculate cost per report
    # NOTE(review): saving every report on each index view writes to the
    # DB on read — confirm this denormalization is intended.
    if model._meta.verbose_name == 'report':
        for item in items:
            cost = item.gross - item.net
            item.cost = cost
            item.save()
    # Check if user is contact
    if model._meta.verbose_name == 'user':
        contacts = contact_model.objects.all()
        for item in items:
            if item.email in [i.email for i in contacts]:
                item.is_contact = True
            else:
                item.is_contact = False
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['items'] = items
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    # Provide number of active notes to note_index
    if model._meta.verbose_name == 'note':
        context['note_stats'] = get_note_stats(model)
    return context
def get_note_stats(note_model):
    """
    Return note counts by state for display in the note index.

    Uses ``QuerySet.count()`` so counting happens in the database
    instead of materializing every row just to call ``len()``.
    """
    active = note_model.objects.filter(active=True).count()
    hidden = note_model.objects.filter(hidden=True).count()
    inactive = note_model.objects.filter(active=False).count()
    total = note_model.objects.all().count()
    # NOTE(review): assumes hidden notes are a subset of inactive ones,
    # otherwise this difference can go negative — confirm on the model.
    not_hidden = inactive - hidden
    return {
        'active_note_count': active,
        'hidden_note_count': hidden,
        'inactive_note_count': inactive,
        'total_note_count': total,
        'not_hidden_count': not_hidden,
    }
def get_page_items(request,
                   app_settings_model=None,
                   company_model=None,
                   contact_model=None,
                   contract_model=None,
                   estimate_model=None,
                   invoice_model=None,
                   model=None,
                   note_model=None,
                   profile_model=None,
                   project_model=None,
                   report_model=None,
                   order_by=None,
                   pk=None,
                   time_model=None):
    """
    Build the template context for a single object's detail page.

    Dispatches on ``model._meta.verbose_name``; when no model is given
    the dashboard ("home") context is built instead. Callers only need
    to pass the related models their branch uses.
    """
    context = {}
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if model:
        if model._meta.verbose_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['active_nav'] = 'client'
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['edit_url'] = 'client_edit'
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model._meta.verbose_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            doc = get_query(request, 'doc')
            estimate = contract.statement_of_work
            pdf = get_query(request, 'pdf')
            if estimate:
                # Unassigned client times plus the estimate's own times
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['active_nav'] = 'contract'
            context['doc'] = doc
            context['edit_url'] = 'contract_edit'
            context['item'] = contract
            context['pdf'] = pdf
            context['times'] = times
        elif model._meta.verbose_name == 'estimate':
            estimate = get_object_or_404(model, pk=pk)
            document_type = estimate._meta.verbose_name
            document_type_upper = document_type.upper()
            document_type_title = document_type.title()
            pdf = get_query(request, 'pdf')
            times_client = time_model.objects.filter(
                client=estimate.client,
                estimate=None,
                project=None,
                invoiced=False,
                invoice=None)
            times_estimate = time_model.objects.filter(estimate=estimate)
            times = times_client | times_estimate
            times = get_amount(times)
            context['active_nav'] = 'estimate'
            context['document_type_upper'] = document_type_upper
            context['document_type_title'] = document_type_title
            context['entries'] = times
            context['edit_url'] = 'estimate_edit'
            context['item'] = estimate
            context['pdf'] = pdf
        # NOTE(review): this `if` starts a second if/elif chain ('file'
        # is not `elif`) — harmless as written since the branches are
        # mutually exclusive, but confirm it was not meant to continue
        # the chain above.
        if model._meta.verbose_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'file_edit'
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['item'] = file_obj
        elif model._meta.verbose_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            document_type = invoice._meta.verbose_name
            document_type_upper = document_type.upper()
            document_type_title = document_type.title()
            times = get_times_for_invoice(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = get_amount(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            pdf = get_query(request, 'pdf')
            context['active_nav'] = 'invoice'
            context['document_type_upper'] = document_type_upper
            context['document_type_title'] = document_type_title
            context['edit_url'] = 'invoice_edit'  # Delete modal
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
            context['pdf'] = pdf
        elif model._meta.verbose_name == 'project':
            project = get_object_or_404(model, pk=pk)
            invoices = project.invoice_set.all()
            invoice = times = None
            if len(invoices) > 0:
                # Times shown come from the project's first invoice
                invoice = invoices[0]
                times = get_times_for_invoice(invoice, time_model)
                times = times.order_by(*order_by['time'])
            estimates = estimate_model.objects.filter(
                project=project, accepted_date=None)
            invoices = invoice_model.objects.filter(
                project=project, last_payment_date=None)
            context['active_nav'] = 'project'
            context['edit_url'] = 'project_edit'  # Delete modal
            context['entries'] = times
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['estimates'] = estimates
            context['invoices'] = invoices
            context['item'] = project
            context['times'] = times
        elif model._meta.verbose_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            pdf = get_query(request, 'pdf')
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'proposal_edit'  # Delete modal
            context['item'] = proposal
            context['pdf'] = pdf
        elif model._meta.verbose_name == 'user':
            user = get_object_or_404(model, pk=pk)
            # Uninvoiced, non-estimate times belonging to this user
            filters = {
                'estimate': None,
                'invoiced': False,
                'user': user,
            }
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(**filters)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['active_nav'] = 'dropdown'
            context['is_contact'] = user.email in [i.email for i in contacts]
            context['item'] = user
            context['profile'] = profile_model.objects.get_or_create(
                user=user)[0]
            context['projects'] = projects
            context['times'] = times
    else:  # home
        invoices = invoice_model.objects.filter(last_payment_date=None)
        notes = note_model.objects.filter(active=True)
        notes = notes.order_by(*order_by['note'])
        projects = project_model.objects.filter(active=True, hidden=False)
        projects = projects.order_by(*order_by['project'])
        plot_items = report_model.objects.filter(active=True)
        gross, net = get_invoice_totals(invoice_model)
        context['note_stats'] = get_note_stats(note_model)
        context['city_data'] = get_client_city(request)
        context['dashboard_choices'] = get_setting(request, app_settings_model,
                                                   'dashboard_choices')
        context['gross'] = gross
        context['invoices'] = invoices
        context['icon_size'] = get_setting(request, app_settings_model,
                                           'icon_size')
        context['nav_status'] = 'active'
        context['net'] = net
        context['notes'] = notes
        context['plot_items'] = plot_items
        context['projects'] = projects
    return context
def last_month():
    """
    Returns last day of last month
    """
    # Day 1 of the current month, stepped back one day
    first_of_month = timezone.now().replace(day=1)
    return first_of_month - timezone.timedelta(days=1)
def obj_copy(obj):
    """
    Duplicate a model instance and redirect to the copy's edit page.

    Clearing ``pk`` before ``save()`` makes Django INSERT a new row
    (the standard copy idiom); ``obj`` then refers to the new copy.
    """
    obj.pk = None
    obj.save()
    # Only the url name is needed; the template name was unused here.
    url_name = get_template_and_url_names(
        obj._meta.verbose_name, page_type='edit')[1]
    return HttpResponseRedirect(reverse(url_name, kwargs={'pk': obj.pk}))
def obj_delete(obj):
    """Delete *obj*, then redirect to its model's index page."""
    # Resolve the redirect target before the object is gone
    index_url = get_template_and_url_names(
        obj._meta.verbose_name, page_type='index')  # Redir to index
    obj.delete()
    return HttpResponseRedirect(reverse(index_url))
def obj_edit(obj, pk=None):
    """
    Redirect to *obj*'s view page after a successful edit.

    Singleton models (Company, app settings) are addressed without a
    pk in their URLs.
    """
    verbose_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url_names(
        verbose_name, page_type='view')  # Redir to view
    if pk:  # Existing object
        if verbose_name in ('Company', 'app settings'):
            # Special cases: singletons have no pk in the URL
            return HttpResponseRedirect(reverse(url_name))
        kwargs = {'pk': pk}
    else:  # Newly created object
        kwargs = {'pk': obj.pk}
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """
    Return one page of *items*, clamping out-of-range page numbers.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page query string: fall back to the first page
        return paginator.page(1)
    except EmptyPage:
        # Past the end: show the last page instead
        return paginator.page(paginator.num_pages)
def send_mail(request,
              subject,
              message,
              to,
              url=None,
              uuid=None,
              first_name=None):
    """
    Send an HTML email rendered with the cerberus-fluid template.

    Returns True on success; on SMTPSenderRefused queues a warning
    message on the request and returns False.
    """
    sender = django_settings.EMAIL_FROM
    # http://stackoverflow.com/a/28476681/185820
    username = first_name if first_name else to
    html_message = render_to_string('cerberus-fluid.html', {
        'username': username,
        'message': message,
        'url': url,
        'uuid': uuid,
    })
    try:
        django_send_mail(
            subject,
            message,
            sender,
            [to],
            fail_silently=False,
            html_message=html_message)
    except SMTPSenderRefused:
        messages.add_message(request, messages.WARNING, 'SMTPSenderRefused!')
        return False
    return True
def set_relationship(
        obj,
        request,
        client_model=None,
        estimate_model=None,
        invoice_model=None,
        project_model=None):
    """
    Attach a just-saved object to a related record named in the query
    string. Returns True when a relationship was set, False when a
    time entry names more than one invoice, None otherwise.
    """
    if obj._meta.verbose_name in ['contact', 'note']:
        query_string_client = get_query(request, 'client')
        if query_string_client:
            client = get_object_or_404(client_model, pk=query_string_client)
            # NOTE(review): contacts are also added to the client's
            # *note* relation here — confirm that is intended and not a
            # copy/paste of the note handling.
            client.note.add(obj)
            client.save()
            return True
    elif obj._meta.verbose_name == 'time':
        query_string_invoices = get_query(request, 'invoice')
        query_string_project = get_query(request, 'project')
        if query_string_invoices:
            invoices = query_string_invoices.split(',')
            # A time entry can only belong to one invoice
            if len(invoices) > 1:
                return False
            else:
                invoice = invoices[0]
                invoice = get_object_or_404(invoice_model, pk=invoice)
                obj.invoice = invoice
                obj.save()
        if query_string_project:
            # Inherit the task (and thus the rate) from the project
            project = get_object_or_404(project_model, pk=query_string_project)
            obj.task = project.task
            obj.save()
        return True
# Update
from decimal import Decimal
from django.conf import settings as django_settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.gis.geoip2 import GeoIP2
from django.core.mail import send_mail as django_send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.models import F
from django.db.models import Sum
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from faker import Faker
from functools import reduce
from import_export import widgets
from hashlib import md5
from io import StringIO
from lxml import etree
from operator import or_ as OR
from smtplib import SMTPSenderRefused
# Maps a model's verbose_name -> (view, edit, index) URL names.
# Singleton pages (Company, service, app settings) have no index entry.
URL_NAMES = {
    'client': ('client', 'client_edit', 'client_index'),
    'contact': ('contact', 'contact_edit', 'contact_index'),
    'contract': ('contract', 'contract_edit', 'contract_index'),
    'Company': ('company', 'company_edit', ''),
    'estimate': ('estimate', 'estimate_edit', 'estimate_index'),
    'file': ('file', 'file_edit', 'file_index'),
    'invoice': ('invoice', 'invoice_edit', 'invoice_index'),
    'newsletter': ('newsletter', 'newsletter_edit', 'newsletter_index'),
    'note': ('note', 'note_edit', 'note_index'),
    'profile': ('user', 'user_edit', 'user_index'),
    'project': ('project', 'project_edit', 'project_index'),
    'proposal': ('proposal', 'proposal_edit', 'proposal_index'),
    'report': ('report', 'report_edit', 'report_index'),
    'service': ('company', 'service_edit', ''),
    'app settings': ('settings', 'settings_edit', ''),
    'task': ('task', 'task_edit', 'task_index'),
    'time': ('time_entry', 'time_edit', 'time_index'),
    'user': ('user', 'user_edit', 'user_index'),
}
class BooleanWidget(widgets.Widget):
    """
    Convert strings to boolean values
    """

    def clean(self, value):
        # Only the literal string 'Yes' maps to True
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """
    Convert strings to decimal values
    """

    def clean(self, value):
        # Empty/None becomes 0; strip thousands separators otherwise
        if not value:
            return Decimal(0)
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass imported values through unchanged.
    """

    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """
    Create a contact record from a user (POST only; returns None on
    other methods).

    Requires the user to have an email, first name and last name, and
    refuses to duplicate an existing contact email. Redirects to the
    relevant index with a status message.
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        else:
            user = get_object_or_404(User, pk=pk)
            if not user.email or not user.first_name or not user.last_name:
                messages.add_message(request, messages.WARNING,
                                     'No email no contact!')
                return HttpResponseRedirect(reverse('user_index'))
            # Bail out if a contact already uses this email
            contact = model.objects.filter(email=user.email)
            if contact:
                contact = contact[0].email
                messages.add_message(request, messages.WARNING,
                                     'Found duplicate: %s!' % contact)
                return HttpResponseRedirect(reverse('user_index'))
            contact = model(
                email=user.email,
                active=True,
                first_name=user.first_name,
                last_name=user.last_name)
            contact.save()
            messages.add_message(request, messages.INFO,
                                 'User added to contacts!')
            return HttpResponseRedirect(reverse('contact_index'))
def check_boxes(obj, checkbox_active, checkbox_subscribed, refer,
                app_settings_model):
    """
    Apply 'active'/'subscribed' checkbox toggles posted from an edit
    form, save the object, and redirect back to the referring page.
    Returns None when neither checkbox value is 'on' or 'off'.
    """
    # NOTE(review): browsers omit unchecked checkboxes from POST data,
    # so 'off' presumably comes from explicit values in the templates —
    # confirm.
    if checkbox_active == 'on' or checkbox_active == 'off':
        if checkbox_active == 'on':
            obj.active = True
        else:
            obj.active = False
        # Special case for note
        if obj._meta.verbose_name == 'note' and app_settings_model:
            app_settings = app_settings_model.get_solo()
            if app_settings.auto_hide_notes:
                obj.hidden = True
        obj.save()
        return HttpResponseRedirect(refer)
    if checkbox_subscribed == 'on' or checkbox_subscribed == 'off':
        if checkbox_subscribed == 'on':
            obj.subscribed = True
        else:
            obj.subscribed = False
        obj.save()
        return HttpResponseRedirect(refer)
def create_and_send_mail(request,
                         log_model,
                         mail_form=None,
                         contact=None,
                         estimate=None,
                         profile_model=None,
                         pk=None):
    """
    Send either a one-off mail to *contact* (validated via *mail_form*,
    optionally with Faker-generated test content) or a statement of
    work for *estimate* to every app admin. Each send is logged;
    returns True on success, False otherwise.
    """
    if contact:
        form = mail_form(request.POST)
        if form.is_valid():
            test = form.cleaned_data['test']
            if test:
                # Test mode: send throwaway generated content
                fake = Faker()
                subject = fake.text()
                message = fake.text()
            else:
                subject = form.cleaned_data['subject']
                message = form.cleaned_data['message']
            url = reverse('contact_unsubscribe', kwargs={'pk': pk})
            url = ''.join([request.get_host(), url])
            to = contact.email
            first_name = contact.first_name
            if send_mail(
                    request,
                    subject,
                    message,
                    to,
                    url=url,
                    uuid=contact.uuid,
                    first_name=first_name):
                messages.add_message(request, messages.SUCCESS, 'Mail sent!')
                log = log_model(entry='Mail sent to %s.' % to)
                log.save()
                return True
    if estimate:
        # Build an HTML ordered list of the estimate's time entries
        notes = '<ol><li>'
        counter = 0
        hours = 0
        rate = estimate.project.task.rate
        start_date = estimate.project.start_date.strftime('%m/%d/%Y')
        end_date = estimate.project.end_date.strftime('%m/%d/%Y')
        subject = estimate.subject
        now = timezone.datetime.now().strftime('%m/%d/%Y at %H:%M:%S')
        for entry in estimate.time_set.all():
            if counter != 0:
                notes += '</li><li>%s <strong>%s hours</strong>.' % (
                    entry.log, entry.hours)
            else:
                notes += '%s <strong>%s hours</strong>.' % (entry.log,
                                                            entry.hours)
            counter += 1
            hours += entry.hours
        notes += '</li></ol>'
        cost = hours * rate
        url = reverse('estimate', kwargs={'pk': estimate.pk})
        url = ''.join([request.get_host(), url])
        message = ''.join([
            '<h1 style="text-align: center">Statement of Work</h1><h2>%s '
            'total hours of %s at rate of $%s/hour for %s = $%.2f from %s'
            ' to %s.</h2>' %
            (hours, estimate.subject, rate, estimate.client.name, cost,
             start_date, end_date), notes
        ])
        # Statements of work go to every app admin on file
        profiles = profile_model.objects.filter(app_admin=True)
        for profile in profiles:
            email = profile.user.email
            if send_mail(
                    request,
                    'Statement of Work for %s sent on %s.' % (subject, now),
                    message,
                    email,
                    url=url):
                log = log_model(
                    entry='Statement of Work for %s sent on %s to %s.' %
                    (subject, now, email))
                log.save()
        messages.add_message(request, messages.SUCCESS, 'Sent to app_admins.')
        return True
    return False
def daily_burn(project):
    """
    Budget hours to burn per day over the project's date range,
    formatted to two decimals; empty string when the dates or budget
    are missing or the range spans zero days.
    """
    try:
        span_days = (project.end_date - project.start_date).days
        return '%.2f' % (project.budget / span_days)
    except (TypeError, ZeroDivisionError):
        return ''
def edit(
        request,
        form_model,
        model,
        url_name,
        template_name,
        active_nav=None,
        app_settings_model=None,
        client_model=None,
        company_model=None,
        company_note=None,
        estimate_model=None,
        invoice_model=None,
        project_model=None,
        task_model=None,
        time_model=None,
        pk=None, ):
    """
    Shared add/edit view for every model.

    GET renders an empty form (no pk) or one bound to the existing
    object. POST handles, in order: copy, delete, checkbox toggles,
    then a normal form save followed by query-string relationship
    wiring and a redirect to the object's view page.
    """
    context = {}
    obj = None
    if pk is None:
        form = form_model()
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        refer = request.META['HTTP_REFERER']
        if pk is None:
            form = form_model(request.POST)
        else:
            checkbox_active = request.POST.get('checkbox')
            checkbox_subscribed = request.POST.get('checkbox-subscribed')
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            # Copy or delete
            if copy:
                # return obj_copy(obj, url_name)
                return obj_copy(obj)
            if delete:
                # return obj_delete(obj, company, request=request)
                return obj_delete(obj)
            # Check boxes
            if (checkbox_active == 'on' or checkbox_active == 'off' or
                    checkbox_subscribed == 'on' or
                    checkbox_subscribed == 'off'):
                return check_boxes(obj, checkbox_active, checkbox_subscribed,
                                   refer, app_settings_model)
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            # Wire up relations named in the query string (?client=,
            # ?invoice=, ?project=)
            set_relationship(
                obj,
                request,
                client_model=client_model,
                estimate_model=estimate_model,
                invoice_model=invoice_model,
                project_model=project_model)
            return obj_edit(obj, pk=pk)
    context['active_nav'] = active_nav
    context['form'] = form
    context['item'] = obj
    context['pk'] = pk
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if invoice_model:  # Dashboard totals for reporting
        gross, net = get_invoice_totals(invoice_model)
        context['gross'] = gross
        context['net'] = net
    return render(request, template_name, context)
def generate_doc(contract):
    """
    Render a contract as a python-docx Document.

    https://stackoverflow.com/a/24122313/185820
    """
    document = Document()
    # Head
    task = ''
    if contract.task:
        task = contract.task
    title = document.add_heading(
        'ACLARK.NET, LLC %s AGREEMENT PREPARED FOR:' % task, level=1)
    title.alignment = WD_ALIGN_PARAGRAPH.CENTER
    if contract.client:
        client_name = document.add_heading(contract.client.name, level=1)
        client_name.alignment = WD_ALIGN_PARAGRAPH.CENTER
        client_address = document.add_heading(contract.client.address, level=1)
        client_address.alignment = WD_ALIGN_PARAGRAPH.CENTER
    parser = etree.HTMLParser()  # http://lxml.de/parsing.html
    tree = etree.parse(StringIO(contract.body), parser)
    # Body: only <h2> and <p> elements of the HTML body are converted
    for element in tree.iter():
        if element.tag == 'h2':
            document.add_heading(element.text, level=2)
        elif element.tag == 'p':
            document.add_paragraph(element.text)
    return document
def get_active_kwarg(model, active=False, user=None):
    """
    Kwarg for "active" varies by type
    """
    verbose_name = model._meta.verbose_name
    kwargs = {}
    if verbose_name == 'estimate':
        # An estimate is "active" until it has been accepted
        if active:
            kwargs['accepted_date'] = None
    elif verbose_name == 'invoice':
        # An invoice is "active" until it has been paid
        if active:
            kwargs['last_payment_date'] = None
    elif verbose_name == 'time':
        # Non-staff users only ever see their own entries
        if not user.is_staff:
            kwargs['user'] = user
        # Uninvoiced times are "active"; estimated times never are
        kwargs['invoiced'] = not active
        kwargs['estimate'] = None
    elif verbose_name == 'user':
        # Active-ness lives on the related profile
        kwargs['profile__active'] = active
    else:
        # All other models have an active field of their own
        kwargs['active'] = active
    return kwargs
def get_client_city(request):
    """
    GeoIP2 city data for the requesting IP; None when no IP is known.
    """
    geo = GeoIP2()
    ip_address = get_client_ip(request)
    if ip_address:
        return geo.city(ip_address)
# https://stackoverflow.com/a/4581997/185820
def get_client_ip(request):
    """Client IP as reported by the X-Real-IP header, or None."""
    meta = request.META
    return meta.get('HTTP_X_REAL_IP')
def get_company_name(company):
    """
    Company name upper-cased with '.' and ', ' turned into
    underscores; random Faker filler text when the name is empty.
    """
    if not company.name:
        fake = Faker()
        return fake.text()
    cleaned = company.name.replace('.', '_').replace(', ', '_')
    return cleaned.upper()
def get_invoice_totals(model):
    """
    Sum the amounts of all unpaid invoices.

    NOTE(review): the same figure is returned twice as (gross, net) —
    confirm net is really meant to equal gross here.
    """
    total = 0
    for invoice in model.objects.filter(last_payment_date=None):
        total += invoice.amount or 0
    return total, total
def get_line_total(entries, entry):
    """Placeholder: always returns 0 (arguments are ignored)."""
    return 0
def get_query(request, query):
    """
    Fetch a query-string value, with special handling for a few keys.

    'paginated': False only when explicitly 'false', otherwise True.
    'search':    read from POST data on form submissions.
    'values':    space-separated groups of comma-separated values,
                 parsed into a list of lists.
    Anything else falls back to GET with an empty-string default.
    """
    if query == 'paginated':
        return request.GET.get('paginated') != u'false'
    if query == 'search' and request.method == 'POST':
        return request.POST.get('search', '')
    if query == 'values':
        raw = request.GET.get('values')
        groups = raw.split(' ') if raw else []
        return [group.split(',') for group in groups]
    # Normal handling
    return request.GET.get(query, '')
def get_search_results(model,
                       search_fields,
                       search,
                       active_nav='',
                       app_settings_model=None,
                       edit_url='',
                       request=None):
    """
    Case-insensitive search for *search* across *search_fields*,
    OR-ed together; returns an index-style template context.
    """
    # Build one __icontains filter per field and OR them all together
    filters = [Q(**{'%s__icontains' % field: search})
               for field in search_fields]
    items = model.objects.filter(reduce(OR, filters))
    return {
        'active_nav': active_nav,
        'edit_url': edit_url,
        'icon_size': get_setting(request, app_settings_model, 'icon_size'),
        'items': items,
        'show_search': True,
    }
def get_setting(request, app_settings_model, setting, page_size=None):
    """
    Resolve a display setting, letting a user's profile override the
    global (solo) app settings. Returns None for anonymous users and
    for unknown setting names.
    """
    user = request.user
    if not user.is_authenticated:
        return
    app_settings = app_settings_model.get_solo()
    # Django raises a profile-less RelatedObjectDoesNotExist that also
    # subclasses AttributeError, so getattr's default applies.
    profile = getattr(user, 'profile', None)
    if setting == 'icon_size':
        pref = profile.icon_size if profile else None
        return pref if pref else app_settings.icon_size
    if setting == 'page_size':
        pref = profile.page_size if profile else None
        if pref:
            return pref
        if page_size:  # View's page_size preference
            return page_size
        return app_settings.page_size
    if setting == 'dashboard_choices':
        # Profile choices only apply when the override flag is set
        if profile and profile.override_dashboard:
            return profile.dashboard_choices
        return app_settings.dashboard_choices
def get_template_and_url_names(verbose_name, page_type=None):
    """
    Look up URL (and template) names for a model's verbose name.

    Returns (template_name, url_name) for 'view'/'edit', just the url
    name for 'index', and None for any other page type.
    """
    slot = {'view': 0, 'edit': 1, 'index': 2}.get(page_type)
    if slot is None:
        return None
    url_name = URL_NAMES[verbose_name][slot]
    if page_type == 'index':
        return url_name
    return '%s.html' % url_name, url_name
def get_times_for_invoice(invoice, time_model):
    """
    Times already attached to the invoice plus uninvoiced times on the
    invoice's project that are not tied to an estimate or another
    invoice.
    """
    unattached = time_model.objects.filter(
        invoiced=False, project=invoice.project, estimate=None, invoice=None)
    attached = time_model.objects.filter(invoice=invoice)
    return unattached | attached
def gravatar_url(email):
    """
    Gravatar image URL for an email address.

    Gravatar hashes the lower-cased address with MD5. hashlib's md5()
    requires bytes on Python 3, so the address is UTF-8 encoded first
    (calling it on a bare str raises TypeError).
    """
    digest = md5(email.lower().encode('utf-8')).hexdigest()
    return django_settings.GRAVATAR_URL % digest
def get_amount(times, invoice=None):
    """
    Attach a formatted dollar amount to each billable time entry
    (rate * hours); entries without a task are left untouched.
    Optionally stores the formatted grand total on the invoice.
    """
    total = 0
    for entry in times:
        if not entry.task:
            continue
        line = entry.task.rate * entry.hours
        entry.amount = '%.2f' % line
        total += line
    if invoice:
        invoice.amount = '%.2f' % total
        invoice.save()
    return times
def get_index_items(request,
                    model,
                    app_settings_model=None,
                    active_nav=None,
                    company_model=None,
                    contact_model=None,
                    edit_url=None,
                    order_by=None,
                    page_size=None,
                    search_fields=None,
                    show_search=False):
    """
    Build the template context for a model's index (listing) page.

    A POST is treated as a search submission; otherwise all objects are
    listed, optionally ordered and paginated, with per-model extras:
    total hours for time, recomputed cost for reports, an is_contact
    flag for users, and note stats for notes. Anonymous users get an
    empty item list.
    """
    context = {}
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    page = get_query(request, 'page')
    paginated = get_query(request, 'paginated')
    search = get_query(request, 'search')
    # Search is easy
    if request.method == 'POST':
        if search == u'':  # Empty search returns none
            context['active_nav'] = active_nav
            context['show_search'] = True
            return context
        else:
            return get_search_results(
                model,
                search_fields,
                search,
                active_nav=active_nav,
                app_settings_model=app_settings_model,
                edit_url=edit_url,
                request=request)
    # Not a search
    items = model.objects.all()
    # Order items (http://stackoverflow.com/a/20257999/185820)
    if order_by is not None:
        items = items.order_by(*order_by)
    # Calculate total hours
    if model._meta.verbose_name == 'time':
        total_hours = items.aggregate(hours=Sum(F('hours')))
        total_hours = total_hours['hours']
        context['total_hours'] = total_hours
    # Calculate cost per report
    # NOTE(review): saving every report on each index view writes to the
    # DB on read — confirm this denormalization is intended.
    if model._meta.verbose_name == 'report':
        for item in items:
            cost = item.gross - item.net
            item.cost = cost
            item.save()
    # Check if user is contact
    if model._meta.verbose_name == 'user':
        contacts = contact_model.objects.all()
        for item in items:
            if item.email in [i.email for i in contacts]:
                item.is_contact = True
            else:
                item.is_contact = False
    # Don't show items to anon
    if not request.user.is_authenticated:
        items = []
    # Paginate if paginated
    if paginated:
        page_size = get_setting(
            request, app_settings_model, 'page_size', page_size=page_size)
        items = paginate(items, page, page_size)
    context['active_nav'] = active_nav
    context['edit_url'] = edit_url
    context['icon_size'] = get_setting(request, app_settings_model,
                                       'icon_size')
    context['items'] = items
    context['page'] = page
    context['paginated'] = paginated
    context['show_search'] = show_search
    # Provide number of active notes to note_index
    if model._meta.verbose_name == 'note':
        context['note_stats'] = get_note_stats(model)
    return context
def get_note_stats(note_model):
    """
    Return note counts by state for display in the note index.

    Uses ``QuerySet.count()`` so counting happens in the database
    instead of materializing every row just to call ``len()``.
    """
    active = note_model.objects.filter(active=True).count()
    hidden = note_model.objects.filter(hidden=True).count()
    inactive = note_model.objects.filter(active=False).count()
    total = note_model.objects.all().count()
    # NOTE(review): assumes hidden notes are a subset of inactive ones,
    # otherwise this difference can go negative — confirm on the model.
    not_hidden = inactive - hidden
    return {
        'active_note_count': active,
        'hidden_note_count': hidden,
        'inactive_note_count': inactive,
        'total_note_count': total,
        'not_hidden_count': not_hidden,
    }
def get_page_items(request,
                   app_settings_model=None,
                   company_model=None,
                   contact_model=None,
                   contract_model=None,
                   estimate_model=None,
                   invoice_model=None,
                   model=None,
                   note_model=None,
                   profile_model=None,
                   project_model=None,
                   report_model=None,
                   order_by=None,
                   pk=None,
                   time_model=None):
    """
    Build the template context for a single object's detail page.

    Dispatches on ``model._meta.verbose_name``; when no model is given
    the dashboard ("home") context is built instead. Callers only need
    to pass the related models their branch uses.
    """
    context = {}
    if company_model:
        company = company_model.get_solo()
        context['company'] = company
    if model:
        if model._meta.verbose_name == 'client':
            client = get_object_or_404(model, pk=pk)
            contacts = contact_model.objects.filter(client=client)
            contracts = contract_model.objects.filter(client=client)
            projects = project_model.objects.filter(client=client)
            context['active_nav'] = 'client'
            context['contacts'] = contacts
            context['contracts'] = contracts
            context['edit_url'] = 'client_edit'
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['item'] = client
            context['notes'] = client.note.all()
            context['projects'] = projects
        elif model._meta.verbose_name == 'contract':
            contract = get_object_or_404(model, pk=pk)
            doc = get_query(request, 'doc')
            estimate = contract.statement_of_work
            pdf = get_query(request, 'pdf')
            if estimate:
                # Unassigned client times plus the estimate's own times
                times_client = time_model.objects.filter(
                    client=estimate.client,
                    estimate=None,
                    project=None,
                    invoiced=False,
                    invoice=None)
                times_estimate = time_model.objects.filter(estimate=estimate)
                times = times_client | times_estimate
            else:
                times = None
            context['active_nav'] = 'contract'
            context['doc'] = doc
            context['edit_url'] = 'contract_edit'
            context['item'] = contract
            context['pdf'] = pdf
            context['times'] = times
        elif model._meta.verbose_name == 'estimate':
            estimate = get_object_or_404(model, pk=pk)
            document_type = estimate._meta.verbose_name
            document_type_upper = document_type.upper()
            document_type_title = document_type.title()
            pdf = get_query(request, 'pdf')
            times_client = time_model.objects.filter(
                client=estimate.client,
                estimate=None,
                project=None,
                invoiced=False,
                invoice=None)
            times_estimate = time_model.objects.filter(estimate=estimate)
            times = times_client | times_estimate
            times = get_amount(times)
            context['active_nav'] = 'estimate'
            context['document_type_upper'] = document_type_upper
            context['document_type_title'] = document_type_title
            context['entries'] = times
            context['edit_url'] = 'estimate_edit'
            context['item'] = estimate
            context['pdf'] = pdf
        # NOTE(review): this `if` starts a second if/elif chain ('file'
        # is not `elif`) — harmless as written since the branches are
        # mutually exclusive, but confirm it was not meant to continue
        # the chain above.
        if model._meta.verbose_name == 'file':
            file_obj = get_object_or_404(model, pk=pk)
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'file_edit'
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['item'] = file_obj
        elif model._meta.verbose_name == 'invoice':
            invoice = get_object_or_404(model, pk=pk)
            document_type = invoice._meta.verbose_name
            document_type_upper = document_type.upper()
            document_type_title = document_type.title()
            times = get_times_for_invoice(invoice, time_model)
            times = times.order_by(*order_by['time'])
            times = get_amount(times, invoice=invoice)
            last_payment_date = invoice.last_payment_date
            pdf = get_query(request, 'pdf')
            context['active_nav'] = 'invoice'
            context['document_type_upper'] = document_type_upper
            context['document_type_title'] = document_type_title
            context['edit_url'] = 'invoice_edit'  # Delete modal
            context['entries'] = times
            context['item'] = invoice
            context['invoice'] = True
            context['last_payment_date'] = last_payment_date
            context['pdf'] = pdf
        elif model._meta.verbose_name == 'project':
            project = get_object_or_404(model, pk=pk)
            invoices = project.invoice_set.all()
            invoice = times = None
            if len(invoices) > 0:
                # Times shown come from the project's first invoice
                invoice = invoices[0]
                times = get_times_for_invoice(invoice, time_model)
                times = times.order_by(*order_by['time'])
            estimates = estimate_model.objects.filter(
                project=project, accepted_date=None)
            invoices = invoice_model.objects.filter(
                project=project, last_payment_date=None)
            context['active_nav'] = 'project'
            context['edit_url'] = 'project_edit'  # Delete modal
            context['entries'] = times
            context['icon_size'] = get_setting(request, app_settings_model,
                                               'icon_size')
            context['estimates'] = estimates
            context['invoices'] = invoices
            context['item'] = project
            context['times'] = times
        elif model._meta.verbose_name == 'proposal':
            proposal = get_object_or_404(model, pk=pk)
            pdf = get_query(request, 'pdf')
            context['active_nav'] = 'dropdown'
            context['edit_url'] = 'proposal_edit'  # Delete modal
            context['item'] = proposal
            context['pdf'] = pdf
        elif model._meta.verbose_name == 'user':
            user = get_object_or_404(model, pk=pk)
            # Uninvoiced, non-estimate times belonging to this user
            filters = {
                'estimate': None,
                'invoiced': False,
                'user': user,
            }
            projects = project_model.objects.filter(
                team__in=[user, ], active=True)
            projects = projects.order_by(*order_by['project'])
            times = time_model.objects.filter(**filters)
            times = times.order_by(*order_by['time'])
            contacts = contact_model.objects.all()
            context['active_nav'] = 'dropdown'
            context['is_contact'] = user.email in [i.email for i in contacts]
            context['item'] = user
            context['profile'] = profile_model.objects.get_or_create(
                user=user)[0]
            context['projects'] = projects
            context['times'] = times
    else:  # home
        invoices = invoice_model.objects.filter(last_payment_date=None)
        notes = note_model.objects.filter(active=True)
        notes = notes.order_by(*order_by['note'])
        projects = project_model.objects.filter(active=True, hidden=False)
        projects = projects.order_by(*order_by['project'])
        plot_items = report_model.objects.filter(active=True)
        gross, net = get_invoice_totals(invoice_model)
        context['note_stats'] = get_note_stats(note_model)
        context['city_data'] = get_client_city(request)
        context['dashboard_choices'] = get_setting(request, app_settings_model,
                                                   'dashboard_choices')
        context['gross'] = gross
        context['invoices'] = invoices
        context['icon_size'] = get_setting(request, app_settings_model,
                                           'icon_size')
        context['nav_status'] = 'active'
        context['net'] = net
        context['notes'] = notes
        context['plot_items'] = plot_items
        context['projects'] = projects
    return context
def last_month():
    """Return the last day of the previous month."""
    first_of_this_month = timezone.now().replace(day=1)
    one_day = timezone.timedelta(days=1)
    return first_of_this_month - one_day
def obj_copy(obj):
    """
    Duplicate ``obj`` and redirect to the copy's edit page.

    Clearing ``pk`` before ``save()`` makes Django INSERT a new row —
    the standard model-duplication idiom; after the save the instance
    carries the new primary key.
    """
    dup = obj
    dup.pk = None
    dup.save()
    kwargs = {'pk': dup.pk}
    # Only the URL name is needed here; the template name is unused.
    _, url_name = get_template_and_url_names(
        obj._meta.verbose_name, page_type='edit')
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def obj_delete(obj):
    """Delete ``obj`` and redirect to the matching index page."""
    # NOTE(review): obj_copy unpacks (template_name, url_name) from this
    # helper, but here the whole return value is passed to reverse() —
    # confirm it returns a bare URL name when page_type='index'.
    url_name = get_template_and_url_names(
        obj._meta.verbose_name, page_type='index') # Redir to index
    obj.delete()
    return HttpResponseRedirect(reverse(url_name))
def obj_edit(obj, pk=None):
    """
    Redirect to the view page for ``obj`` after an edit.

    Company and app-settings redirect without a pk; everything else
    gets a pk kwarg (``pk`` for existing rows, ``obj.pk`` for new ones).
    """
    verbose_name = obj._meta.verbose_name
    template_name, url_name = get_template_and_url_names(
        verbose_name, page_type='view') # Redir to view
    # New or existing object
    kwargs = {}
    if pk: # Existing
        # NOTE(review): 'Company' is capitalized while the other verbose
        # names compared in this module are lowercase — confirm it
        # matches the model's verbose_name.
        if verbose_name == 'Company': # Special case for company
            return HttpResponseRedirect(reverse(url_name))
        if verbose_name == 'app settings': # Special case for settings
            return HttpResponseRedirect(reverse(url_name))
        kwargs['pk'] = pk
    else: # New
        kwargs['pk'] = obj.pk
    return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
def paginate(items, page, page_size):
    """
    Return page ``page`` of ``items``, ``page_size`` per page (up to 5
    orphans folded into the last page); a non-integer page falls back
    to page 1, an out-of-range page to the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        items = paginator.page(page)
    except PageNotAnInteger:
        items = paginator.page(1)
    except EmptyPage:
        items = paginator.page(paginator.num_pages)
    return items
def send_mail(request,
              subject,
              message,
              to,
              url=None,
              uuid=None,
              first_name=None):
    """
    Send a templated HTML email to ``to``.

    Returns True on success; on SMTPSenderRefused flashes a warning
    message on the request and returns False.
    """
    recipients = []
    sender = django_settings.EMAIL_FROM
    recipients.append(to)
    # Greet by first name when available, otherwise by address.
    # http://stackoverflow.com/a/28476681/185820
    if first_name:
        username = first_name
    else:
        username = to
    html_message = render_to_string('cerberus-fluid.html', {
        'username': username,
        'message': message,
        'url': url,
        'uuid': uuid,
    })
    try:
        django_send_mail(
            subject,
            message,
            sender,
            recipients,
            fail_silently=False,
            html_message=html_message)
        return True
    except SMTPSenderRefused:
        messages.add_message(request, messages.WARNING, 'SMTPSenderRefused!')
        return False
def set_relationship(
        obj,
        request,
        client_model=None,
        estimate_model=None,
        invoice_model=None,
        project_model=None):
    """
    Attach ``obj`` to related objects named in the request's query string.

    Contacts and notes are added to the client given by ``?client=<pk>``.
    Time entries pick up an invoice from ``?invoice=<pk>`` (a single pk
    only) and a task from ``?project=<pk>``'s project.

    Returns True on success and False otherwise — including when more
    than one invoice pk is supplied (previously some failure paths fell
    through returning None; both are falsy, so callers are unaffected).
    """
    verbose_name = obj._meta.verbose_name
    if verbose_name in ['contact', 'note']:
        query_string_client = get_query(request, 'client')
        if query_string_client:
            client = get_object_or_404(client_model, pk=query_string_client)
            related_obj = getattr(client, verbose_name)
            related_obj.add(obj)
            related_obj.save()
            return True
    elif verbose_name == 'time':
        query_string_invoices = get_query(request, 'invoice')
        query_string_project = get_query(request, 'project')
        if query_string_invoices:
            invoices = query_string_invoices.split(',')
            if len(invoices) > 1:
                # Attaching one time entry to many invoices is unsupported.
                return False
            invoice = get_object_or_404(invoice_model, pk=invoices[0])
            obj.invoice = invoice
            obj.save()
        if query_string_project:
            project = get_object_or_404(project_model, pk=query_string_project)
            obj.task = project.task
            obj.save()
        return True
    return False
|
#!/usr/bin/env python
from argparse import ArgumentParser
from ServerConfig import General
from ServerConfig import Storage
from ServerConfig import Microbench
from ServerConfig import TellStore
from mbclient import startMBClient
from mbserver import startMBServer
from storage import *
from observer import *
from functools import partial
from stop_java_unmount_memfs import stop_java_unmount_memfs
import time
import os
import sys
import signal
import logging
logging.basicConfig()
def exitGracefully(signal, frame):
    """SIGINT handler: tear down Java processes / memfs, then exit 0."""
    stop_java_unmount_memfs()
    sys.exit(0)
def sqliteOut():
    """Build the sqlite result-file basename from the current cluster
    configuration (storage kind, node/client counts, batch size)."""
    # NOTE(review): Storage.storage is compared with == against classes
    # below; if it holds a class, __class__.__name__ names the metaclass,
    # not the storage — confirm whether an instance or a class is stored.
    storage = Storage.storage.__class__.__name__.lower()
    if Storage.storage == TellStore:
        storage = storage + "_{0}".format(TellStore.approach)
    # else:
    #     print "Error: current storage not supported"
    #     exit(1)
    numStorages = len(Storage.servers) + len(Storage.servers1)
    # NOTE(review): numMBServers is computed but not part of the name.
    numMBServers = len(Microbench.servers0) + len(Microbench.servers1)
    numClients = Microbench.clients
    numAnalytical = Microbench.analyticalClients
    res = "{0}_{1}storages_{2}clients_{3}scans".format(storage, numStorages, numClients, numAnalytical)
    res += "_{0}infinioBatch".format(Microbench.infinioBatch)
    return res
def runMBench(outdir, onlyPopulation = False):
# do as for many experiments we have to run
## start storages
if Storage.storage == TellStore:
stObserver = Observer("Initialize network server")
if Storage.storage == Cassandra:
stObserver = Observer("No host ID found")
else:
stObserver = None
if stObserver == None:
storageClients = startStorage([])
else:
storageClients = startStorage([stObserver])
# wait for notification that they have started
stObserver.waitFor(len(Storage.servers) + len(Storage.servers1))
print "Storage started"
## start microbenchmark server
mbObserver = Observer("Started mbench server")
serverClients = startMBServer([mbObserver])
mbObserver.waitFor(len(Microbench.servers0) + len(Microbench.servers1))
print "Server started... Continue with population"
clients = Microbench.clients
Microbench.clients = 10*(len(Microbench.servers0) + len(Microbench.servers1)) - 1
res = startMBClient(True, "{0}/{1}_population".format(outdir, sqliteOut()))
Microbench.clients = clients
if res != 0:
print "Population failed"
exit(res)
print "Population done"
if not onlyPopulation:
res = startMBClient(False, "{0}/{1}".format(outdir, sqliteOut()))
if res != 0:
print "Benchmark failed"
exit(res)
for client in serverClients:
client.kill()
for client in storageClients:
client.kill()
for client in serverClients:
client.join()
for client in storageClients:
client.join()
if (Storage.storage == Cassandra or Storage.storage == Hadoop or Storage.storage == Hbase):
stop_java_unmount_memfs()
def configForAnalytics():
    """Configure a scan-only workload: one analytical client, no get/put
    clients, and a single mbench server."""
    Microbench.analyticalClients = 1
    Microbench.clients = 0
    Microbench.infinioBatch = 16
    # Shrink the mbench server set to exactly one host.
    if len(Microbench.servers0) > 0:
        Microbench.servers1 = []
        while len(Microbench.servers0) > 1:
            del Microbench.servers0[-1]
def configGetPut():
    """Configure a get/put workload: no scans, roughly ten clients per
    mbench server, and 1/6 probability each for insert/update/delete."""
    Microbench.analyticalClients = 0
    Microbench.clientThreads = 4
    Microbench.clients = 10*(len(Microbench.servers0) + len(Microbench.servers1)) - 1
    # TellStore gets a single server thread; other storages get four.
    Microbench.threads = 1 if Storage.storage == TellStore else 4
    Microbench.insertProb = 0.166
    Microbench.updateProb = 0.166
    Microbench.deleteProb = 0.166
def configMixed():
    """Get/put workload plus one analytical (scan) client."""
    configGetPut()
    Microbench.analyticalClients = 1
def experiment1a(outdir):
    """Experiment 1a: plain get/put workload."""
    configGetPut()
    runMBench(outdir)
def experiment1a_singlebatch(outdir):
    """Experiment 1a with batching disabled (batch size 1); the previous
    batch size is restored afterwards."""
    configGetPut()
    old = Microbench.infinioBatch
    Microbench.infinioBatch = 1
    runMBench(outdir)
    Microbench.infinioBatch = old
def experiment1b(outdir):
    """Experiment 1b: read-only get/put workload (all write probabilities
    zeroed)."""
    configGetPut()
    # NOTE(review): unlike experiment1a_singlebatch, the probabilities
    # are not restored; later experiments rely on configGetPut resetting
    # them — confirm every caller reconfigures first.
    Microbench.insertProb = 0.0
    Microbench.updateProb = 0.0
    Microbench.deleteProb = 0.0
    runMBench(outdir)
def experiment2a(outdir):
    """Experiment 2a: analytics-only (scan) workload."""
    configForAnalytics()
    runMBench(outdir)
def experiment3(outdir):
    """Experiment 3: mixed workload — get/put plus one analytical client
    (equivalent to configMixed())."""
    configGetPut()
    Microbench.analyticalClients = 1
    runMBench(outdir)
def varyBatching(experiment, outdir):
    """Run ``experiment`` once per infinio batch size 1..64 (powers of
    two), then reset the batch size to the default 16."""
    for i in [1,2,4,8,16,32,64]:
        Microbench.infinioBatch = i
        experiment(outdir)
    Microbench.infinioBatch = 16
def scalingExperiment(experiment, outdir, numNodes):
    """Provision ``numNodes`` storage nodes plus roughly three mbench
    servers per storage node from the fixed euler host pool, then run
    ``experiment``."""
    Storage.master = 'euler07'
    Storage.servers = []
    servers = ['euler04', 'euler05', 'euler06', 'euler02']
    servers.reverse()
    mservers0 = ['euler03', 'euler08', 'euler09', 'euler10', 'euler11', 'euler01']
    mservers1 = servers + ['euler03', 'euler08', 'euler09', 'euler10', 'euler11', 'euler01']
    mservers0.reverse()
    mservers1.reverse()
    Microbench.servers0 = []
    Microbench.servers1 = []
    # pop() takes from the end, so the reversed lists are consumed in
    # their original (declaration) order.
    while len(Storage.servers) < numNodes:
        Storage.servers.append(servers.pop())
    # Keep assigning mbench hosts until there are 3x as many as storages.
    while 3*len(Storage.servers) > len(Microbench.servers0) + len(Microbench.servers1):
        if len(mservers0) != 0:
            Microbench.servers0.append(mservers0.pop())
        else:
            Microbench.servers1.append(mservers1.pop())
    experiment(outdir)
def runOnTell(experiment, outdir, numNodes):
    """Run ``experiment`` for each node count under every enabled
    TellStore approach (currently rowstore only)."""
    for approach in ["rowstore"]: #["logstructured", "columnmap", "rowstore"]:
        TellStore.approach = approach
        TellStore.setDefaultMemorySize()
        for num in numNodes:
            experiment(outdir, num)
def runOnOthers(experiment, outdir, numNodes):
    """Run ``experiment`` once per node count (non-TellStore storages)."""
    for num in numNodes:
        experiment(outdir,num)
def runAllBenchmarks(outdir, experiments):
#print "#######################################"
#print " RUN EXPERIMENT 1a_singlebatch"
#print "#######################################"
#o = '{0}/experiment1a_singlebatch'.format(outdir)
#if os.path.isdir(o):
# raise RuntimeError('{0} exists'.format(o))
#os.mkdir(o)
#runOnTell(partial(scalingExperiment, experiment1a_singlebatch), o, [1,2,3,4])
if Storage.storage == TellStore:
runOn = runOnTell
else:
runOn = runOnOthers
if len(experiments) == 0 or "experiment1a" in experiments:
print "#######################################"
print " RUN EXPERIMENT 1a"
print "#######################################"
o = '{0}/experiment1a'.format(outdir)
if os.path.isdir(o):
raise RuntimeError('{0} exists'.format(o))
os.mkdir(o)
runOn(partial(scalingExperiment, experiment1a), o, [1,2,3,4])
if len(experiments) == 0 or "experiment1b" in experiments:
# Experiment 1b
print "#######################################"
print " RUN EXPERIMENT 1b"
print "#######################################"
o = '{0}/experiment1b'.format(outdir)
if os.path.isdir(o):
raise RuntimeError('{0} exists'.format(o))
os.mkdir(o)
runOn(partial(scalingExperiment, experiment1b), o, [1,2,3,4])
if (len(experiments) == 0 or "experiment1c" in experiments) and Storage.storage == TellStore:
# Experiment 1c
# No experiment needed here (inserts are measured for all experiments)
# o = '{0}/experiment1c'.format(outdir)
# if os.path.isdir(o):
# raise RuntimeError('{0} exists'.format(o))
# os.mkdir(o)
# runOnTell(partial(scalingExperiment, experiment1c), o, [1,2,3,4])
# Experiment 1d
print "#######################################"
print " RUN EXPERIMENT 1d"
print "#######################################"
o = '{0}/experiment1d'.format(outdir)
if os.path.isdir(o):
raise RuntimeError('{0} exists'.format(o))
os.mkdir(o)
runOn(partial(scalingExperiment, partial(varyBatching, experiment1a)), o, [2])
if len(experiments) == 0 or "experiment2a" in experiments:
# Experiment 2a
print "#######################################"
print " RUN EXPERIMENT 2a"
print "#######################################"
o = '{0}/experiment2a'.format(outdir)
if os.path.isdir(o):
raise RuntimeError('{0} exists'.format(o))
os.mkdir(o)
runOn(partial(scalingExperiment, experiment2a), o, [2])
if len(experiments) == 0 or "experiment3" in experiments:
# Experiment 3
print "#######################################"
print " RUN EXPERIMENT 3"
print "#######################################"
o = '{0}/experiment3'.format(outdir)
if os.path.isdir(o):
raise RuntimeError('{0} exists'.format(o))
os.mkdir(o)
runOn(partial(scalingExperiment, experiment3), o, [1, 2, 3, 4])
if __name__ == "__main__":
    signal.signal(signal.SIGINT, exitGracefully)
    parser = ArgumentParser()
    parser.add_argument("-o", help="Output directory", default='results')
    parser.add_argument('experiments', metavar='E', type=str, nargs='*', help='Experiments to run (none defaults to all)')
    args = parser.parse_args()
    # Bug fix: honor -o. The parsed value was previously ignored and the
    # hard-coded 'results' directory was always used.
    out = args.o
    if not os.path.isdir(out):
        os.mkdir(out)
    runAllBenchmarks(out, args.experiments)
Removed unused (commented-out) code from the experiment runner.
#!/usr/bin/env python
from argparse import ArgumentParser
from ServerConfig import General
from ServerConfig import Storage
from ServerConfig import Microbench
from ServerConfig import TellStore
from mbclient import startMBClient
from mbserver import startMBServer
from storage import *
from observer import *
from functools import partial
from stop_java_unmount_memfs import stop_java_unmount_memfs
import time
import os
import sys
import signal
import logging
logging.basicConfig()
def exitGracefully(signal, frame):
    """SIGINT handler: tear down Java processes / memfs, then exit 0."""
    stop_java_unmount_memfs()
    sys.exit(0)
def sqliteOut():
    """Build the sqlite result-file basename from the current cluster
    configuration (storage kind, node/client counts, batch size)."""
    # NOTE(review): Storage.storage is compared with == against classes
    # below; if it holds a class, __class__.__name__ names the metaclass,
    # not the storage — confirm whether an instance or a class is stored.
    storage = Storage.storage.__class__.__name__.lower()
    if Storage.storage == TellStore:
        storage = storage + "_{0}".format(TellStore.approach)
    # else:
    #     print "Error: current storage not supported"
    #     exit(1)
    numStorages = len(Storage.servers) + len(Storage.servers1)
    # NOTE(review): numMBServers is computed but not part of the name.
    numMBServers = len(Microbench.servers0) + len(Microbench.servers1)
    numClients = Microbench.clients
    numAnalytical = Microbench.analyticalClients
    res = "{0}_{1}storages_{2}clients_{3}scans".format(storage, numStorages, numClients, numAnalytical)
    res += "_{0}infinioBatch".format(Microbench.infinioBatch)
    return res
def runMBench(outdir, onlyPopulation = False):
# do as for many experiments we have to run
## start storages
if Storage.storage == TellStore:
stObserver = Observer("Initialize network server")
if Storage.storage == Cassandra:
stObserver = Observer("No host ID found")
else:
stObserver = None
if stObserver == None:
storageClients = startStorage([])
else:
storageClients = startStorage([stObserver])
# wait for notification that they have started
stObserver.waitFor(len(Storage.servers) + len(Storage.servers1))
print "Storage started"
## start microbenchmark server
mbObserver = Observer("Started mbench server")
serverClients = startMBServer([mbObserver])
mbObserver.waitFor(len(Microbench.servers0) + len(Microbench.servers1))
print "Server started... Continue with population"
clients = Microbench.clients
Microbench.clients = 10*(len(Microbench.servers0) + len(Microbench.servers1)) - 1
res = startMBClient(True, "{0}/{1}_population".format(outdir, sqliteOut()))
Microbench.clients = clients
if res != 0:
print "Population failed"
exit(res)
print "Population done"
if not onlyPopulation:
res = startMBClient(False, "{0}/{1}".format(outdir, sqliteOut()))
if res != 0:
print "Benchmark failed"
exit(res)
for client in serverClients:
client.kill()
for client in storageClients:
client.kill()
for client in serverClients:
client.join()
for client in storageClients:
client.join()
if (Storage.storage == Cassandra or Storage.storage == Hadoop or Storage.storage == Hbase):
stop_java_unmount_memfs()
def configForAnalytics():
    """Configure a scan-only workload: one analytical client, no get/put
    clients, and a single mbench server."""
    Microbench.analyticalClients = 1
    Microbench.clients = 0
    Microbench.infinioBatch = 16
    # Shrink the mbench server set to exactly one host.
    if len(Microbench.servers0) > 0:
        Microbench.servers1 = []
        while len(Microbench.servers0) > 1:
            del Microbench.servers0[-1]
def configGetPut():
    """Configure a get/put workload: no scans, roughly ten clients per
    mbench server, and 1/6 probability each for insert/update/delete."""
    Microbench.analyticalClients = 0
    Microbench.clientThreads = 4
    Microbench.clients = 10*(len(Microbench.servers0) + len(Microbench.servers1)) - 1
    # TellStore gets a single server thread; other storages get four.
    Microbench.threads = 1 if Storage.storage == TellStore else 4
    Microbench.insertProb = 0.166
    Microbench.updateProb = 0.166
    Microbench.deleteProb = 0.166
def configMixed():
    """Get/put workload plus one analytical (scan) client."""
    configGetPut()
    Microbench.analyticalClients = 1
def experiment1a(outdir):
    """Experiment 1a: plain get/put workload."""
    configGetPut()
    runMBench(outdir)
def experiment1a_singlebatch(outdir):
    """Experiment 1a with batching disabled (batch size 1); the previous
    batch size is restored afterwards."""
    configGetPut()
    old = Microbench.infinioBatch
    Microbench.infinioBatch = 1
    runMBench(outdir)
    Microbench.infinioBatch = old
def experiment1b(outdir):
    """Experiment 1b: read-only get/put workload (all write probabilities
    zeroed)."""
    configGetPut()
    # NOTE(review): the probabilities are not restored afterwards; later
    # experiments rely on configGetPut resetting them.
    Microbench.insertProb = 0.0
    Microbench.updateProb = 0.0
    Microbench.deleteProb = 0.0
    runMBench(outdir)
def experiment2a(outdir):
    """Experiment 2a: analytics-only (scan) workload."""
    configForAnalytics()
    runMBench(outdir)
def experiment3(outdir):
    """Experiment 3: mixed workload — get/put plus one analytical client
    (equivalent to configMixed())."""
    configGetPut()
    Microbench.analyticalClients = 1
    runMBench(outdir)
def varyBatching(experiment, outdir):
    """Run ``experiment`` once per infinio batch size 1..64 (powers of
    two), then reset the batch size to the default 16."""
    for i in [1,2,4,8,16,32,64]:
        Microbench.infinioBatch = i
        experiment(outdir)
    Microbench.infinioBatch = 16
def scalingExperiment(experiment, outdir, numNodes):
    """Provision ``numNodes`` storage nodes plus roughly three mbench
    servers per storage node from the fixed euler host pool, then run
    ``experiment``."""
    Storage.master = 'euler07'
    Storage.servers = []
    servers = ['euler04', 'euler05', 'euler06', 'euler02']
    servers.reverse()
    mservers0 = ['euler03', 'euler08', 'euler09', 'euler10', 'euler11', 'euler01']
    mservers1 = servers + ['euler03', 'euler08', 'euler09', 'euler10', 'euler11', 'euler01']
    mservers0.reverse()
    mservers1.reverse()
    Microbench.servers0 = []
    Microbench.servers1 = []
    # pop() takes from the end, so the reversed lists are consumed in
    # their original (declaration) order.
    while len(Storage.servers) < numNodes:
        Storage.servers.append(servers.pop())
    # Keep assigning mbench hosts until there are 3x as many as storages.
    while 3*len(Storage.servers) > len(Microbench.servers0) + len(Microbench.servers1):
        if len(mservers0) != 0:
            Microbench.servers0.append(mservers0.pop())
        else:
            Microbench.servers1.append(mservers1.pop())
    experiment(outdir)
def runOnTell(experiment, outdir, numNodes):
    """Run ``experiment`` for each node count under every enabled
    TellStore approach (currently rowstore only)."""
    for approach in ["rowstore"]: #["logstructured", "columnmap", "rowstore"]:
        TellStore.approach = approach
        TellStore.setDefaultMemorySize()
        for num in numNodes:
            experiment(outdir, num)
def runOnOthers(experiment, outdir, numNodes):
    """Run ``experiment`` once per node count (non-TellStore storages)."""
    for num in numNodes:
        experiment(outdir,num)
def runAllBenchmarks(outdir, experiments):
    """Run every experiment named in ``experiments`` (all of them when
    the list is empty), creating one fresh sub-directory per experiment
    and refusing to overwrite an existing one."""
    if Storage.storage == TellStore:
        runOn = runOnTell
    else:
        runOn = runOnOthers
    if len(experiments) == 0 or "experiment1a" in experiments:
        print "#######################################"
        print " RUN EXPERIMENT 1a"
        print "#######################################"
        o = '{0}/experiment1a'.format(outdir)
        if os.path.isdir(o):
            raise RuntimeError('{0} exists'.format(o))
        os.mkdir(o)
        runOn(partial(scalingExperiment, experiment1a), o, [1,2,3,4])
    if len(experiments) == 0 or "experiment1b" in experiments:
        # Experiment 1b
        print "#######################################"
        print " RUN EXPERIMENT 1b"
        print "#######################################"
        o = '{0}/experiment1b'.format(outdir)
        if os.path.isdir(o):
            raise RuntimeError('{0} exists'.format(o))
        os.mkdir(o)
        runOn(partial(scalingExperiment, experiment1b), o, [1,2,3,4])
    if (len(experiments) == 0 or "experiment1c" in experiments) and Storage.storage == TellStore:
        # Experiment 1c
        # No experiment needed here (inserts are measured for all experiments)
        # o = '{0}/experiment1c'.format(outdir)
        # if os.path.isdir(o):
        #     raise RuntimeError('{0} exists'.format(o))
        # os.mkdir(o)
        # runOnTell(partial(scalingExperiment, experiment1c), o, [1,2,3,4])
        # Experiment 1d
        print "#######################################"
        print " RUN EXPERIMENT 1d"
        print "#######################################"
        o = '{0}/experiment1d'.format(outdir)
        if os.path.isdir(o):
            raise RuntimeError('{0} exists'.format(o))
        os.mkdir(o)
        runOn(partial(scalingExperiment, partial(varyBatching, experiment1a)), o, [2])
    if len(experiments) == 0 or "experiment2a" in experiments:
        # Experiment 2a
        print "#######################################"
        print " RUN EXPERIMENT 2a"
        print "#######################################"
        o = '{0}/experiment2a'.format(outdir)
        if os.path.isdir(o):
            raise RuntimeError('{0} exists'.format(o))
        os.mkdir(o)
        runOn(partial(scalingExperiment, experiment2a), o, [2])
    if len(experiments) == 0 or "experiment3" in experiments:
        # Experiment 3
        print "#######################################"
        print " RUN EXPERIMENT 3"
        print "#######################################"
        o = '{0}/experiment3'.format(outdir)
        if os.path.isdir(o):
            raise RuntimeError('{0} exists'.format(o))
        os.mkdir(o)
        runOn(partial(scalingExperiment, experiment3), o, [1, 2, 3, 4])
if __name__ == "__main__":
    signal.signal(signal.SIGINT, exitGracefully)
    parser = ArgumentParser()
    parser.add_argument("-o", help="Output directory", default='results')
    parser.add_argument('experiments', metavar='E', type=str, nargs='*', help='Experiments to run (none defaults to all)')
    args = parser.parse_args()
    # Bug fix: honor -o. The parsed value was previously ignored and the
    # hard-coded 'results' directory was always used.
    out = args.o
    if not os.path.isdir(out):
        os.mkdir(out)
    runAllBenchmarks(out, args.experiments)
|
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.utils import timezone
from import_export import widgets
from md5 import md5
import datetime
import operator
import re
class BooleanWidget(widgets.Widget):
    """Import/export widget mapping the literal string 'Yes' to True and
    every other value to False."""
    def clean(self, value):
        # Exactly 'Yes' is truthy; anything else (including None) is not.
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """Import/export widget turning comma-grouped number strings into
    Decimal values; empty/falsy input becomes Decimal(0)."""
    def clean(self, value):
        if not value:
            return Decimal(0)
        # Strip thousands separators before parsing.
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: returns the imported value unchanged.
    """
    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """
    Create a contact record (``model``) from the User with primary key
    ``pk``.

    Requires a POST and a user with email, first and last name set;
    redirects to the contact index on success and to the user index
    otherwise. (The dead ``request.POST.get('contact')`` assignment,
    which was immediately overwritten, has been removed.)
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        user = get_object_or_404(User, pk=pk)
        if not user.email or not user.first_name or not user.last_name:
            messages.add_message(request, messages.INFO,
                                 'No email no contact!')
            return HttpResponseRedirect(reverse('user_index'))
        contact = model(email=user.email,
                        active=True,
                        first_name=user.first_name,
                        last_name=user.last_name)
        contact.save()
        messages.add_message(request, messages.INFO,
                             'User added to contacts!')
        return HttpResponseRedirect(reverse('contact_index'))
    # NOTE(review): non-POST requests fall through and return None —
    # confirm the URLconf only routes POSTs here.
def class_name_pk(self):
    """Return '<lowercased class name>-<pk>' (e.g. for DOM ids)."""
    parts = [type(self).__name__.lower(), str(self.pk)]
    return '-'.join(parts)
def dashboard_total(invoices):
    """
    Sum ``invoices`` into a (gross, net) tuple.

    ``gross`` accumulates truthy ``subtotal`` values, ``net`` truthy
    ``amount`` values; None/0 entries are skipped. The per-invoice
    OrderedDict the original built here was never returned or read, so
    it has been dropped.
    """
    gross = 0
    net = 0
    for invoice in invoices:
        if invoice.subtotal:
            gross += invoice.subtotal
        if invoice.amount:
            net += invoice.amount
    return gross, net
def edit(request,
         form_model,
         model,
         url_name,
         template,
         amount=None,
         client=None,
         clients=[],
         company=None,
         context={},
         gross=None,
         kwargs={},
         net=None,
         pk=None,
         paid_amount=None,
         project=None,
         projects=[],
         subtotal=None,
         task=None,
         tasks=[]):
    """
    Generic create/edit view shared by every model in the app.

    ``pk is None`` means "create"; otherwise the existing object is
    loaded. POSTs additionally handle copy, delete, activate/publish
    checkboxes and invoice amount updates, each returning an immediate
    redirect; a valid form save redirects to ``url_name``.

    NOTE(review): the mutable defaults (``clients=[]``, ``context={}``,
    ``kwargs={}``, ``projects=[]``, ``tasks=[]``) are shared across
    calls, and ``context`` IS mutated at the bottom of this function —
    callers that omit ``context`` share one accumulating dict.
    """
    obj = None
    if pk is None:
        form = form_model()
        # Populate new report with gross and net calculated
        # from active invoices
        if form._meta.model._meta.verbose_name == 'report':
            obj = model(gross=gross, net=net)
            form = form_model(instance=obj)
        # Limit time entry project, client
        # and task choices
        if form._meta.model._meta.verbose_name == 'time':
            form.fields['project'].queryset = projects
            form.fields['client'].queryset = clients
            form.fields['task'].queryset = tasks
        # Limit project client choices
        if form._meta.model._meta.verbose_name == 'project':
            form.fields['client'].queryset = clients
        # Populate time entry form fields with project, client
        # and task values
        if project:
            entry = model(project=project,
                          client=project.client,
                          task=project.task)
            form = form_model(instance=entry)
        # Populate time entry form fields with client and
        # task values
        elif client and task:
            entry = model(client=client, task=task)
            form = form_model(instance=entry)
        # Populate project entry form fields with client value
        elif client:
            entry = model(client=client)
            form = form_model(instance=entry)
        # Populate time entry form fields with task value
        elif task:
            entry = model(task=task)
            form = form_model(instance=entry)
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        if pk is None:
            form = form_model(request.POST)
        else:
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            if copy:
                # pk=None + save() duplicates the row (Django idiom).
                dup = obj
                dup.pk = None
                dup.save()
                kwargs = {}
                kwargs['pk'] = dup.pk
                if obj._meta.verbose_name == 'time':
                    url_name = 'entry_edit'
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            if delete:
                url_name = 'home'
                # Decrement invoice counter
                if (obj._meta.verbose_name == 'invoice' and
                        company.invoice_counter):
                    company.invoice_counter -= 1
                    company.save()
                # Decrement estimate counter
                if (obj._meta.verbose_name == 'estimate' and
                        company.estimate_counter):
                    company.estimate_counter -= 1
                    company.save()
                # Redir to appropriate index
                if obj._meta.verbose_name == 'client':
                    url_name = 'client_index'
                if obj._meta.verbose_name == 'contact':
                    url_name = 'contact_index'
                if obj._meta.verbose_name == 'task':
                    url_name = 'task_index'
                if obj._meta.verbose_name == 'time':
                    url_name = 'entry_index'
                if obj._meta.verbose_name == 'project':
                    url_name = 'project_index'
                if obj._meta.verbose_name == 'report':
                    url_name = 'report_index'
                obj.delete()
                return HttpResponseRedirect(reverse(url_name))
            checkbox = request.POST.get('checkbox')
            checkbox_publish = request.POST.get('checkbox-publish')
            # Redir to appropriate index for checkbox & checkbox_publish
            if obj._meta.verbose_name == 'client':
                url_name = 'client_index'
            if obj._meta.verbose_name == 'contact':
                url_name = 'contact_index'
                if client:
                    url_name = 'client'
                    kwargs['pk'] = client.pk
            if obj._meta.verbose_name == 'project':
                url_name = 'project_index'
            if obj._meta.verbose_name == 'task':
                url_name = 'task_index'
            # 'on' activates, 'off' deactivates; anything else is ignored.
            if checkbox == 'on' or checkbox == 'off':
                kwargs = {}
                if checkbox == 'on':
                    obj.active = True
                else:
                    obj.active = False
                obj.save()
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            if checkbox_publish == 'on' or checkbox_publish == 'off':
                kwargs = {}
                if checkbox_publish == 'on':
                    obj.active = True
                else:
                    obj.active = False
                obj.save()
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            # Invoice figure updates, most-specific combination first.
            if amount and subtotal and paid_amount:
                obj.amount = amount
                obj.subtotal = subtotal
                obj.paid_amount = paid_amount
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            elif amount and subtotal:
                obj.amount = amount
                obj.subtotal = subtotal
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            elif amount:
                obj.amount = amount
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            # Time entry
            if obj.__class__.__name__ == 'Time' and pk is None:
                # Assign user to time entry on creation
                obj.user = User.objects.get(username=request.user)
                obj.save()
                # Send mail when time entry created
                sender = settings.DEFAULT_FROM_EMAIL
                subject = 'Time entry'
                message = '%s entered time! %s' % (
                    obj.user.username,
                    obj.get_absolute_url(request.get_host()))
                recipients = [settings.DEFAULT_FROM_EMAIL, ]
                send_mail(subject,
                          message,
                          sender,
                          recipients,
                          fail_silently=False)
            # Assign and increment invoice counter
            if (obj._meta.verbose_name == 'invoice' and
                    company.invoice_counter and pk is None):
                company.invoice_counter += 1
                company.save()
                obj.document_id = company.invoice_counter
                obj.save()
            # Assign and increment estimate counter
            if (obj._meta.verbose_name == 'estimate' and
                    company.estimate_counter and pk is None):
                company.estimate_counter += 1
                company.save()
                obj.document_id = company.estimate_counter
                obj.save()
            # Assign client to invoice
            if obj._meta.verbose_name == 'invoice' and obj.project:
                if obj.project.client and not obj.client:
                    obj.client = obj.project.client
                    obj.save()
            return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    context['item'] = obj
    context['form'] = form
    return render(request, template, context)
def entries_total(queryset):
    """
    Add estimate and invoice time entries, could be an aggregate
    (https://docs.djangoproject.com/en/1.9/topics/db/aggregation/)

    Returns (entries, running_total_co, running_total_dev,
    running_total_hours, total) where ``entries`` maps each entry to a
    dict of display values, ``co`` is billed at the task rate, ``dev``
    at the user's profile rate, and total = co - dev.
    """
    entries = OrderedDict()
    total = 0
    running_total_co = 0
    running_total_dev = 0
    running_total_hours = 0
    for entry in queryset:
        entries[entry] = {}
        hours = entry.hours
        if hours:
            running_total_hours += hours
        entries[entry]['date'] = entry.date
        entries[entry]['hours'] = hours
        entries[entry]['notes'] = entry.notes
        entries[entry]['pk'] = entry.pk
        entries[entry]['user'] = entry.user
        entries[entry]['task'] = entry.task
        line_total = 0
        line_total_co = 0
        line_total_dev = 0
        line_total_client = 0
        if entry.task:
            rate = entry.task.rate
            entries[entry]['rate'] = rate
            # NOTE(review): rate * hours with hours=None would raise
            # TypeError — confirm entries with a rated task always have
            # hours set.
            if rate:
                line_total_co = rate * hours
                entries[entry]['line_total_co'] = line_total_co
                running_total_co += line_total_co
        if entry.user and entry.project:
            if hasattr(entry.user, 'profile'):
                if entry.user.profile.rate:
                    line_total_dev = entry.user.profile.rate * hours
                    entries[entry]['line_total_dev'] = line_total_dev
                    running_total_dev += line_total_dev
        # Project entries show the full company rate to the client and
        # keep the margin (co - dev) as the line total.
        if entry.project:
            line_total = line_total_co - line_total_dev
            line_total_client = line_total_co
            entries[entry]['line_total_client'] = '%.2f' % line_total_client
        else:
            line_total = line_total_co
        entries[entry]['line_total'] = '%.2f' % line_total
        total = running_total_co - running_total_dev
    return (entries, running_total_co, running_total_dev, running_total_hours,
            total)
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar
    """
    # GRAVATAR_URL is expected to carry a '%s' slot for the hex digest;
    # Gravatar requires the address lowercased before hashing.
    # NOTE(review): uses the Python 2 `md5` module imported above.
    return settings.GRAVATAR_URL % md5(email.lower()).hexdigest()
def last_month():
    """Return the last day of the previous month."""
    start_of_month = timezone.now().replace(day=1)
    return start_of_month - timezone.timedelta(days=1)
def paginate(items, page, page_size=10):
    """
    Return page ``page`` of ``items``, ``page_size`` per page (default
    10, matching the previous hard-coded value) with up to 5 orphans;
    a non-integer page falls back to page 1, an out-of-range page to
    the last page.
    """
    paginator = Paginator(items, page_size, orphans=5)
    try:
        items = paginator.page(page)
    except PageNotAnInteger:
        items = paginator.page(1)
    except EmptyPage:
        items = paginator.page(paginator.num_pages)
    return items
def search(request, model, fields, order_by=None, context=None):
    """
    Filter ``model`` rows for the request's search term.

    ``fields`` are matched with icontains; a term shaped MM/DD/YYYY
    queries the date field instead. ``?active=...`` returns only "open"
    rows (staff see them, others get an empty list). Returns a
    (context, results) tuple; results are paginated unless a search
    term was submitted.
    """
    # Bug fix: the old mutable default `context={}` was shared across
    # calls; an explicit empty dict is created per call instead.
    if context is None:
        context = {}
    results = []
    query = []
    active = request.GET.get('active')
    page = request.GET.get('page')
    search = None
    if active:
        if model._meta.verbose_name == 'time':
            results = model.objects.filter(invoiced=False, estimate=None)
        elif model._meta.verbose_name == 'invoice':
            results = model.objects.filter(last_payment_date=None)
        elif model._meta.verbose_name == 'estimate':
            results = model.objects.filter(accepted_date=None)
        else:
            results = model.objects.filter(active=True)
        results = results.order_by(order_by)
        if request.user.is_staff:
            return context, results
        else:
            return context, []
    if request.POST:
        search = request.POST.get('search', '')
        if 'date' in fields:
            # MM/DD/YYYY search terms query the date field directly.
            expr = re.compile(r'(\d\d)/(\d\d)/(\d\d\d\d)')
            if expr.match(search):
                match = list(expr.match(search).groups())
                match.reverse()  # now [year, day, month]
                dt = datetime.date(int(match[0]), int(match[2]), int(match[1]))
                results = model.objects.filter(date__day=dt.day,
                                               date__month=dt.month,
                                               date__year=dt.year)
            else:
                for field in fields:
                    query.append(Q(**{field + '__icontains': search}))
                results = model.objects.filter(reduce(operator.or_, query))
        else:
            for field in fields:
                query.append(Q(**{field + '__icontains': search}))
            results = model.objects.filter(reduce(operator.or_, query))
    else:
        # Non-staff users only ever see their own time entries.
        if model._meta.verbose_name == 'time':
            if request.user.is_staff:
                results = model.objects.all()
            else:
                results = model.objects.filter(user=request.user)
        else:
            results = model.objects.all()
        if order_by:
            results = results.order_by(order_by)
    if not search:
        results = paginate(results, page)
    return context, results
Update
from collections import OrderedDict
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.utils import timezone
from import_export import widgets
from md5 import md5
import datetime
import operator
import re
class BooleanWidget(widgets.Widget):
    """Import/export widget mapping the literal string 'Yes' to True and
    every other value to False."""
    def clean(self, value):
        # Exactly 'Yes' is truthy; anything else (including None) is not.
        return value == 'Yes'
class DecimalWidget(widgets.Widget):
    """Import/export widget turning comma-grouped number strings into
    Decimal values; empty/falsy input becomes Decimal(0)."""
    def clean(self, value):
        if not value:
            return Decimal(0)
        # Strip thousands separators before parsing.
        return Decimal(value.replace(',', ''))
class UserWidget(widgets.Widget):
    """
    Pass-through widget: returns the imported value unchanged.
    """
    def clean(self, value):
        return value
def add_user_to_contacts(request, model, pk=None):
    """
    Create a contact record (``model``) from the User with primary key
    ``pk``.

    Requires a POST and a user with email, first and last name set;
    redirects to the contact index on success and to the user index
    otherwise. (The dead ``request.POST.get('contact')`` assignment,
    which was immediately overwritten, has been removed.)
    """
    if request.method == 'POST':
        if pk is None:
            return HttpResponseRedirect(reverse('user_index'))
        user = get_object_or_404(User, pk=pk)
        if not user.email or not user.first_name or not user.last_name:
            messages.add_message(request, messages.INFO,
                                 'No email no contact!')
            return HttpResponseRedirect(reverse('user_index'))
        contact = model(email=user.email,
                        active=True,
                        first_name=user.first_name,
                        last_name=user.last_name)
        contact.save()
        messages.add_message(request, messages.INFO,
                             'User added to contacts!')
        return HttpResponseRedirect(reverse('contact_index'))
    # NOTE(review): non-POST requests fall through and return None —
    # confirm the URLconf only routes POSTs here.
def class_name_pk(self):
    """Return '<lowercased class name>-<pk>' (e.g. for DOM ids)."""
    cls_part = type(self).__name__.lower()
    return '{0}-{1}'.format(cls_part, self.pk)
def dashboard_total(invoices):
    """Sum ``invoices`` into a (gross, net) tuple.

    ``gross`` accumulates truthy ``subtotal`` values and ``net`` truthy
    ``amount`` values; a side mapping of per-invoice figures is built
    exactly as before (it is local-only).
    """
    results = OrderedDict()
    gross = 0
    net = 0
    for invoice in invoices:
        figures = {'subtotal': invoice.subtotal,
                   'amount': invoice.amount}
        results[invoice] = figures
        # `or 0` skips None/zero values, matching the original guards.
        gross += invoice.subtotal or 0
        net += invoice.amount or 0
    return gross, net
def edit(request,
         form_model,
         model,
         url_name,
         template,
         amount=None,
         client=None,
         clients=None,
         company=None,
         context=None,
         gross=None,
         kwargs=None,
         net=None,
         pk=None,
         paid_amount=None,
         project=None,
         projects=None,
         subtotal=None,
         task=None,
         tasks=None):
    """
    Generic create/update/copy/delete view shared by every model.

    With no ``pk`` an unbound form is shown (optionally pre-populated
    from ``project``/``client``/``task`` or report totals); with a
    ``pk`` the existing object is edited.  POST handles the special
    ``copy``/``delete``/``checkbox``/``checkbox-publish`` buttons and
    invoice/estimate counter bookkeeping, then validates and saves the
    form and redirects to ``url_name``.  On GET (or invalid form) the
    ``template`` is rendered with ``item`` and ``form`` in ``context``.
    """
    # Bug fix: the old signature used mutable defaults ([], {}) that
    # are mutated below (kwargs['pk'] = ..., context[...] = ...), so
    # state leaked between calls.  Create fresh objects per call.
    clients = clients if clients is not None else []
    context = context if context is not None else {}
    kwargs = kwargs if kwargs is not None else {}
    projects = projects if projects is not None else []
    tasks = tasks if tasks is not None else []
    obj = None
    if pk is None:
        form = form_model()
        # Populate new report with gross and net calculated
        # from active invoices
        if form._meta.model._meta.verbose_name == 'report':
            obj = model(gross=gross, net=net)
            form = form_model(instance=obj)
        # Limit time entry project, client
        # and task choices
        if form._meta.model._meta.verbose_name == 'time':
            form.fields['project'].queryset = projects
            form.fields['client'].queryset = clients
            form.fields['task'].queryset = tasks
        # Limit project client choices
        if form._meta.model._meta.verbose_name == 'project':
            form.fields['client'].queryset = clients
        # Populate time entry form fields with project, client
        # and task values
        if project:
            entry = model(project=project,
                          client=project.client,
                          task=project.task)
            form = form_model(instance=entry)
        # Populate time entry form fields with client and
        # task values
        elif client and task:
            entry = model(client=client, task=task)
            form = form_model(instance=entry)
        # Populate project entry form fields with client value
        elif client:
            entry = model(client=client)
            form = form_model(instance=entry)
        # Populate time entry form fields with task value
        elif task:
            entry = model(task=task)
            form = form_model(instance=entry)
    else:
        obj = get_object_or_404(model, pk=pk)
        form = form_model(instance=obj)
    if request.method == 'POST':
        if pk is None:
            form = form_model(request.POST)
        else:
            copy = request.POST.get('copy')
            delete = request.POST.get('delete')
            if copy:
                # Duplicate by clearing the pk and re-saving.
                dup = obj
                dup.pk = None
                dup.save()
                kwargs = {}
                kwargs['pk'] = dup.pk
                if obj._meta.verbose_name == 'time':
                    url_name = 'entry_edit'
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            if delete:
                url_name = 'home'
                # Decrement invoice counter
                if (obj._meta.verbose_name == 'invoice' and
                        company.invoice_counter):
                    company.invoice_counter -= 1
                    company.save()
                # Decrement estimate counter
                if (obj._meta.verbose_name == 'estimate' and
                        company.estimate_counter):
                    company.estimate_counter -= 1
                    company.save()
                # Redir to appropriate index
                if obj._meta.verbose_name == 'client':
                    url_name = 'client_index'
                if obj._meta.verbose_name == 'contact':
                    url_name = 'contact_index'
                if obj._meta.verbose_name == 'task':
                    url_name = 'task_index'
                if obj._meta.verbose_name == 'time':
                    url_name = 'entry_index'
                if obj._meta.verbose_name == 'project':
                    url_name = 'project_index'
                if obj._meta.verbose_name == 'report':
                    url_name = 'report_index'
                obj.delete()
                return HttpResponseRedirect(reverse(url_name))
            checkbox = request.POST.get('checkbox')
            checkbox_publish = request.POST.get('checkbox-publish')
            # Redir to appropriate index for checkbox & checkbox_publish
            if obj._meta.verbose_name == 'client':
                url_name = 'client_index'
            if obj._meta.verbose_name == 'contact':
                url_name = 'contact_index'
                if client:
                    url_name = 'client'
                    kwargs['pk'] = client.pk
            if obj._meta.verbose_name == 'project':
                url_name = 'project_index'
            if obj._meta.verbose_name == 'task':
                url_name = 'task_index'
            if checkbox == 'on' or checkbox == 'off':
                kwargs = {}
                obj.active = (checkbox == 'on')
                obj.save()
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            if checkbox_publish == 'on' or checkbox_publish == 'off':
                kwargs = {}
                obj.published = (checkbox_publish == 'on')
                obj.save()
                return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
            # Totals recalculated by the caller are persisted directly.
            if amount and subtotal and paid_amount:
                obj.amount = amount
                obj.subtotal = subtotal
                obj.paid_amount = paid_amount
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            elif amount and subtotal:
                obj.amount = amount
                obj.subtotal = subtotal
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            elif amount:
                obj.amount = amount
                obj.save()
                return HttpResponseRedirect(reverse(url_name))
            form = form_model(request.POST, instance=obj)
        if form.is_valid():
            obj = form.save()
            # Time entry
            if obj.__class__.__name__ == 'Time' and pk is None:
                # Assign user to time entry on creation
                # NOTE(review): passing a User instance as the
                # ``username`` lookup value looks suspect -- confirm
                # this query works as intended.
                obj.user = User.objects.get(username=request.user)
                obj.save()
                # Send mail when time entry created
                sender = settings.DEFAULT_FROM_EMAIL
                subject = 'Time entry'
                message = '%s entered time! %s' % (
                    obj.user.username,
                    obj.get_absolute_url(request.get_host()))
                recipients = [settings.DEFAULT_FROM_EMAIL, ]
                send_mail(subject,
                          message,
                          sender,
                          recipients,
                          fail_silently=False)
            # Assign and increment invoice counter
            if (obj._meta.verbose_name == 'invoice' and
                    company.invoice_counter and pk is None):
                company.invoice_counter += 1
                company.save()
                obj.document_id = company.invoice_counter
                obj.save()
            # Assign and increment estimate counter
            if (obj._meta.verbose_name == 'estimate' and
                    company.estimate_counter and pk is None):
                company.estimate_counter += 1
                company.save()
                obj.document_id = company.estimate_counter
                obj.save()
            # Assign client to invoice
            if obj._meta.verbose_name == 'invoice' and obj.project:
                if obj.project.client and not obj.client:
                    obj.client = obj.project.client
                    obj.save()
            return HttpResponseRedirect(reverse(url_name, kwargs=kwargs))
    context['item'] = obj
    context['form'] = form
    return render(request, template, context)
def entries_total(queryset):
    """
    Aggregate billing data for a queryset of time entries.

    For every entry, records date/hours/notes/pk/user/task plus the
    company line total (task rate * hours), the developer line total
    (profile rate * hours, project entries only) and the client-facing
    total.  Could be a DB aggregate instead
    (https://docs.djangoproject.com/en/1.9/topics/db/aggregation/).

    Returns ``(entries, running_total_co, running_total_dev,
    running_total_hours, total)`` where ``total`` is company minus
    developer totals.
    """
    entries = OrderedDict()
    total = 0
    running_total_co = 0
    running_total_dev = 0
    running_total_hours = 0
    for entry in queryset:
        entries[entry] = {}
        hours = entry.hours
        if hours:
            running_total_hours += hours
        entries[entry]['date'] = entry.date
        entries[entry]['hours'] = hours
        entries[entry]['notes'] = entry.notes
        entries[entry]['pk'] = entry.pk
        entries[entry]['user'] = entry.user
        entries[entry]['task'] = entry.task
        line_total = 0
        line_total_co = 0
        line_total_dev = 0
        line_total_client = 0
        if entry.task:
            rate = entry.task.rate
            entries[entry]['rate'] = rate
            if rate:
                # NOTE(review): ``hours`` is not guarded here -- a rated
                # task with NULL hours would raise.  Confirm hours is
                # always set when a rate exists.
                line_total_co = rate * hours
                entries[entry]['line_total_co'] = line_total_co
                running_total_co += line_total_co
        if entry.user and entry.project:
            if hasattr(entry.user, 'profile'):
                if entry.user.profile.rate:
                    line_total_dev = entry.user.profile.rate * hours
                    entries[entry]['line_total_dev'] = line_total_dev
                    running_total_dev += line_total_dev
        if entry.project:
            # Project entries: company keeps the margin, the client is
            # billed the full company total.
            line_total = line_total_co - line_total_dev
            line_total_client = line_total_co
            entries[entry]['line_total_client'] = '%.2f' % line_total_client
        else:
            line_total = line_total_co
        entries[entry]['line_total'] = '%.2f' % line_total
    total = running_total_co - running_total_dev
    return (entries, running_total_co, running_total_dev, running_total_hours,
            total)
def gravatar_url(email):
    """
    MD5 hash of email address for use with Gravatar
    (interpolated into ``settings.GRAVATAR_URL``).
    """
    # NOTE(review): Python 2 ``md5`` API; under Python 3 this would need
    # hashlib.md5(email.lower().encode()).
    return settings.GRAVATAR_URL % md5(email.lower()).hexdigest()
def last_month():
    """
    Return a datetime falling on the last day of the previous month.

    Computed as the first day of the current month minus one day.
    """
    start_of_this_month = timezone.now().replace(day=1)
    return start_of_this_month - timezone.timedelta(days=1)
def paginate(items, page):
    """
    Return page ``page`` of ``items``, 10 per page (orphans=5).

    A non-integer page falls back to page 1; an out-of-range page
    clamps to the last page.
    """
    paginator = Paginator(items, 10, orphans=5)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def search(request, model, fields, order_by=None, context=None):
    """
    Filter ``model`` rows for the index pages.

    ``?active=...`` returns each model's notion of "active" rows
    (staff only -- others get an empty list).  A POSTed ``search`` term
    performs an OR ``icontains`` query over ``fields``, with an
    MM/DD/YYYY fast path when ``date`` is searchable.  Otherwise all
    rows are returned (scoped to the requesting user for time entries)
    and the result is paginated.

    Returns ``(context, results)``.
    """
    # Bug fix: the old mutable default ``context={}`` was shared across
    # calls; create a fresh dict per call.
    if context is None:
        context = {}
    results = []
    page = request.GET.get('page')
    search_term = None
    if request.GET.get('active'):
        verbose_name = model._meta.verbose_name
        # Each model defines "active" differently.
        if verbose_name == 'time':
            results = model.objects.filter(invoiced=False, estimate=None)
        elif verbose_name == 'invoice':
            results = model.objects.filter(last_payment_date=None)
        elif verbose_name == 'estimate':
            results = model.objects.filter(accepted_date=None)
        else:
            results = model.objects.filter(active=True)
        results = results.order_by(order_by)
        if request.user.is_staff:
            return context, results
        return context, []
    if request.POST:
        search_term = request.POST.get('search', '')
        match = None
        if 'date' in fields:
            # Raw string (the old non-raw pattern relied on deprecated
            # escape handling); matched once instead of twice.
            match = re.match(r'(\d\d)/(\d\d)/(\d\d\d\d)', search_term)
        if match:
            month, day, year = (int(g) for g in match.groups())
            dt = datetime.date(year, month, day)
            results = model.objects.filter(date__day=dt.day,
                                           date__month=dt.month,
                                           date__year=dt.year)
        else:
            # OR together one icontains lookup per searchable field.
            query = [Q(**{field + '__icontains': search_term})
                     for field in fields]
            results = model.objects.filter(reduce(operator.or_, query))
    else:
        if model._meta.verbose_name == 'time' and not request.user.is_staff:
            # Non-staff users only ever see their own time entries.
            results = model.objects.filter(user=request.user)
        else:
            results = model.objects.all()
        if order_by:
            results = results.order_by(order_by)
    if not search_term:
        results = paginate(results, page)
    return context, results
# ----------------------------------------------------------------------
from django.http import HttpResponse
import datetime
import json
from api.models import Client, Event, Coc
from django.http import HttpResponseRedirect
import collections
from decimal import Decimal
from django.forms.models import model_to_dict
from django.db.models import F
from dateutil import parser
from django.db.models import Q
from django.contrib.auth import logout
from custom_decorators import print_errors
def json_custom_parser(obj):
    """
    ``json.dumps`` fallback serializer.

    Decimals become strings, non-string iterables become lists, and
    datetimes/dates are rendered as second-precision ISO-8601 strings.
    Anything else raises TypeError, as json.dumps expects.
    """
    if isinstance(obj, Decimal):
        return str(obj)
    if not isinstance(obj, basestring) and isinstance(obj, collections.Iterable):
        return list(obj)
    if isinstance(obj, (datetime.datetime, datetime.date)):
        # Truncate past seconds: 'YYYY-MM-DDTHH:MM:SS' is 19 chars.
        return obj.isoformat()[:19]
    raise TypeError(obj)
def ask_for_help(request):
    """
    Record a client-initiated "referral" event.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    # Bug fix: the body must be decoded from JSON before it can be used
    # as a dict (the raw body was previously assigned and subscripted).
    user_input = json.loads(request.body)
    user_input['referred_from_coc_location_id'] = -1 #From client
    user_input['event_type'] = "referral"
    Event(**user_input).save()
    return HttpResponse(json.dumps({
        "status": "success"
    }, default=json_custom_parser), content_type='application/json', status=200)
def log_note(request):
    """
    Record a free-form "note" event against a client.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "note"
    Event(**payload).save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def grant_bed(request):
    """
    Record a "shelter" event and decrement the location's bed count.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "shelter"
    Event(**payload).save()
    # Atomic decrement via an F() expression (no read-modify-write).
    location = Coc.objects.get(id=payload['coc_location_id'])
    location.beds_available = F('beds_available') - 1
    location.save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def submit_referral(request):
    """
    Record a CoC-to-CoC "referral" event.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "",
         "referred_from_coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "referral"
    Event(**payload).save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def update_client_info(request):
    """
    Bulk-update a client row from a JSON payload keyed by ``id``.

    The phone number is normalised to digits only before the update is
    applied via ``QuerySet.update``.
    """
    payload = json.loads(request.body)
    digits = [ch for ch in payload['phone_number'] if ch in '1234567890']
    payload['phone_number'] = ''.join(digits)
    Client.objects.filter(id=payload['id']).update(**payload)
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def get_client_info(request):
    """
    Return a client's record plus all of their events, each annotated
    with the CoC name and the client's name/phone number.

    Expected JSON body: {"id": "3"}
    """
    user_input = json.loads(request.body)
    client_id = user_input['id']
    data = model_to_dict(Client.objects.get(id=client_id))
    data['events'] = []
    client_events = Event.objects.filter(client_id=client_id)
    coc_id_list = []
    client_id_list = []
    for ce in client_events:
        coc_id_list.append(ce.coc_location_id)
        client_id_list.append(ce.client_id)
    coc_name_lookup = {}
    for c in Coc.objects.filter(id__in=coc_id_list):
        coc_name_lookup[str(c.id)] = c.name
    client_deets_lookup = {}
    for c in Client.objects.filter(id__in=client_id_list):
        # NOTE(review): assumes middle_name is never NULL -- confirm.
        client_deets_lookup[str(c.id)] = {
            "name": c.first_name + " " + c.middle_name + " " + c.last_name,
            "phone_number": c.phone_number
        }
    for ce in client_events:
        ev = model_to_dict(ce)
        # Bug fix: the lookup tables are keyed by str(id) but
        # model_to_dict yields integer ids -- convert before indexing
        # (this was a guaranteed KeyError).  Debug prints removed.
        coc_key = str(ev['coc_location_id'])
        client_key = str(ev['client_id'])
        ev['coc_name'] = coc_name_lookup[coc_key]
        ev['client_name'] = client_deets_lookup[client_key]['name']
        ev['phone_number'] = client_deets_lookup[client_key]['phone_number']
        data['events'].append(ev)
    return HttpResponse(json.dumps({
        "status": "success",
        "data": data
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_clients(request):
    """
    Fuzzy client search by phone number and name.

    Expected JSON body: {"phone_number": "...", "name": "first [last]"}
    Results are ordered best-match first (exact phone, full name, last
    name, first name) with duplicates suppressed.
    """
    user_input = json.loads(request.body)
    results = []
    matches = {}
    # Bug fix: split the whole name (the old code took [0] of the
    # split, so first/last were the first and last *letters* of the
    # first word).
    name_pieces = user_input['name'].split(' ')
    first_name = name_pieces[0]
    last_name = name_pieces[-1]

    def collect(queryset):
        # Append each previously-unseen client exactly once.
        for c in queryset:
            # Bug fix: model instances are not subscriptable -- c.id.
            if c.id not in matches: #block duplicates
                matches[c.id] = 1
                results.append(model_to_dict(c))

    #Matching phone numbers
    collect(Client.objects.filter(phone_number=user_input['phone_number']))
    #Matching Full Names
    collect(Client.objects.filter(first_name=first_name, last_name=last_name))
    #Matching Last Names
    collect(Client.objects.filter(last_name=last_name))
    #Matching First Names
    collect(Client.objects.filter(first_name=first_name))
    return HttpResponse(json.dumps({
        "status": "success",
        "data": results
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_cocs(request):
    """
    Return every CoC location as a list of dicts.

    The JSON body is parsed (and must be valid) but its contents are
    not used.
    """
    user_input = json.loads(request.body)
    results = [model_to_dict(location) for location in Coc.objects.all()]
    return HttpResponse(json.dumps({
        "status": "success",
        "data": results
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_coc_info(request):
    """
    Return a CoC location's record plus its inbound referral events.

    Expected JSON body: {"id": 3}
    """
    user_input = json.loads(request.body)
    # (Removed a duplicate, immediately-overwritten Coc.objects.get.)
    data = model_to_dict(Coc.objects.get(id=user_input['id']))
    coc_events = Event.objects.filter(
        coc_location_id=user_input['id']).exclude(referred_from_coc_location_id=0)
    # Bug fix: serialize the events -- a raw queryset of model
    # instances is not JSON-serializable (json_custom_parser would
    # listify it and then raise TypeError on each Event).
    data['events'] = [model_to_dict(ev) for ev in coc_events]
    return HttpResponse(json.dumps({
        "status": "success",
        "data": data
    }, default=json_custom_parser), content_type='application/json', status=200)
def load_frontend(request):
    # The SPA is served as a static file; just bounce the browser there.
    return HttpResponseRedirect("/static/index.html")
@print_errors
def sms_received(request):
    """
    Twilio SMS webhook implementing a small shelter-finder dialogue.

    Conversation progress lives in the Django session (keys
    ``greeting``, ``location``, ``status``, ``special_status``); each
    reply advances one step and responds with TwiML.  Texting
    "restart" (or 5 minutes of silence) resets the conversation.
    """
    #Automatically reset user's sessions if they haven't communicated in 5 minutes
    if 'last_validated' in request.session:
        session_expiry = (parser.parse(request.session.get('last_validated', '2000-01-01')) + datetime.timedelta(minutes=5))
        if session_expiry < datetime.datetime.now():
            print "Session expired! Session expiry time", session_expiry, " | current time", datetime.datetime.now()
            del request.session['last_validated']
            logout(request)
    else:
        # NOTE(review): the stamp is only written when absent, so an
        # active session is never refreshed -- confirm intended.
        request.session['last_validated'] = datetime.datetime.now().isoformat()
    input_from_user = request.POST.get('Body', '')
    if input_from_user.lower() == "restart":
        logout(request)
    if 'greeting' not in request.session:
        #New user!
        request.session['greeting'] = input_from_user
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Need shelter for the night? We can help. Reply to this message with your current location (e.g. 911 Washington Ave, Saint Louis, MO 63304)</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'location' not in request.session:
        request.session['location'] = input_from_user
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Reply to this with a number 1-3 corresponding to what best describes you. Are you a single male (1), a single female (2), or a family (3)?</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'status' not in request.session:
        if input_from_user == '1':
            request.session['status'] = "single_male"
        elif input_from_user == '2':
            request.session['status'] = "single_female"
        elif input_from_user == '3':
            request.session['status'] = "family"
        else:
            # Invalid answer: re-ask the same question.
            twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Invalid response, must reply with a number 1-3. Are you a single male (1), a single female (2), or a family (3)?</Message>
            </Response>
            '''
            return HttpResponse(twil, content_type='application/xml', status=200)
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Reply to this with a number 1-3 corresponding to what best describes you. Are you a veteran (1), pregnant (2), or neither (3)?</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'special_status' not in request.session:
        if input_from_user == '1':
            request.session['special_status'] = "veteran"
        elif input_from_user == '2':
            request.session['special_status'] = "pregnant"
        elif input_from_user == '3':
            request.session['special_status'] = "none"
        else:
            twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Invalid response, must reply with a number 1-3. Are you a veteran (1), pregnant (2), or neither (3)?</Message>
            </Response>
            '''
            return HttpResponse(twil, content_type='application/xml', status=200)
    #Find best matching CoC
    #Todo add filters for single male/female/vet etc
    #Todo add filter by distance from location
    best_shelter = Coc.objects.filter(coc_type="shelter").order_by('?')[0]
    twil = '<?xml version="1.0" encoding="UTF-8"?> \
    <Response> \
    <Message method="GET">Your best bet is '+best_shelter.name+', located at '+best_shelter.address+'. Their phone number is '+best_shelter.phone_number+' and they currently have '+str(best_shelter.beds_available)+' beds available.</Message> \
    </Response> \
    '
    return HttpResponse(twil, content_type='application/xml', status=200)
# add requested data
from django.http import HttpResponse
import datetime
import json
from api.models import Client, Event, Coc
from django.http import HttpResponseRedirect
import collections
from decimal import Decimal
from django.forms.models import model_to_dict
from django.db.models import F
from dateutil import parser
from django.db.models import Q
from django.contrib.auth import logout
from custom_decorators import print_errors
def json_custom_parser(obj):
    """
    ``json.dumps`` fallback serializer.

    Decimals become strings, non-string iterables become lists, and
    datetimes/dates are rendered as second-precision ISO-8601 strings.
    Anything else raises TypeError, as json.dumps expects.
    """
    if isinstance(obj, Decimal):
        return str(obj)
    if not isinstance(obj, basestring) and isinstance(obj, collections.Iterable):
        return list(obj)
    if isinstance(obj, (datetime.datetime, datetime.date)):
        # Truncate past seconds: 'YYYY-MM-DDTHH:MM:SS' is 19 chars.
        return obj.isoformat()[:19]
    raise TypeError(obj)
def ask_for_help(request):
    """
    Record a client-initiated "referral" event.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    # Bug fix: the body must be decoded from JSON before it can be used
    # as a dict (the raw body was previously assigned and subscripted).
    user_input = json.loads(request.body)
    user_input['referred_from_coc_location_id'] = -1 #From client
    user_input['event_type'] = "referral"
    Event(**user_input).save()
    return HttpResponse(json.dumps({
        "status": "success"
    }, default=json_custom_parser), content_type='application/json', status=200)
def log_note(request):
    """
    Record a free-form "note" event against a client.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "note"
    Event(**payload).save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def grant_bed(request):
    """
    Record a "shelter" event and decrement the location's bed count.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "shelter"
    Event(**payload).save()
    # Atomic decrement via an F() expression (no read-modify-write).
    location = Coc.objects.get(id=payload['coc_location_id'])
    location.beds_available = F('beds_available') - 1
    location.save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def submit_referral(request):
    """
    Record a CoC-to-CoC "referral" event.

    Expected JSON body:
        {"client_id": "", "coc_location_id": "",
         "referred_from_coc_location_id": "", "details": ""}
    """
    payload = json.loads(request.body)
    payload['event_type'] = "referral"
    Event(**payload).save()
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def update_client_info(request):
    """
    Bulk-update a client row from a JSON payload keyed by ``id``.

    The phone number is normalised to digits only before the update is
    applied via ``QuerySet.update``.
    """
    payload = json.loads(request.body)
    digits = [ch for ch in payload['phone_number'] if ch in '1234567890']
    payload['phone_number'] = ''.join(digits)
    Client.objects.filter(id=payload['id']).update(**payload)
    body = json.dumps({"status": "success"}, default=json_custom_parser)
    return HttpResponse(body, content_type='application/json', status=200)
def get_client_info(request):
    """
    Return a client's record plus all of their events, each annotated
    with the CoC name and the client's name/phone number.

    Expected JSON body: {"id": "3"}
    """
    user_input = json.loads(request.body)
    client_id = user_input['id']
    data = model_to_dict(Client.objects.get(id=client_id))
    data['events'] = []
    client_events = Event.objects.filter(client_id=client_id)
    coc_id_list = []
    client_id_list = []
    for ce in client_events:
        coc_id_list.append(ce.coc_location_id)
        client_id_list.append(ce.client_id)
    coc_name_lookup = {}
    for c in Coc.objects.filter(id__in=coc_id_list):
        coc_name_lookup[str(c.id)] = c.name
    client_deets_lookup = {}
    for c in Client.objects.filter(id__in=client_id_list):
        # NOTE(review): assumes middle_name is never NULL -- confirm.
        client_deets_lookup[str(c.id)] = {
            "name": c.first_name + " " + c.middle_name + " " + c.last_name,
            "phone_number": c.phone_number
        }
    for ce in client_events:
        ev = model_to_dict(ce)
        # Bug fix: the lookup tables are keyed by str(id) but
        # model_to_dict yields integer ids -- convert before indexing
        # (this was a guaranteed KeyError).  Debug print removed.
        coc_key = str(ev['coc_location_id'])
        client_key = str(ev['client_id'])
        ev['coc_name'] = coc_name_lookup[coc_key]
        ev['client_name'] = client_deets_lookup[client_key]['name']
        ev['client_phone_number'] = client_deets_lookup[client_key]['phone_number']
        data['events'].append(ev)
    return HttpResponse(json.dumps({
        "status": "success",
        "data": data
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_clients(request):
    """
    Fuzzy client search by phone number and name.

    Expected JSON body: {"phone_number": "...", "name": "first [last]"}
    Results are ordered best-match first (exact phone, full name, last
    name, first name) with duplicates suppressed.
    """
    user_input = json.loads(request.body)
    results = []
    matches = {}
    # Bug fix: split the whole name (the old code took [0] of the
    # split, so first/last were the first and last *letters* of the
    # first word).
    name_pieces = user_input['name'].split(' ')
    first_name = name_pieces[0]
    last_name = name_pieces[-1]

    def collect(queryset):
        # Append each previously-unseen client exactly once.
        for c in queryset:
            # Bug fix: model instances are not subscriptable -- c.id.
            if c.id not in matches: #block duplicates
                matches[c.id] = 1
                results.append(model_to_dict(c))

    #Matching phone numbers
    collect(Client.objects.filter(phone_number=user_input['phone_number']))
    #Matching Full Names
    collect(Client.objects.filter(first_name=first_name, last_name=last_name))
    #Matching Last Names
    collect(Client.objects.filter(last_name=last_name))
    #Matching First Names
    collect(Client.objects.filter(first_name=first_name))
    return HttpResponse(json.dumps({
        "status": "success",
        "data": results
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_cocs(request):
    """
    Return every CoC location as a list of dicts.

    The JSON body is parsed (and must be valid) but its contents are
    not used.
    """
    user_input = json.loads(request.body)
    results = [model_to_dict(location) for location in Coc.objects.all()]
    return HttpResponse(json.dumps({
        "status": "success",
        "data": results
    }, default=json_custom_parser), content_type='application/json', status=200)
def get_coc_info(request):
    """
    Return a CoC location's record plus its inbound referral events,
    each annotated with the CoC name and client name/phone number.

    Expected JSON body: {"id": 3}
    """
    # Bug fix: restore reading the id from the request body -- a
    # hard-coded debugging payload {"id": 10} had been left in place.
    user_input = json.loads(request.body)
    # (Removed a duplicate, immediately-overwritten Coc.objects.get.)
    data = model_to_dict(Coc.objects.get(id=user_input['id']))
    data['events'] = []
    client_events = Event.objects.filter(coc_location_id=user_input['id']).exclude(referred_from_coc_location_id=0)
    coc_id_list = []
    client_id_list = []
    for ce in client_events:
        coc_id_list.append(ce.coc_location_id)
        client_id_list.append(ce.client_id)
    coc_name_lookup = {}
    for c in Coc.objects.filter(id__in=coc_id_list):
        coc_name_lookup[str(c.id)] = c.name
    client_deets_lookup = {}
    for c in Client.objects.filter(id__in=client_id_list):
        # NOTE(review): assumes middle_name is never NULL -- confirm.
        client_deets_lookup[str(c.id)] = {
            "name": c.first_name + " " + c.middle_name + " " + c.last_name,
            "phone_number": c.phone_number
        }
    for ce in client_events:
        ev = model_to_dict(ce)
        # Bug fix: lookup tables are keyed by str(id); convert the int
        # ids from model_to_dict before indexing.  Debug print removed.
        coc_key = str(ev['coc_location_id'])
        client_key = str(ev['client_id'])
        ev['coc_name'] = coc_name_lookup[coc_key]
        ev['client_name'] = client_deets_lookup[client_key]['name']
        ev['client_phone_number'] = client_deets_lookup[client_key]['phone_number']
        data['events'].append(ev)
    return HttpResponse(json.dumps({
        "status": "success",
        "data": data
    }, default=json_custom_parser), content_type='application/json', status=200)
def load_frontend(request):
    # The SPA is served as a static file; just bounce the browser there.
    return HttpResponseRedirect("/static/index.html")
@print_errors
def sms_received(request):
    """
    Twilio SMS webhook implementing a small shelter-finder dialogue.

    Conversation progress lives in the Django session (keys
    ``greeting``, ``location``, ``status``, ``special_status``); each
    reply advances one step and responds with TwiML.  Texting
    "restart" (or 5 minutes of silence) resets the conversation.
    """
    #Automatically reset user's sessions if they haven't communicated in 5 minutes
    if 'last_validated' in request.session:
        session_expiry = (parser.parse(request.session.get('last_validated', '2000-01-01')) + datetime.timedelta(minutes=5))
        if session_expiry < datetime.datetime.now():
            print "Session expired! Session expiry time", session_expiry, " | current time", datetime.datetime.now()
            del request.session['last_validated']
            logout(request)
    else:
        # NOTE(review): the stamp is only written when absent, so an
        # active session is never refreshed -- confirm intended.
        request.session['last_validated'] = datetime.datetime.now().isoformat()
    input_from_user = request.POST.get('Body', '')
    if input_from_user.lower() == "restart":
        logout(request)
    if 'greeting' not in request.session:
        #New user!
        request.session['greeting'] = input_from_user
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Need shelter for the night? We can help. Reply to this message with your current location (e.g. 911 Washington Ave, Saint Louis, MO 63304)</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'location' not in request.session:
        request.session['location'] = input_from_user
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Reply to this with a number 1-3 corresponding to what best describes you. Are you a single male (1), a single female (2), or a family (3)?</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'status' not in request.session:
        if input_from_user == '1':
            request.session['status'] = "single_male"
        elif input_from_user == '2':
            request.session['status'] = "single_female"
        elif input_from_user == '3':
            request.session['status'] = "family"
        else:
            # Invalid answer: re-ask the same question.
            twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Invalid response, must reply with a number 1-3. Are you a single male (1), a single female (2), or a family (3)?</Message>
            </Response>
            '''
            return HttpResponse(twil, content_type='application/xml', status=200)
        twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Reply to this with a number 1-3 corresponding to what best describes you. Are you a veteran (1), pregnant (2), or neither (3)?</Message>
            </Response>
        '''
        return HttpResponse(twil, content_type='application/xml', status=200)
    elif 'special_status' not in request.session:
        if input_from_user == '1':
            request.session['special_status'] = "veteran"
        elif input_from_user == '2':
            request.session['special_status'] = "pregnant"
        elif input_from_user == '3':
            request.session['special_status'] = "none"
        else:
            twil = '''<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Message method="GET">Invalid response, must reply with a number 1-3. Are you a veteran (1), pregnant (2), or neither (3)?</Message>
            </Response>
            '''
            return HttpResponse(twil, content_type='application/xml', status=200)
    #Find best matching CoC
    #Todo add filters for single male/female/vet etc
    #Todo add filter by distance from location
    best_shelter = Coc.objects.filter(coc_type="shelter").order_by('?')[0]
    twil = '<?xml version="1.0" encoding="UTF-8"?> \
    <Response> \
    <Message method="GET">Your best bet is '+best_shelter.name+', located at '+best_shelter.address+'. Their phone number is '+best_shelter.phone_number+' and they currently have '+str(best_shelter.beds_available)+' beds available.</Message> \
    </Response> \
    '
    return HttpResponse(twil, content_type='application/xml', status=200)
# ----------------------------------------------------------------------
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ReportForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Note
from .models import Profile
from .models import Project
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import index_items
from .utils import certbot_data
from .utils import daily_burn
from .utils import dashboard_items
from .utils import dashboard_totals
from .utils import edit
from .utils import entries_total
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
    """
    REST endpoint exposing published clients, ordered by name.
    """
    queryset = Client.objects.filter(published=True).order_by('name')
    serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
    """
    REST endpoint exposing active services, ordered by name.
    """
    queryset = Service.objects.filter(active=True).order_by('name')
    serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
    """
    REST endpoint exposing active testimonials, newest first.
    """
    queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
    serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """
    REST endpoint exposing published user profiles, ordered by the
    user's first name.
    """
    queryset = Profile.objects.filter(
        published=True).order_by('user__first_name')
    serializer_class = ProfileSerializer
def certbot(request):  # http://stackoverflow.com/a/24817024
    """
    Serve the ACME challenge payload for certbot domain validation.
    """
    # Bug fix: the helper is imported as ``certbot_data`` -- the old
    # ``cerbot_data`` typo raised NameError on every request.
    data = certbot_data()
    return HttpResponse(data)
@staff_member_required
def client(request, pk=None):
    """
    Client detail page: the client plus its contacts and projects.
    """
    client = get_object_or_404(Client, pk=pk)
    context = {
        'edit_url': 'client_edit',
        'item': client,
        'contacts': Contact.objects.filter(client=client).order_by('-pk'),
        'projects': Project.objects.filter(
            client=client).order_by('-start_date'),
    }
    return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
    """Create a new client, or edit the one identified by ``pk``."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'client' if pk else 'client_index'
    return edit(
        request,
        ClientForm,
        Client,
        url_name,
        'client_edit.html',
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def client_index(request):
    """List clients, searchable on address and name, newest first."""
    context = index_items(
        request, Client, ('address', 'name'), order_by='-pk')
    context['edit_url'] = 'client_edit'  # Delete form modal
    return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
    # Company is a singleton (get_solo is used elsewhere), so pk is fixed
    # at 1 regardless of the URL argument.
    return edit(
        request, CompanyForm, Company, 'company', 'company_edit.html', pk=1)
@staff_member_required
def company(request):
    """Display the singleton company record."""
    context = {'company': Company.get_solo()}
    return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
    """Display a single contact."""
    context = {
        'edit_url': 'contact_edit',  # Delete form modal
        'item': get_object_or_404(Contact, pk=pk),
    }
    return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
    """Create or edit a contact, optionally pre-bound to a client."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'contact' if pk else 'contact_index'
    client = request.GET.get('client')
    if client:
        client = get_object_or_404(Client, pk=client)
        # When arriving from a client page, return to the contact list.
        url_name = 'contact_index'
    return edit(
        request,
        ContactForm,
        Contact,
        url_name,
        'contact_edit.html',
        client=client,
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def contact_index(request):
    """List contacts, searchable on name, email and notes."""
    search_fields = ('first_name', 'last_name', 'email', 'notes')
    context = index_items(request, Contact, search_fields, order_by='-pk')
    context['edit_url'] = 'contact_edit'  # Delete form modal
    return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
    """Send a one-off email to a contact via a subject/message form."""
    contact = get_object_or_404(Contact, pk=pk)
    if request.method == 'POST':
        form = MailForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            send_mail(
                request, data['subject'], data['message'], contact.email)
            messages.add_message(request, messages.SUCCESS, 'Mail sent!')
            return HttpResponseRedirect(reverse('contact_index'))
        # Invalid form falls through and re-renders with errors bound.
    else:
        form = MailForm()
    context = {'form': form, 'contact': contact}
    return render(request, 'contact_mail.html', context)
@staff_member_required
def estimate(request, pk=None):
    """Display an estimate, or render it as a PDF when ``?pdf=`` is set.

    Unattached, uninvoiced client time is shown alongside entries already
    linked to this estimate.
    """
    context = {}
    company = Company.get_solo()
    if company:
        context['company'] = company
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    estimate = get_object_or_404(Estimate, pk=pk)
    document_id = str(estimate.document_id)
    document_type = estimate._meta.verbose_name
    document_type_upper = document_type.upper()
    document_type_title = document_type.title()
    context['item'] = estimate
    context['document_type_upper'] = document_type_upper
    context['document_type_title'] = document_type_title
    context['edit_url'] = 'estimate_edit'
    # Free-floating time for this client plus time already on the estimate.
    times_client = Time.objects.filter(
        client=estimate.client,
        estimate=None,
        project=None,
        invoiced=False,
        invoice=None)
    times_estimate = Time.objects.filter(estimate=estimate)
    times = (times_client | times_estimate).order_by('-date')
    entries, subtotal, paid_amount, hours, amount = entries_total(times)
    context['entries'] = entries
    context['amount'] = amount
    context['paid_amount'] = paid_amount
    context['subtotal'] = subtotal
    context['hours'] = hours
    if pdf:
        # Sanitize the company name for the download filename.
        if company.name:
            company_name = company.name.replace('.', '_')
            company_name = company_name.replace(', ', '_')
            company_name = company_name.upper()
        else:
            # Consistency fix: fall back to 'COMPANY' like invoice() does,
            # instead of producing a filename with a trailing underscore.
            company_name = 'COMPANY'
        response = HttpResponse(content_type='application/pdf')
        filename = '_'.join([document_type_upper, document_id, company_name])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_table.html', context=context, file_object=response)
    else:
        return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
    """Create or edit an estimate.

    If ``?times=1,2,...`` is given (and ``pk`` identifies an estimate),
    the listed Time entries are attached to it before the form renders.
    """
    kwargs = {}
    url_name = 'estimate_index'
    # Display-only totals passed through from the referring page.
    amount = request.GET.get('amount')
    paid_amount = request.GET.get('paid_amount')
    subtotal = request.GET.get('subtotal')
    times = request.GET.get('times')
    company = Company.get_solo()
    if pk:
        kwargs['pk'] = pk
        url_name = 'estimate'
    if times:
        estimate = get_object_or_404(Estimate, pk=pk)
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.estimate = estimate
            entry.save()
    return edit(
        request,
        EstimateForm,
        Estimate,
        url_name,
        'estimate_edit.html',
        amount=amount,
        kwargs=kwargs,
        paid_amount=paid_amount,
        pk=pk,
        subtotal=subtotal,
        company=company)
@staff_member_required
def estimate_index(request):
    """List estimates, newest issue date first."""
    context = index_items(
        request, Estimate, ('subject', ), order_by='-issue_date')
    context['edit_url'] = 'estimate_edit'  # Delete form modal
    context['company'] = Company.get_solo()
    return render(request, 'estimate_index.html', context)
def home(request):
    """Dashboard: active projects, unpaid invoices, gross/net totals."""
    gross, net = dashboard_totals(Invoice)
    projects = dashboard_items(
        Project, order_by='client__name', active=True, hidden=False)
    # http://stackoverflow.com/a/35044521
    for project in projects:
        project.daily_burn = daily_burn(project)
    context = {
        'edit_url': 'project_edit',  # Delete form modal
        'company': Company.get_solo(),
        'projects': projects,
        'invoices': Invoice.objects.filter(
            last_payment_date=None).order_by('amount'),
        'gross': gross,
        'net': net,
    }
    return render(request, 'dashboard.html', context)
@staff_member_required
def invoice(request, pk=None):
    """Display an invoice, or render it as a PDF when ``?pdf=`` is set."""
    context = {}
    company = Company.get_solo()
    if company:
        context['company'] = company
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    invoice = get_object_or_404(Invoice, pk=pk)
    document_id = str(invoice.document_id)
    document_type = invoice._meta.verbose_name
    document_type_upper = document_type.upper()
    document_type_title = document_type.title()
    context['edit_url'] = 'invoice_edit'  # Delete form modal
    context['item'] = invoice
    context['document_type_upper'] = document_type_upper
    context['document_type_title'] = document_type_title
    # Free-floating time on this invoice's project plus time already
    # linked to the invoice itself.
    times_project = Time.objects.filter(
        invoiced=False, project=invoice.project, estimate=None, invoice=None)
    times_invoice = Time.objects.filter(invoice=invoice)
    times = times_project | times_invoice
    times = times.order_by('-date')
    entries, subtotal, paid_amount, hours, amount = entries_total(times)
    context['entries'] = entries
    context['amount'] = amount
    context['paid_amount'] = paid_amount
    context['subtotal'] = subtotal
    context['hours'] = hours
    context['invoice'] = True
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        # Sanitize the company name for the download filename.
        if company.name:
            company_name = company.name.replace('.', '_')
            company_name = company_name.replace(', ', '_')
            company_name = company_name.upper()
        else:
            company_name = 'COMPANY'
        filename = '_'.join([document_type_upper, document_id, company_name])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_table.html', context=context, file_object=response)
    else:
        return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
    """Create or edit an invoice.

    Querystring side effects (all optional):
    - ``paid`` + ``times``: mark the listed Time entries as invoiced.
    - ``times`` alone: attach the listed Time entries to this invoice.
    - ``project``: pre-select a project.
    """
    kwargs = {}
    # Display-only totals passed through from the referring page.
    amount = request.GET.get('amount')
    paid_amount = request.GET.get('paid_amount')
    subtotal = request.GET.get('subtotal')
    times = request.GET.get('times')
    paid = request.GET.get('paid')
    company = Company.get_solo()
    project = request.GET.get('project')
    url_name = 'invoice_index'
    if project:
        project = get_object_or_404(Project, pk=project)
    if pk:
        kwargs['pk'] = pk
        url_name = 'invoice'
        invoice = get_object_or_404(Invoice, pk=pk)
        # Backfill the invoice's client from its project when missing.
        if invoice.project:
            if invoice.project.client and not invoice.client:
                invoice.client = invoice.project.client
                invoice.save()
    if paid and times:
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.invoiced = True
            entry.save()
    elif times:
        invoice = get_object_or_404(Invoice, pk=pk)
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.invoice = invoice
            entry.save()
    return edit(
        request,
        InvoiceForm,
        Invoice,
        url_name,
        'invoice_edit.html',
        amount=amount,
        kwargs=kwargs,
        paid_amount=paid_amount,
        paid=paid,
        pk=pk,
        project=project,
        subtotal=subtotal,
        company=company)
@staff_member_required
def invoice_index(request):
    """List invoices, newest issue date first."""
    search_fields = (
        'client__name',
        'document_id',
        'issue_date',
        'project__name',
        'subject', )
    context = index_items(
        request, Invoice, search_fields, order_by='-issue_date')
    context['company'] = Company.get_solo()
    context['edit_url'] = 'invoice_edit'  # Delete form modal
    return render(request, 'invoice_index.html', context)
@staff_member_required
def note(request, pk=None):
    """Display a note, or render it as a PDF when ``?pdf=`` is set."""
    context = {}
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    note = get_object_or_404(Note, pk=pk)
    notes = Note.objects.filter(note=note)
    notes = notes.order_by('-pk')
    context['edit_url'] = 'note_edit'
    context['item'] = note
    # Fix: the related-notes queryset was built but never exposed to the
    # template; pass it through so it can actually be rendered.
    context['notes'] = notes
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=note.pdf'
        return generate_pdf(
            'pdf_note.html', context=context, file_object=response)
    else:
        return render(request, 'note.html', context)
@staff_member_required
def note_edit(request, pk=None):
    """Create a new note, or edit the one identified by ``pk``."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'note' if pk else 'note_index'
    return edit(
        request,
        NoteForm,
        Note,
        url_name,
        'note_edit.html',
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def note_index(request, pk=None):
    """List notes; ``pk`` is accepted for URL symmetry but unused."""
    context = index_items(request, Note, ())
    context['edit_url'] = 'note_edit'  # Delete form modal
    return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
    """Display a project with its uninvoiced time and invoices."""
    item = get_object_or_404(Project, pk=pk)
    context = {
        'company': Company.get_solo(),
        'edit_url': 'project_edit',  # Delete form modal
        'item': item,
        'times': Time.objects.filter(
            project=item, invoiced=False).order_by('-date'),
        'invoices': Invoice.objects.filter(project=item),
        'daily_burn': daily_burn(item),
    }
    return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
    """Create or edit a project, optionally pre-bound to a client."""
    clients = []
    if pk:
        kwargs = {'pk': pk}
        url_name = 'project'
    else:
        kwargs = {}
        url_name = 'project_index'
        # Offer active clients only when creating a new project.
        clients = Client.objects.filter(active=True)
    client = request.GET.get('client')
    if client:
        client = get_object_or_404(Client, pk=client)
        url_name = 'client_index'
    return edit(
        request,
        ProjectForm,
        Project,
        url_name,
        'project_edit.html',
        client=client,
        clients=clients,
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def project_index(request, pk=None):
    """List projects, newest start date first."""
    context = index_items(
        request, Project, ('id', 'name'), order_by='-start_date')
    context['edit_url'] = 'project_edit'  # Delete form modal
    return render(request, 'project_index.html', context)
@staff_member_required
def report(request, pk=None):
    """Display a report; cost is derived as gross minus net."""
    item = get_object_or_404(Report, pk=pk)
    context = {
        'edit_url': 'report_edit',  # Delete form modal
        'item': item,
        'cost': item.gross - item.net,
    }
    return render(request, 'report.html', context)
@staff_member_required
def report_index(request):
    """List reports with aggregate gross/net/cost and an optional plot."""
    totals = Report.objects.all().aggregate(
        gross=Sum(F('gross')), net=Sum(F('net')))
    context = index_items(
        request, Report, ('id', 'name', 'gross', 'net'), order_by='date')
    if totals['gross'] is not None and totals['net'] is not None:
        cost = totals['gross'] - totals['net']
    else:
        # No reports yet: zero out the aggregates for the template.
        totals['gross'] = 0
        totals['net'] = 0
        cost = 0
    context['reports'] = totals
    context['company'] = Company.get_solo()
    context['cost'] = cost
    context['edit_url'] = 'report_edit'  # Delete form modal
    # A plot needs at least two data points.
    context['show_plot'] = len(context['items']) > 1
    return render(request, 'report_index.html', context)
@staff_member_required
def report_edit(request, pk=None):
    """Create or edit a report, seeding gross/net from invoice totals."""
    gross, net = dashboard_totals(Invoice)
    kwargs = {'pk': pk} if pk else {}
    url_name = 'report' if pk else 'report_index'
    return edit(
        request,
        ReportForm,
        Report,
        url_name,
        'report_edit.html',
        gross=gross,
        kwargs=kwargs,
        net=net,
        pk=pk)
def report_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """Render (value, 'YYYY-MM-DD') pairs from the query as a PNG plot."""
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    dates = [date2num(datetime.strptime(v[1], '%Y-%m-%d')) for v in values]
    amounts = [v[0] for v in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(dates, amounts)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # Write the PNG into an in-memory buffer and hand the bytes back.
    buf = BytesIO()
    canvas.print_png(buf)
    return HttpResponse(buf.getvalue(), content_type="image/png")
@staff_member_required
def task(request, pk=None):
    """Display a single task."""
    context = {
        'edit_url': 'task_edit',  # Delete form modal
        'item': get_object_or_404(Task, pk=pk),
    }
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create a new task, or edit the one identified by ``pk``."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'task' if pk else 'task_index'
    return edit(
        request,
        TaskForm,
        Task,
        url_name,
        'task_edit.html',
        pk=pk,
        kwargs=kwargs)
@staff_member_required
def task_index(request):
    """List tasks, newest first, searchable on name."""
    context = index_items(request, Task, ('name', ), order_by='-pk')
    context['edit_url'] = 'task_edit'  # Delete form modal
    return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
    """Display a single time entry.

    Non-staff users may only view entries attached to their own account;
    anything else redirects to the admin index.
    """
    context = {}
    entry = get_object_or_404(Time, pk=pk)
    # Ownerless entries are staff-only.
    if not entry.user and not request.user.is_staff:
        return HttpResponseRedirect(reverse('admin:index'))
    if entry.user:
        if (not entry.user.username == request.user.username and
                not request.user.is_staff):
            return HttpResponseRedirect(reverse('admin:index'))
    context['edit_url'] = 'entry_edit'  # Delete form modal
    context['item'] = entry
    return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
    """Create or edit a time entry.

    Non-staff users may only edit entries attached to their own account
    (ownerless entries are staff-only); otherwise they are redirected to
    the admin index. Form choices are limited to the user's own projects.
    """
    kwargs = {}
    url_name = 'entry_index'
    if pk is not None:
        entry = get_object_or_404(Time, pk=pk)
        if entry.user:
            if (entry.user.username != request.user.username and
                    not request.user.is_staff):
                return HttpResponseRedirect(reverse('admin:index'))
        else:
            if not request.user.is_staff:
                return HttpResponseRedirect(reverse('admin:index'))
    if pk:
        kwargs['pk'] = pk
        url_name = 'entry'
    client = request.GET.get('client')
    project = request.GET.get('project')
    task = None
    if client:
        client = get_object_or_404(Client, pk=client)
    if project:
        project = get_object_or_404(Project, pk=project)
        # Pre-select the project's task when it has one.
        if project.task:
            task = get_object_or_404(Task, pk=project.task.pk)
    # Limit the choices to projects the user is on, and to the clients
    # and tasks those projects reference.
    projects = Project.objects.filter(team=request.user.pk)
    clients = Client.objects.filter(
        pk__in=[i.client.pk for i in projects if i.client])
    tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
    # Staff get the full admin form; everyone else the restricted one.
    if request.user.is_staff:
        from .forms import TimeAdminForm as TimeForm
    else:
        from .forms import TimeForm
    return edit(
        request,
        TimeForm,
        Time,
        url_name,
        'time_edit.html',
        client=client,
        clients=clients,
        pk=pk,
        project=project,
        projects=projects,
        task=task,
        tasks=tasks,
        kwargs=kwargs)
@login_required
def time_index(request):
    """List time entries, searchable across client/project/invoice/user."""
    search_fields = ('client__name', 'date', 'notes', 'pk', 'project__name',
                     'invoice__document_id', 'user__username')
    context = index_items(request, Time, search_fields, order_by='-pk')
    context['edit_url'] = 'entry_edit'  # Delete form modal
    return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """Display a user with totals for unestimated, uninvoiced time.

    Only the user themselves or staff may view; others are sent home.
    """
    context = {}
    company = Company.get_solo()
    user = get_object_or_404(User, pk=pk)
    # Ensure a Profile row exists for this user.
    profile = Profile.objects.get_or_create(user=user)[0]
    times = Time.objects.filter(user=user, estimate=None, invoiced=False)
    total_hours = times.aggregate(hours=Sum(F('hours')))
    total_hours = total_hours['hours']
    # total_hours is None when there are no matching entries.
    if profile.rate and total_hours:
        total_dollars = profile.rate * total_hours
    else:
        total_dollars = 0
    context['company'] = company
    context['edit_url'] = 'user_edit'  # Delete form modal
    context['profile'] = profile
    context['request'] = request
    context['item'] = user
    context['times'] = times
    context['total_hours'] = total_hours
    context['total_dollars'] = '%.2f' % total_dollars
    if request.user.pk == int(pk) or request.user.is_staff:
        return render(request, 'user.html', context)
    else:
        return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
    # Presumably copies the user identified by pk into the Contact table;
    # the actual logic lives in utils.add_user_to_contacts — verify there.
    return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
    """Edit the profile attached to the user identified by ``pk``."""
    context = {'user': get_object_or_404(User, pk=pk)}
    kwargs = {'pk': pk} if pk else {}
    url_name = 'user' if pk else 'user_index'
    return edit(
        request,
        ProfileForm,
        Profile,
        url_name,
        'user_edit.html',
        kwargs=kwargs,
        pk=pk,
        context=context)
@staff_member_required
def user_index(request):
    """List users, searchable on name and email."""
    context = index_items(
        request, User, ('first_name', 'last_name', 'email'))
    context['company'] = Company.get_solo()
    return render(request, 'user_index.html', context)
# Update -- stray paste artifact commented out: a bare name at module
# level would raise NameError on import.
from .forms import ClientForm
from .forms import CompanyForm
from .forms import ContactForm
from .forms import EstimateForm
from .forms import InvoiceForm
from .forms import MailForm
from .forms import NoteForm
from .forms import ProfileForm
from .forms import ProjectForm
from .forms import ReportForm
from .forms import TaskForm
from .models import Client
from .models import Company
from .models import Contact
from .models import Estimate
from .models import Invoice
from .models import Note
from .models import Profile
from .models import Project
from .models import Report
from .models import Service
from .models import Testimonial
from .models import Task
from .models import Time
from .serializers import ClientSerializer
from .serializers import ProfileSerializer
from .serializers import ServiceSerializer
from .serializers import TestimonialSerializer
from .utils import add_user_to_contacts
from .utils import index_items
from .utils import certbot_data
from .utils import daily_burn
from .utils import dashboard_items
from .utils import dashboard_totals
from .utils import edit
from .utils import entries_total
from .utils import get_query
from .utils import send_mail
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import F, Sum
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django_xhtml2pdf.utils import generate_pdf
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import MonthLocator
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rest_framework import viewsets
# Create your views here.
class ClientViewSet(viewsets.ModelViewSet):
    """
    REST API endpoint exposing published clients, ordered by name.
    """
    queryset = Client.objects.filter(published=True).order_by('name')
    serializer_class = ClientSerializer
class ServiceViewSet(viewsets.ModelViewSet):
    """
    REST API endpoint exposing active services, ordered by name.
    """
    queryset = Service.objects.filter(active=True).order_by('name')
    serializer_class = ServiceSerializer
class TestimonialViewSet(viewsets.ModelViewSet):
    """
    REST API endpoint exposing active testimonials, newest issue date first.
    """
    queryset = Testimonial.objects.filter(active=True).order_by('-issue_date')
    serializer_class = TestimonialSerializer
class ProfileViewSet(viewsets.ModelViewSet):
    """
    REST API endpoint exposing published profiles, ordered by first name.
    """
    queryset = Profile.objects.filter(
        published=True).order_by('user__first_name')
    serializer_class = ProfileSerializer
def certbot(request):  # http://stackoverflow.com/a/24817024
    """Return the certbot validation payload from ``certbot_data()``."""
    data = certbot_data()
    return HttpResponse(data)
@staff_member_required
def client(request, pk=None):
    """Display a single client with its contacts and projects."""
    item = get_object_or_404(Client, pk=pk)
    context = {
        'edit_url': 'client_edit',
        'item': item,
        'contacts': Contact.objects.filter(client=item).order_by('-pk'),
        'projects': Project.objects.filter(
            client=item).order_by('-start_date'),
    }
    return render(request, 'client.html', context)
@staff_member_required
def client_edit(request, pk=None):
    """Create a new client, or edit the one identified by ``pk``."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'client' if pk else 'client_index'
    return edit(
        request,
        ClientForm,
        Client,
        url_name,
        'client_edit.html',
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def client_index(request):
    """List clients, searchable on address and name, newest first."""
    context = index_items(
        request, Client, ('address', 'name'), order_by='-pk')
    context['edit_url'] = 'client_edit'  # Delete form modal
    return render(request, 'client_index.html', context)
@staff_member_required
def company_edit(request, pk=None):
    # Company is a singleton (get_solo is used elsewhere), so pk is fixed
    # at 1 regardless of the URL argument.
    return edit(
        request, CompanyForm, Company, 'company', 'company_edit.html', pk=1)
@staff_member_required
def company(request):
    """Display the singleton company record."""
    context = {'company': Company.get_solo()}
    return render(request, 'company.html', context)
@staff_member_required
def contact(request, pk=None):
    """Display a single contact."""
    context = {
        'edit_url': 'contact_edit',  # Delete form modal
        'item': get_object_or_404(Contact, pk=pk),
    }
    return render(request, 'contact.html', context)
@staff_member_required
def contact_edit(request, pk=None):
    """Create or edit a contact, optionally pre-bound to a client."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'contact' if pk else 'contact_index'
    client = request.GET.get('client')
    if client:
        client = get_object_or_404(Client, pk=client)
        # When arriving from a client page, return to the contact list.
        url_name = 'contact_index'
    return edit(
        request,
        ContactForm,
        Contact,
        url_name,
        'contact_edit.html',
        client=client,
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def contact_index(request):
    """List contacts, searchable on name, email and notes."""
    search_fields = ('first_name', 'last_name', 'email', 'notes')
    context = index_items(request, Contact, search_fields, order_by='-pk')
    context['edit_url'] = 'contact_edit'  # Delete form modal
    return render(request, 'contact_index.html', context)
@staff_member_required
def contact_mail(request, pk=None):
    """Send a one-off email to a contact via a subject/message form."""
    contact = get_object_or_404(Contact, pk=pk)
    if request.method == 'POST':
        form = MailForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            send_mail(
                request, data['subject'], data['message'], contact.email)
            messages.add_message(request, messages.SUCCESS, 'Mail sent!')
            return HttpResponseRedirect(reverse('contact_index'))
        # Invalid form falls through and re-renders with errors bound.
    else:
        form = MailForm()
    context = {'form': form, 'contact': contact}
    return render(request, 'contact_mail.html', context)
@staff_member_required
def estimate(request, pk=None):
    """Display an estimate, or render it as a PDF when ``?pdf=`` is set.

    Unattached, uninvoiced client time is shown alongside entries already
    linked to this estimate.
    """
    context = {}
    company = Company.get_solo()
    if company:
        context['company'] = company
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    estimate = get_object_or_404(Estimate, pk=pk)
    document_id = str(estimate.document_id)
    document_type = estimate._meta.verbose_name
    document_type_upper = document_type.upper()
    document_type_title = document_type.title()
    context['item'] = estimate
    context['document_type_upper'] = document_type_upper
    context['document_type_title'] = document_type_title
    context['edit_url'] = 'estimate_edit'
    # Free-floating time for this client plus time already on the estimate.
    times_client = Time.objects.filter(
        client=estimate.client,
        estimate=None,
        project=None,
        invoiced=False,
        invoice=None)
    times_estimate = Time.objects.filter(estimate=estimate)
    times = (times_client | times_estimate).order_by('-date')
    entries, subtotal, paid_amount, hours, amount = entries_total(times)
    context['entries'] = entries
    context['amount'] = amount
    context['paid_amount'] = paid_amount
    context['subtotal'] = subtotal
    context['hours'] = hours
    if pdf:
        # Sanitize the company name for the download filename.
        if company.name:
            company_name = company.name.replace('.', '_')
            company_name = company_name.replace(', ', '_')
            company_name = company_name.upper()
        else:
            # Consistency fix: fall back to 'COMPANY' like invoice() does,
            # instead of producing a filename with a trailing underscore.
            company_name = 'COMPANY'
        response = HttpResponse(content_type='application/pdf')
        filename = '_'.join([document_type_upper, document_id, company_name])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_table.html', context=context, file_object=response)
    else:
        return render(request, 'estimate.html', context)
@staff_member_required
def estimate_edit(request, pk=None):
    """Create or edit an estimate.

    If ``?times=1,2,...`` is given (and ``pk`` identifies an estimate),
    the listed Time entries are attached to it before the form renders.
    """
    kwargs = {}
    url_name = 'estimate_index'
    # Display-only totals passed through from the referring page.
    amount = request.GET.get('amount')
    paid_amount = request.GET.get('paid_amount')
    subtotal = request.GET.get('subtotal')
    times = request.GET.get('times')
    company = Company.get_solo()
    if pk:
        kwargs['pk'] = pk
        url_name = 'estimate'
    if times:
        estimate = get_object_or_404(Estimate, pk=pk)
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.estimate = estimate
            entry.save()
    return edit(
        request,
        EstimateForm,
        Estimate,
        url_name,
        'estimate_edit.html',
        amount=amount,
        kwargs=kwargs,
        paid_amount=paid_amount,
        pk=pk,
        subtotal=subtotal,
        company=company)
@staff_member_required
def estimate_index(request):
    """List estimates, newest issue date first."""
    context = index_items(
        request, Estimate, ('subject', ), order_by='-issue_date')
    context['edit_url'] = 'estimate_edit'  # Delete form modal
    context['company'] = Company.get_solo()
    return render(request, 'estimate_index.html', context)
def home(request):
    """Dashboard: active projects, unpaid invoices, gross/net totals."""
    gross, net = dashboard_totals(Invoice)
    projects = dashboard_items(
        Project, order_by='client__name', active=True, hidden=False)
    # http://stackoverflow.com/a/35044521
    for project in projects:
        project.daily_burn = daily_burn(project)
    context = {
        'edit_url': 'project_edit',  # Delete form modal
        'company': Company.get_solo(),
        'projects': projects,
        'invoices': Invoice.objects.filter(
            last_payment_date=None).order_by('amount'),
        'gross': gross,
        'net': net,
    }
    return render(request, 'dashboard.html', context)
@staff_member_required
def invoice(request, pk=None):
    """Display an invoice, or render it as a PDF when ``?pdf=`` is set."""
    context = {}
    company = Company.get_solo()
    if company:
        context['company'] = company
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    invoice = get_object_or_404(Invoice, pk=pk)
    document_id = str(invoice.document_id)
    document_type = invoice._meta.verbose_name
    document_type_upper = document_type.upper()
    document_type_title = document_type.title()
    context['edit_url'] = 'invoice_edit'  # Delete form modal
    context['item'] = invoice
    context['document_type_upper'] = document_type_upper
    context['document_type_title'] = document_type_title
    # Free-floating time on this invoice's project plus time already
    # linked to the invoice itself.
    times_project = Time.objects.filter(
        invoiced=False, project=invoice.project, estimate=None, invoice=None)
    times_invoice = Time.objects.filter(invoice=invoice)
    times = times_project | times_invoice
    times = times.order_by('-date')
    entries, subtotal, paid_amount, hours, amount = entries_total(times)
    context['entries'] = entries
    context['amount'] = amount
    context['paid_amount'] = paid_amount
    context['subtotal'] = subtotal
    context['hours'] = hours
    context['invoice'] = True
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        # Sanitize the company name for the download filename.
        if company.name:
            company_name = company.name.replace('.', '_')
            company_name = company_name.replace(', ', '_')
            company_name = company_name.upper()
        else:
            company_name = 'COMPANY'
        filename = '_'.join([document_type_upper, document_id, company_name])
        response['Content-Disposition'] = 'filename=%s.pdf' % filename
        return generate_pdf(
            'pdf_table.html', context=context, file_object=response)
    else:
        return render(request, 'invoice.html', context)
@staff_member_required
def invoice_edit(request, pk=None):
    """Create or edit an invoice.

    Querystring side effects (all optional):
    - ``paid`` + ``times``: mark the listed Time entries as invoiced.
    - ``times`` alone: attach the listed Time entries to this invoice.
    - ``project``: pre-select a project.
    """
    kwargs = {}
    # Display-only totals passed through from the referring page.
    amount = request.GET.get('amount')
    paid_amount = request.GET.get('paid_amount')
    subtotal = request.GET.get('subtotal')
    times = request.GET.get('times')
    paid = request.GET.get('paid')
    company = Company.get_solo()
    project = request.GET.get('project')
    url_name = 'invoice_index'
    if project:
        project = get_object_or_404(Project, pk=project)
    if pk:
        kwargs['pk'] = pk
        url_name = 'invoice'
        invoice = get_object_or_404(Invoice, pk=pk)
        # Backfill the invoice's client from its project when missing.
        if invoice.project:
            if invoice.project.client and not invoice.client:
                invoice.client = invoice.project.client
                invoice.save()
    if paid and times:
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.invoiced = True
            entry.save()
    elif times:
        invoice = get_object_or_404(Invoice, pk=pk)
        times = Time.objects.filter(pk__in=[int(i) for i in times.split(',')])
        for entry in times:
            entry.invoice = invoice
            entry.save()
    return edit(
        request,
        InvoiceForm,
        Invoice,
        url_name,
        'invoice_edit.html',
        amount=amount,
        kwargs=kwargs,
        paid_amount=paid_amount,
        paid=paid,
        pk=pk,
        project=project,
        subtotal=subtotal,
        company=company)
@staff_member_required
def invoice_index(request):
    """List invoices, newest issue date first."""
    search_fields = (
        'client__name',
        'document_id',
        'issue_date',
        'project__name',
        'subject', )
    context = index_items(
        request, Invoice, search_fields, order_by='-issue_date')
    context['company'] = Company.get_solo()
    context['edit_url'] = 'invoice_edit'  # Delete form modal
    return render(request, 'invoice_index.html', context)
@staff_member_required
def note(request, pk=None):
    """Display a note, or render it as a PDF when ``?pdf=`` is set."""
    context = {}
    pdf = get_query(request, 'pdf')
    context['pdf'] = pdf
    note = get_object_or_404(Note, pk=pk)
    notes = Note.objects.filter(note=note)
    notes = notes.order_by('-pk')
    context['edit_url'] = 'note_edit'
    context['item'] = note
    # Fix: the related-notes queryset was built but never exposed to the
    # template; pass it through so it can actually be rendered.
    context['notes'] = notes
    if pdf:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'filename=note.pdf'
        return generate_pdf(
            'pdf_note.html', context=context, file_object=response)
    else:
        return render(request, 'note.html', context)
@staff_member_required
def note_edit(request, pk=None):
    """Create a new note, or edit the one identified by ``pk``."""
    kwargs = {'pk': pk} if pk else {}
    url_name = 'note' if pk else 'note_index'
    return edit(
        request,
        NoteForm,
        Note,
        url_name,
        'note_edit.html',
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def note_index(request, pk=None):
    """List notes; ``pk`` is accepted for URL symmetry but unused."""
    context = index_items(request, Note, ())
    context['edit_url'] = 'note_edit'  # Delete form modal
    return render(request, 'note_index.html', context)
@staff_member_required
def project(request, pk=None):
    """Display a project with its uninvoiced time and invoices."""
    item = get_object_or_404(Project, pk=pk)
    context = {
        'company': Company.get_solo(),
        'edit_url': 'project_edit',  # Delete form modal
        'item': item,
        'times': Time.objects.filter(
            project=item, invoiced=False).order_by('-date'),
        'invoices': Invoice.objects.filter(project=item),
        'daily_burn': daily_burn(item),
    }
    return render(request, 'project.html', context)
@staff_member_required
def project_edit(request, pk=None):
    """Create or edit a project, optionally pre-bound to a client."""
    clients = []
    if pk:
        kwargs = {'pk': pk}
        url_name = 'project'
    else:
        kwargs = {}
        url_name = 'project_index'
        # Offer active clients only when creating a new project.
        clients = Client.objects.filter(active=True)
    client = request.GET.get('client')
    if client:
        client = get_object_or_404(Client, pk=client)
        url_name = 'client_index'
    return edit(
        request,
        ProjectForm,
        Project,
        url_name,
        'project_edit.html',
        client=client,
        clients=clients,
        kwargs=kwargs,
        pk=pk)
@staff_member_required
def project_index(request, pk=None):
    """List projects, newest start date first."""
    context = index_items(
        request, Project, ('id', 'name'), order_by='-start_date')
    context['edit_url'] = 'project_edit'  # Delete form modal
    return render(request, 'project_index.html', context)
@staff_member_required
def report(request, pk=None):
    """Display a report; cost is derived as gross minus net."""
    item = get_object_or_404(Report, pk=pk)
    context = {
        'edit_url': 'report_edit',  # Delete form modal
        'item': item,
        'cost': item.gross - item.net,
    }
    return render(request, 'report.html', context)
@staff_member_required
def report_index(request):
    """List reports with aggregate gross/net/cost and an optional plot."""
    totals = Report.objects.all().aggregate(
        gross=Sum(F('gross')), net=Sum(F('net')))
    context = index_items(
        request, Report, ('id', 'name', 'gross', 'net'), order_by='date')
    if totals['gross'] is not None and totals['net'] is not None:
        cost = totals['gross'] - totals['net']
    else:
        # No reports yet: zero out the aggregates for the template.
        totals['gross'] = 0
        totals['net'] = 0
        cost = 0
    context['reports'] = totals
    context['company'] = Company.get_solo()
    context['cost'] = cost
    context['edit_url'] = 'report_edit'  # Delete form modal
    # A plot needs at least two data points.
    context['show_plot'] = len(context['items']) > 1
    return render(request, 'report_index.html', context)
@staff_member_required
def report_edit(request, pk=None):
    """Create or update a report, pre-seeding gross/net from invoice totals."""
    extra_kwargs = {}
    redirect_name = 'report_index'
    gross, net = dashboard_totals(Invoice)
    if pk:
        extra_kwargs['pk'] = pk
        redirect_name = 'report'
    return edit(
        request,
        ReportForm,
        Report,
        redirect_name,
        'report_edit.html',
        gross=gross,
        kwargs=extra_kwargs,
        net=net,
        pk=pk)
def report_plot(request):  # http://stackoverflow.com/a/5515994/185820
    """Render the report values as a month-labelled PNG line plot.

    NOTE(review): unlike the sibling views this endpoint carries no
    auth decorator -- confirm that exposing the plot publicly is
    intentional.
    """
    values = get_query(request, 'values')
    # http://matplotlib.org/examples/api/date_demo.html
    dates = [date2num(datetime.strptime(row[1], '%Y-%m-%d')) for row in values]
    amounts = [row[0] for row in values]
    figure = Figure()
    canvas = FigureCanvasAgg(figure)
    axes = figure.add_subplot(1, 1, 1)
    axes.grid(True)
    axes.plot(dates, amounts)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_major_formatter(DateFormatter('%m'))
    # Serialize the canvas to PNG bytes and hand them straight back.
    png_buffer = BytesIO()
    canvas.print_png(png_buffer)
    return HttpResponse(png_buffer.getvalue(), content_type="image/png")
@staff_member_required
def task(request, pk=None):
    """Display a single task; staff only."""
    item = get_object_or_404(Task, pk=pk)
    context = {
        'edit_url': 'task_edit',  # Delete form modal
        'item': item,
    }
    return render(request, 'task.html', context)
@staff_member_required
def task_edit(request, pk=None):
    """Create or update a task; staff only."""
    extra_kwargs = {}
    redirect_name = 'task_index'
    if pk:
        extra_kwargs['pk'] = pk
        redirect_name = 'task'
    return edit(
        request,
        TaskForm,
        Task,
        redirect_name,
        'task_edit.html',
        pk=pk,
        kwargs=extra_kwargs)
@staff_member_required
def task_index(request):
    """List tasks, newest first; staff only."""
    context = index_items(request, Task, ('name', ), order_by='-pk')
    context['edit_url'] = 'task_edit'  # Delete form modal
    return render(request, 'task_index.html', context)
@login_required
def time(request, pk=None):
    """Display a single time entry.

    Ownerless entries are staff-only; owned entries are visible to their
    owner or to staff.  Everyone else is bounced to the admin index.
    """
    entry = get_object_or_404(Time, pk=pk)
    if not entry.user and not request.user.is_staff:
        return HttpResponseRedirect(reverse('admin:index'))
    if entry.user:
        if (not entry.user.username == request.user.username and
                not request.user.is_staff):
            return HttpResponseRedirect(reverse('admin:index'))
    context = {
        'edit_url': 'entry_edit',  # Delete form modal
        'item': entry,
    }
    return render(request, 'time.html', context)
@login_required
def time_edit(request, pk=None):
    """Create or update a time entry.

    Non-staff users may only edit their own entries; entries without an
    owner are staff-only.  GET parameters ``client`` and ``project``
    pre-select the related objects (including the project's task, if
    any), and the selectable relations are limited to projects the
    requesting user is on the team of.
    """
    kwargs = {}
    url_name = 'entry_index'
    if pk is not None:
        entry = get_object_or_404(Time, pk=pk)
        if entry.user:
            # Owned entry: only the owner or staff may edit it.
            if (entry.user.username != request.user.username and
                    not request.user.is_staff):
                return HttpResponseRedirect(reverse('admin:index'))
        else:
            # Ownerless entry: staff only.
            if not request.user.is_staff:
                return HttpResponseRedirect(reverse('admin:index'))
    if pk:
        kwargs['pk'] = pk
        url_name = 'entry'
    client = request.GET.get('client')
    project = request.GET.get('project')
    task = None
    if client:
        client = get_object_or_404(Client, pk=client)
    if project:
        project = get_object_or_404(Project, pk=project)
        if project.task:
            task = get_object_or_404(Task, pk=project.task.pk)
    # Restrict the form's choice sets to the user's own projects and the
    # clients/tasks reachable from them.
    projects = Project.objects.filter(team=request.user.pk)
    clients = Client.objects.filter(
        pk__in=[i.client.pk for i in projects if i.client])
    tasks = Task.objects.filter(pk__in=[i.task.pk for i in projects if i.task])
    # Staff get the richer admin form; imported locally to pick the right
    # class per request.
    if request.user.is_staff:
        from .forms import TimeAdminForm as TimeForm
    else:
        from .forms import TimeForm
    return edit(
        request,
        TimeForm,
        Time,
        url_name,
        'time_edit.html',
        client=client,
        clients=clients,
        pk=pk,
        project=project,
        projects=projects,
        task=task,
        tasks=tasks,
        kwargs=kwargs)
@login_required
def time_index(request):
    """List time entries, newest first."""
    searchable = ('client__name', 'date', 'notes', 'pk', 'project__name',
                  'invoice__document_id', 'user__username')
    context = index_items(request, Time, searchable, order_by='-pk')
    context['edit_url'] = 'entry_edit'  # Delete form modal
    return render(request, 'time_index.html', context)
@login_required
def user(request, pk=None):
    """Display a user's profile with uninvoiced hours and their value.

    Only the user themselves or staff may view the page; anyone else is
    redirected home.  (The profile get_or_create runs before the
    permission check, matching the original flow.)
    """
    viewed = get_object_or_404(User, pk=pk)
    profile = Profile.objects.get_or_create(user=viewed)[0]
    entries = Time.objects.filter(user=viewed, estimate=None, invoiced=False)
    hours = entries.aggregate(hours=Sum(F('hours')))['hours']
    if profile.rate and hours:
        dollars = profile.rate * hours
    else:
        dollars = 0
    context = {
        'company': Company.get_solo(),
        'edit_url': 'user_edit',  # Delete form modal
        'profile': profile,
        'request': request,
        'item': viewed,
        'times': entries,
        'total_hours': hours,
        'total_dollars': '%.2f' % dollars,
    }
    if request.user.pk == int(pk) or request.user.is_staff:
        return render(request, 'user.html', context)
    return HttpResponseRedirect(reverse('home'))
@staff_member_required
def user_contact(request, pk=None):
    """Copy the given user into the contacts list; staff only."""
    return add_user_to_contacts(request, Contact, pk=pk)
@login_required
def user_edit(request, pk=None):
    """Edit a user's profile.

    NOTE(review): unlike the ``time``/``time_edit`` views there is no
    ownership check here -- any logged-in user appears able to reach
    another user's profile form.  Confirm this is guarded elsewhere.
    """
    edited_user = get_object_or_404(User, pk=pk)
    extra_kwargs = {}
    redirect_name = 'user_index'
    if pk:
        extra_kwargs['pk'] = pk
        redirect_name = 'user'
    return edit(
        request,
        ProfileForm,
        Profile,
        redirect_name,
        'user_edit.html',
        kwargs=extra_kwargs,
        pk=pk,
        context={'user': edited_user})
@staff_member_required
def user_index(request):
    """List users by name and e-mail; staff only."""
    context = index_items(request, User, ('first_name', 'last_name', 'email'))
    context['company'] = Company.get_solo()
    return render(request, 'user_index.html', context)
# --- (file-separator artifact from concatenation; original content: "|")
import json
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from rest_framework import viewsets
from rest_framework import exceptions
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.decorators import action
from rest_framework.settings import api_settings
from rest_framework.renderers import BaseRenderer
from api import serializers as api_serializers
from api import mixins
from api import tools as utils
from utils.user_auth import check_and_set_form_by_id
from main.models import UserProfile
from odk_logger.models import XForm, Instance
from odk_viewer.models import ParsedInstance
from api.models import Project, OrganizationProfile, ProjectXForm, Team
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    This endpoint allows you to list and retrieve user's first and last names.
    ## List Users
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/users
    > Response:
    > [
    >     {
    >         "username": "demo",
    >         "first_name": "First",
    >         "last_name": "Last"
    >     },
    >     {
    >         "username": "another_demo",
    >         "first_name": "Another",
    >         "last_name": "Demo"
    >     },
    >     ...
    > ]
    ## Retrieve a specific user info
    <pre class="prettyprint"><b>GET</b> /api/v1/users/{username}</pre>
    > Example:
    >
    > curl -X GET https://formhub.org/api/v1/users/demo
    > Response:
    >
    > {
    >     "username": "demo",
    >     "first_name": "First",
    >     "last_name": "Last"
    > }
    """
    queryset = User.objects.all()
    serializer_class = api_serializers.UserSerializer
    lookup_field = 'username'  # address users by username, not pk
    permission_classes = [permissions.DjangoModelPermissions, ]

    def get_queryset(self):
        """Limit the listing to the requester plus users whose profiles
        the requester can see."""
        user = self.request.user
        if user.is_anonymous():
            # NOTE(review): falls back to a sentinel pk of -1;
            # User.objects.get(pk=-1) raises DoesNotExist unless such a
            # row is created elsewhere -- confirm.
            user = User.objects.get(pk=-1)
        return User.objects.filter(
            Q(pk__in=user.userprofile_set.values('user')) | Q(pk=user.pk))
class UserProfileViewSet(mixins.ObjectLookupMixin, viewsets.ModelViewSet):
    """
    List, Retrieve, Update, Create/Register users.
    ## Register a new User
    <pre class="prettyprint"><b>POST</b> /api/v1/profiles</pre>
    > Example
    >
    > {
    >     "username": "demo",
    >     "name": "Demo User",
    >     "email": "demo@localhost.com",
    >     "city": "Kisumu",
    >     "country": "KE",
    >     ...
    > }
    ## List User Profiles
    <pre class="prettyprint"><b>GET</b> /api/v1/profiles</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/profiles
    > Response
    >
    > [
    >     {
    >         "url": "https://formhub.org/api/v1/profiles/demo",
    >         "username": "demo",
    >         "name": "Demo User",
    >         "email": "demo@localhost.com",
    >         "city": "",
    >         "country": "",
    >         "organization": "",
    >         "website": "",
    >         "twitter": "",
    >         "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >         "require_auth": false,
    >         "user": "https://formhub.org/api/v1/users/demo"
    >     },
    >     {
    >         ...}, ...
    > ]
    ## Retrieve User Profile Information
    <pre class="prettyprint"><b>GET</b> /api/v1/profiles/{username}</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/profiles/demo
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/profiles/demo",
    >     "username": "demo",
    >     "name": "Demo User",
    >     "email": "demo@localhost.com",
    >     "city": "",
    >     "country": "",
    >     "organization": "",
    >     "website": "",
    >     "twitter": "",
    >     "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >     "require_auth": false,
    >     "user": "https://formhub.org/api/v1/users/demo"
    > }
    """
    queryset = UserProfile.objects.all()
    serializer_class = api_serializers.UserProfileSerializer
    lookup_field = 'user'  # profiles are addressed by their user
    permission_classes = [permissions.DjangoModelPermissions, ]
    ordering = ('user__username', )

    def get_queryset(self):
        """Limit the listing to the requester's own profile plus profiles
        visible through the requester's userprofile_set."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests (see UserViewSet note).
            user = User.objects.get(pk=-1)
        return UserProfile.objects.filter(
            Q(user__in=user.userprofile_set.values('user')) | Q(user=user))
class OrgProfileViewSet(mixins.ObjectLookupMixin, viewsets.ModelViewSet):
    """
    List, Retrieve, Update, Create/Register Organizations
    ## Register a new Organization
    <pre class="prettyprint"><b>POST</b> /api/v1/orgs</pre>
    > Example
    >
    > {
    >     "org": "modilabs",
    >     "name": "Modi Labs Research",
    >     "email": "modilabs@localhost.com",
    >     "city": "New York",
    >     "country": "US",
    >     ...
    > }
    ## List of Organizations
    <pre class="prettyprint"><b>GET</b> /api/v1/orgs</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/orgs
    > Response
    >
    > [
    >     {
    >         "url": "https://formhub.org/api/v1/orgs/modilabs",
    >         "org": "modilabs",
    >         "name": "Modi Labs Research",
    >         "email": "modilabs@localhost.com",
    >         "city": "New York",
    >         "country": "US",
    >         "website": "",
    >         "twitter": "",
    >         "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >         "require_auth": false,
    >         "user": "https://formhub.org/api/v1/users/modilabs"
    >         "creator": "https://formhub.org/api/v1/users/demo"
    >     },
    >     {
    >         ...}, ...
    > ]
    ## Retrieve Organization Profile Information
    <pre class="prettyprint"><b>GET</b> /api/v1/orgs/{username}</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/orgs/modilabs
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/orgs/modilabs",
    >     "org": "modilabs",
    >     "name": "Modi Labs Research",
    >     "email": "modilabs@localhost.com",
    >     "city": "New York",
    >     "country": "US",
    >     "website": "",
    >     "twitter": "",
    >     "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >     "require_auth": false,
    >     "user": "https://formhub.org/api/v1/users/modilabs"
    >     "creator": "https://formhub.org/api/v1/users/demo"
    > }
    """
    queryset = OrganizationProfile.objects.all()
    serializer_class = api_serializers.OrganizationSerializer
    lookup_field = 'user'  # organizations are addressed by their user

    def get_queryset(self):
        """Limit the listing to organizations the requester belongs to."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests (see UserViewSet note).
            user = User.objects.get(pk=-1)
        return user.organizationprofile_set.all()
class SurveyRenderer(BaseRenderer):
    """Pass-through DRF renderer for pre-serialized XML survey payloads."""
    media_type = 'application/xml'
    format = 'xml'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # The payload is already an XML string; return it untouched.
        return data
class XFormViewSet(mixins.MultiLookupMixin, viewsets.ReadOnlyModelViewSet):
    """
    List, Retrieve Published Forms.
    Where:
    - `owner` - is the organization or user to which the form(s) belong to.
    - `pk` - is the project id
    - `formid` - is the form id
    ## Get Form Information
    <pre class="prettyprint">
    <b>GET</b> /api/v1/forms/<code>{formid}</code>
    <b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms/<code>{formid}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/forms/28058
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/forms/modilabs/28058",
    >     "formid": 28058,
    >     "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
    >     "id_string": "Birds",
    >     "sms_id_string": "Birds",
    >     "title": "Birds",
    >     "allows_sms": false,
    >     "bamboo_dataset": "",
    >     "description": "",
    >     "downloadable": true,
    >     "encrypted": false,
    >     "is_crowd_form": false,
    >     "owner": "https://formhub.org/api/v1/users/modilabs",
    >     "public": false,
    >     "public_data": false,
    >     "date_created": "2013-07-25T14:14:22.892Z",
    >     "date_modified": "2013-07-25T14:14:22.892Z"
    > }
    ## List Forms
    <pre class="prettyprint">
    <b>GET</b> /api/v1/forms
    <b>GET</b> /api/v1/forms/<code>{owner}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/forms/modilabs
    > Response
    >
    > [{
    >     "url": "https://formhub.org/api/v1/forms/modilabs/28058",
    >     "formid": 28058,
    >     "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
    >     "id_string": "Birds",
    >     "sms_id_string": "Birds",
    >     "title": "Birds",
    >     ...
    > }, ...]
    ## Get `JSON` | `XML` Form Representation
    <pre class="prettyprint">
    <b>GET</b> /api/v1/forms/<code>{owner}</code>/<code>{formid}</code>/form.<code>{format}</code></pre>
    > JSON Example
    >
    > curl -X GET https://formhub.org/api/v1/forms/28058/form.json
    > Response
    >
    > {
    >     "name": "Birds",
    >     "title": "Birds",
    >     "default_language": "default",
    >     "id_string": "Birds",
    >     "type": "survey",
    >     "children": [
    >         {
    >             "type": "text",
    >             "name": "name",
    >             "label": "1. What is your name?"
    >         },
    >         ...
    >     ]
    > }
    > XML Example
    >
    > curl -X GET https://formhub.org/api/v1/forms/modilabs/28058/form.xml
    > Response
    >
    > <?xml version="1.0" encoding="utf-8"?>
    > <h:html xmlns="http://www.w3.org/2002/xforms" ...>
    >     <h:head>
    >         <h:title>Birds</h:title>
    >         <model>
    >             <itext>
    >                 .....
    > </h:body>
    > </h:html>
    """
    # SurveyRenderer is appended so form.xml can be served as raw XML.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [SurveyRenderer]
    queryset = XForm.objects.all()
    serializer_class = api_serializers.XFormSerializer
    lookup_fields = ('owner', 'pk')
    lookup_field = 'owner'

    def get_queryset(self):
        """Forms the requester owns plus forms shared via projects."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests (see UserViewSet note).
            user = User.objects.get(pk=-1)
        user_forms = user.xforms.values('pk')
        project_forms = user.projectxform_set.values('xform')
        return XForm.objects.filter(
            Q(pk__in=user_forms) | Q(pk__in=project_forms))

    @action(methods=['GET'])
    def form(self, request, format=None, **kwargs):
        """Return the form definition as parsed JSON (default) or raw XML."""
        if not format:
            format = 'json'
        self.object = self.get_object()
        if format == 'xml':
            data = self.object.xml
        else:
            data = json.loads(self.object.json)
        return Response(data)

    @action(methods=['GET'])
    def bookmarks(self, request, format=None, **kwargs):
        """Return the form's tag names as a plain list."""
        data = self.get_object()
        return Response(list(data.tags.names()))
class ProjectViewSet(mixins.MultiLookupMixin,
                     mixins.CreateModelMixin,
                     mixins.RetrieveModelMixin,
                     mixins.ListModelMixin, viewsets.GenericViewSet):
    """
    List, Retrieve, Update, Create Project and Project Forms
    Where:
    - `owner` - is the organization to which the project(s) belong to.
    - `pk` - is the project id
    - `formid` - is the form id
    ## Register a new Organization Project
    <pre class="prettyprint">
    <b>POST</b> /api/v1/projects/<code>{owner}</code></pre>
    > Example
    >
    > {
    >     "url": "https://formhub.org/api/v1/projects/modilabs/1",
    >     "owner": "https://formhub.org/api/v1/users/modilabs",
    >     "name": "project 1",
    >     "date_created": "2013-07-24T13:37:39Z",
    >     "date_modified": "2013-07-24T13:37:39Z"
    > }
    ## List of Organization's Projects
    <pre class="prettyprint"><b>GET</b> /api/v1/projects <b>or</b>
    <b>GET</b> /api/v1/projects/<code>{owner}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/projects
    > curl -X GET https://formhub.org/api/v1/projects/modilabs
    > Response
    >
    > [
    >     {
    >         "url": "https://formhub.org/api/v1/projects/modilabs/1",
    >         "owner": "https://formhub.org/api/v1/users/modilabs",
    >         "name": "project 1",
    >         "date_created": "2013-07-24T13:37:39Z",
    >         "date_modified": "2013-07-24T13:37:39Z"
    >     },
    >     {
    >         "url": "https://formhub.org/api/v1/projects/modilabs/4",
    >         "owner": "https://formhub.org/api/v1/users/modilabs",
    >         "name": "project 2",
    >         "date_created": "2013-07-24T13:59:10Z",
    >         "date_modified": "2013-07-24T13:59:10Z"
    >     }, ...
    > ]
    ## Retrieve Project Information
    <pre class="prettyprint">
    <b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/projects/modilabs/1
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/projects/modilabs/1",
    >     "owner": "https://formhub.org/api/v1/users/modilabs",
    >     "name": "project 1",
    >     "date_created": "2013-07-24T13:37:39Z",
    >     "date_modified": "2013-07-24T13:37:39Z"
    > }
    ## Upload XLSForm to a project
    <pre class="prettyprint">
    <b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms</pre>
    > Example
    >
    > curl -X POST -F xls_file=@/path/to/form.xls https://formhub.org/api/v1/projects/modilabs/1/forms
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/forms/28058",
    >     "formid": 28058,
    >     "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
    >     "id_string": "Birds",
    >     "sms_id_string": "Birds",
    >     "title": "Birds",
    >     "allows_sms": false,
    >     "bamboo_dataset": "",
    >     "description": "",
    >     "downloadable": true,
    >     "encrypted": false,
    >     "is_crowd_form": false,
    >     "owner": "modilabs",
    >     "public": false,
    >     "public_data": false,
    >     "date_created": "2013-07-25T14:14:22.892Z",
    >     "date_modified": "2013-07-25T14:14:22.892Z"
    > }
    ## Get Form Information for a project
    <pre class="prettyprint">
    <b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms/<code>{formid}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/projects/modilabs/1/forms/28058
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/forms/28058",
    >     "formid": 28058,
    >     "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
    >     "id_string": "Birds",
    >     "sms_id_string": "Birds",
    >     "title": "Birds",
    >     "allows_sms": false,
    >     "bamboo_dataset": "",
    >     "description": "",
    >     "downloadable": true,
    >     "encrypted": false,
    >     "is_crowd_form": false,
    >     "owner": "modilabs",
    >     "public": false,
    >     "public_data": false,
    >     "date_created": "2013-07-25T14:14:22.892Z",
    >     "date_modified": "2013-07-25T14:14:22.892Z"
    > }
    """
    queryset = Project.objects.all()
    serializer_class = api_serializers.ProjectSerializer
    lookup_fields = ('owner', 'pk')
    lookup_field = 'owner'
    extra_lookup_fields = None

    def get_queryset(self):
        """Projects created by the requesting user."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests (see UserViewSet note).
            user = User.objects.get(pk=-1)
        return user.project_creator.all()

    def get_object(self, queryset=None):
        """Validate that the pk path parameter is numeric, then delegate."""
        pk = self.kwargs.get('pk', None)
        if pk is not None:
            try:
                int(pk)
            except ValueError:
                raise exceptions.ParseError(
                    detail=_(u"The path parameter {pk} "
                            u"should be a number, '%s' given instead." % pk))
        return super(ProjectViewSet, self).get_object(queryset)

    def list(self, request, **kwargs):
        """List projects, optionally narrowed to one organization."""
        filter = {}
        if 'owner' in kwargs:
            filter['organization__username'] = kwargs['owner']
            # filter['created_by'] = request.user
        qs = self.get_queryset()
        qs = self.filter_queryset(qs)
        self.object_list = qs.filter(**filter)
        serializer = self.get_serializer(self.object_list, many=True)
        return Response(serializer.data)

    @action(methods=['POST', 'GET'], extra_lookup_fields=['formid', ])
    def forms(self, request, **kwargs):
        """
        POST - publish xlsform file to a specific project.
        xls_file -- xlsform file object
        """
        project = get_object_or_404(
            Project, pk=kwargs.get('pk', None),
            organization__username=kwargs.get('owner', None))
        if request.method.upper() == 'POST':
            # A non-XForm return value from the publishing tool is an
            # error payload; surface it with a 400.
            survey = utils.publish_project_xform(request, project)
            if isinstance(survey, XForm):
                serializer = api_serializers.XFormSerializer(
                    survey, context={'request': request})
                return Response(serializer.data, status=201)
            return Response(survey, status=400)
        # GET: list the project's forms, or a single one when formid given.
        filter = {'project': project}
        many = True
        if 'formid' in kwargs:
            many = False
            filter['xform__pk'] = int(kwargs.get('formid'))
        if many:
            qs = ProjectXForm.objects.filter(**filter)
            data = [px.xform for px in qs]
        else:
            qs = get_object_or_404(ProjectXForm, **filter)
            data = qs.xform
        serializer = api_serializers.XFormSerializer(
            data, many=many, context={'request': request})
        return Response(serializer.data)
class TeamViewSet(viewsets.ModelViewSet):
    """
    This endpoint allows you to create, update and view team information.
    ## GET List of Teams within an Organization.
    Provides a json list of teams within a specified organization
    and the projects the team is assigned to, where:
    * `org` - is the unique organization name identifier
    <pre class="prettyprint">
    <b>GET</b> /api/v1/teams
    <b>GET</b> /api/v1/teams/<code>{org}</code>
    </pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/teams/bruize
    > Response
    >
    > [
    >     {
    >         "url": "https://formhub.org/api/v1/teams/bruize/1",
    >         "name": "Owners",
    >         "organization": "https://formhub.org/api/v1/users/bruize",
    >         "projects": []
    >     },
    >     {
    >         "url": "https://formhub.org/api/v1/teams/bruize/2",
    >         "name": "demo team",
    >         "organization": "https://formhub.org/api/v1/users/bruize",
    >         "projects": []
    >     }
    > ]
    ## GET Team Info for a specific team.
    Shows teams details and the projects the team is assigned to, where:
    * `org` - is the unique organization name identifier
    * `pk` - unique identifier for the team
    <pre class="prettyprint">
    <b>GET</b> /api/v1/teams/<code>{org}</code>/<code>{pk}</code>
    </pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/teams/bruize/1
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/teams/bruize/1",
    >     "name": "Owners",
    >     "organization": "https://formhub.org/api/v1/users/bruize",
    >     "projects": []
    > }
    """
    queryset = Team.objects.all()
    serializer_class = api_serializers.TeamSerializer
    lookup_fields = ('owner', 'pk')
    lookup_field = 'owner'
    extra_lookup_fields = None

    def get_queryset(self):
        """Teams of every organization the requester has a profile in."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests (see UserViewSet note).
            user = User.objects.get(pk=-1)
        orgs = user.organizationprofile_set.values('user')
        return Team.objects.filter(organization__in=orgs)

    def get_object(self):
        """Resolve a team by the (owner, pk) pair from the URL."""
        # NOTE(review): with `and`, this guard only fires when BOTH kwargs
        # are missing; a request supplying just one of them would fall
        # through and raise KeyError below -- `or` looks intended.
        if 'owner' not in self.kwargs and 'pk' not in self.kwargs:
            raise exceptions.ParseError(
                'Expected URL keyword argument `owner` and `pk`.'
            )
        filter = {
            'organization__username': self.kwargs['owner'],
            'pk': self.kwargs['pk']
        }
        qs = self.filter_queryset(self.get_queryset())
        return get_object_or_404(qs, **filter)

    def list(self, request, **kwargs):
        """List teams, optionally narrowed to one organization."""
        filter = {}
        if 'owner' in kwargs:
            filter['organization__username'] = kwargs['owner']
        qs = self.filter_queryset(self.get_queryset())
        self.object_list = qs.filter(**filter)
        serializer = self.get_serializer(self.object_list, many=True)
        return Response(serializer.data)
class DataList(APIView):
    """
    This endpoint provides access to submitted data in JSON format. Where:
    * `owner` - is organization or user whom the data belongs to
    * `formid` - the form unique identifier
    * `dataid` - submission data unique identifier
    ## GET JSON List of data end points
    This is a json list of the data end points of `owner` forms
    and/or including public forms and forms shared with `owner`.
    <pre class="prettyprint">
    <b>GET</b> /api/v1/data
    <b>GET</b> /api/v1/data/<code>{owner}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/data/modilabs
    > Response
    >
    > {
    >     "dhis2form": "https://formhub.org/api/v1/data/modilabs/4240",
    >     "exp_one": "https://formhub.org/api/v1/data/modilabs/13789",
    >     "userone": "https://formhub.org/api/v1/data/modilabs/10417",
    > }
    ## Get Submitted data for a specific form
    Provides a list of json submitted data for a specific form.
    <pre class="prettyprint">
    <b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/data/modilabs/22845
    > Response
    >
    > [
    >     {
    >         "_id": 4503,
    >         "_bamboo_dataset_id": "",
    >         "_deleted_at": null,
    >         "expense_type": "service",
    >         "_xform_id_string": "exp",
    >         "_geolocation": [
    >             null,
    >             null
    >         ],
    >         "end": "2013-01-03T10:26:25.674+03",
    >         "start": "2013-01-03T10:25:17.409+03",
    >         "expense_date": "2011-12-23",
    >         "_status": "submitted_via_web",
    >         "today": "2013-01-03",
    >         "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
    >         "imei": "351746052013466",
    >         "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
    >         "kind": "monthly",
    >         "_submission_time": "2013-01-03T02:27:19",
    >         "required": "yes",
    >         "_attachments": [],
    >         "item": "Rent",
    >         "amount": "35000.0",
    >         "deviceid": "351746052013466",
    >         "subscriberid": "639027...60317"
    >     },
    >     {
    >         ....
    >         "subscriberid": "639027...60317"
    >     }
    > ]
    ## Get a single data submission for a given form
    Get a single specific submission json data providing `formid`
    and `dataid` as url path parameters, where:
    * `owner` - is organization or user whom the data belongs to
    * `formid` - is the identifying number for a specific form
    * `dataid` - is the unique id of the data, the value of `_id` or `_uuid`
    <pre class="prettyprint">
    <b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code>/<code>{dataid}</code></pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/data/modilabs/22845/4503
    > Response
    >
    > {
    >     "_id": 4503,
    >     "_bamboo_dataset_id": "",
    >     "_deleted_at": null,
    >     "expense_type": "service",
    >     "_xform_id_string": "exp",
    >     "_geolocation": [
    >         null,
    >         null
    >     ],
    >     "end": "2013-01-03T10:26:25.674+03",
    >     "start": "2013-01-03T10:25:17.409+03",
    >     "expense_date": "2011-12-23",
    >     "_status": "submitted_via_web",
    >     "today": "2013-01-03",
    >     "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
    >     "imei": "351746052013466",
    >     "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
    >     "kind": "monthly",
    >     "_submission_time": "2013-01-03T02:27:19",
    >     "required": "yes",
    >     "_attachments": [],
    >     "item": "Rent",
    >     "amount": "35000.0",
    >     "deviceid": "351746052013466",
    >     "subscriberid": "639027...60317"
    > },
    > {
    >     ....
    >     "subscriberid": "639027...60317"
    > }
    > ]
    """
    queryset = Instance.objects.all()

    def _get_formlist_data_points(self, request, owner=None):
        """Map each visible form's id_string to its data-list URL."""
        xforms = []
        # list public points incase anonymous user
        if request.user.is_anonymous():
            xforms = XForm.public_forms().order_by('?')[:10]
        else:
            xforms = XForm.objects.filter(user__username=owner)
        rs = {}
        for xform in xforms:
            point = {u"%s" % xform.id_string:
                     reverse("data-list", kwargs={
                         "formid": xform.pk,
                         "owner": xform.user.username},
                         request=request)}
            rs.update(point)
        return rs

    def _get_form_data(self, xform, **kwargs):
        """Run a mongo query for the form and return the records as a list.

        kwargs may carry `query`, `fields` and `sort` (JSON strings or None).
        """
        margs = {
            'username': xform.user.username,
            'id_string': xform.id_string,
            'query': kwargs.get('query', None),
            'fields': kwargs.get('fields', None),
            'sort': kwargs.get('sort', None)
        }
        # TODO: Possibly add "url" field to all data records
        cursor = ParsedInstance.query_mongo(**margs)
        records = list(record for record in cursor)
        return records

    def get(self, request, owner=None, formid=None, dataid=None, **kwargs):
        """
        Display submission data.
        If no parameter is given, it displays a dictionary of public data urls.
        formid - primary key for the form
        dataid - primary key for the data submission
        """
        data = None
        xform = None
        query = None
        if owner is None and not request.user.is_anonymous():
            owner = request.user.username
        if not formid and not dataid:
            # No form requested: return the dictionary of data endpoints.
            data = self._get_formlist_data_points(request, owner)
        if formid:
            xform = check_and_set_form_by_id(int(formid), request)
            if not xform:
                raise exceptions.PermissionDenied(
                    _("You do not have permission to "
                      "view data from this form."))
        if xform and dataid and dataid == 'bookmarks':
            # Special dataid: return the form's tag names instead of data.
            return Response(list(xform.tags.names()))
        if xform and dataid:
            # Single submission: select by mongo _id.
            query = json.dumps({'_id': int(dataid)})
        rquery = request.QUERY_PARAMS.get('query', None)
        if rquery:
            # Merge the caller's query with the dataid filter (if any) and
            # rewrite a `_tags` list into a mongo $all match.
            rquery = json.loads(rquery)
            if query:
                rquery.update(json.loads(query))
            tags = rquery.get('_tags', None)
            if tags and isinstance(tags, list):
                rquery['_tags'] = {'$all': tags}
            query = json.dumps(rquery)
        if xform:
            data = self._get_form_data(xform, query=query)
        if dataid and len(data):
            # A dataid addresses exactly one record; unwrap the list.
            data = data[0]
        return Response(data)
# --- stray commit message from concatenation: "updated docs for query and tags"
import json
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from rest_framework import viewsets
from rest_framework import exceptions
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.decorators import action
from rest_framework.settings import api_settings
from rest_framework.renderers import BaseRenderer
from api import serializers as api_serializers
from api import mixins
from api import tools as utils
from utils.user_auth import check_and_set_form_by_id
from main.models import UserProfile
from odk_logger.models import XForm, Instance
from odk_viewer.models import ParsedInstance
from api.models import Project, OrganizationProfile, ProjectXForm, Team
# NOTE(review): this class (and the surrounding module section) duplicates the
# definition earlier in this file -- likely two concatenated revisions.  At
# import time this later definition rebinds the name; confirm and deduplicate.
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    This endpoint allows you to list and retrieve user's first and last names.
    ## List Users
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/users
    > Response:
    > [
    >     {
    >         "username": "demo",
    >         "first_name": "First",
    >         "last_name": "Last"
    >     },
    >     {
    >         "username": "another_demo",
    >         "first_name": "Another",
    >         "last_name": "Demo"
    >     },
    >     ...
    > ]
    ## Retrieve a specific user info
    <pre class="prettyprint"><b>GET</b> /api/v1/users/{username}</pre>
    > Example:
    >
    > curl -X GET https://formhub.org/api/v1/users/demo
    > Response:
    >
    > {
    >     "username": "demo",
    >     "first_name": "First",
    >     "last_name": "Last"
    > }
    """
    queryset = User.objects.all()
    serializer_class = api_serializers.UserSerializer
    lookup_field = 'username'  # address users by username, not pk
    permission_classes = [permissions.DjangoModelPermissions, ]

    def get_queryset(self):
        """Limit the listing to the requester plus users whose profiles
        the requester can see."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel pk -1 fallback; User.objects.get(pk=-1) raises
            # DoesNotExist unless such a row exists -- confirm.
            user = User.objects.get(pk=-1)
        return User.objects.filter(
            Q(pk__in=user.userprofile_set.values('user')) | Q(pk=user.pk))
# NOTE(review): duplicate of the UserProfileViewSet defined earlier in this
# file (concatenated revisions); this later definition wins at import time.
class UserProfileViewSet(mixins.ObjectLookupMixin, viewsets.ModelViewSet):
    """
    List, Retrieve, Update, Create/Register users.
    ## Register a new User
    <pre class="prettyprint"><b>POST</b> /api/v1/profiles</pre>
    > Example
    >
    > {
    >     "username": "demo",
    >     "name": "Demo User",
    >     "email": "demo@localhost.com",
    >     "city": "Kisumu",
    >     "country": "KE",
    >     ...
    > }
    ## List User Profiles
    <pre class="prettyprint"><b>GET</b> /api/v1/profiles</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/profiles
    > Response
    >
    > [
    >     {
    >         "url": "https://formhub.org/api/v1/profiles/demo",
    >         "username": "demo",
    >         "name": "Demo User",
    >         "email": "demo@localhost.com",
    >         "city": "",
    >         "country": "",
    >         "organization": "",
    >         "website": "",
    >         "twitter": "",
    >         "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >         "require_auth": false,
    >         "user": "https://formhub.org/api/v1/users/demo"
    >     },
    >     {
    >         ...}, ...
    > ]
    ## Retrieve User Profile Information
    <pre class="prettyprint"><b>GET</b> /api/v1/profiles/{username}</pre>
    > Example
    >
    > curl -X GET https://formhub.org/api/v1/profiles/demo
    > Response
    >
    > {
    >     "url": "https://formhub.org/api/v1/profiles/demo",
    >     "username": "demo",
    >     "name": "Demo User",
    >     "email": "demo@localhost.com",
    >     "city": "",
    >     "country": "",
    >     "organization": "",
    >     "website": "",
    >     "twitter": "",
    >     "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
    >     "require_auth": false,
    >     "user": "https://formhub.org/api/v1/users/demo"
    > }
    """
    queryset = UserProfile.objects.all()
    serializer_class = api_serializers.UserProfileSerializer
    lookup_field = 'user'  # profiles are addressed by their user
    permission_classes = [permissions.DjangoModelPermissions, ]
    ordering = ('user__username', )

    def get_queryset(self):
        """Limit the listing to the requester's own profile plus profiles
        visible through the requester's userprofile_set."""
        user = self.request.user
        if user.is_anonymous():
            # Sentinel user for anonymous requests.
            user = User.objects.get(pk=-1)
        return UserProfile.objects.filter(
            Q(user__in=user.userprofile_set.values('user')) | Q(user=user))
class OrgProfileViewSet(mixins.ObjectLookupMixin, viewsets.ModelViewSet):
"""
List, Retrieve, Update, Create/Register Organizations
## Register a new Organization
<pre class="prettyprint"><b>POST</b> /api/v1/orgs</pre>
> Example
>
> {
> "org": "modilabs",
> "name": "Modi Labs Research",
> "email": "modilabs@localhost.com",
> "city": "New York",
> "country": "US",
> ...
> }
## List of Organizations
<pre class="prettyprint"><b>GET</b> /api/v1/orgs</pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/orgs
> Response
>
> [
> {
> "url": "https://formhub.org/api/v1/orgs/modilabs",
> "org": "modilabs",
> "name": "Modi Labs Research",
> "email": "modilabs@localhost.com",
> "city": "New York",
> "country": "US",
> "website": "",
> "twitter": "",
> "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
> "require_auth": false,
> "user": "https://formhub.org/api/v1/users/modilabs"
> "creator": "https://formhub.org/api/v1/users/demo"
> },
> {
> ...}, ...
> ]
## Retrieve Organization Profile Information
<pre class="prettyprint"><b>GET</b> /api/v1/orgs/{username}</pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/orgs/modilabs
> Response
>
> {
> "url": "https://formhub.org/api/v1/orgs/modilabs",
> "org": "modilabs",
> "name": "Modi Labs Research",
> "email": "modilabs@localhost.com",
> "city": "New York",
> "country": "US",
> "website": "",
> "twitter": "",
> "gravatar": "https://secure.gravatar.com/avatar/xxxxxx",
> "require_auth": false,
> "user": "https://formhub.org/api/v1/users/modilabs"
> "creator": "https://formhub.org/api/v1/users/demo"
> }
"""
queryset = OrganizationProfile.objects.all()
serializer_class = api_serializers.OrganizationSerializer
lookup_field = 'user'
    def get_queryset(self):
        """Return organization profiles associated with the requesting user.

        Anonymous users are swapped for the sentinel user (pk=-1).
        """
        user = self.request.user
        if user.is_anonymous():
            user = User.objects.get(pk=-1)
        return user.organizationprofile_set.all()
class SurveyRenderer(BaseRenderer):
    """Pass-through DRF renderer advertising ``application/xml``.

    ``render`` returns ``data`` unchanged, so the view is expected to
    supply an already-serialized XML string (see ``XFormViewSet.form``).
    """
    media_type = 'application/xml'
    format = 'xml'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Data is already a string — emit it as-is.
        return data
class XFormViewSet(mixins.MultiLookupMixin, viewsets.ReadOnlyModelViewSet):
"""
List, Retrieve Published Forms.
Where:
- `owner` - is the organization or user to which the form(s) belong to.
- `pk` - is the project id
- `formid` - is the form id
## Get Form Information
<pre class="prettyprint">
<b>GET</b> /api/v1/forms/<code>{formid}</code>
<b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms/<code>{formid}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/forms/28058
> Response
>
> {
> "url": "https://formhub.org/api/v1/forms/modilabs/28058",
> "formid": 28058,
> "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
> "id_string": "Birds",
> "sms_id_string": "Birds",
> "title": "Birds",
> "allows_sms": false,
> "bamboo_dataset": "",
> "description": "",
> "downloadable": true,
> "encrypted": false,
> "is_crowd_form": false,
> "owner": "https://formhub.org/api/v1/users/modilabs",
> "public": false,
> "public_data": false,
> "date_created": "2013-07-25T14:14:22.892Z",
> "date_modified": "2013-07-25T14:14:22.892Z"
> }
## List Forms
<pre class="prettyprint">
<b>GET</b> /api/v1/forms
<b>GET</b> /api/v1/forms/<code>{owner}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/forms/modilabs
> Response
>
> [{
> "url": "https://formhub.org/api/v1/forms/modilabs/28058",
> "formid": 28058,
> "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
> "id_string": "Birds",
> "sms_id_string": "Birds",
> "title": "Birds",
> ...
> }, ...]
## Get `JSON` | `XML` Form Representation
<pre class="prettyprint">
<b>GET</b> /api/v1/forms/<code>{owner}</code>/<code>{formid}</code>/form.<code>{format}</code></pre>
> JSON Example
>
> curl -X GET https://formhub.org/api/v1/forms/28058/form.json
> Response
>
> {
> "name": "Birds",
> "title": "Birds",
> "default_language": "default",
> "id_string": "Birds",
> "type": "survey",
> "children": [
> {
> "type": "text",
> "name": "name",
> "label": "1. What is your name?"
> },
> ...
> ]
> }
> XML Example
>
> curl -X GET https://formhub.org/api/v1/forms/modilabs/28058/form.xml
> Response
>
> <?xml version="1.0" encoding="utf-8"?>
> <h:html xmlns="http://www.w3.org/2002/xforms" ...>
> <h:head>
> <h:title>Birds</h:title>
> <model>
> <itext>
> .....
> </h:body>
> </h:html>
## Get list of Tags for a specific Form
<pre class="prettyprint">
<b>GET</b> /api/v1/forms/<code>{owner}</code>/<code>{formid}</code>/bookmarks</pre>
> Request
>
> curl -X GET https://formhub.org/api/v1/forms/28058/bookmarks
> Response
>
> ["old", "smart", "clean house"]
"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [SurveyRenderer]
queryset = XForm.objects.all()
serializer_class = api_serializers.XFormSerializer
lookup_fields = ('owner', 'pk')
lookup_field = 'owner'
    def get_queryset(self):
        """Return forms the user owns or can reach through a project.

        Anonymous users are swapped for the sentinel user (pk=-1).
        """
        user = self.request.user
        if user.is_anonymous():
            user = User.objects.get(pk=-1)
        # Forms owned directly by the user...
        user_forms = user.xforms.values('pk')
        # ...plus forms attached to the user's projects.
        project_forms = user.projectxform_set.values('xform')
        return XForm.objects.filter(
            Q(pk__in=user_forms) | Q(pk__in=project_forms))
    @action(methods=['GET'])
    def form(self, request, format=None, **kwargs):
        """Return the form definition, as JSON (default) or raw XML."""
        if not format:
            format = 'json'
        self.object = self.get_object()
        if format == 'xml':
            # Raw XForm XML string; rendered verbatim by SurveyRenderer.
            data = self.object.xml
        else:
            # The ``json`` attribute is a JSON string; decode it so DRF
            # re-serializes it as a structured response.
            data = json.loads(self.object.json)
        return Response(data)
    @action(methods=['GET'])
    def bookmarks(self, request, format=None, **kwargs):
        """Return the list of tag names attached to the form."""
        data = self.get_object()
        return Response(list(data.tags.names()))
class ProjectViewSet(mixins.MultiLookupMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin, viewsets.GenericViewSet):
"""
List, Retrieve, Update, Create Project and Project Forms
Where:
- `owner` - is the organization to which the project(s) belong to.
- `pk` - is the project id
- `formid` - is the form id
## Register a new Organization Project
<pre class="prettyprint">
<b>POST</b> /api/v1/projects/<code>{owner}</code></pre>
> Example
>
> {
> "url": "https://formhub.org/api/v1/projects/modilabs/1",
> "owner": "https://formhub.org/api/v1/users/modilabs",
> "name": "project 1",
> "date_created": "2013-07-24T13:37:39Z",
> "date_modified": "2013-07-24T13:37:39Z"
> }
## List of Organization's Projects
<pre class="prettyprint"><b>GET</b> /api/v1/projects <b>or</b>
<b>GET</b> /api/v1/projects/<code>{owner}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/projects
> curl -X GET https://formhub.org/api/v1/projects/modilabs
> Response
>
> [
> {
> "url": "https://formhub.org/api/v1/projects/modilabs/1",
> "owner": "https://formhub.org/api/v1/users/modilabs",
> "name": "project 1",
> "date_created": "2013-07-24T13:37:39Z",
> "date_modified": "2013-07-24T13:37:39Z"
> },
> {
> "url": "https://formhub.org/api/v1/projects/modilabs/4",
> "owner": "https://formhub.org/api/v1/users/modilabs",
> "name": "project 2",
> "date_created": "2013-07-24T13:59:10Z",
> "date_modified": "2013-07-24T13:59:10Z"
> }, ...
> ]
## Retrieve Project Information
<pre class="prettyprint">
<b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/projects/modilabs/1
> Response
>
> {
> "url": "https://formhub.org/api/v1/projects/modilabs/1",
> "owner": "https://formhub.org/api/v1/users/modilabs",
> "name": "project 1",
> "date_created": "2013-07-24T13:37:39Z",
> "date_modified": "2013-07-24T13:37:39Z"
> }
## Upload XLSForm to a project
<pre class="prettyprint">
<b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms</pre>
> Example
>
> curl -X POST -F xls_file=@/path/to/form.xls https://formhub.org/api/v1/projects/modilabs/1/forms
> Response
>
> {
> "url": "https://formhub.org/api/v1/forms/28058",
> "formid": 28058,
> "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
> "id_string": "Birds",
> "sms_id_string": "Birds",
> "title": "Birds",
> "allows_sms": false,
> "bamboo_dataset": "",
> "description": "",
> "downloadable": true,
> "encrypted": false,
> "is_crowd_form": false,
> "owner": "modilabs",
> "public": false,
> "public_data": false,
> "date_created": "2013-07-25T14:14:22.892Z",
> "date_modified": "2013-07-25T14:14:22.892Z"
> }
## Get Form Information for a project
<pre class="prettyprint">
<b>GET</b> /api/v1/projects/<code>{owner}</code>/<code>{pk}</code>/forms/<code>{formid}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/projects/modilabs/1/forms/28058
> Response
>
> {
> "url": "https://formhub.org/api/v1/forms/28058",
> "formid": 28058,
> "uuid": "853196d7d0a74bca9ecfadbf7e2f5c1f",
> "id_string": "Birds",
> "sms_id_string": "Birds",
> "title": "Birds",
> "allows_sms": false,
> "bamboo_dataset": "",
> "description": "",
> "downloadable": true,
> "encrypted": false,
> "is_crowd_form": false,
> "owner": "modilabs",
> "public": false,
> "public_data": false,
> "date_created": "2013-07-25T14:14:22.892Z",
> "date_modified": "2013-07-25T14:14:22.892Z"
> }
"""
queryset = Project.objects.all()
serializer_class = api_serializers.ProjectSerializer
lookup_fields = ('owner', 'pk')
lookup_field = 'owner'
extra_lookup_fields = None
    def get_queryset(self):
        """Return projects created by the requesting user (pk=-1 sentinel
        when anonymous)."""
        user = self.request.user
        if user.is_anonymous():
            user = User.objects.get(pk=-1)
        return user.project_creator.all()
    def get_object(self, queryset=None):
        """Fetch a project, validating that the ``pk`` URL kwarg is numeric.

        Raises a ParseError (HTTP 400) for a non-numeric pk instead of
        letting the lookup fail further down.
        """
        pk = self.kwargs.get('pk', None)
        if pk is not None:
            try:
                int(pk)
            except ValueError:
                raise exceptions.ParseError(
                    detail=_(u"The path parameter {pk} "
                             u"should be a number, '%s' given instead." % pk))
        return super(ProjectViewSet, self).get_object(queryset)
    def list(self, request, **kwargs):
        """List projects, optionally narrowed to one organization (``owner``)."""
        filter = {}
        if 'owner' in kwargs:
            filter['organization__username'] = kwargs['owner']
            # filter['created_by'] = request.user
        qs = self.get_queryset()
        qs = self.filter_queryset(qs)
        self.object_list = qs.filter(**filter)
        serializer = self.get_serializer(self.object_list, many=True)
        return Response(serializer.data)
    @action(methods=['POST', 'GET'], extra_lookup_fields=['formid', ])
    def forms(self, request, **kwargs):
        """
        POST - publish xlsform file to a specific project.
        GET  - list the project's forms, or one form when ``formid`` is given.

        xls_file -- xlsform file object
        """
        project = get_object_or_404(
            Project, pk=kwargs.get('pk', None),
            organization__username=kwargs.get('owner', None))
        if request.method.upper() == 'POST':
            survey = utils.publish_project_xform(request, project)
            if isinstance(survey, XForm):
                serializer = api_serializers.XFormSerializer(
                    survey, context={'request': request})
                return Response(serializer.data, status=201)
            # Publishing failed: ``survey`` carries the error payload.
            return Response(survey, status=400)
        filter = {'project': project}
        many = True
        if 'formid' in kwargs:
            # Single-form lookup instead of a listing.
            many = False
            filter['xform__pk'] = int(kwargs.get('formid'))
        if many:
            qs = ProjectXForm.objects.filter(**filter)
            data = [px.xform for px in qs]
        else:
            qs = get_object_or_404(ProjectXForm, **filter)
            data = qs.xform
        serializer = api_serializers.XFormSerializer(
            data, many=many, context={'request': request})
        return Response(serializer.data)
class TeamViewSet(viewsets.ModelViewSet):
"""
This endpoint allows you to create, update and view team information.
## GET List of Teams within an Organization.
Provides a json list of teams within a specified organization
and the projects the team is assigned to, where:
* `org` - is the unique organization name identifier
<pre class="prettyprint">
<b>GET</b> /api/v1/teams
<b>GET</b> /api/v1/teams/<code>{org}</code>
</pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/teams/bruize
> Response
>
> [
> {
> "url": "https://formhub.org/api/v1/teams/bruize/1",
> "name": "Owners",
> "organization": "https://formhub.org/api/v1/users/bruize",
> "projects": []
> },
> {
> "url": "https://formhub.org/api/v1/teams/bruize/2",
> "name": "demo team",
> "organization": "https://formhub.org/api/v1/users/bruize",
> "projects": []
> }
> ]
## GET Team Info for a specific team.
Shows teams details and the projects the team is assigned to, where:
* `org` - is the unique organization name identifier
* `pk` - unique identifier for the team
<pre class="prettyprint">
<b>GET</b> /api/v1/teams/<code>{org}</code>/<code>{pk}</code>
</pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/teams/bruize/1
> Response
>
> {
> "url": "https://formhub.org/api/v1/teams/bruize/1",
> "name": "Owners",
> "organization": "https://formhub.org/api/v1/users/bruize",
> "projects": []
> }
"""
queryset = Team.objects.all()
serializer_class = api_serializers.TeamSerializer
lookup_fields = ('owner', 'pk')
lookup_field = 'owner'
extra_lookup_fields = None
    def get_queryset(self):
        """Return teams of organizations the requesting user belongs to."""
        user = self.request.user
        if user.is_anonymous():
            user = User.objects.get(pk=-1)
        orgs = user.organizationprofile_set.values('user')
        return Team.objects.filter(organization__in=orgs)
def get_object(self):
if 'owner' not in self.kwargs and 'pk' not in self.kwargs:
raise exceptions.ParseError(
'Expected URL keyword argument `owner` and `pk`.'
)
filter = {
'organization__username': self.kwargs['owner'],
'pk': self.kwargs['pk']
}
qs = self.filter_queryset(self.get_queryset())
return get_object_or_404(qs, **filter)
    def list(self, request, **kwargs):
        """List teams, optionally narrowed to one organization (``owner``)."""
        filter = {}
        if 'owner' in kwargs:
            filter['organization__username'] = kwargs['owner']
        qs = self.filter_queryset(self.get_queryset())
        self.object_list = qs.filter(**filter)
        serializer = self.get_serializer(self.object_list, many=True)
        return Response(serializer.data)
class DataList(APIView):
"""
This endpoint provides access to submitted data in JSON format. Where:
* `owner` - is organization or user whom the data belongs to
* `formid` - the form unique identifier
* `dataid` - submission data unique identifier
## GET JSON List of data end points
This is a json list of the data end points of `owner` forms
and/or including public forms and forms shared with `owner`.
<pre class="prettyprint">
<b>GET</b> /api/v1/data
<b>GET</b> /api/v1/data/<code>{owner}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/data/modilabs
> Response
>
> {
> "dhis2form": "https://formhub.org/api/v1/data/modilabs/4240",
> "exp_one": "https://formhub.org/api/v1/data/modilabs/13789",
> "userone": "https://formhub.org/api/v1/data/modilabs/10417",
> }
## Get Submitted data for a specific form
Provides a list of json submitted data for a specific form.
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/data/modilabs/22845
> Response
>
> [
> {
> "_id": 4503,
> "_bamboo_dataset_id": "",
> "_deleted_at": null,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
> },
> {
> ....
> "subscriberid": "639027...60317"
> }
> ]
## Get a single data submission for a given form
Get a single specific submission json data providing `formid`
and `dataid` as url path parameters, where:
* `owner` - is organization or user whom the data belongs to
* `formid` - is the identifying number for a specific form
* `dataid` - is the unique id of the data, the value of `_id` or `_uuid`
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code>/<code>{dataid}</code></pre>
> Example
>
> curl -X GET https://formhub.org/api/v1/data/modilabs/22845/4503
> Response
>
> {
> "_id": 4503,
> "_bamboo_dataset_id": "",
> "_deleted_at": null,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
> },
> {
> ....
> "subscriberid": "639027...60317"
> }
> ]
## Query submitted data of a specific form
Provides a list of json submitted data for a specific form. Use `query`
parameter to apply form data specific, see
<a href="http://www.mongodb.org/display/DOCS/Querying.">
http://www.mongodb.org/display/DOCS/Querying</a>.
For more details see
<a href="https://github.com/modilabs/formhub/wiki/Formhub-Access-Points-(API)#api-parameters">
API Parameters</a>.
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code>?query={"field":"value"}</pre>
> Example
>
> curl -X GET
> https://formhub.org/api/v1/data/modilabs/22845?query={"kind": "monthly"}
> Response
>
> [
> {
> "_id": 4503,
> "_bamboo_dataset_id": "",
> "_deleted_at": null,
> "expense_type": "service",
> "_xform_id_string": "exp",
> "_geolocation": [
> null,
> null
> ],
> "end": "2013-01-03T10:26:25.674+03",
> "start": "2013-01-03T10:25:17.409+03",
> "expense_date": "2011-12-23",
> "_status": "submitted_via_web",
> "today": "2013-01-03",
> "_uuid": "2e599f6fe0de42d3a1417fb7d821c859",
> "imei": "351746052013466",
> "formhub/uuid": "46ea15e2b8134624a47e2c4b77eef0d4",
> "kind": "monthly",
> "_submission_time": "2013-01-03T02:27:19",
> "required": "yes",
> "_attachments": [],
> "item": "Rent",
> "amount": "35000.0",
> "deviceid": "351746052013466",
> "subscriberid": "639027...60317"
> },
> {
> ....
> "subscriberid": "639027...60317"
> }
> ]
## Query submitted data of a specific form using Tags
Provides a list of json submitted data for a specific form matching specific
tags. Use `query` parameter to apply form data specific. To filter by tags the
`query` should be `?query={"_tags": ["monthly", "sunny"]}`.
The `_tags` should be a list, for one item for example
`?query={"_tags": ["monthly"]}`.
<pre class="prettyprint">
<b>GET</b> /api/v1/data/<code>{owner}</code>/<code>{formid}</code>?query={"_tags":["tag1", "tag2"]}</pre>
> Example
>
> curl -X GET
> https://formhub.org/api/v1/data/modilabs/22845?query={"_tags": ["monthly"]}
"""
queryset = Instance.objects.all()
    def _get_formlist_data_points(self, request, owner=None):
        """Map form id_strings to their data-list endpoint URLs.

        Anonymous users get a random sample of up to 10 public forms;
        authenticated requests list ``owner``'s forms.
        """
        xforms = []
        # list public points incase anonymous user
        if request.user.is_anonymous():
            xforms = XForm.public_forms().order_by('?')[:10]
        else:
            xforms = XForm.objects.filter(user__username=owner)
        rs = {}
        for xform in xforms:
            # NOTE: forms sharing an id_string overwrite each other here.
            point = {u"%s" % xform.id_string:
                     reverse("data-list", kwargs={
                         "formid": xform.pk,
                         "owner": xform.user.username},
                         request=request)}
            rs.update(point)
        return rs
def _get_form_data(self, xform, **kwargs):
margs = {
'username': xform.user.username,
'id_string': xform.id_string,
'query': kwargs.get('query', None),
'fields': kwargs.get('fields', None),
'sort': kwargs.get('sort', None)
}
# TODO: Possibly add "url" field to all data records
cursor = ParsedInstance.query_mongo(**margs)
records = list(record for record in cursor)
return records
    def get(self, request, owner=None, formid=None, dataid=None, **kwargs):
        """
        Display submission data.

        If no parameter is given, it displays a dictionary of public data urls.

        formid - primary key for the form
        dataid - primary key for the data submission
        """
        data = None
        xform = None
        query = None
        # Default the owner to the requesting user when authenticated.
        if owner is None and not request.user.is_anonymous():
            owner = request.user.username
        if not formid and not dataid:
            # No form requested: return the dictionary of data endpoints.
            data = self._get_formlist_data_points(request, owner)
        if formid:
            xform = check_and_set_form_by_id(int(formid), request)
            if not xform:
                raise exceptions.PermissionDenied(
                    _("You do not have permission to "
                      "view data from this form."))
        # Special case: /data/{owner}/{formid}/bookmarks lists the form's tags.
        if xform and dataid and dataid == 'bookmarks':
            return Response(list(xform.tags.names()))
        if xform and dataid:
            # Narrow the mongo query to the single requested submission.
            query = json.dumps({'_id': int(dataid)})
        rquery = request.QUERY_PARAMS.get('query', None)
        if rquery:
            # Merge the caller's ?query=... JSON with the dataid filter.
            rquery = json.loads(rquery)
            if query:
                rquery.update(json.loads(query))
            # A list under "_tags" becomes a mongo $all match.
            tags = rquery.get('_tags', None)
            if tags and isinstance(tags, list):
                rquery['_tags'] = {'$all': tags}
            query = json.dumps(rquery)
        if xform:
            data = self._get_form_data(xform, query=query)
        if dataid and len(data):
            # Single-submission request: unwrap the one-element list.
            data = data[0]
        return Response(data)
|
# from django.shortcuts import render
# Create your views here.
from django.contrib.auth.models import User
from rest_framework import viewsets, mixins, permissions, exceptions
from .models import Notebook, Note, Task
from .serializers import UserSerializer, NotebookSerializer, NoteSerializer, TaskSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user endpoint: staff may list everyone, other users see
    only themselves."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare primary keys, not instances: request.user may be a
            # lazy wrapper, so instance equality against the looked-up
            # User object is fragile.
            return request.user.id == obj.id or request.user.is_staff

    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def get_queryset(self):
        if self.action == 'retrieve':
            # Object-level permission above decides per-user access.
            return self.queryset
        else:
            # list
            if self.request.user.is_staff:
                return self.queryset
            else:
                return self.queryset.filter(id=self.request.user.id)
class NotebookViewSet(mixins.CreateModelMixin,
                      mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin,
                      mixins.DestroyModelMixin,
                      viewsets.GenericViewSet):
    """CRUD for notebooks; owners have full access, staff read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare ids: obj.user_id avoids both an extra query and the
            # pitfalls of comparing a lazy request.user to an instance.
            return request.user.id == obj.user_id or \
                (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Notebook.objects.all()
    serializer_class = NotebookSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def perform_create(self, serializer):
        # Force ownership to the requesting user; clients cannot create
        # notebooks on someone else's behalf.
        serializer.save(user=self.request.user)
class NoteViewSet(mixins.CreateModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """CRUD for notes; the owning notebook's user has full access, staff
    read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # id comparison instead of instance comparison; notebook.user_id
            # skips loading the related User row.
            return request.user.id == obj.notebook.user_id or \
                (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Note.objects.all()
    serializer_class = NoteSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def _validate_notebook(self, serializer):
        # Reject writes that target another user's notebook.
        if serializer.validated_data['notebook'].user_id != self.request.user.id:
            raise exceptions.PermissionDenied()

    def perform_create(self, serializer):
        self._validate_notebook(serializer)
        serializer.save()

    def perform_update(self, serializer):
        self._validate_notebook(serializer)
        serializer.save()
class TaskViewSet(mixins.CreateModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """CRUD for tasks; owners have full access, staff read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare ids, not instances (request.user may be lazy).
            return request.user.id == obj.user_id or (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def perform_create(self, serializer):
        # Force ownership to the requesting user.
        serializer.save(user=self.request.user)
match user ids, not instances
# from django.shortcuts import render
# Create your views here.
from django.contrib.auth.models import User
from rest_framework import viewsets, mixins, permissions, exceptions
from .models import Notebook, Note, Task
from .serializers import UserSerializer, NotebookSerializer, NoteSerializer, TaskSerializer
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user endpoint: staff may list everyone, other users see
    only themselves."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # id comparison is robust against request.user being a lazy wrapper.
            return request.user.id == obj.id or request.user.is_staff

    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def get_queryset(self):
        if self.action == 'retrieve':
            # Object-level permission above decides per-user access.
            return self.queryset
        else:
            # list
            if self.request.user.is_staff:
                return self.queryset
            else:
                return self.queryset.filter(id=self.request.user.id)
class NotebookViewSet(mixins.CreateModelMixin,
                      mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin,
                      mixins.DestroyModelMixin,
                      viewsets.GenericViewSet):
    """CRUD for notebooks; owners have full access, staff read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare ids; obj.user_id avoids loading the related User row.
            return request.user.id == obj.user_id or \
                (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Notebook.objects.all()
    serializer_class = NotebookSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def perform_create(self, serializer):
        # Force ownership to the requesting user.
        serializer.save(user=self.request.user)
class NoteViewSet(mixins.CreateModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """CRUD for notes; the owning notebook's user has full access, staff
    read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare ids through the notebook relation.
            return request.user.id == obj.notebook.user_id or \
                (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Note.objects.all()
    serializer_class = NoteSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def _validate_notebook(self, serializer):
        # Reject writes that target another user's notebook.
        if serializer.validated_data['notebook'].user_id != self.request.user.id:
            raise exceptions.PermissionDenied("can not add note to another user's notebook")

    def perform_create(self, serializer):
        self._validate_notebook(serializer)
        serializer.save()

    def perform_update(self, serializer):
        self._validate_notebook(serializer)
        serializer.save()
class TaskViewSet(mixins.CreateModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin,
                  viewsets.GenericViewSet):
    """CRUD for tasks; owners have full access, staff read-only."""

    class Permissions(permissions.BasePermission):
        def has_object_permission(self, request, view, obj):
            # Compare ids, not instances.
            return request.user.id == obj.user_id or (request.user.is_staff and request.method in permissions.SAFE_METHODS)

    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    permission_classes = (permissions.IsAuthenticated, Permissions)

    def perform_create(self, serializer):
        # Force ownership to the requesting user.
        serializer.save(user=self.request.user)
|
import re
import json
from decimal import Decimal
from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
re_id = re.compile('id="([^"]+)"')
class DateWidget(forms.TextInput):
    """Text input styled as an admin date field.

    Fixes two defects in the original: ``attrs={}`` was a mutable default
    argument, and the caller-supplied ``attrs`` was silently discarded.
    Caller attrs are now merged over the defaults.
    """

    def __init__(self, attrs=None):
        defaults = {'class': 'vDateField', 'size': '10'}
        if attrs:
            defaults.update(attrs)
        super(DateWidget, self).__init__(attrs=defaults)
class TimeWidget(forms.TextInput):
    """Text input styled as an admin time field.

    Fixes two defects in the original: ``attrs={}`` was a mutable default
    argument, and the caller-supplied ``attrs`` was silently discarded.
    Caller attrs are now merged over the defaults.
    """

    def __init__(self, attrs=None):
        defaults = {'class': 'vTimeField', 'size': '8'}
        if attrs:
            defaults.update(attrs)
        super(TimeWidget, self).__init__(attrs=defaults)
class ToggleBillableWidget(forms.Select):
    """Select widget that keeps the ``#id_billable`` checkbox in sync with
    the billable flag of the selected project.

    ``billable`` maps project id -> billable boolean; it is embedded as
    JSON in an inline script appended to the rendered select.
    """

    def __init__(self, billable, *args, **kwargs):
        self.billable_map = billable
        super(ToggleBillableWidget, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None, choices=()):
        output = super(ToggleBillableWidget, self).render(name, value, attrs,
                                                          choices)
        # Leftover console.log debug calls were removed from the emitted
        # script; behavior is otherwise unchanged.
        return output + """
    <script type='text/javascript'>
        var billable_map = %s;
        jQuery(function() {
            jQuery('#id_project').change(function() {
                jQuery('#id_billable').attr('checked',
                    billable_map[jQuery(this).val()]);
            });
        });
    </script>
    """ % json.dumps(self.billable_map)
Removed widgets.py
|
# coding=utf-8
from rest_framework import viewsets
from .models import AirCondition, AirAverage
from .serializers import AirAverageSerializer, AirConditionSerializer
class AirConditionViewSets(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint exposing the 12 most recent AirCondition rows.

    The slice now lives in ``get_queryset()``: a sliced queryset stored as
    a class attribute is built once at import time and cannot be filtered
    further by DRF.
    """
    # Unsliced base queryset kept so router basename inference still works.
    queryset = AirCondition.objects.all()
    serializer_class = AirConditionSerializer

    def get_queryset(self):
        # Newest 12 records — "12 hours" per the original comment.
        return AirCondition.objects.order_by('-time')[:12]
class AirAverageViewSets(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint exposing the 10 most recent AirAverage rows.

    The slice now lives in ``get_queryset()``: a sliced queryset stored as
    a class attribute is built once at import time and cannot be filtered
    further by DRF.
    """
    # Unsliced base queryset kept so router basename inference still works.
    queryset = AirAverage.objects.all()
    serializer_class = AirAverageSerializer

    def get_queryset(self):
        # Newest 10 records — "5 days" per the original comment.
        return AirAverage.objects.order_by('-from_time')[:10]
Refine viewsets to support multiple cities
# coding=utf-8
import logging
from rest_framework import viewsets
from .models import AirCondition, AirAverage
from .serializers import AirAverageSerializer, AirConditionSerializer
class AirConditionViewSets(viewsets.ReadOnlyModelViewSet):
    """Latest air-condition readings for one city (``?city=<name>``)."""
    serializer_class = AirConditionSerializer
    # NOTE(review): lookup_url_kwarg normally names a URL keyword argument;
    # here it is reused as the query-parameter name read below — confirm
    # this is intentional.
    lookup_url_kwarg = 'city'

    def get_queryset(self):
        # QUERY_PARAMS is the pre-DRF-3 spelling of query_params.
        city_name = self.request.QUERY_PARAMS.get(self.lookup_url_kwarg)
        queryset = AirCondition.objects.filter(city=city_name)
        # Newest 12 records for that city.
        return queryset.order_by('-time')[:12]
class AirAverageViewSets(viewsets.ReadOnlyModelViewSet):
    """Latest air-average records for one city (``?city=<name>``)."""
    serializer_class = AirAverageSerializer
    # NOTE(review): reused as the query-parameter name, not a URL kwarg.
    lookup_url_kwarg = 'city'

    def get_queryset(self):
        # QUERY_PARAMS is the pre-DRF-3 spelling of query_params.
        city_name = self.request.QUERY_PARAMS.get(self.lookup_url_kwarg)
        queryset = AirAverage.objects.filter(city=city_name)
        # Newest 10 records for that city.
        return queryset.order_by('-from_time')[:10]
|
# This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets import plugins
from beets import util
from beets import library
from beets import ui
from beets.ui.commands import _do_query
import subprocess
import yaml
import collections
from tempfile import NamedTemporaryFile
import os
def edit(filename):
    """Open `filename` in a text editor and block until the editor exits.
    """
    cmd = util.shlex_split(util.editor_command())
    cmd.append(filename)
    # Blocks until the user closes the editor.
    subprocess.call(cmd)
def dump(arg):
    """Serialize a sequence of objects as a multi-document YAML string
    suitable for hand editing.
    """
    # Block style (not flow) and unescaped unicode keep the file readable.
    return yaml.safe_dump_all(arg,
                              allow_unicode=True,
                              default_flow_style=False)
def load(s):
    """Read a YAML string back to a sequence of objects.

    Uses ``safe_load_all`` rather than ``load_all``: the string comes
    from a user-edited temp file, and the unsafe loader can construct
    arbitrary Python objects. This also mirrors the ``safe_dump_all``
    used in ``dump``.
    """
    return yaml.safe_load_all(s)
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super(EditPlugin, self).__init__()
self.config.add({
'albumfields': 'album albumartist',
'itemfields': 'track title artist album',
'not_fields': 'id path',
})
# the albumfields field in your config sets the tags that
# you want to see/change for albums.
# Defaults to album albumartist.
# the ID tag will always be listed as it is used to identify the item
self.albumfields = self.config['albumfields'].as_str_seq()
# the itemfields field in your config sets the tags that
# you want to see/change or items.
# Defaults to track title artist album.
# the ID tag will always be listed as it is used to identify the item
self.itemfields = self.config['itemfields'].as_str_seq()
# the not_fields field in your config sets the tags that
# will not be changed.
# If you happen to change them, they will be restored to the original
# value. The ID of an item will never be changed.
self.not_fields = self.config['not_fields'].as_str_seq()
def commands(self):
edit_command = ui.Subcommand(
'edit',
help='interactively edit metadata'
)
edit_command.parser.add_option(
'-e', '--extra',
action='append',
type='choice',
choices=library.Item.all_keys() +
library.Album.all_keys(),
help='add additional fields to edit',
)
edit_command.parser.add_option(
'--all',
action='store_true', dest='all',
help='edit all fields',
)
edit_command.parser.add_all_common_options()
edit_command.func = self.editor_music
return [edit_command]
def editor_music(self, lib, opts, args):
# Get the objects to edit.
query = ui.decargs(args)
items, albums = _do_query(lib, query, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_('Nothing to edit.')
return
# Get the content to edit as raw data structures.
if opts.all:
data = self.get_all_fields(objs)
else:
fields = self.get_fields_from(objs, opts)
data = self.get_selected_fields(fields, objs, opts)
# Present the YAML to the user and let her change it.
new_data = self.change_objs(data)
changed_objs = self.check_diff(data, new_data)
if changed_objs is None:
# Editing failed.
return
# Save the new data.
self.save_items(changed_objs, lib, opts)
def get_fields_from(self, objs, opts):
# construct a list of fields we need
# see if we need album or item fields
fields = self.albumfields if opts.album else self.itemfields
# if opts.extra is given add those
if opts.extra:
fields.extend([f for f in opts.extra if f not in fields])
# make sure we got the id for identification
if 'id' not in fields:
fields.insert(0, 'id')
# we need all the fields
if opts.all:
fields = None
ui.print_(ui.colorize('text_warning', "edit all fields from:"))
else:
for it in fields:
if opts.album:
# check if it is really an albumfield
if it not in library.Album.all_keys():
ui.print_(
"{} not in albumfields.Removed it.".format(
ui.colorize(
'text_warning', it)))
fields.remove(it)
else:
# if it is not an itemfield remove it
if it not in library.Item.all_keys():
ui.print_(
"{} not in itemfields.Removed it.".format(
ui.colorize(
'text_warning', it)))
fields.remove(it)
return fields
def get_selected_fields(self, myfields, objs, opts):
return [[{field: obj[field]}for field in myfields]for obj in objs]
def get_all_fields(self, objs):
return [[{field: obj[field]}for field in sorted(obj._fields)]
for obj in objs]
def change_objs(self, dict_items):
# Ask the user to edit the data.
new = NamedTemporaryFile(suffix='.yaml', delete=False)
new.write(dump(dict_items))
new.close()
edit(new.name)
# Parse the updated data.
with open(new.name) as f:
new_str = f.read()
os.remove(new.name)
try:
return load(new_str)
except yaml.YAMLError as e:
ui.print_("Invalid YAML: {}".format(e))
return None
    def nice_format(self, newset):
        """Merge each object's one-entry field dicts into a single dict,
        keyed by the value of the object's first field.

        Each element of `newset` is a list of ``{field: value}`` dicts;
        item[0] is assumed to be the ``{'id': ...}`` entry (see
        get_fields_from, which inserts 'id' first) -- TODO confirm for
        the --all case, where fields are sorted alphabetically.
        """
        # format the results so that we have an ID at the top
        # that we can change to a userfrienly title/artist format
        # when we present our results
        wellformed = collections.defaultdict(dict)
        for item in newset:
            for field in item:
                # .values()[0] relies on Python 2's list-returning
                # dict.values(); this would break under Python 3.
                wellformed[item[0].values()[0]].update(field)
        return wellformed
    def save_items(self, oldnewlist, lib, opts):
        """Apply the user's edits to the library objects and save them.

        `oldnewlist` is a sequence of (old, new) data pairs as produced
        by check_diff; each object is looked up in `lib` by id, updated
        with the new field values, shown to the user, and written.
        """
        oldset, newset = zip(*oldnewlist)
        # Re-key both sides by object id so old and new can be compared.
        niceNewSet = self.nice_format(newset)
        niceOldSet = self.nice_format(oldset)
        niceCombiSet = zip(niceOldSet.items(), niceNewSet.items())
        changedObjs = []
        for o, n in niceCombiSet:
            # n is an (id, fields-dict) pair; fetch the live object.
            if opts.album:
                ob = lib.get_album(int(n[0]))
            else:
                ob = lib.get_item(n[0])
            # change id to item-string
            ob.update(n[1])  # update the object
            changedObjs.append(ob)
        # see the changes we made
        for obj in changedObjs:
            ui.show_model_changes(obj)
        self.save_write(changedObjs)
def save_write(self, changedob):
if not ui.input_yn(
ui.colorize('action_default', 'really modify? (y/n)')):
return
for ob in changedob:
self._log.debug('saving changes to {}', ob)
ob.try_sync(ui.should_write())
return
def check_diff(self, old_data, new_data):
return filter(None, map(self.reduce_it, old_data, new_data))
    def reduce_it(self, ol, nl):
        """Compare an old/new field-list pair, reverting edits to
        forbidden fields, and return ``(ol, nl)`` only when a real
        change remains (None otherwise).
        """
        # if there is a forbidden field it resets them
        if ol != nl:
            for x in range(0, len(nl)):
                # .keys()[0] relies on Python 2's list-returning
                # dict.keys(); each entry is a one-key dict.
                if ol[x] != nl[x] and ol[x].keys()[0]in self.not_fields:
                    nl[x] = ol[x]
                    ui.print_("reset forbidden field.")
        if ol != nl:  # only keep objects that have changed
            return ol, nl
Start expunging the `opts` carry-through.
A dedicated command function now takes care of the options and turns them
into normal variables.
# This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets import plugins
from beets import util
from beets import library
from beets import ui
from beets.ui.commands import _do_query
import subprocess
import yaml
import collections
from tempfile import NamedTemporaryFile
import os
def edit(filename):
    """Open `filename` in a text editor (the command configured via
    beets' editor setting) and block until the editor exits.
    """
    cmd = util.shlex_split(util.editor_command())
    cmd.append(filename)
    subprocess.call(cmd)
def dump(arg):
    """Dump an object as YAML for editing.

    Serializes `arg` as a multi-document, block-style, unicode-friendly
    YAML string.
    """
    yaml_options = {
        'allow_unicode': True,
        'default_flow_style': False,
    }
    return yaml.safe_dump_all(arg, **yaml_options)
def load(s):
    """Read a YAML string back to an object.

    Returns a lazy multi-document iterator.  NOTE(review): this uses
    ``yaml.load_all`` (not ``safe_load_all``) on user-edited text, which
    can construct arbitrary Python objects -- consider the safe variant.
    """
    return yaml.load_all(s)
class EditPlugin(plugins.BeetsPlugin):
    """Provide the `beet edit` command: dump selected metadata fields
    to YAML, open them in a text editor, and save the user's changes.
    """

    def __init__(self):
        super(EditPlugin, self).__init__()

        self.config.add({
            'albumfields': 'album albumartist',
            'itemfields': 'track title artist album',
            'not_fields': 'id path',
        })

        # the albumfields field in your config sets the tags that
        # you want to see/change for albums.
        # Defaults to album albumartist.
        # the ID tag will always be listed as it is used to identify the item
        self.albumfields = self.config['albumfields'].as_str_seq()

        # the itemfields field in your config sets the tags that
        # you want to see/change or items.
        # Defaults to track title artist album.
        # the ID tag will always be listed as it is used to identify the item
        self.itemfields = self.config['itemfields'].as_str_seq()

        # the not_fields field in your config sets the tags that
        # will not be changed.
        # If you happen to change them, they will be restored to the original
        # value. The ID of an item will never be changed.
        self.not_fields = self.config['not_fields'].as_str_seq()

    def commands(self):
        """Declare the `edit` subcommand with its options."""
        edit_command = ui.Subcommand(
            'edit',
            help='interactively edit metadata'
        )
        edit_command.parser.add_option(
            '-e', '--extra',
            action='append',
            type='choice',
            choices=library.Item.all_keys() +
            library.Album.all_keys(),
            help='add additional fields to edit',
        )
        edit_command.parser.add_option(
            '--all',
            action='store_true', dest='all',
            help='edit all fields',
        )
        edit_command.parser.add_all_common_options()
        edit_command.func = self._edit_command
        return [edit_command]

    def _edit_command(self, lib, opts, args):
        """The CLI command function for the `beet edit` command.

        Resolves the query and the option flags into plain values and
        hands them to the core `edit` method.
        """
        # Get the objects to edit.
        query = ui.decargs(args)
        items, albums = _do_query(lib, query, opts.album, False)
        objs = albums if opts.album else items
        if not objs:
            ui.print_('Nothing to edit.')
            return

        # Get the fields to edit.
        if opts.all:
            fields = None
        else:
            fields = self.get_fields_from(objs, opts.album, opts.extra)
        self.edit(lib, opts.album, objs, fields)

    def edit(self, lib, album, objs, fields):
        """The core editor logic.

        - `lib`: The `Library` object.
        - `album`: A flag indicating whether we're editing Items or Albums.
        - `objs`: The `Item`s or `Album`s to edit.
        - `fields`: The set of field names to edit (or None for all).
        """
        # Get the content to edit as raw data structures.
        if fields:
            data = self.get_selected_fields(fields, objs)
        else:
            data = self.get_all_fields(objs)

        # Present the YAML to the user and let her change it.
        new_data = self.change_objs(data)
        changed_objs = self.check_diff(data, new_data)
        if changed_objs is None:
            # Editing failed.
            return

        # Save the new data.
        self.save_items(changed_objs, lib, album)

    def get_fields_from(self, objs, album, extra):
        """Return the list of field names to edit.

        Starts from the configured album or item fields, appends any
        `extra` fields, guarantees that `id` is present (it identifies
        the object when saving), and drops names that are not valid
        fields for the object type.
        """
        # Work on a copy so we never mutate the configured list that is
        # stored on the plugin instance across invocations.
        fields = list(self.albumfields if album else self.itemfields)
        # if opts.extra is given add those
        if extra:
            fields.extend([f for f in extra if f not in fields])
        # make sure we got the id for identification
        if 'id' not in fields:
            fields.insert(0, 'id')
        # Build a new filtered list instead of removing entries from
        # `fields` while iterating over it, which silently skipped the
        # element following each removed one.
        valid_keys = (library.Album.all_keys() if album
                      else library.Item.all_keys())
        kept = []
        for it in fields:
            if it in valid_keys:
                kept.append(it)
            else:
                ui.print_(
                    "{} not in {}.Removed it.".format(
                        ui.colorize('text_warning', it),
                        "albumfields" if album else "itemfields"))
        return kept

    def get_selected_fields(self, myfields, objs):
        """Return, for every object, one-entry {field: value} dicts for
        the requested field names.
        """
        return [[{field: obj[field]}for field in myfields]for obj in objs]

    def get_all_fields(self, objs):
        """Return, for every object, one-entry {field: value} dicts for
        every declared field, in sorted order.
        """
        return [[{field: obj[field]}for field in sorted(obj._fields)]
                for obj in objs]

    def change_objs(self, dict_items):
        """Dump `dict_items` to a temporary YAML file, open it in the
        user's editor, and return the re-parsed data (or None when the
        edited text is not valid YAML).
        """
        # Ask the user to edit the data.
        new = NamedTemporaryFile(suffix='.yaml', delete=False)
        new.write(dump(dict_items))
        new.close()
        edit(new.name)
        # Parse the updated data.
        with open(new.name) as f:
            new_str = f.read()
        os.remove(new.name)
        try:
            return load(new_str)
        except yaml.YAMLError as e:
            ui.print_("Invalid YAML: {}".format(e))
            return None

    def nice_format(self, newset):
        # format the results so that we have an ID at the top
        # that we can change to a userfrienly title/artist format
        # when we present our results
        wellformed = collections.defaultdict(dict)
        for item in newset:
            for field in item:
                # item[0] is assumed to be the {'id': ...} entry;
                # .values()[0] relies on Python 2 list-returning values().
                wellformed[item[0].values()[0]].update(field)
        return wellformed

    def save_items(self, oldnewlist, lib, album):
        """Apply the user's edits to the library objects and save them."""
        oldset, newset = zip(*oldnewlist)
        niceNewSet = self.nice_format(newset)
        niceOldSet = self.nice_format(oldset)
        niceCombiSet = zip(niceOldSet.items(), niceNewSet.items())
        changedObjs = []
        for o, n in niceCombiSet:
            if album:
                ob = lib.get_album(int(n[0]))
            else:
                ob = lib.get_item(n[0])
            # change id to item-string
            ob.update(n[1])  # update the object
            changedObjs.append(ob)
        # see the changes we made
        for obj in changedObjs:
            ui.show_model_changes(obj)
        self.save_write(changedObjs)

    def save_write(self, changedob):
        """Confirm with the user, then sync every changed object."""
        if not ui.input_yn(
                ui.colorize('action_default', 'really modify? (y/n)')):
            return

        for ob in changedob:
            self._log.debug('saving changes to {}', ob)
            ob.try_sync(ui.should_write())
        return

    def check_diff(self, old_data, new_data):
        """Keep only the (old, new) pairs that actually changed."""
        return filter(None, map(self.reduce_it, old_data, new_data))

    def reduce_it(self, ol, nl):
        """Revert edits to forbidden fields; return (ol, nl) only when
        a real change remains.
        """
        # if there is a forbidden field it resets them
        if ol != nl:
            for x in range(0, len(nl)):
                # Python 2 list-returning dict.keys().
                if ol[x] != nl[x] and ol[x].keys()[0]in self.not_fields:
                    nl[x] = ol[x]
                    ui.print_("reset forbidden field.")
        if ol != nl:  # only keep objects that have changed
            return ol, nl
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""mod_python->WSGI Framework"""
import sys
import os
import re
import cgi
import gc
import inspect
from fnmatch import fnmatch
from urlparse import urlparse, urlunparse
from wsgiref.util import FileWrapper
from invenio.webinterface_handler_wsgi_utils import table
from invenio.webinterface_handler_config import \
HTTP_STATUS_MAP, SERVER_RETURN, OK, DONE, \
HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR
from invenio.config import CFG_WEBDIR, CFG_SITE_LANG, \
CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST, CFG_DEVEL_SITE, CFG_SITE_URL, \
CFG_SITE_SECURE_URL, CFG_WEBSTYLE_REVERSE_PROXY_IPS
from invenio.errorlib import register_exception
## TODO for future reimplementation of stream_file
#from invenio.bibdocfile import StreamFileException
from flask import request, after_this_request
## Magic regexp to search for usage of CFG_SITE_URL within src/href or
## any src usage of an external website
_RE_HTTPS_REPLACES = re.compile(r"\b((?:src\s*=|url\s*\()\s*[\"']?)http\://", re.I)
## Regexp to verify that the IP starts with a number (filter cases where 'unknown')
## It is faster to verify only the start (585 ns) compared with verifying
## the whole ip address - re.compile('^\d+\.\d+\.\d+\.\d+$') (1.01 µs)
_RE_IPADDRESS_START = re.compile("^\d+\.")
def _http_replace_func(match):
    ## src external_site -> CFG_SITE_SECURE_URL/sslredirect/external_site
    prefix = match.group(1)
    return '%s%s/sslredirect/' % (prefix, CFG_SITE_SECURE_URL)
_ESCAPED_CFG_SITE_URL = cgi.escape(CFG_SITE_URL, True)
_ESCAPED_CFG_SITE_SECURE_URL = cgi.escape(CFG_SITE_SECURE_URL, True)
def https_replace(html):
    ## Rewrite plain-HTTP site references to their HTTPS equivalents;
    ## external src/url targets are routed through /sslredirect.
    secured = html.replace(_ESCAPED_CFG_SITE_URL, _ESCAPED_CFG_SITE_SECURE_URL)
    return _RE_HTTPS_REPLACES.sub(_http_replace_func, secured)
class InputProcessed(object):
    """
    Auxiliary class used when reading input.

    Stands in for a ``wsgi.input`` stream whose content has already been
    consumed: any further read attempt raises ``EOFError`` so that a
    double read of the POST body fails loudly.
    @see: <http://www.wsgi.org/wsgi/Specifications/handling_post_forms>.
    """
    def read(self, *args):
        # Always raises: there is nothing left to read.
        raise EOFError('The wsgi.input stream has already been consumed')
    # Every read-style entry point shares the same failing implementation.
    readline = readlines = __iter__ = read
from werkzeug import BaseResponse, ResponseStreamMixin, \
CommonResponseDescriptorsMixin
class Response(BaseResponse, ResponseStreamMixin,
               CommonResponseDescriptorsMixin):
    """
    Full featured response object implementing :class:`ResponseStreamMixin`
    to add support for the `stream` property.

    Used as the backing response object of SimulatedModPythonRequest.
    """
class SimulatedModPythonRequest(object):
"""
mod_python like request object.
Minimum and cleaned implementation to make moving out of mod_python
easy.
@see: <http://www.modpython.org/live/current/doc-html/pyapi-mprequest.html>
"""
def __init__(self, environ, start_response):
self.response = Response()
self.__environ = environ
self.__start_response = start_response
self.__response_sent_p = False
self.__content_type_set_p = False
self.__buffer = ''
self.__low_level_headers = []
self.__filename = None
self.__disposition_type = None
self.__bytes_sent = 0
self.__allowed_methods = []
self.__cleanups = []
self.headers_out = {'Cache-Control': None}
#self.headers_out.update(dict(request.headers))
## See: <http://www.python.org/dev/peps/pep-0333/#the-write-callable>
self.__write = None
self.__write_error = False
self.__errors = environ['wsgi.errors']
self.__headers_in = table([])
self.__tainted = False
self.__is_https = self.__environ.get('wsgi.url_scheme') == 'https'
self.__replace_https = False
self.track_writings = False
self.__what_was_written = ""
for key, value in environ.iteritems():
if key.startswith('HTTP_'):
self.__headers_in[key[len('HTTP_'):].replace('_', '-')] = value
if environ.get('CONTENT_LENGTH'):
self.__headers_in['content-length'] = environ['CONTENT_LENGTH']
if environ.get('CONTENT_TYPE'):
self.__headers_in['content-type'] = environ['CONTENT_TYPE']
def get_wsgi_environ(self):
return self.__environ
def get_post_form(self):
""" Returns only POST form. """
self.__tainted = True
return request.values.to_dict(flat=True)
def get_response_sent_p(self):
return self.__response_sent_p
def get_low_level_headers(self):
return self.__low_level_headers
def get_buffer(self):
return self.__buffer
def write(self, string, flush=1):
if isinstance(string, unicode):
self.__buffer += string.encode('utf8')
else:
self.__buffer += string
if flush:
self.flush()
def flush(self):
self.send_http_header()
if self.__buffer:
self.__bytes_sent += len(self.__buffer)
try:
if not self.__write_error:
if self.__replace_https:
self.__write(https_replace(self.__buffer))
else:
if self.__buffer:
self.__write(self.__buffer)
if self.track_writings:
if self.__replace_https:
self.__what_was_written += https_replace(self.__buffer)
else:
self.__what_was_written += self.__buffer
except IOError, err:
if "failed to write data" in str(err) or "client connection closed" in str(err):
## Let's just log this exception without alerting the admin:
register_exception(req=self)
self.__write_error = True ## This flag is there just
## to not report later other errors to the admin.
else:
raise
self.__buffer = ''
def set_content_type(self, content_type):
self.__content_type_set_p = True
self.response.content_type = content_type
if self.__is_https:
if content_type.startswith("text/html") or content_type.startswith("application/rss+xml"):
self.__replace_https = True
def get_content_type(self):
return self.response.content_type
def send_http_header(self):
for (k, v) in self.__low_level_headers:
self.response.headers[k] = v
for k, v in self.headers_out.iteritems():
self.response.headers[k] = v
self.__write = self.response.stream.write
def get_unparsed_uri(self):
return '?'.join([self.__environ['PATH_INFO'], self.__environ['QUERY_STRING']])
def get_uri(self):
return request.environ['PATH_INFO']
def get_headers_in(self):
return request.headers
def get_subprocess_env(self):
return self.__environ
def add_common_vars(self):
pass
def get_args(self):
return request.environ['QUERY_STRING']
def get_remote_ip(self):
if 'X-FORWARDED-FOR' in self.__headers_in and \
self.__headers_in.get('X-FORWARDED-SERVER', '') == \
self.__headers_in.get('X-FORWARDED-HOST', '') == \
urlparse(CFG_SITE_URL)[1]:
# we are using proxy setup
if self.__environ.get('REMOTE_ADDR') in CFG_WEBSTYLE_REVERSE_PROXY_IPS:
# we trust this proxy
ip_list = self.__headers_in['X-FORWARDED-FOR'].split(',')
for ip in ip_list:
if _RE_IPADDRESS_START.match(ip):
return ip
# no IP has the correct format, return a default IP
return '10.0.0.10'
else:
# we don't trust this proxy
register_exception(prefix="You are running in a proxy configuration, but the " + \
"CFG_WEBSTYLE_REVERSE_PROXY_IPS variable does not contain " + \
"the IP of your proxy, thus the remote IP addresses of your " + \
"clients are not trusted. Please configure this variable.",
alert_admin=True)
return '10.0.0.11'
return request.remote_addr
def get_remote_host(self):
return request.environ['REMOTE_HOST']
def get_header_only(self):
return request.environ['REQUEST_METHOD'] == 'HEAD'
def set_status(self, status):
self.response.status_code = status
def get_status(self):
return self.response.status_code
def get_wsgi_status(self):
return '%s %s' % (self.response.status_code,
HTTP_STATUS_MAP.get(int(self.response.status_code),
'Explanation not available'))
def sendfile(self, path, offset=0, the_len=-1):
try:
self.send_http_header()
file_to_send = open(path)
file_to_send.seek(offset)
file_wrapper = FileWrapper(file_to_send)
count = 0
if the_len < 0:
for chunk in file_wrapper:
count += len(chunk)
self.__bytes_sent += len(chunk)
self.__write(chunk)
else:
for chunk in file_wrapper:
if the_len >= len(chunk):
the_len -= len(chunk)
count += len(chunk)
self.__bytes_sent += len(chunk)
self.__write(chunk)
else:
count += the_len
self.__bytes_sent += the_len
self.__write(chunk[:the_len])
break
except IOError, err:
if "failed to write data" in str(err) or "client connection closed" in str(err):
## Let's just log this exception without alerting the admin:
register_exception(req=self)
else:
raise
return self.__bytes_sent
def set_content_length(self, content_length):
if content_length is not None:
self.response.headers['content-length'] = str(content_length)
else:
del self.response.headers['content-length']
def is_https(self):
return self.__is_https
def get_method(self):
return request.environ['REQUEST_METHOD']
def get_hostname(self):
return request.environ.get('HTTP_HOST', '')
def set_filename(self, filename):
self.__filename = filename
if self.__disposition_type is None:
self.__disposition_type = 'inline'
self.response.headers['content-disposition'] = '%s; filename=%s' % (self.__disposition_type, self.__filename)
def set_encoding(self, encoding):
if encoding:
self.response.headers['content-encoding'] = str(encoding)
else:
del self.response.headers['content-encoding']
def get_bytes_sent(self):
return self.__bytes_sent
def log_error(self, message):
self.__errors.write(message.strip() + '\n')
def get_content_type_set_p(self):
return self.__content_type_set_p and \
bool(self.response.headers['content-type'])
def allow_methods(self, methods, reset=0):
if reset:
self.__allowed_methods = []
self.__allowed_methods += [method.upper().strip() for method in methods]
def get_allowed_methods(self):
return self.__allowed_methods
def readline(self, hint=None):
try:
return self.__environ['wsgi.input'].readline(hint)
except TypeError:
## the hint param is not part of wsgi pep, although
## it's great to exploit it in when reading FORM
## with large files, in order to avoid filling up the memory
## Too bad it's not there :-(
return self.__environ['wsgi.input'].readline()
def readlines(self, hint=None):
return self.__environ['wsgi.input'].readlines(hint)
def read(self, hint=None):
return self.__environ['wsgi.input'].read(hint)
def register_cleanup(self, callback, data=None):
@after_this_request
def f(response):
callback(data)
def get_cleanups(self):
return self.__cleanups
def get_referer(self):
return request.referrer
def get_what_was_written(self):
return self.__what_was_written
def __str__(self):
from pprint import pformat
out = ""
for key in dir(self):
try:
if not callable(getattr(self, key)) and not key.startswith("_SimulatedModPythonRequest") and not key.startswith('__'):
out += 'req.%s: %s\n' % (key, pformat(getattr(self, key)))
except:
pass
return out
def get_original_wsgi_environment(self):
"""
Return the original WSGI environment used to initialize this request
object.
@return: environ, start_response
@raise AssertionError: in case the environment has been altered, i.e.
either the input has been consumed or something has already been
written to the output.
"""
assert not self.__tainted, "The original WSGI environment is tainted since at least req.write or req.form has been used."
return self.__environ, self.__start_response
content_type = property(get_content_type, set_content_type)
unparsed_uri = property(get_unparsed_uri)
uri = property(get_uri)
headers_in = property(get_headers_in)
subprocess_env = property(get_subprocess_env)
args = property(get_args)
header_only = property(get_header_only)
status = property(get_status, set_status)
method = property(get_method)
hostname = property(get_hostname)
filename = property(fset=set_filename)
encoding = property(fset=set_encoding)
bytes_sent = property(get_bytes_sent)
content_type_set_p = property(get_content_type_set_p)
allowed_methods = property(get_allowed_methods)
response_sent_p = property(get_response_sent_p)
form = property(get_post_form)
remote_ip = property(get_remote_ip)
remote_host = property(get_remote_host)
referer = property(get_referer)
what_was_written = property(get_what_was_written)
def alert_admin_for_server_status_p(status, referer):
    """
    Check the configuration variable
    CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST to see if the exception should
    be registered and the admin should be alerted.  A pattern ending
    in 'r' matches only when a referer is present.
    """
    status = str(status)
    for raw_pattern in CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST:
        pattern = raw_pattern.lower()
        needs_referer = pattern.endswith('r')
        if needs_referer:
            ## e.g. "404 r" -> "404"
            pattern = pattern[:-1].strip()
        if not fnmatch(status, pattern):
            continue
        if needs_referer and not referer:
            continue
        return True
    return False
def application(environ, start_response, handler=None):
    """
    Entry point for wsgi.

    Wraps the raw WSGI request in a SimulatedModPythonRequest, hands it
    to `handler` (or the default Invenio handler), and converts
    SERVER_RETURN-style status exceptions into proper HTTP responses.
    """
    ## Needed for mod_wsgi, see: <http://code.google.com/p/modwsgi/wiki/ApplicationIssues>
    req = SimulatedModPythonRequest(environ, start_response)
    #print 'Starting mod_python simulation'
    try:
        if handler is None:
            from invenio.webinterface_layout import invenio_handler
            invenio_handler(req)
        else:
            handler(req)
        req.flush()
    ## TODO for future reimplementation of stream_file
    #except StreamFileException as e:
    #    return e.value
    except SERVER_RETURN, status:
        ## Handlers may raise a full werkzeug response; pass it through.
        redirection, = status.args
        from werkzeug.wrappers import BaseResponse
        if isinstance(redirection, BaseResponse):
            return redirection
        status = int(str(status))
        if status == 404:
            from werkzeug.exceptions import NotFound
            raise NotFound()
        if status not in (OK, DONE):
            ## An error status: render the error page and possibly
            ## alert the admin (depending on the configured patterns).
            req.status = status
            req.headers_out['content-type'] = 'text/html'
            admin_to_be_alerted = alert_admin_for_server_status_p(status,
                                              req.headers_in.get('referer'))
            if admin_to_be_alerted:
                register_exception(req=req, alert_admin=True)
            if not req.response_sent_p:
                start_response(req.get_wsgi_status(), req.get_low_level_headers(), sys.exc_info())
            map(req.write, generate_error_page(req, admin_to_be_alerted))
            req.flush()
    finally:
        ##for (callback, data) in req.get_cleanups():
        ##    callback(data)
        #if hasattr(req, '_session'):
        #    ## The session handler saves for caching a request_wrapper
        #    ## in req.
        #    ## This saves req as an attribute, creating a circular
        #    ## reference.
        #    ## Since we have have reached the end of the request handler
        #    ## we can safely drop the request_wrapper so to avoid
        #    ## memory leaks.
        #    delattr(req, '_session')
        #if hasattr(req, '_user_info'):
        #    ## For the same reason we can delete the user_info.
        #    delattr(req, '_user_info')

        ## as suggested in
        ## <http://www.python.org/doc/2.3.5/lib/module-gc.html>
        del gc.garbage[:]
    return req.response
def generate_error_page(req, admin_was_alerted=True, page_already_started=False):
    """
    Returns an iterable with the error page to be sent to the user browser.

    @param req: the current request object.
    @param admin_was_alerted: whether the admin was notified about the error.
    @param page_already_started: when True, only the error body is
        returned (the surrounding page was already emitted).
    """
    from invenio.webpage import page
    from invenio import template
    webstyle_templates = template.load('webstyle')
    ln = req.form.get('ln', CFG_SITE_LANG)
    if page_already_started:
        return [webstyle_templates.tmpl_error_page(status=req.get_wsgi_status(), ln=ln, admin_was_alerted=admin_was_alerted)]
    else:
        return [page(title=req.get_wsgi_status(), body=webstyle_templates.tmpl_error_page(status=req.get_wsgi_status(), ln=ln, admin_was_alerted=admin_was_alerted), language=ln, req=req)]
def is_static_path(path):
    """
    Return the absolute path when `path` names an existing file under
    CFG_WEBDIR, None otherwise.
    @param path: the path.
    @type path: string
    @return: absolute file path under CFG_WEBDIR, or None.
    @rtype: string or None
    """
    candidate = os.path.abspath(CFG_WEBDIR + path)
    ## The prefix check keeps '..' components from escaping CFG_WEBDIR.
    if not candidate.startswith(CFG_WEBDIR):
        return None
    if not os.path.isfile(candidate):
        return None
    return candidate
def is_mp_legacy_publisher_path(path):
    """
    Check whether `path` points at a Python file under CFG_WEBDIR that
    should be served through the legacy mod_python publisher.
    @param path: the path.
    @type path: string
    @return: the path of the module to load and the function to call
        there, or (None, None) when the path does not match.
    @rtype: tuple
    """
    components = path.split('/')
    for index, component in enumerate(components):
        if not component.endswith('.py'):
            continue
        possible_module = os.path.abspath(
            CFG_WEBDIR + os.path.sep +
            os.path.sep.join(components[:index + 1]))
        possible_handler = '/'.join(components[index + 1:]).strip()
        ## Never expose private handlers.
        if possible_handler.startswith('_'):
            return None, None
        if not possible_handler:
            possible_handler = 'index'
        if os.path.exists(possible_module) and possible_module.startswith(CFG_WEBDIR):
            return (possible_module, possible_handler)
        else:
            return None, None
    ## No '.py' component at all: the original fell off the end here and
    ## returned bare None, breaking callers that tuple-unpack the result.
    return None, None
def mp_legacy_publisher(req, possible_module, possible_handler):
    """
    mod_python legacy publisher minimum implementation.

    Executes `possible_module` and calls `possible_handler` inside it,
    passing the request plus the submitted form fields as keyword
    arguments.  Redirects to HTTPS first when the session or the
    configuration requires it.
    """
    from invenio.session import get_session
    from invenio.webinterface_handler import CFG_HAS_HTTPS_SUPPORT, CFG_FULL_HTTPS
    ## NOTE(review): exec of an on-disk module; safe only because
    ## possible_module was validated to live under CFG_WEBDIR.
    the_module = open(possible_module).read()
    module_globals = {}
    exec(the_module, module_globals)
    if possible_handler in module_globals and callable(module_globals[possible_handler]):
        from invenio.webinterface_handler import _check_result
        ## req is the required first parameter of any handler
        expected_args = list(inspect.getargspec(module_globals[possible_handler])[0])
        if not expected_args or 'req' != expected_args[0]:
            ## req was not the first argument. Too bad!
            raise SERVER_RETURN, HTTP_NOT_FOUND
        ## the req.form must be casted to dict because of Python 2.4 and earlier
        ## otherwise any object exposing the mapping interface can be
        ## used with the magic **
        from flask import request
        form = dict(request.values.to_dict(flat=True))
        for key, value in form.items():
            ## FIXME: this is a backward compatibility workaround
            ## because most of the old administration web handler
            ## expect parameters to be of type str.
            ## When legacy publisher will be removed all this
            ## pain will go away anyway :-)
            if isinstance(value, unicode):
                form[key] = value.encode('utf8')
            else:
                ## NOTE: this is a workaround for e.g. legacy webupload
                ## that is still using legacy publisher and expect to
                ## have a file (Field) instance instead of a string.
                form[key] = value

        if (CFG_FULL_HTTPS or CFG_HAS_HTTPS_SUPPORT and get_session(req).need_https) and not req.is_https():
            from invenio.urlutils import redirect_to_url
            # We need to isolate the part of the URI that is after
            # CFG_SITE_URL, and append that to our CFG_SITE_SECURE_URL.
            original_parts = urlparse(req.unparsed_uri)
            plain_prefix_parts = urlparse(CFG_SITE_URL)
            secure_prefix_parts = urlparse(CFG_SITE_SECURE_URL)

            # Compute the new path
            plain_path = original_parts[2]
            plain_path = secure_prefix_parts[2] + \
                         plain_path[len(plain_prefix_parts[2]):]

            # ...and recompose the complete URL
            final_parts = list(secure_prefix_parts)
            final_parts[2] = plain_path
            final_parts[-3:] = original_parts[-3:]

            target = urlunparse(final_parts)
            redirect_to_url(req, target)

        try:
            return _check_result(req, module_globals[possible_handler](req, **form))
        except TypeError, err:
            ## The handler rejected our keyword arguments: retry with
            ## only the parameters it actually declares.
            if ("%s() got an unexpected keyword argument" % possible_handler) in str(err) or ('%s() takes at least' % possible_handler) in str(err):
                inspected_args = inspect.getargspec(module_globals[possible_handler])
                expected_args = list(inspected_args[0])
                expected_defaults = list(inspected_args[3])
                expected_args.reverse()
                expected_defaults.reverse()
                register_exception(req=req, prefix="Wrong GET parameter set in calling a legacy publisher handler for %s: expected_args=%s, found_args=%s" % (possible_handler, repr(expected_args), repr(req.form.keys())), alert_admin=CFG_DEVEL_SITE)
                cleaned_form = {}
                for index, arg in enumerate(expected_args):
                    if arg == 'req':
                        continue
                    if index < len(expected_defaults):
                        cleaned_form[arg] = form.get(arg, expected_defaults[index])
                    else:
                        cleaned_form[arg] = form.get(arg, None)
                return _check_result(req, module_globals[possible_handler](req, **cleaned_form))
            else:
                raise
    else:
        raise SERVER_RETURN, HTTP_NOT_FOUND
def check_wsgiref_testing_feasability():
    """
    In order to use wsgiref for running Invenio, CFG_SITE_URL and
    CFG_SITE_SECURE_URL must not use HTTPS because SSL is not supported.
    """
    for name, value in (('CFG_SITE_URL', CFG_SITE_URL),
                        ('CFG_SITE_SECURE_URL', CFG_SITE_SECURE_URL)):
        if value.lower().startswith('https'):
            print >> sys.stderr, """
ERROR: SSL is not supported by the wsgiref simple server implementation.
Please set %s not to start with "https".
Currently %s is set to: "%s".""" % (name, name, value)
            sys.exit(1)
def wsgi_handler_test(port=80):
    """
    Simple WSGI testing environment based on wsgiref.

    Refuses to start when the site URLs are configured for HTTPS, then
    serves the Invenio Flask app (with static files) on `port`.
    """
    check_wsgiref_testing_feasability()
    from invenio.webinterface_handler_flask import create_invenio_flask_app
    flask_app = create_invenio_flask_app(wsgi_serve_static_files=True)
    flask_app.run(debug=True, port=port)
def main():
    """Parse the command line and either run the wsgiref test server or
    print the usage help.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-t', '--test', action='store_true',
                      dest='test', default=False,
                      help="Run a WSGI test server via wsgiref (not using Apache).")
    parser.add_option('-p', '--port', type='int', dest='port', default='80',
                      help="The port where the WSGI test server will listen. [80]")
    (options, args) = parser.parse_args()
    if not options.test:
        parser.print_help()
    else:
        wsgi_handler_test(options.port)
if __name__ == "__main__":
main()
WebStyle: fix for remote host getter
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""mod_python->WSGI Framework"""
import sys
import os
import re
import cgi
import gc
import inspect
from fnmatch import fnmatch
from urlparse import urlparse, urlunparse
from wsgiref.util import FileWrapper
from invenio.webinterface_handler_wsgi_utils import table
from invenio.webinterface_handler_config import \
HTTP_STATUS_MAP, SERVER_RETURN, OK, DONE, \
HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR
from invenio.config import CFG_WEBDIR, CFG_SITE_LANG, \
CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST, CFG_DEVEL_SITE, CFG_SITE_URL, \
CFG_SITE_SECURE_URL, CFG_WEBSTYLE_REVERSE_PROXY_IPS
from invenio.errorlib import register_exception
## TODO for future reimplementation of stream_file
#from invenio.bibdocfile import StreamFileException
from flask import request, after_this_request
## Magic regexp to search for usage of CFG_SITE_URL within src/href or
## any src usage of an external website
_RE_HTTPS_REPLACES = re.compile(r"\b((?:src\s*=|url\s*\()\s*[\"']?)http\://", re.I)
## Regexp to verify that the IP starts with a number (filter cases where 'unknown')
## It is faster to verify only the start (585 ns) compared with verifying
## the whole ip address - re.compile('^\d+\.\d+\.\d+\.\d+$') (1.01 µs)
_RE_IPADDRESS_START = re.compile("^\d+\.")
def _http_replace_func(match):
    ## src external_site -> CFG_SITE_SECURE_URL/sslredirect/external_site
    prefix = match.group(1)
    return '%s%s/sslredirect/' % (prefix, CFG_SITE_SECURE_URL)
_ESCAPED_CFG_SITE_URL = cgi.escape(CFG_SITE_URL, True)
_ESCAPED_CFG_SITE_SECURE_URL = cgi.escape(CFG_SITE_SECURE_URL, True)
def https_replace(html):
    ## Rewrite plain-HTTP site references to their HTTPS equivalents;
    ## external src/url targets are routed through /sslredirect.
    secured = html.replace(_ESCAPED_CFG_SITE_URL, _ESCAPED_CFG_SITE_SECURE_URL)
    return _RE_HTTPS_REPLACES.sub(_http_replace_func, secured)
class InputProcessed(object):
    """
    Placeholder for a wsgi.input stream whose content has already been read.

    Every read-like operation raises EOFError so that double consumption of
    the POST body is surfaced as a programming error.
    @see: <http://www.wsgi.org/wsgi/Specifications/handling_post_forms>.
    """
    def _consumed(self, *ignored):
        raise EOFError('The wsgi.input stream has already been consumed')

    read = _consumed
    readline = _consumed
    readlines = _consumed
    __iter__ = _consumed
from werkzeug import BaseResponse, ResponseStreamMixin, \
CommonResponseDescriptorsMixin
class Response(BaseResponse, ResponseStreamMixin,
               CommonResponseDescriptorsMixin):
    """
    Full featured response object implementing :class:`ResponseStreamMixin`
    to add support for the `stream` property.

    All behaviour comes from the werkzeug base classes; this class only
    composes them.
    """
class SimulatedModPythonRequest(object):
    """
    mod_python like request object.
    Minimum and cleaned implementation to make moving out of mod_python
    easy.

    Wraps a WSGI ``environ``/``start_response`` pair and exposes the subset
    of the mod_python request API that legacy Invenio code relies on.
    @see: <http://www.modpython.org/live/current/doc-html/pyapi-mprequest.html>
    """
    def __init__(self, environ, start_response):
        self.response = Response()
        self.__environ = environ
        self.__start_response = start_response
        self.__response_sent_p = False
        self.__content_type_set_p = False
        ## Output is accumulated here until flush() pushes it to the client.
        self.__buffer = ''
        self.__low_level_headers = []
        self.__filename = None
        self.__disposition_type = None
        self.__bytes_sent = 0
        self.__allowed_methods = []
        self.__cleanups = []
        self.headers_out = {'Cache-Control': None}
        #self.headers_out.update(dict(request.headers))
        ## See: <http://www.python.org/dev/peps/pep-0333/#the-write-callable>
        self.__write = None
        self.__write_error = False
        self.__errors = environ['wsgi.errors']
        self.__headers_in = table([])
        ## Set once the WSGI input/output has been touched; after that the
        ## original environ can no longer be handed out (see
        ## get_original_wsgi_environment).
        self.__tainted = False
        self.__is_https = self.__environ.get('wsgi.url_scheme') == 'https'
        ## When True, flush() rewrites http:// URLs to HTTPS in the output.
        self.__replace_https = False
        self.track_writings = False
        self.__what_was_written = ""
        ## Rebuild mod_python-style incoming headers from the CGI-style
        ## HTTP_* environ keys.
        for key, value in environ.iteritems():
            if key.startswith('HTTP_'):
                self.__headers_in[key[len('HTTP_'):].replace('_', '-')] = value
        if environ.get('CONTENT_LENGTH'):
            self.__headers_in['content-length'] = environ['CONTENT_LENGTH']
        if environ.get('CONTENT_TYPE'):
            self.__headers_in['content-type'] = environ['CONTENT_TYPE']
    def get_wsgi_environ(self):
        return self.__environ
    def get_post_form(self):
        """ Returns only POST form. """
        self.__tainted = True
        return request.values.to_dict(flat=True)
    def get_response_sent_p(self):
        return self.__response_sent_p
    def get_low_level_headers(self):
        return self.__low_level_headers
    def get_buffer(self):
        return self.__buffer
    def write(self, string, flush=1):
        """Append *string* to the output buffer, UTF-8 encoding unicode."""
        if isinstance(string, unicode):
            self.__buffer += string.encode('utf8')
        else:
            self.__buffer += string
        if flush:
            self.flush()
    def flush(self):
        """Send headers (once) and push the buffered output to the client."""
        self.send_http_header()
        if self.__buffer:
            self.__bytes_sent += len(self.__buffer)
            try:
                if not self.__write_error:
                    if self.__replace_https:
                        self.__write(https_replace(self.__buffer))
                    else:
                        if self.__buffer:
                            self.__write(self.__buffer)
                    if self.track_writings:
                        if self.__replace_https:
                            self.__what_was_written += https_replace(self.__buffer)
                        else:
                            self.__what_was_written += self.__buffer
            except IOError, err:
                if "failed to write data" in str(err) or "client connection closed" in str(err):
                    ## Let's just log this exception without alerting the admin:
                    register_exception(req=self)
                    self.__write_error = True ## This flag is there just
                    ## to not report later other errors to the admin.
                else:
                    raise
            self.__buffer = ''
    def set_content_type(self, content_type):
        self.__content_type_set_p = True
        self.response.content_type = content_type
        if self.__is_https:
            ## Only rewrite URLs in textual content that may embed links.
            if content_type.startswith("text/html") or content_type.startswith("application/rss+xml"):
                self.__replace_https = True
    def get_content_type(self):
        return self.response.content_type
    def send_http_header(self):
        """Copy collected headers into the response and bind the writer."""
        for (k, v) in self.__low_level_headers:
            self.response.headers[k] = v
        for k, v in self.headers_out.iteritems():
            self.response.headers[k] = v
        self.__write = self.response.stream.write
    def get_unparsed_uri(self):
        return '?'.join([self.__environ['PATH_INFO'], self.__environ['QUERY_STRING']])
    def get_uri(self):
        return request.environ['PATH_INFO']
    def get_headers_in(self):
        return request.headers
    def get_subprocess_env(self):
        return self.__environ
    def add_common_vars(self):
        ## mod_python API compatibility stub: nothing to do under WSGI.
        pass
    def get_args(self):
        return request.environ['QUERY_STRING']
    def get_remote_ip(self):
        """Return the client IP, honouring trusted reverse-proxy headers."""
        if 'X-FORWARDED-FOR' in self.__headers_in and \
               self.__headers_in.get('X-FORWARDED-SERVER', '') == \
               self.__headers_in.get('X-FORWARDED-HOST', '') == \
               urlparse(CFG_SITE_URL)[1]:
            # we are using proxy setup
            if self.__environ.get('REMOTE_ADDR') in CFG_WEBSTYLE_REVERSE_PROXY_IPS:
                # we trust this proxy
                ip_list = self.__headers_in['X-FORWARDED-FOR'].split(',')
                for ip in ip_list:
                    if _RE_IPADDRESS_START.match(ip):
                        return ip
                # no IP has the correct format, return a default IP
                return '10.0.0.10'
            else:
                # we don't trust this proxy
                register_exception(prefix="You are running in a proxy configuration, but the " + \
                                   "CFG_WEBSTYLE_REVERSE_PROXY_IPS variable does not contain " + \
                                   "the IP of your proxy, thus the remote IP addresses of your " + \
                                   "clients are not trusted. Please configure this variable.",
                                   alert_admin=True)
                return '10.0.0.11'
        return request.remote_addr
    def get_remote_host(self):
        return request.environ.get('REMOTE_HOST', # apache
                request.environ.get('HTTP_HOST',
                    '0.0.0.0')) # not found
    def get_header_only(self):
        return request.environ['REQUEST_METHOD'] == 'HEAD'
    def set_status(self, status):
        self.response.status_code = status
    def get_status(self):
        return self.response.status_code
    def get_wsgi_status(self):
        """Return the status in WSGI form, e.g. '404 Not Found'."""
        return '%s %s' % (self.response.status_code,
                          HTTP_STATUS_MAP.get(int(self.response.status_code),
                                              'Explanation not available'))
    def sendfile(self, path, offset=0, the_len=-1):
        """Stream the file at *path* to the client.

        Starts at *offset*; sends at most *the_len* bytes (everything when
        negative).  Returns the total bytes sent on this request so far.
        """
        try:
            self.send_http_header()
            file_to_send = open(path)
            file_to_send.seek(offset)
            file_wrapper = FileWrapper(file_to_send)
            count = 0
            if the_len < 0:
                for chunk in file_wrapper:
                    count += len(chunk)
                    self.__bytes_sent += len(chunk)
                    self.__write(chunk)
            else:
                for chunk in file_wrapper:
                    if the_len >= len(chunk):
                        the_len -= len(chunk)
                        count += len(chunk)
                        self.__bytes_sent += len(chunk)
                        self.__write(chunk)
                    else:
                        count += the_len
                        self.__bytes_sent += the_len
                        self.__write(chunk[:the_len])
                        break
        except IOError, err:
            if "failed to write data" in str(err) or "client connection closed" in str(err):
                ## Let's just log this exception without alerting the admin:
                register_exception(req=self)
            else:
                raise
        return self.__bytes_sent
    def set_content_length(self, content_length):
        if content_length is not None:
            self.response.headers['content-length'] = str(content_length)
        else:
            del self.response.headers['content-length']
    def is_https(self):
        return self.__is_https
    def get_method(self):
        return request.environ['REQUEST_METHOD']
    def get_hostname(self):
        return request.environ.get('HTTP_HOST', '')
    def set_filename(self, filename):
        """Set the download filename (content-disposition, default inline)."""
        self.__filename = filename
        if self.__disposition_type is None:
            self.__disposition_type = 'inline'
        self.response.headers['content-disposition'] = '%s; filename=%s' % (self.__disposition_type, self.__filename)
    def set_encoding(self, encoding):
        if encoding:
            self.response.headers['content-encoding'] = str(encoding)
        else:
            del self.response.headers['content-encoding']
    def get_bytes_sent(self):
        return self.__bytes_sent
    def log_error(self, message):
        self.__errors.write(message.strip() + '\n')
    def get_content_type_set_p(self):
        return self.__content_type_set_p and \
               bool(self.response.headers['content-type'])
    def allow_methods(self, methods, reset=0):
        if reset:
            self.__allowed_methods = []
        self.__allowed_methods += [method.upper().strip() for method in methods]
    def get_allowed_methods(self):
        return self.__allowed_methods
    def readline(self, hint=None):
        try:
            return self.__environ['wsgi.input'].readline(hint)
        except TypeError:
            ## the hint param is not part of wsgi pep, although
            ## it's great to exploit it in when reading FORM
            ## with large files, in order to avoid filling up the memory
            ## Too bad it's not there :-(
            return self.__environ['wsgi.input'].readline()
    def readlines(self, hint=None):
        return self.__environ['wsgi.input'].readlines(hint)
    def read(self, hint=None):
        return self.__environ['wsgi.input'].read(hint)
    def register_cleanup(self, callback, data=None):
        ## Cleanups are delegated to Flask's after-request machinery.
        @after_this_request
        def f(response):
            callback(data)
    def get_cleanups(self):
        return self.__cleanups
    def get_referer(self):
        return request.referrer
    def get_what_was_written(self):
        return self.__what_was_written
    def __str__(self):
        from pprint import pformat
        out = ""
        for key in dir(self):
            try:
                if not callable(getattr(self, key)) and not key.startswith("_SimulatedModPythonRequest") and not key.startswith('__'):
                    out += 'req.%s: %s\n' % (key, pformat(getattr(self, key)))
            except:
                pass
        return out
    def get_original_wsgi_environment(self):
        """
        Return the original WSGI environment used to initialize this request
        object.
        @return: environ, start_response
        @raise AssertionError: in case the environment has been altered, i.e.
            either the input has been consumed or something has already been
            written to the output.
        """
        assert not self.__tainted, "The original WSGI environment is tainted since at least req.write or req.form has been used."
        return self.__environ, self.__start_response
    ## mod_python-style attribute access, mapped onto the getters/setters
    ## above.
    content_type = property(get_content_type, set_content_type)
    unparsed_uri = property(get_unparsed_uri)
    uri = property(get_uri)
    headers_in = property(get_headers_in)
    subprocess_env = property(get_subprocess_env)
    args = property(get_args)
    header_only = property(get_header_only)
    status = property(get_status, set_status)
    method = property(get_method)
    hostname = property(get_hostname)
    filename = property(fset=set_filename)
    encoding = property(fset=set_encoding)
    bytes_sent = property(get_bytes_sent)
    content_type_set_p = property(get_content_type_set_p)
    allowed_methods = property(get_allowed_methods)
    response_sent_p = property(get_response_sent_p)
    form = property(get_post_form)
    remote_ip = property(get_remote_ip)
    remote_host = property(get_remote_host)
    referer = property(get_referer)
    what_was_written = property(get_what_was_written)
def alert_admin_for_server_status_p(status, referer):
    """
    Check the configuration variable
    CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST to see if the exception should
    be registered and the admin should be alerted.
    """
    status = str(status)
    for raw_pattern in CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST:
        pattern = raw_pattern.lower()
        ## A trailing "r" (e.g. "404 r") means: alert only when a referer
        ## header is present.
        must_have_referer = pattern.endswith('r')
        if must_have_referer:
            pattern = pattern[:-1].strip()  ## "404 r" -> "404"
        if not fnmatch(status, pattern):
            continue
        if referer or not must_have_referer:
            return True
    return False
def application(environ, start_response, handler=None):
    """
    Entry point for wsgi.

    Dispatches the request to *handler* (default: the Invenio handler),
    translates SERVER_RETURN statuses into proper HTTP responses and
    always returns the werkzeug Response built by the simulated request.
    """
    ## Needed for mod_wsgi, see: <http://code.google.com/p/modwsgi/wiki/ApplicationIssues>
    req = SimulatedModPythonRequest(environ, start_response)
    #print 'Starting mod_python simulation'
    try:
        if handler is None:
            from invenio.webinterface_layout import invenio_handler
            invenio_handler(req)
        else:
            handler(req)
        req.flush()
    ## TODO for future reimplementation of stream_file
    #except StreamFileException as e:
    #    return e.value
    except SERVER_RETURN, status:
        ## A handler may raise SERVER_RETURN carrying either a werkzeug
        ## response (pass it through) or a numeric HTTP status.
        redirection, = status.args
        from werkzeug.wrappers import BaseResponse
        if isinstance(redirection, BaseResponse):
            return redirection
        status = int(str(status))
        if status == 404:
            from werkzeug.exceptions import NotFound
            raise NotFound()
        if status not in (OK, DONE):
            req.status = status
            req.headers_out['content-type'] = 'text/html'
            admin_to_be_alerted = alert_admin_for_server_status_p(status,
                                              req.headers_in.get('referer'))
            if admin_to_be_alerted:
                register_exception(req=req, alert_admin=True)
            if not req.response_sent_p:
                start_response(req.get_wsgi_status(), req.get_low_level_headers(), sys.exc_info())
            ## Python 2 map(): eagerly writes every error-page chunk.
            map(req.write, generate_error_page(req, admin_to_be_alerted))
            req.flush()
    finally:
        ##for (callback, data) in req.get_cleanups():
        ##    callback(data)
        #if hasattr(req, '_session'):
        #    ## The session handler saves for caching a request_wrapper
        #    ## in req.
        #    ## This saves req as an attribute, creating a circular
        #    ## reference.
        #    ## Since we have have reached the end of the request handler
        #    ## we can safely drop the request_wrapper so to avoid
        #    ## memory leaks.
        #    delattr(req, '_session')
        #if hasattr(req, '_user_info'):
        #    ## For the same reason we can delete the user_info.
        #    delattr(req, '_user_info')
        ## as suggested in
        ## <http://www.python.org/doc/2.3.5/lib/module-gc.html>
        del gc.garbage[:]
    return req.response
def generate_error_page(req, admin_was_alerted=True, page_already_started=False):
    """
    Returns an iterable with the error page to be sent to the user browser.

    When page_already_started is True only the error body is returned,
    otherwise it is wrapped in a full Invenio page.
    """
    from invenio.webpage import page
    from invenio import template
    webstyle_templates = template.load('webstyle')
    ln = req.form.get('ln', CFG_SITE_LANG)
    body = webstyle_templates.tmpl_error_page(
        status=req.get_wsgi_status(), ln=ln,
        admin_was_alerted=admin_was_alerted)
    if page_already_started:
        return [body]
    return [page(title=req.get_wsgi_status(), body=body, language=ln, req=req)]
def is_static_path(path):
    """
    Check whether path corresponds to an existing file under CFG_WEBDIR.
    @param path: the path.
    @type path: string
    @return: the absolute filesystem path when it corresponds to an existing
        file under CFG_WEBDIR, None otherwise (note: a path, not a boolean).
    @rtype: string or None
    """
    path = os.path.abspath(CFG_WEBDIR + path)
    ## The startswith() check blocks '..' traversal out of CFG_WEBDIR.
    if path.startswith(CFG_WEBDIR) and os.path.isfile(path):
        return path
    return None
def is_mp_legacy_publisher_path(path):
    """
    Checks path corresponds to an existing Python file under CFG_WEBDIR.
    @param path: the path.
    @type path: string
    @return: the path of the module to load and the function to call there,
        or (None, None) when the path is not publishable.
    @rtype: tuple
    """
    path = path.split('/')
    ## Look for the first component ending in '.py': everything up to it is
    ## the module file, everything after it names the handler function.
    for index, component in enumerate(path):
        if component.endswith('.py'):
            possible_module = os.path.abspath(CFG_WEBDIR + os.path.sep + os.path.sep.join(path[:index + 1]))
            possible_handler = '/'.join(path[index + 1:]).strip()
            if possible_handler.startswith('_'):
                ## Underscore-prefixed handlers are private: never publish.
                return None, None
            if not possible_handler:
                possible_handler = 'index'
            ## startswith() guards against escaping CFG_WEBDIR via '..'.
            if os.path.exists(possible_module) and possible_module.startswith(CFG_WEBDIR):
                return (possible_module, possible_handler)
    else:
        ## No publishable '.py' component found in the whole path.
        return None, None
def mp_legacy_publisher(req, possible_module, possible_handler):
    """
    mod_python legacy publisher minimum implementation.

    Loads *possible_module* from disk, resolves *possible_handler* inside
    it and calls it with the request plus the submitted form values.
    @raise SERVER_RETURN: HTTP_NOT_FOUND when the handler does not exist or
        does not take ``req`` as its first argument.
    """
    from invenio.session import get_session
    from invenio.webinterface_handler import CFG_HAS_HTTPS_SUPPORT, CFG_FULL_HTTPS
    ## SECURITY NOTE: this executes arbitrary code from files under
    ## CFG_WEBDIR; is_mp_legacy_publisher_path() must have vetted the path.
    the_module = open(possible_module).read()
    module_globals = {}
    exec(the_module, module_globals)
    if possible_handler in module_globals and callable(module_globals[possible_handler]):
        from invenio.webinterface_handler import _check_result
        ## req is the required first parameter of any handler
        expected_args = list(inspect.getargspec(module_globals[possible_handler])[0])
        if not expected_args or 'req' != expected_args[0]:
            ## req was not the first argument. Too bad!
            raise SERVER_RETURN, HTTP_NOT_FOUND
        ## the req.form must be casted to dict because of Python 2.4 and earlier
        ## otherwise any object exposing the mapping interface can be
        ## used with the magic **
        from flask import request
        form = dict(request.values.to_dict(flat=True))
        for key, value in form.items():
            ## FIXME: this is a backward compatibility workaround
            ## because most of the old administration web handler
            ## expect parameters to be of type str.
            ## When legacy publisher will be removed all this
            ## pain will go away anyway :-)
            if isinstance(value, unicode):
                form[key] = value.encode('utf8')
            else:
                ## NOTE: this is a workaround for e.g. legacy webupload
                ## that is still using legacy publisher and expect to
                ## have a file (Field) instance instead of a string.
                form[key] = value
        if (CFG_FULL_HTTPS or CFG_HAS_HTTPS_SUPPORT and get_session(req).need_https) and not req.is_https():
            from invenio.urlutils import redirect_to_url
            # We need to isolate the part of the URI that is after
            # CFG_SITE_URL, and append that to our CFG_SITE_SECURE_URL.
            original_parts = urlparse(req.unparsed_uri)
            plain_prefix_parts = urlparse(CFG_SITE_URL)
            secure_prefix_parts = urlparse(CFG_SITE_SECURE_URL)
            # Compute the new path
            plain_path = original_parts[2]
            plain_path = secure_prefix_parts[2] + \
                         plain_path[len(plain_prefix_parts[2]):]
            # ...and recompose the complete URL
            final_parts = list(secure_prefix_parts)
            final_parts[2] = plain_path
            final_parts[-3:] = original_parts[-3:]
            target = urlunparse(final_parts)
            redirect_to_url(req, target)
        try:
            return _check_result(req, module_globals[possible_handler](req, **form))
        except TypeError, err:
            ## The handler rejected our keyword arguments: retry with only
            ## the parameters its signature actually declares.
            if ("%s() got an unexpected keyword argument" % possible_handler) in str(err) or ('%s() takes at least' % possible_handler) in str(err):
                inspected_args = inspect.getargspec(module_globals[possible_handler])
                expected_args = list(inspected_args[0])
                expected_defaults = list(inspected_args[3])
                ## Reversed so positional args and their defaults line up
                ## from the end of the signature.
                expected_args.reverse()
                expected_defaults.reverse()
                register_exception(req=req, prefix="Wrong GET parameter set in calling a legacy publisher handler for %s: expected_args=%s, found_args=%s" % (possible_handler, repr(expected_args), repr(req.form.keys())), alert_admin=CFG_DEVEL_SITE)
                cleaned_form = {}
                for index, arg in enumerate(expected_args):
                    if arg == 'req':
                        continue
                    if index < len(expected_defaults):
                        cleaned_form[arg] = form.get(arg, expected_defaults[index])
                    else:
                        cleaned_form[arg] = form.get(arg, None)
                return _check_result(req, module_globals[possible_handler](req, **cleaned_form))
            else:
                raise
    else:
        raise SERVER_RETURN, HTTP_NOT_FOUND
def check_wsgiref_testing_feasability():
    """
    In order to use wsgiref for running Invenio, CFG_SITE_URL and
    CFG_SITE_SECURE_URL must not use HTTPS because SSL is not supported.

    Prints an explanation to stderr and exits the process with status 1
    when either URL starts with "https".
    """
    if CFG_SITE_URL.lower().startswith('https'):
        print >> sys.stderr, """
ERROR: SSL is not supported by the wsgiref simple server implementation.
Please set CFG_SITE_URL not to start with "https".
Currently CFG_SITE_URL is set to: "%s".""" % CFG_SITE_URL
        sys.exit(1)
    if CFG_SITE_SECURE_URL.lower().startswith('https'):
        print >> sys.stderr, """
ERROR: SSL is not supported by the wsgiref simple server implementation.
Please set CFG_SITE_SECURE_URL not to start with "https".
Currently CFG_SITE_SECURE_URL is set to: "%s".""" % CFG_SITE_SECURE_URL
        sys.exit(1)
def wsgi_handler_test(port=80):
    """
    Simple WSGI testing environment based on wsgiref.

    @param port: TCP port for the development server.
    @note: runs the Flask app with debug=True and static file serving, so
        this is strictly for local testing, never production.
    """
    check_wsgiref_testing_feasability()
    from invenio.webinterface_handler_flask import create_invenio_flask_app
    app = create_invenio_flask_app(wsgi_serve_static_files=True)
    app.run(debug=True, port=port)
def main():
    """Command-line entry point: optionally start the wsgiref test server."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-t', '--test', action='store_true',
                      dest='test', default=False,
                      help="Run a WSGI test server via wsgiref (not using Apache).")
    parser.add_option('-p', '--port', type='int', dest='port', default='80',
                      help="The port where the WSGI test server will listen. [80]")
    opts, dummy_args = parser.parse_args()
    if not opts.test:
        ## Nothing requested: show usage.
        parser.print_help()
        return
    wsgi_handler_test(opts.port)
## Allow running this module directly, e.g. to start the wsgiref test server.
if __name__ == "__main__":
    main()
|
"""Provides classes for handling API requests."""
# -*- coding: utf-8 -*-
from distutils.util import strtobool
from itertools import chain
import logging
import json
from django.utils import timezone
from datetime import timedelta
import dateutil.parser
import requests
from cachetools.func import ttl_cache
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Permission
from django.contrib.auth.models import Group
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.db.models import Q
from django.http import JsonResponse, HttpResponse, HttpResponseNotFound
from django.views.decorators.http import require_http_methods
from django.core.exceptions import ValidationError as DjangoValidationError
from jobs.models import HDXExportRegion, PartnerExportRegion, Job, SavedFeatureSelection
from rest_framework import filters, permissions, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from api.serializers import (ConfigurationSerializer, ExportRunSerializer, ExportTaskSerializer,
HDXExportRegionListSerializer,
HDXExportRegionSerializer, JobGeomSerializer,
PartnerExportRegionListSerializer, PartnerExportRegionSerializer,
JobSerializer)
from tasks.models import ExportRun
from tasks.task_runners import ExportTaskRunner
from .permissions import IsHDXAdmin, IsOwnerOrReadOnly, IsMemberOfGroup
from .renderers import HOTExportApiRenderer
# Get an instance of a logger
LOG = logging.getLogger(__name__)
# controls how api responses are rendered
renderer_classes = (JSONRenderer, HOTExportApiRenderer)
def bbox_to_geom(s):
    """Parse a 'minx,miny,maxx,maxy' string into a GEOS polygon (SRID 4326).

    Raises a DRF ValidationError for any malformed input.
    """
    try:
        corners = s.split(',')
        return GEOSGeometry(Polygon.from_bbox(corners), srid=4326)
    except Exception:
        raise ValidationError({'bbox': 'Query bounding box is malformed.'})
class JobViewSet(viewsets.ModelViewSet):
    """
    ##Export API Endpoint.
    Main endpoint for export creation and managment. Provides endpoints
    for creating, listing and deleting export jobs.
    Updates to existing jobs are not supported as exports can be cloned.
    Request data should be posted as `application/json`.
    <code>
    curl -v -H "Content-Type: application/json" -H "Authorization: Token [your token]"
    --data @request.json http://EXPORT_TOOL_URL/api/jobs
    </code>
    To monitor the resulting export run retreive the `uid` value from the returned json
    and call http://export.hotosm.org/api/runs?job_uid=[the returned uid]
    """
    serializer_class = JobSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly)
    lookup_field = 'uid'
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('name', 'description', 'event', 'user__username')
    ordering_fields = ('__all__',)
    ordering = ('-updated_at')

    def get_queryset(self):
        """Return jobs visible to the requester.

        Supports optional `all`, `bbox`, `before` and `after` query params;
        non-list actions always behave as all=true so detail routes work.
        """
        user = self.request.user
        queryset = Job.objects
        all = strtobool(self.request.query_params.get('all', 'false')) or self.action != "list"
        bbox = self.request.query_params.get('bbox', None)
        before = self.request.query_params.get('before', None)
        after = self.request.query_params.get('after', None)
        if before is not None:
            queryset = queryset.filter(Q(created_at__lte=before))
        if after is not None:
            queryset = queryset.filter(Q(created_at__gte=after))
        if bbox is not None:
            bbox = bbox_to_geom(bbox)
            queryset = queryset.filter(Q(the_geom__within=bbox))
        if not all:
            queryset = queryset.filter(Q(user_id=user.id))
        # Never expose other users' unpublished jobs.
        return queryset.filter(Q(user_id=user.id) | Q(published=True))

    def perform_create(self, serializer):
        """Save the new job and start an export run.

        Rate limited per user. BUGFIX: the threshold used to be 10 while the
        error message promised 5; it is now 5, matching the message.
        """
        if Job.objects.filter(created_at__gt=timezone.now()-timedelta(minutes=60),user=self.request.user).count() > 5:
            raise ValidationError({"the_geom":["You are rate limited to 5 exports per hour."]})
        job = serializer.save()
        task_runner = ExportTaskRunner()
        task_runner.run_task(job_uid=str(job.uid))

    @detail_route()
    def geom(self, request, uid=None):
        """Detail route returning only the job's geometry."""
        job = Job.objects.get(uid=uid)
        geom_serializer = JobGeomSerializer(job)
        return Response(geom_serializer.data)
class ConfigurationViewSet(viewsets.ModelViewSet):
    """ API endpoints for stored YAML configurations.
    Note that these are mutable - a configuration can be edited."""
    serializer_class = ConfigurationSerializer
    permission_classes = (IsOwnerOrReadOnly,
                          permissions.IsAuthenticatedOrReadOnly)
    lookup_field = 'uid'
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('name', 'description')
    ## NOTE(review): this is the plain string '__all__' (the parentheses do
    ## not make a tuple), unlike JobViewSet's ('__all__',); DRF treats the
    ## string form as "orderable on any field" -- confirm which is intended.
    ordering_fields = ('__all__')
    def get_queryset(self):
        ## Hide soft-deleted configurations; pinned ones sort first.
        user = self.request.user
        queryset = SavedFeatureSelection.objects.filter(deleted=False).order_by('-pinned','name')
        ## 'all' lists everyone's configs; non-list actions always behave as
        ## all=true so detail routes keep working.
        all = strtobool(self.request.query_params.get('all', 'false')) or self.action != "list"
        if not all:
            queryset = queryset.filter(Q(user_id=user.id))
        ## Never expose other users' private configurations.
        return queryset.filter(Q(user_id=user.id) | Q(public=True))
class ExportRunViewSet(viewsets.ModelViewSet):
    """
    Export Run API Endpoint.
    Poll this endpoint for querying export runs.
    """
    serializer_class = ExportRunSerializer
    permission_classes = (permissions.AllowAny, )
    lookup_field = 'uid'
    def create(self, request, format='json'):
        """
        runs the job.

        Re-runs the job given by the `job_uid` query parameter; a user may
        start at most one run per minute (otherwise RATE_LIMITED / 400).
        """
        if ExportRun.objects.filter(created_at__gt=timezone.now()-timedelta(minutes=1),user=request.user).count() >= 1:
            return Response({'status': 'RATE_LIMITED'}, status=status.HTTP_400_BAD_REQUEST)
        job_uid = request.query_params.get('job_uid', None)
        task_runner = ExportTaskRunner()
        task_runner.run_task(job_uid=job_uid, user=request.user)
        return Response({'status': 'OK'}, status=status.HTTP_201_CREATED)
    def get_queryset(self):
        return ExportRun.objects.all().order_by('-started_at')
    def retrieve(self, request, uid=None, *args, **kwargs):
        """
        Get a single Export Run.

        NOTE(review): responds with a JSON *list* (empty when the uid is
        unknown) rather than a single object or a 404.
        """
        queryset = ExportRun.objects.filter(uid=uid)
        serializer = self.get_serializer(
            queryset, many=True, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)
    def list(self, request, *args, **kwargs):
        """
        List the Export Runs for a single Job.

        The job is selected with the `job_uid` query parameter; newest
        runs come first.
        """
        job_uid = self.request.query_params.get('job_uid', None)
        queryset = self.filter_queryset(
            ExportRun.objects.filter(job__uid=job_uid).order_by('-started_at'))
        serializer = self.get_serializer(
            queryset, many=True, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)
class HDXExportRegionViewSet(viewsets.ModelViewSet):
    """ API endpoint for HDX regions.
    Viewing and editing these is limited to a set of admins."""
    ordering_fields = '__all__'
    ordering = ('job__description',)
    permission_classes = (IsHDXAdmin, )
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('job__name', 'job__description')

    def get_queryset(self):
        """Live (non-deleted) regions, optionally filtered by schedule_period."""
        queryset = HDXExportRegion.objects.filter(deleted=False)
        schedule_period = self.request.query_params.get('schedule_period', None)
        if schedule_period not in [None,'any']:
            queryset = queryset.filter(Q(schedule_period=schedule_period))
        # Prefetch run/task data needed by the serializers; the (large)
        # geometry column is not needed here.
        return queryset.prefetch_related(
            'job__runs__tasks').defer('job__the_geom')

    def get_serializer_class(self):
        """Use the lighter serializer for the list action."""
        if self.action == "list":
            return HDXExportRegionListSerializer
        return HDXExportRegionSerializer

    def _sync_to_hdx(self, serializer):
        """Push the saved region to HDX unless syncing is disabled (dev setups)."""
        if settings.SYNC_TO_HDX:
            serializer.instance.sync_to_hdx()
        else:
            print("Stubbing interaction with HDX API.")

    # perform_create/perform_update previously duplicated the sync logic;
    # both now delegate to _sync_to_hdx.
    def perform_create(self, serializer):
        serializer.save()
        self._sync_to_hdx(serializer)

    def perform_update(self, serializer):
        serializer.save()
        self._sync_to_hdx(serializer)
class PartnerExportRegionViewSet(viewsets.ModelViewSet):
    """API endpoint for partner export regions.

    Visibility is restricted to regions owned by one of the requesting
    user's groups (enforced both by IsMemberOfGroup and the queryset).
    """
    # get only Regions that belong to the user's Groups.
    ordering_fields = '__all__'
    ordering = ('job__description',)
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('job__name', 'job__description')
    permission_classes = (IsMemberOfGroup,)
    def get_queryset(self):
        ## Only live regions belonging to one of the user's groups; prefetch
        ## run/task data and skip the large geometry column.
        group_ids = self.request.user.groups.values_list('id')
        return PartnerExportRegion.objects.filter(deleted=False,group_id__in=group_ids).prefetch_related(
            'job__runs__tasks').defer('job__the_geom')
    def get_serializer_class(self):
        ## Lighter serializer for list views.
        if self.action == "list":
            return PartnerExportRegionListSerializer
        return PartnerExportRegionSerializer
@require_http_methods(['GET'])
def permalink(request, uid):
    """Public permalink for a job's latest completed export.

    Returns the tasks of the most recent COMPLETED run of the job *uid* as
    JSON, or 404 when the job does not exist, the uid is malformed, or no
    run has completed yet.
    """
    try:
        job = Job.objects.filter(uid=uid).first()
        if not job:
            return HttpResponseNotFound()
        run = job.runs.filter(status='COMPLETED').latest('finished_at')
        serializer = ExportTaskSerializer(run.tasks.all(),many=True)
        return HttpResponse(JSONRenderer().render(serializer.data))
    except ExportRun.DoesNotExist:
        # BUGFIX: a job with no completed runs used to raise through
        # latest() and produce a 500; treat it as not found instead.
        return HttpResponseNotFound()
    except DjangoValidationError:
        # Raised e.g. when *uid* is not a valid UUID.
        return HttpResponseNotFound()
@require_http_methods(['GET'])
def stats(request):
    """Return new-user/new-export counts (1/7/30 days) and last-100 job bboxes."""
    recent_jobs = Job.objects.order_by('-created_at')[:100]
    recent_bboxes = [j.the_geom.extent for j in recent_jobs]
    def _users_since(days):
        cutoff = timezone.now() - timedelta(days=days)
        return User.objects.filter(date_joined__gte=cutoff).count()
    def _exports_since(days):
        cutoff = timezone.now() - timedelta(days=days)
        return Job.objects.filter(created_at__gte=cutoff).count()
    payload = {
        'new_users': [_users_since(d) for d in (1, 7, 30)],
        'new_exports': [_exports_since(d) for d in (1, 7, 30)],
        'last_100_bboxes': recent_bboxes,
    }
    return HttpResponse(json.dumps(payload))
@require_http_methods(['GET'])
@login_required()
def request_geonames(request):
    """Geocode with GeoNames.

    Proxies the `q` query parameter to the configured GeoNames endpoint and
    returns its JSON. Responds 500 when GEONAMES_API_URL is unset and 502
    when GeoNames returns something other than a JSON object.
    """
    payload = {
        'maxRows': 20,
        'username': 'osm_export_tool',
        'style': 'full',
        'q': request.GET.get('q')
    }
    geonames_url = getattr(settings, 'GEONAMES_API_URL')
    if geonames_url:
        response = requests.get(geonames_url, params=payload).json()
        # BUGFIX: this was `assert isinstance(response, dict)`, which is
        # stripped under `python -O` and crashed with a 500 otherwise.
        if not isinstance(response, dict):
            return JsonResponse(
                {
                    'error': 'Unexpected response from geonames'
                },
                status=502, )
        return JsonResponse(response)
    else:
        return JsonResponse(
            {
                'error': 'A url was not provided for geonames'
            },
            status=500, )
@ttl_cache(ttl=60)
def _fetch_overpass_timestamp():
    """Fetch and parse the Overpass data timestamp, cached for 60 seconds."""
    r = requests.get('{}timestamp'.format(settings.OVERPASS_API_URL))
    # NOTE(review): this endpoint sometimes returns HTTP 200 with an empty
    # body, in which case dateutil raises and the view errors -- confirm
    # whether a fallback value is wanted.
    return dateutil.parser.parse(r.content)

@require_http_methods(['GET'])
@login_required()
def get_overpass_timestamp(request):
    """
    Endpoint to show the last OSM update timestamp on the Create page.

    BUGFIX: ttl_cache previously wrapped the view itself, so the cache key
    included the per-call HttpRequest and never repeated -- the cache never
    hit. Caching now wraps the request-independent fetch helper.
    """
    return JsonResponse({'timestamp': _fetch_overpass_timestamp()})
@login_required()
def get_overpass_status(request):
    """Proxy the Overpass API status page verbatim to the logged-in user."""
    r = requests.get('{}status'.format(settings.OVERPASS_API_URL))
    return HttpResponse(r.content)
@require_http_methods(['GET'])
@login_required()
def get_user_permissions(request):
    """Return the requester's permissions as unique 'app_label.codename' strings."""
    user = request.user
    if user.is_superuser:
        # Superusers implicitly hold every permission.
        perm_pairs = Permission.objects.all().values_list(
            'content_type__app_label', 'codename')
    else:
        # Direct permissions plus those granted through group membership.
        perm_pairs = chain(
            user.user_permissions.all().values_list('content_type__app_label',
                                                    'codename'),
            Permission.objects.filter(group__user=user).values_list(
                'content_type__app_label', 'codename'))
    dotted = [".".join(pair) for pair in set(perm_pairs)]
    return JsonResponse({
        "username": user.username,
        "permissions": dotted
    })
# get a list of partner organizations and their numeric IDs.
# this can be exposed to the public.
@require_http_methods(['GET'])
@login_required()
def get_groups(request):
    """List partner organizations as {'id', 'name'} records (login required)."""
    partner_groups = Group.objects.filter(is_partner=True)
    groups = [{'id': group.id, 'name': group.name} for group in partner_groups]
    return JsonResponse({'groups': groups})
rate limit is 5 #2 [#329]
"""Provides classes for handling API requests."""
# -*- coding: utf-8 -*-
from distutils.util import strtobool
from itertools import chain
import logging
import json
from django.utils import timezone
from datetime import timedelta
import dateutil.parser
import requests
from cachetools.func import ttl_cache
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Permission
from django.contrib.auth.models import Group
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.db.models import Q
from django.http import JsonResponse, HttpResponse, HttpResponseNotFound
from django.views.decorators.http import require_http_methods
from django.core.exceptions import ValidationError as DjangoValidationError
from jobs.models import HDXExportRegion, PartnerExportRegion, Job, SavedFeatureSelection
from rest_framework import filters, permissions, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from api.serializers import (ConfigurationSerializer, ExportRunSerializer, ExportTaskSerializer,
HDXExportRegionListSerializer,
HDXExportRegionSerializer, JobGeomSerializer,
PartnerExportRegionListSerializer, PartnerExportRegionSerializer,
JobSerializer)
from tasks.models import ExportRun
from tasks.task_runners import ExportTaskRunner
from .permissions import IsHDXAdmin, IsOwnerOrReadOnly, IsMemberOfGroup
from .renderers import HOTExportApiRenderer
# Get an instance of a logger
LOG = logging.getLogger(__name__)
# controls how api responses are rendered
renderer_classes = (JSONRenderer, HOTExportApiRenderer)
def bbox_to_geom(s):
    """Parse a 'minx,miny,maxx,maxy' string into a GEOS polygon (SRID 4326).

    Raises a DRF ValidationError for any malformed input.
    """
    try:
        return GEOSGeometry(Polygon.from_bbox(s.split(',')), srid=4326)
    except Exception:
        raise ValidationError({'bbox': 'Query bounding box is malformed.'})
class JobViewSet(viewsets.ModelViewSet):
    """
    ##Export API Endpoint.

    Main endpoint for export creation and management. Provides endpoints
    for creating, listing and deleting export jobs.
    Updates to existing jobs are not supported as exports can be cloned.

    Request data should be posted as `application/json`.

    <code>
    curl -v -H "Content-Type: application/json" -H "Authorization: Token [your token]"
    --data @request.json http://EXPORT_TOOL_URL/api/jobs
    </code>

    To monitor the resulting export run retrieve the `uid` value from the returned json
    and call http://export.hotosm.org/api/runs?job_uid=[the returned uid]
    """
    serializer_class = JobSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly)
    lookup_field = 'uid'
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('name', 'description', 'event', 'user__username')
    # was ('__all__',): DRF's OrderingFilter only treats the plain string
    # '__all__' as "allow every field"; a tuple containing it allows nothing.
    ordering_fields = '__all__'
    # was ('-updated_at') — a bare string masquerading as a tuple; DRF accepts
    # a string here, but the missing comma was misleading.
    ordering = ('-updated_at',)

    def get_queryset(self):
        """Jobs visible to the requester, filtered by optional query params.

        Supports ``before``/``after`` (created_at bounds), ``bbox``
        (geometry containment) and ``all`` (include other users' jobs).
        Always restricted to own-or-published jobs.
        """
        user = self.request.user
        queryset = Job.objects
        # renamed from `all`, which shadowed the builtin. Non-list actions
        # always see the full set so detail lookups by uid keep working.
        show_all = strtobool(self.request.query_params.get('all', 'false')) or self.action != "list"
        bbox = self.request.query_params.get('bbox', None)
        before = self.request.query_params.get('before', None)
        after = self.request.query_params.get('after', None)
        if before is not None:
            queryset = queryset.filter(Q(created_at__lte=before))
        if after is not None:
            queryset = queryset.filter(Q(created_at__gte=after))
        if bbox is not None:
            bbox = bbox_to_geom(bbox)
            queryset = queryset.filter(Q(the_geom__within=bbox))
        if not show_all:
            queryset = queryset.filter(Q(user_id=user.id))
        return queryset.filter(Q(user_id=user.id) | Q(published=True))

    def perform_create(self, serializer):
        """Save the job and launch its export run, enforcing the rate limit."""
        # was `> 5`, which actually allowed a 6th job within the hour;
        # `>= 5` matches the advertised limit of 5 exports per hour.
        if Job.objects.filter(created_at__gt=timezone.now()-timedelta(minutes=60),user=self.request.user).count() >= 5:
            raise ValidationError({"the_geom":["You are rate limited to 5 exports per hour."]})
        job = serializer.save()
        task_runner = ExportTaskRunner()
        task_runner.run_task(job_uid=str(job.uid))

    @detail_route()
    def geom(self, request, uid=None):
        """Return only the geometry of a single job (lighter than full detail)."""
        job = Job.objects.get(uid=uid)
        geom_serializer = JobGeomSerializer(job)
        return Response(geom_serializer.data)
class ConfigurationViewSet(viewsets.ModelViewSet):
    """API endpoints for stored YAML configurations.

    Unlike jobs, these are mutable — a configuration can be edited in place.
    """
    serializer_class = ConfigurationSerializer
    permission_classes = (IsOwnerOrReadOnly,
                          permissions.IsAuthenticatedOrReadOnly)
    lookup_field = 'uid'
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('name', 'description')
    ordering_fields = '__all__'

    def get_queryset(self):
        """Own-or-public selections, pinned first; `?all=true` widens a list."""
        user = self.request.user
        queryset = SavedFeatureSelection.objects.filter(deleted=False).order_by('-pinned','name')
        # non-list actions always see everything so uid lookups succeed
        show_all = strtobool(self.request.query_params.get('all', 'false')) or self.action != "list"
        if not show_all:
            queryset = queryset.filter(Q(user_id=user.id))
        return queryset.filter(Q(user_id=user.id) | Q(public=True))
class ExportRunViewSet(viewsets.ModelViewSet):
    """
    Export Run API Endpoint.

    Poll this endpoint for querying export runs.
    """
    serializer_class = ExportRunSerializer
    # runs are world-readable; rate limiting is the only guard on create
    permission_classes = (permissions.AllowAny, )
    lookup_field = 'uid'
    def create(self, request, format='json'):
        """
        Re-run the job identified by the `job_uid` query parameter.

        Limited to one run per user per minute; returns 400 RATE_LIMITED
        when exceeded, otherwise 201 after the task is queued.
        """
        if ExportRun.objects.filter(created_at__gt=timezone.now()-timedelta(minutes=1),user=request.user).count() >= 1:
            return Response({'status': 'RATE_LIMITED'}, status=status.HTTP_400_BAD_REQUEST)
        job_uid = request.query_params.get('job_uid', None)
        task_runner = ExportTaskRunner()
        # run_task resolves the uid itself; a missing/unknown uid is its concern
        task_runner.run_task(job_uid=job_uid, user=request.user)
        return Response({'status': 'OK'}, status=status.HTTP_201_CREATED)
    def get_queryset(self):
        return ExportRun.objects.all().order_by('-started_at')
    def retrieve(self, request, uid=None, *args, **kwargs):
        """
        Get a single Export Run.

        NOTE(review): serializes with many=True, so the response body is a
        JSON array (empty when the uid is unknown) rather than a single
        object — existing clients appear to rely on this shape.
        """
        queryset = ExportRun.objects.filter(uid=uid)
        serializer = self.get_serializer(
            queryset, many=True, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)
    def list(self, request, *args, **kwargs):
        """
        List the Export Runs for a single Job (`job_uid` query parameter),
        newest first.
        """
        job_uid = self.request.query_params.get('job_uid', None)
        queryset = self.filter_queryset(
            ExportRun.objects.filter(job__uid=job_uid).order_by('-started_at'))
        serializer = self.get_serializer(
            queryset, many=True, context={'request': request})
        return Response(serializer.data, status=status.HTTP_200_OK)
class HDXExportRegionViewSet(viewsets.ModelViewSet):
    """ API endpoint for HDX regions.
    Viewing and editing these is limited to a set of admins."""
    ordering_fields = '__all__'
    ordering = ('job__description',)
    permission_classes = (IsHDXAdmin, )
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('job__name', 'job__description')

    def get_queryset(self):
        """Non-deleted regions, optionally filtered by ?schedule_period."""
        queryset = HDXExportRegion.objects.filter(deleted=False)
        schedule_period = self.request.query_params.get('schedule_period', None)
        if schedule_period not in [None,'any']:
            queryset = queryset.filter(Q(schedule_period=schedule_period))
        return queryset.prefetch_related(
            'job__runs__tasks').defer('job__the_geom')

    def get_serializer_class(self):
        # the list serializer omits heavyweight per-region detail
        if self.action == "list":
            return HDXExportRegionListSerializer
        return HDXExportRegionSerializer

    def _sync_to_hdx(self, serializer):
        """Push the saved region to HDX unless syncing is disabled (dev setups)."""
        if settings.SYNC_TO_HDX:
            serializer.instance.sync_to_hdx()
        else:
            # was a bare print(); use the module logger like the rest of the file
            LOG.info("Stubbing interaction with HDX API.")

    def perform_create(self, serializer):
        serializer.save()
        self._sync_to_hdx(serializer)

    def perform_update(self, serializer):
        serializer.save()
        self._sync_to_hdx(serializer)
class PartnerExportRegionViewSet(viewsets.ModelViewSet):
    """Regions scoped to partner organizations.

    Only regions belonging to one of the requesting user's groups are
    visible (enforced both here and by IsMemberOfGroup).
    """
    ordering_fields = '__all__'
    ordering = ('job__description',)
    filter_backends = (filters.OrderingFilter, filters.SearchFilter, )
    search_fields = ('job__name', 'job__description')
    permission_classes = (IsMemberOfGroup,)

    def get_queryset(self):
        user_group_ids = self.request.user.groups.values_list('id')
        regions = PartnerExportRegion.objects.filter(
            deleted=False, group_id__in=user_group_ids)
        return regions.prefetch_related('job__runs__tasks').defer('job__the_geom')

    def get_serializer_class(self):
        # the list serializer omits heavyweight per-region detail
        if self.action == "list":
            return PartnerExportRegionListSerializer
        return PartnerExportRegionSerializer
@require_http_methods(['GET'])
def permalink(request, uid):
    """Public permalink: serialized tasks of the job's latest completed run.

    404s when the uid is unknown or malformed.
    """
    try:
        job = Job.objects.filter(uid=uid).first()
        if job is None:
            return HttpResponseNotFound()
        latest_run = job.runs.filter(status='COMPLETED').latest('finished_at')
        serialized = ExportTaskSerializer(latest_run.tasks.all(), many=True)
        return HttpResponse(JSONRenderer().render(serialized.data))
    except DjangoValidationError:
        # a malformed uid raises a validation error from the uid field lookup
        return HttpResponseNotFound()
@require_http_methods(['GET'])
def stats(request):
    """Summary stats: recent signups, recent exports, last 100 job bboxes."""
    recent_jobs = Job.objects.order_by('-created_at')[:100]
    last_100_bboxes = [job.the_geom.extent for job in recent_jobs]

    def users(days):
        # accounts created within the window
        cutoff = timezone.now() - timedelta(days=days)
        return User.objects.filter(date_joined__gte=cutoff).count()

    def exports(days):
        # jobs created within the window
        cutoff = timezone.now() - timedelta(days=days)
        return Job.objects.filter(created_at__gte=cutoff).count()

    new_users = [users(d) for d in (1, 7, 30)]
    new_exports = [exports(d) for d in (1, 7, 30)]
    return HttpResponse(json.dumps({
        'new_users': new_users,
        'new_exports': new_exports,
        'last_100_bboxes': last_100_bboxes,
    }))
@require_http_methods(['GET'])
@login_required()
def request_geonames(request):
    """Geocode with GeoNames.

    Proxies the `q` query parameter to the configured GeoNames endpoint
    and returns its JSON payload. Responds 500 with a JSON error when no
    endpoint is configured or when GeoNames replies with something other
    than a JSON object.
    """
    payload = {
        'maxRows': 20,
        'username': 'osm_export_tool',
        'style': 'full',
        'q': request.GET.get('q')
    }
    geonames_url = getattr(settings, 'GEONAMES_API_URL')
    if geonames_url:
        response = requests.get(geonames_url, params=payload).json()
        # was `assert isinstance(response, dict)` — asserts are stripped
        # under `python -O`; validate explicitly and fail loudly instead.
        if not isinstance(response, dict):
            return JsonResponse(
                {
                    'error': 'Unexpected response from geonames'
                },
                status=500, )
        return JsonResponse(response)
    else:
        return JsonResponse(
            {
                'error': 'A url was not provided for geonames'
            },
            status=500, )
@ttl_cache(ttl=60)
def _overpass_timestamp():
    """Fetch and parse the Overpass data timestamp, cached for 60 seconds.

    The cache lives on this zero-argument helper because decorating the
    view itself keyed the cache on the per-request `request` object —
    every request is a distinct object, so the cache never hit and only
    accumulated entries.
    """
    r = requests.get('{}timestamp'.format(settings.OVERPASS_API_URL))
    # NOTE(review): upstream sometimes returns 200 with empty content (see
    # docstring below); parse() will then raise and surface as a 500.
    return dateutil.parser.parse(r.content)


@require_http_methods(['GET'])
@login_required()
def get_overpass_timestamp(request):
    """
    Endpoint to show the last OSM update timestamp on the Create page.
    this sometimes fails, returning a HTTP 200 but empty content.
    """
    return JsonResponse({'timestamp': _overpass_timestamp()})
@require_http_methods(['GET'])
@login_required()
def get_overpass_status(request):
    """Proxy the Overpass /status page verbatim.

    Adds the GET-only restriction every sibling view in this module has;
    it was missing here.
    """
    r = requests.get('{}status'.format(settings.OVERPASS_API_URL))
    return HttpResponse(r.content)
@require_http_methods(['GET'])
@login_required()
def get_user_permissions(request):
    """Return the requesting user's permissions as "app_label.codename" strings.

    Superusers get every permission; others get the union of their direct
    and group-derived permissions.
    """
    user = request.user
    # local renamed from `permissions`, which shadowed the imported module
    if user.is_superuser:
        perms = Permission.objects.all().values_list(
            'content_type__app_label', 'codename')
    else:
        perms = chain(
            user.user_permissions.all().values_list('content_type__app_label',
                                                    'codename'),
            Permission.objects.filter(group__user=user).values_list(
                'content_type__app_label', 'codename'))
    return JsonResponse({
        "username": user.username,
        "permissions": [".".join(pair) for pair in set(perms)],
    })
# get a list of partner organizations and their numeric IDs.
# this can be exposed to the public.
@require_http_methods(['GET'])
@login_required()
def get_groups(request):
    """List partner organizations as {id, name} pairs."""
    partner_groups = Group.objects.filter(is_partner=True)
    groups = [{'id': g.id, 'name': g.name} for g in partner_groups]
    return JsonResponse({'groups': groups})
|
# This file is part of beets.
# Copyright 2012, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
""" Clears tag fields in media files."""
from __future__ import print_function
import sys
import re
from beets.plugins import BeetsPlugin
from beets import ui
from beets.library import ITEM_KEYS
from beets.importer import action
__author__ = 'baobab@heresiarch.info'
__version__ = '0.9'
class ZeroPlugin(BeetsPlugin):
    """Beets plugin that blanks configured tag fields when items are written.

    NOTE(review): Python-2 era code (`unicode`, argument-forwarding
    `__new__`); left structurally untouched on purpose.
    """
    # singleton instance shared by the module-level event listeners below
    _instance = None
    # when True, diagnostics are printed to stderr via dbg()
    debug = False
    # item fields to zero out (class-level state, shared via the singleton)
    fields = []
    # field name -> list of regex patterns; the field is only zeroed when
    # its current value matches one of them ('.' means "always")
    patterns = {}
    # ensures the "as-is" warning is emitted at most once per session
    warned = False
    def __new__(cls, *args, **kwargs):
        # classic singleton: every ZeroPlugin() call yields the same object
        if cls._instance is None:
            cls._instance = super(ZeroPlugin,
                                  cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def __str__(self):
        """Human-readable dump of the plugin's current configuration."""
        return ('[zero]\n debug = {0}\n fields = {1}\n patterns = {2}\n'
                ' warned = {3}'.format(self.debug, self.fields, self.patterns,
                                       self.warned))
    def dbg(self, *args):
        """Prints message to stderr (only when debug is enabled)."""
        if self.debug:
            print('[zero]', *args, file=sys.stderr)
    def configure(self, config):
        """Read the [zero] config section: debug flag, field list and the
        per-field regex patterns (defaulting to '.' = match anything)."""
        if not config.has_section('zero'):
            self.dbg('plugin is not configured')
            return
        # NOTE(review): debug defaults to True here — confirm intentional
        self.debug = ui.config_val(config, 'zero', 'debug', True, bool)
        for f in ui.config_val(config, 'zero', 'fields', '').split():
            if f not in ITEM_KEYS:
                self.dbg(
                    'invalid field \"{0}\" (try \'beet fields\')'.format(f)
                )
            else:
                self.fields.append(f)
                p = ui.config_val(config, 'zero', f, '').split()
                if p:
                    self.patterns[f] = p
                else:
                    self.patterns[f] = ['.']
        if self.debug:
            print(self, file=sys.stderr)
    def import_task_choice_event(self, task, config):
        """Listen for import_task_choice event."""
        if self.debug:
            self.dbg('listen: import_task_choice')
        if task.choice_flag == action.ASIS and not self.warned:
            self.dbg('cannot zero in \"as-is\" mode')
            self.warned = True
        # TODO request write in as-is mode
    @classmethod
    def match_patterns(cls, field, patterns):
        """Check if field (as string) is matching any of the patterns in
        the list. Matching is case-insensitive via re.findall.
        """
        for p in patterns:
            if re.findall(p, unicode(field), flags=re.IGNORECASE):
                return True
        return False
    def write_event(self, item):
        """Listen for write event: zero each configured field whose current
        value matches its patterns, by replacing it with its type's empty
        value (e.g. '' or 0)."""
        if self.debug:
            self.dbg('listen: write')
        if not self.fields:
            self.dbg('no fields, nothing to do')
            return
        for fn in self.fields:
            try:
                fval = getattr(item, fn)
            except AttributeError:
                self.dbg('? no such field: {0}'.format(fn))
            else:
                if not self.match_patterns(fval, self.patterns[fn]):
                    self.dbg('\"{0}\" ({1}) is not match any of: {2}'
                             .format(fval, fn, ' '.join(self.patterns[fn])))
                    continue
                self.dbg('\"{0}\" ({1}) match: {2}'
                         .format(fval, fn, ' '.join(self.patterns[fn])))
                # type(fval)() yields the zero value for the field's type
                setattr(item, fn, type(fval)())
                self.dbg('{0}={1}'.format(fn, getattr(item, fn)))
# Module-level hooks: beets invokes these on plugin events; each delegates
# to the singleton plugin instance.
@ZeroPlugin.listen('import_task_choice')
def zero_choice(task, config):
    ZeroPlugin().import_task_choice_event(task, config)
@ZeroPlugin.listen('write')
def zero_write(item):
    ZeroPlugin().write_event(item)
# simple test
if __name__ == '__main__':
    print(ZeroPlugin().match_patterns('test', ['[0-9]']))
    print(ZeroPlugin().match_patterns('test', ['.']))
zero plugin: ver 0.10 - code cleanup
# This file is part of beets.
# Copyright 2012, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
""" Clears tag fields in media files."""
import re
import logging
from beets.plugins import BeetsPlugin
from beets import ui
from beets.library import ITEM_KEYS
from beets.importer import action
__author__ = 'baobab@heresiarch.info'
__version__ = '0.10'
class ZeroPlugin(BeetsPlugin):
    """Beets plugin that blanks configured tag fields when items are written.

    v0.10: diagnostics go through the shared 'beets' logger instead of the
    old debug/stderr machinery.

    NOTE(review): Python-2 era code (`unicode`, argument-forwarding
    `__new__`); left structurally untouched on purpose.
    """
    # singleton instance shared by the module-level event listeners below
    _instance = None
    _log = logging.getLogger('beets')
    # item fields to zero out (class-level state, shared via the singleton)
    fields = []
    # field name -> list of regex patterns; the field is only zeroed when
    # its current value matches one of them ('.' means "always")
    patterns = {}
    # ensures the "as-is" warning is emitted at most once per session
    warned = False
    def __new__(cls, *args, **kwargs):
        # classic singleton: every ZeroPlugin() call yields the same object
        if cls._instance is None:
            cls._instance = super(ZeroPlugin,
                                  cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def __str__(self):
        """Human-readable dump of the plugin's current configuration."""
        return ('[zero]\n fields = {0}\n patterns = {1}\n warned = {2}'
                .format(self.fields, self.patterns, self.warned))
    def configure(self, config):
        """Read the [zero] config section: field list and the per-field
        regex patterns (defaulting to '.' = match anything)."""
        if not config.has_section('zero'):
            self._log.warn('[zero] plugin is not configured')
            return
        for f in ui.config_val(config, 'zero', 'fields', '').split():
            if f not in ITEM_KEYS:
                self._log.error('[zero] invalid field: {0}'.format(f))
            else:
                self.fields.append(f)
                p = ui.config_val(config, 'zero', f, '').split()
                if p:
                    self.patterns[f] = p
                else:
                    self.patterns[f] = ['.']
    def import_task_choice_event(self, task, config):
        """Listen for import_task_choice event."""
        if task.choice_flag == action.ASIS and not self.warned:
            self._log.warn('[zero] cannot zero in \"as-is\" mode')
            self.warned = True
        # TODO request write in as-is mode
    @classmethod
    def match_patterns(cls, field, patterns):
        """Check if field (as string) is matching any of the patterns in
        the list. Matching is case-insensitive via re.findall.
        """
        for p in patterns:
            if re.findall(p, unicode(field), flags=re.IGNORECASE):
                return True
        return False
    def write_event(self, item):
        """Listen for write event: zero each configured field whose current
        value matches its patterns, by replacing it with its type's empty
        value (e.g. '' or 0)."""
        if not self.fields:
            self._log.warn('[zero] no fields, nothing to do')
            return
        for fn in self.fields:
            try:
                fval = getattr(item, fn)
            except AttributeError:
                self._log.error('[zero] no such field: {0}'.format(fn))
            else:
                if not self.match_patterns(fval, self.patterns[fn]):
                    self._log.debug('[zero] \"{0}\" ({1}) not match: {2}'
                                    .format(fval, fn,
                                            ' '.join(self.patterns[fn])))
                    continue
                self._log.debug('[zero] \"{0}\" ({1}) match: {2}'
                                .format(fval, fn, ' '.join(self.patterns[fn])))
                # type(fval)() yields the zero value for the field's type
                setattr(item, fn, type(fval)())
                self._log.debug('[zero] {0}={1}'.format(fn, getattr(item, fn)))
# Module-level hooks: beets invokes these on plugin events; each delegates
# to the singleton plugin instance.
@ZeroPlugin.listen('import_task_choice')
def zero_choice(task, config):
    ZeroPlugin().import_task_choice_event(task, config)
@ZeroPlugin.listen('write')
def zero_write(item):
    ZeroPlugin().write_event(item)
|
from django.db.models.query import RawQuerySet, RawQuerySet
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404,HttpResponseServerError,HttpResponseRedirect, HttpResponse
from django import forms
def generic_row(request, model=None, pk=None, partial_row='generic/partials/partial_row.html', selectable=True):
    """Render a single object with the given row partial template.

    `model` and `pk` are both required; responds with a 500 when either
    is missing, 404 when the object does not exist.
    """
    if not (model and pk):
        # was `return HttpResponseServerError` — the class, not a response
        # instance; Django requires an instance here.
        return HttpResponseServerError()
    obj = get_object_or_404(model, pk=pk)  # renamed from `object` (builtin shadow)
    return render_to_response(partial_row, {'object': obj},
                              context_instance=RequestContext(request))
def generic(request,
            model=None,
            queryset=None,
            template_object_name='object_list',
            base_template='generic/base.html',
            partial_base='generic/partials/partial_base.html',
            partial_header='generic/partials/partial_header.html',
            partial_row='generic/partials/partial_row.html',
            paginator_template='generic/partials/pagination.html',
            paginated=True,
            selectable=True,
            objects_per_page=25,
            columns=[('object', False, '')],
            filter_forms=[],
            action_forms=[]):
    """Generic list view with filtering, sorting, pagination and bulk actions.

    GET renders the full page; POSTs drive paging (`page_action`), sorting
    (`sort_action`), bulk actions (`action`) or filtering, re-rendering only
    the partial. The working object list is stashed in the session between
    requests.

    NOTE(review): the sort loop unpacks 4-tuples (name, sortable, param,
    sorter) while the default `columns` holds a 3-tuple — sorting only works
    when callers pass 4-tuples; confirm before relying on the default.
    """
    # model parameter is required
    if not model:
        # was `return HttpResponseServerError` (the class); return an instance
        return HttpResponseServerError()
    # querysets may be callables for dynamic, run-time retrieval
    if callable(queryset):
        queryset = queryset()
    # default list: the queryset parameter, or everything from the model
    object_list = queryset or model.objects.all()
    if type(object_list) == RawQuerySet:
        object_list = list(object_list)
    # form class representing the checkbox-selected rows, for bulk actions
    class ResultsForm(forms.Form):
        results = forms.ModelMultipleChoiceField(queryset=object_list, widget=forms.CheckboxSelectMultiple())
    class_dict = {}
    action_form_instances = []
    for action_class in action_forms:
        form_instance = action_class()
        fully_qualified_class_name = "%s.%s" % (form_instance.__module__, form_instance.__class__.__name__)
        # dict for looking up the posted action; ordered tuple list for rendering
        class_dict[fully_qualified_class_name] = action_class
        action_form_instances.append((fully_qualified_class_name,action_class(),))
    filter_form_instances = []
    for filter_class in filter_forms:
        form_instance = filter_class()
        filter_form_instances.append(form_instance)
    # defaults for the template context
    response_template = base_template
    page = 1
    selected=False
    status_message=''
    status_message_type=''
    sort_column = ''
    sort_ascending = True
    if request.method == 'POST':
        page_action = request.POST.get('page_action', '')
        sort_action = request.POST.get('sort_action', '')
        sort_column = request.POST.get('sort_column', '')
        sort_ascending = request.POST.get('sort_ascending', 'True')
        action_taken = request.POST.get('action', '')
        if page_action:
            object_list = request.session['object_list']
            try:
                page = int(request.POST.get('page_num', '1'))
            except ValueError:
                pass
        elif sort_action:
            # retrieve the original, unsorted, unpaginated list,
            # as some sorts will turn the initial queryset into a list
            object_list = request.session['filtered_list']
            sort_ascending = (sort_ascending == 'True')
            for column_name, sortable, sort_param, sorter in columns:
                if sortable and sort_param == sort_column:
                    object_list = sorter.sort(sort_column, object_list, sort_ascending)
        elif action_taken:
            everything_selected = request.POST.get('select_everything', None)
            results = []
            if everything_selected:
                results = request.session['object_list']
            else:
                resultsform = ResultsForm(request.POST)
                if resultsform.is_valid():
                    results = resultsform.cleaned_data['results']
            action_class = class_dict[action_taken]
            action_instance = action_class(request.POST)
            if action_instance.is_valid():
                status_message, status_message_type = action_instance.perform(request, results)
        else:
            for form_class in filter_forms:
                form_instance = form_class(request.POST)
                if form_instance.is_valid():
                    object_list = form_instance.filter(request, object_list)
                    selected = True
            # store the original, unsorted, unpaginated list,
            # as some sorts will turn the initial queryset into a list
            request.session['filtered_list'] = object_list
        response_template = partial_base
    else:
        # store the full set of models, in queryset form, in the
        # session, for the case of sorting the full list
        request.session['filtered_list'] = object_list
        request.session['object_list'] = object_list
    total = len(object_list)
    paginator = None
    ranges = []
    if paginated:
        paginator = Paginator(object_list, objects_per_page)
        # If page request is out of range, deliver last page of results.
        try:
            object_list = paginator.page(page).object_list
        except (EmptyPage, InvalidPage):
            object_list = paginator.page(paginator.num_pages).object_list
            # was `page = num_pages` — a NameError; use the paginator's count
            page = paginator.num_pages
        if paginator.num_pages > 10:
            low_range = []
            mid_range = []
            high_range = []
            low_range = range(1, 6)
            high_range = range(paginator.num_pages - 4, paginator.num_pages + 1)
            if page < 10:
                low_range += range(6, min(paginator.num_pages,page + 5))
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            elif page > paginator.num_pages - 10:
                high_range = range(max(0, page - 5), paginator.num_pages - 4) + high_range
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            else:
                ranges.append(low_range)
                ranges.append(range(10, max(0, page - 2), 10))
                ranges.append(range(max(0, page - 2), min(paginator.num_pages, page + 3)))
                ranges.append(range((round(min(paginator.num_pages, page+3)/10) + 1)*10, paginator.num_pages - 10, 10))
                ranges.append(high_range)
        else:
            ranges.append(paginator.page_range)
    return render_to_response(response_template, {
        'partial_base':partial_base,
        'partial_header':partial_header,
        'partial_row':partial_row,
        'paginator_template':paginator_template,
        template_object_name:object_list, # for custom templates
        'object_list':object_list, # allow generic templates to still
                                   # access the object list in the same way
        'paginator':paginator,
        'filter_forms':filter_form_instances,
        'action_forms':action_form_instances,
        'paginated':paginated,
        'total':total,
        'selectable':selectable,
        'columns':columns,
        'sort_column':sort_column,
        'sort_ascending':sort_ascending,
        'page':page,
        'ranges':ranges,
        'selected':selected,
        'status_message':status_message,
        'status_message_type':status_message_type,
        'base_template':'layout.html',
        },context_instance=RequestContext(request))
minor documentation, added default sorting column
from django.db.models.query import RawQuerySet, RawQuerySet
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404,HttpResponseServerError,HttpResponseRedirect, HttpResponse
from django import forms
def generic_row(request, model=None, pk=None, partial_row='generic/partials/partial_row.html', selectable=True):
    """Render a single object with the given row partial template.

    `model` and `pk` are both required; responds with a 500 when either
    is missing, 404 when the object does not exist.
    """
    if not (model and pk):
        # was `return HttpResponseServerError` — the class, not a response
        # instance; Django requires an instance here.
        return HttpResponseServerError()
    obj = get_object_or_404(model, pk=pk)  # renamed from `object` (builtin shadow)
    return render_to_response(partial_row, {'object': obj},
                              context_instance=RequestContext(request))
def generic(request,
            model=None,
            queryset=None,
            template_object_name='object_list',
            base_template='generic/base.html',
            partial_base='generic/partials/partial_base.html',
            partial_header='generic/partials/partial_header.html',
            partial_row='generic/partials/partial_row.html',
            paginator_template='generic/partials/pagination.html',
            paginated=True,
            selectable=True,
            objects_per_page=25,
            columns=[('object', False, '')],
            sort_column='',
            sort_ascending=True,
            filter_forms=[],
            action_forms=[]):
    """Generic list view with filtering, sorting, pagination and bulk actions.

    GET renders the full page (applying the caller-supplied default
    `sort_column` if any); POSTs drive paging (`page_action`), sorting
    (`sort_action`), bulk actions (`action`) or filtering, re-rendering
    only the partial. The working object list is stashed in the session
    between requests.

    NOTE(review): the sort loops unpack 4-tuples (name, sortable, param,
    sorter) while the default `columns` holds a 3-tuple — sorting only
    works when callers pass 4-tuples; confirm before relying on the default.
    """
    # model parameter is required
    if not model:
        # was `return HttpResponseServerError` (the class); return an instance
        return HttpResponseServerError()
    # querysets can be calls to a function for dynamic, run-time retrieval
    if callable(queryset):
        queryset = queryset()
    # the default list is either a queryset parameter, or all
    # objects from the model parameter
    object_list = queryset or model.objects.all()
    if type(object_list) == RawQuerySet:
        object_list = list(object_list)
    # dynamically create a form class to represent the list of selected results,
    # for performing actions
    class ResultsForm(forms.Form):
        results = forms.ModelMultipleChoiceField(queryset=object_list, widget=forms.CheckboxSelectMultiple())
    class_dict = {}
    action_form_instances = []
    for action_class in action_forms:
        form_instance = action_class()
        fully_qualified_class_name = "%s.%s" % (form_instance.__module__, form_instance.__class__.__name__)
        # we need both a dictionary of action forms (for looking up actions performed)
        # and a list of tuple for rendering within the template in a particular order
        class_dict[fully_qualified_class_name] = action_class
        action_form_instances.append((fully_qualified_class_name,action_class(),))
    filter_form_instances = []
    for filter_class in filter_forms:
        form_instance = filter_class()
        filter_form_instances.append(form_instance)
    # define some defaults
    response_template = base_template
    page = 1
    selected=False
    status_message=''
    status_message_type=''
    if request.method == 'POST':
        page_action = request.POST.get('page_action', '')
        sort_action = request.POST.get('sort_action', '')
        sort_column = request.POST.get('sort_column', '')
        sort_ascending = request.POST.get('sort_ascending', 'True')
        action_taken = request.POST.get('action', '')
        if page_action:
            object_list = request.session['object_list']
            try:
                page = int(request.POST.get('page_num', '1'))
            except ValueError:
                pass
        elif sort_action:
            # retrieve the original, unsorted, unpaginated list,
            # as some sorts will turn the initial queryset into a list
            object_list = request.session['filtered_list']
            sort_ascending = (sort_ascending == 'True')
            for column_name, sortable, sort_param, sorter in columns:
                if sortable and sort_param == sort_column:
                    object_list = sorter.sort(sort_column, object_list, sort_ascending)
        elif action_taken:
            everything_selected = request.POST.get('select_everything', None)
            results = []
            if everything_selected:
                results = request.session['object_list']
            else:
                resultsform = ResultsForm(request.POST)
                if resultsform.is_valid():
                    results = resultsform.cleaned_data['results']
            action_class = class_dict[action_taken]
            action_instance = action_class(request.POST)
            if action_instance.is_valid():
                status_message, status_message_type = action_instance.perform(request, results)
        else:
            for form_class in filter_forms:
                form_instance = form_class(request.POST)
                if form_instance.is_valid():
                    object_list = form_instance.filter(request, object_list)
                    selected = True
            # store the original, unsorted, unpaginated list,
            # as some sorts will turn the initial queryset into a list
            request.session['filtered_list'] = object_list
        response_template = partial_base
    else:
        # store the full set of models, in queryset form, in the
        # session, for the case of sorting the full list
        request.session['filtered_list'] = object_list
        # calls to this view can define a default sorting order,
        # if it's an initial GET request we should perform this sort here
        if sort_column:
            for column_name, sortable, sort_param, sorter in columns:
                if sortable and sort_param == sort_column:
                    object_list = sorter.sort(sort_column, object_list, sort_ascending)
        request.session['object_list'] = object_list
    total = len(object_list)
    paginator = None
    ranges = []
    if paginated:
        paginator = Paginator(object_list, objects_per_page)
        # If page request is out of range, deliver last page of results.
        try:
            object_list = paginator.page(page).object_list
        except (EmptyPage, InvalidPage):
            object_list = paginator.page(paginator.num_pages).object_list
            # was `page = num_pages` — a NameError; use the paginator's count
            page = paginator.num_pages
        if paginator.num_pages > 10:
            low_range = []
            mid_range = []
            high_range = []
            low_range = range(1, 6)
            high_range = range(paginator.num_pages - 4, paginator.num_pages + 1)
            if page < 10:
                low_range += range(6, min(paginator.num_pages,page + 5))
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            elif page > paginator.num_pages - 10:
                high_range = range(max(0, page - 5), paginator.num_pages - 4) + high_range
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            else:
                ranges.append(low_range)
                ranges.append(range(10, max(0, page - 2), 10))
                ranges.append(range(max(0, page - 2), min(paginator.num_pages, page + 3)))
                ranges.append(range((round(min(paginator.num_pages, page+3)/10) + 1)*10, paginator.num_pages - 10, 10))
                ranges.append(high_range)
        else:
            ranges.append(paginator.page_range)
    return render_to_response(response_template, {
        'partial_base':partial_base,
        'partial_header':partial_header,
        'partial_row':partial_row,
        'paginator_template':paginator_template,
        template_object_name:object_list, # for custom templates
        'object_list':object_list, # allow generic templates to still
                                   # access the object list in the same way
        'paginator':paginator,
        'filter_forms':filter_form_instances,
        'action_forms':action_form_instances,
        'paginated':paginated,
        'total':total,
        'selectable':selectable,
        'columns':columns,
        'sort_column':sort_column,
        'sort_ascending':sort_ascending,
        'page':page,
        'ranges':ranges,
        'selected':selected,
        'status_message':status_message,
        'status_message_type':status_message_type,
        'base_template':'layout.html',
        },context_instance=RequestContext(request))
[Python] Singleton class example
|
from . quote_fields import quote_definitions, quote_dtypes
from io import StringIO
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from requests import Session
# TODO look into read_csv use_cols option for speedups
# TODO Fix doc comment formatting on methods
class ActiveTick:
    def __init__(self, host='127.0.0.1', port=5000, cache=False):
        """Client for the ActiveTick HTTP proxy.

        :param host: proxy host (string)
        :param port: proxy port (int)
        :param cache: stored flag; not used in the code visible here
        """
        # Active tick HTTP proxy config
        self.host = host
        self.port = port
        # persistent HTTP session reused for all requests
        self.r = Session()
        self.cache = cache
        # timestamp format used in ActiveTick request URLs (yyyymmddhhmmss)
        self._date_fmt = '%Y%m%d%H%M%S'
        # Contains generator for stream once requested
        self.stream_ = None
def _date_wrap(self, date):
# wrapper to allow for np.datetime64 and convert to string
ts = pd.to_datetime(str(date))
return ts.strftime(self._date_fmt)
def _format_symbols(self, symbols):
"""
symbols - string (returns unchanged) or list of symbols to concat with + symbols (string, list)
Formats list of symbols for ActiveTick URL
:param symbols:
:return:
String
"""
if not isinstance(symbols, str):
symbols = '+'.join(symbols)
return symbols
def _date_parser(self, date_format):
"""
Date parser function factory for pandas csv parsing of ActiveTick data
:param date_format:
Format used for parsing date (string)
:return:
(list of datetimes)
"""
def date_parser(dates):
return [ datetime.strptime(date, date_format) for date in dates ]
return date_parser
    def quoteData(self, symbols, quoteFields):
        """Fetch a quote snapshot for one or more symbols.

        symbols - Symbol (or iterable of multiple symbols) for contracts, ie SPY, AAPL--130308C00440000 (string, iter)
        quote_fields - List of all fields of interest (string, list)

        # Example:
        # look to quoteFields.py for the lookup table used and available fields
        atClient.quoteData('SPY', ['LastPrice' , 'BidSize', 'AskSize'])
        # returns pandas DataFrame with columns named

        :return:
            pandas.DataFrame() indexed on the symbol column with columns with requested quoteFields
            with extra status meta data regarding the request and symbols, to just get a DataFrame
            with the requested fields quoteData('SPY', fields)[fields]
        """
        # the proxy prepends symbol + status columns to every row
        names = ['symbol', 'symbol_status']
        def __name_fmt(names, field):
            # each requested field yields 4 CSV columns: id, status, datatype, value
            # (mutates `names` in place and also returns it)
            names += ["{f}_field_id".format(f=field),
                      "{f}_status".format(f=field),
                      "{f}_datatype".format(f=field),
                      "{f}".format(f=field)]
            return names
        if not isinstance(quoteFields, str):
            # Create column names from quoteFields
            for field in quoteFields:
                names = __name_fmt(names, field)
            # TODO: Declare specific dtypes for each column in names
            # Translate from human readable quoteFields to IDs
            quoteFields = map(lambda field: quote_definitions[field], quoteFields)
            quoteFields = '+'.join(quoteFields)
        else:
            # Only one quoteField as string
            names = __name_fmt(names, quoteFields)
            quoteFields = quote_definitions[quoteFields]
        url = "http://{host}:{port}/quoteData?symbol={symbols}&field={quoteFields}".format(
            host=self.host,
            port=self.port,
            symbols=self._format_symbols(symbols),
            quoteFields=quoteFields
        )
        # GET request is made and the CSV is read into a Pandas DataFrame
        # (read_csv fetches the URL directly; no Session/auth is involved here)
        df = pd.read_csv(url, header=None, names=names, index_col='symbol')
        return df
def quoteStream(self, symbols, timeout=None):
"""
symbols - string or iter of symbols
# Example
# res is an instance of requests iter_lines()
res = at.quoteStream('SPY')
for quote in res:
print(quote)
:param timeout:
integer, how many seconds to keep connection open
:return:
returns lazy iterator see requests iter_lines() that can be looped over to access streaming data
"""
# TODO: Start, pause, stop quote stream
def __tickParse(tick):
tick = tick.decode('utf-8')
if tick[0] is 'Q':
names = ['type', 'symbol', 'cond', 'bid_ex', 'ask_ex', 'bid', 'ask', 'bidz', 'askz', 'datetime']
dtype = {'type': object,
'symbol': object,
'cond': np.uint8,
'bid_ex': object,
'ask_ex': object,
'bid': np.float32,
'ask': np.float32,
'bidz': np.uint32,
'askz': np.uint32,
'datetime': object}
else:
names = ['type', 'symbol', 'flags', 'cond1', 'cond2', 'cond3', 'cond4', 'last_ex', 'last', 'lastz',
'datetime']
dtype = {
'type': object,
'symbol': object,
'flags': object,
'cond1': np.int8,
'cond2': np.int8,
'cond3': np.int8,
'cond4': np.int8,
'last_ex': object,
'last': np.float32,
'lastz': np.uint32
}
date_format = '%Y%m%d%H%M%S%f'
parse_date = self._date_parser(date_format)
return pd.read_csv(StringIO(tick), names=names, index_col='type', dtype=dtype,
parse_dates=['datetime'], date_parser=parse_date)
url = 'http://{host}:{port}/quoteStream?symbol={symbols}'.format(
host=self.host,
port=self.port,
symbols=self._format_symbols(symbols)
)
self.stream_ = self.r.get(url, stream=True, timeout=timeout)
pandas_stream = map(__tickParse, self.stream_.iter_lines())
first_line = next(pandas_stream)
return pandas_stream
def barData(self, symbol, historyType='I', intradayMinutes=60,
beginTime=datetime(datetime.now().year, datetime.now().month, 1), endTime=datetime.now()):
"""
:param symbol:
Takes only one symbol, string
:param historyType:
Takes 'I', 'D' or 'W' as a string (Intraday 0, Daily 1 or Weekly 0)
:param intradayMinutes:
If historyType is 'I' select a bar size: 0 to 60 minutes (int)
:param beginTime:
Beginning date for query (datetime)
:param endTime:
Ending date for query (datetime)
:return:
Pandas DataFrame OHLCV indexed on the datetime
"""
history_lookup = {
'I': 0,
'D': 1,
'W': 2
}
def __getIntradayMinutesAttr():
# Returns URL segment for intraday minutes if needed
if historyType is not 'I':
attr_str = ''
else:
attr_str = 'intradayMinutes={intradayMinutes}&'.format(intradayMinutes=str(intradayMinutes))
return attr_str
beginTime_s = self._date_wrap(beginTime)
endTime_s = self._date_wrap(endTime)
cache_key = "AT:BARDATA:{symbol}:{historyType}:{intradayMinutes}:{beginTime}:{endTime}"
cache_key = cache_key.format(
symbol=symbol,
historyType=history_lookup[historyType],
intradayMinutes=intradayMinutes,
beginTime=beginTime_s,
endTime=endTime_s)
# If the data is cached
if self.cache and self.cache.exists(cache_key):
return pd.read_msgpack(self.cache.get(cache_key))
url = 'http://{host}:{port}/barData?symbol={symbol}&historyType={historyType}' \
'&{intradayMintuesAttr}beginTime={beginTime}&endTime={endTime}'
url = url.format(
host=self.host,
port=self.port,
symbol=symbol,
historyType=history_lookup[historyType],
intradayMintuesAttr=__getIntradayMinutesAttr(),
beginTime=beginTime_s,
endTime=endTime_s)
dtypes = {'datetime': object,
'open': np.float32,
'high': np.float32,
'low': np.float32,
'close': np.float32,
'volume': np.uint32}
df = pd.read_csv(url, header=None, names=['datetime', 'open', 'high', 'low', 'close', 'volume'],
index_col='datetime', parse_dates=['datetime'], dtype=dtypes)
# Cache the data
if self.cache:
self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
return df
def tickData(self, symbol, trades=False, quotes=True,
beginTime=datetime.now() - timedelta(minutes=15),
endTime=datetime.now()):
"""
Gets tick level data in between a time range, limited to returning 100,000 quotes/trades at a time
:param symbol:
String, ticker for symbol in ActiveTick format
:param trades:
Boolean, whether to return trade ticks
:param quotes:
Boolean whether to return quote ticks
:param beginTime:
datetime beginning of date range
:param endTime:
datetime end of date range
:return:
"""
tick_date_fmt = '%Y%m%d%H%M%S%f'
date_parser = self._date_parser(tick_date_fmt)
q_names = ['type',
'datetime',
'bid',
'ask',
'bidz',
'askz',
'bidx',
'askx',
'cond']
t_names = ['type',
'datetime',
'last',
'lastz',
'lastx',
'cond1',
'cond2',
'cond3',
'cond4']
def __get_trades(df):
trades_df = df[df[0] == 'T'].copy()
trades_df.columns = t_names
trades_df.loc[:, 'last'] = trades_df.loc[:, 'last'].astype(np.float32)
trades_df.loc[:, 'lastz'] = trades_df.loc[:, 'lastz'].astype(np.uint32)
trades_df.loc[:, ['cond1', 'cond2', 'cond3', 'cond4']] = trades_df.loc[:, ['cond1',
'cond2',
'cond3',
'cond4']].astype(np.uint8)
return trades_df
def __get_quotes(df):
quotes_df = df[df[0] == 'Q'].copy()
quotes_df.columns = q_names
quotes_df.loc[:, ['bid', 'ask']] = quotes_df.loc[:, ['bid', 'ask']].astype(np.float32)
quotes_df.loc[:, ['bidz', 'askz']] = quotes_df.loc[:, ['bidz', 'askz']].astype(np.uint32)
quotes_df.loc[:, 'cond'] = quotes_df.loc[:, 'cond'].astype(np.uint8)
return quotes_df
def __at_request(url, names):
if(names):
date_col = 'datetime'
else:
date_col = 1
del q_names[1]
del t_names[1]
try:
df = pd.read_csv(url, header=None,
engine='c',
index_col=date_col,
parse_dates=[date_col],
names=names,
date_parser=date_parser)
return df
except Exception as e:
print('caught exception:', e)
print('No or malformed data: ', url)
return pd.DataFrame()
if not trades and not quotes:
return pd.DataFrame()
beginTime_s = self._date_wrap(beginTime)
endTime_s = self._date_wrap(endTime)
cache_key = 'AT:TICKDATA:{symbol}:{trades}:{quotes}:{beginTime}:{endTime}'
cache_key = cache_key.format(
symbol=symbol,
trades=int(trades),
quotes=int(quotes),
beginTime=beginTime_s,
endTime=endTime_s
)
# Return cached data
if self.cache and self.cache.exists(cache_key):
return pd.read_msgpack(self.cache.get(cache_key))
# Retrieve data not found in cache
else:
url = 'http://{host}:{port}/tickData?symbol={symbol}&trades={trades}' \
'"es={quotes}&beginTime={beginTime}&endTime={endTime}'
url = url.format(
host=self.host,
port=self.port,
symbol=symbol,
trades=int(trades),
quotes=int(quotes),
beginTime=beginTime_s,
endTime=endTime_s
)
# Quote column names
if quotes and not trades:
df = __at_request(url, q_names)
# Trade columns names
if trades and not quotes:
df = __at_request(url, t_names)
if trades and quotes:
df = __at_request(url, None)
if not df.empty:
df = __get_trades(df).append(__get_quotes(df)).sort_index(axis=0)
if self.cache:
self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
return df
def optionChain(self, symbol):
"""
Returns unnamed pandas dataframe of option symbols currently listed for underlying symbol
:param symbol:
String, ticker symbol for underlying
:return:
Raw unnamed dataframe from ActiveTick
"""
url = 'http://{host}:{port}/optionChain?symbol={symbol}'.format(
host=self.host,
port=self.port,
symbol=symbol)
df = pd.read_csv(url)
return df
__version__ = '0.12.1'
__url__ = 'https://github.com/uberscientist/activetick_http'
if __name__ == '__main__'
print('ActiveTick Python Module' + __version__ +
', attaches to ActiveTick HTTP Proxy, returns Pandas DataFrames.\n'
'http://www.activetick.com/activetick/contents/PersonalServicesDataAPIDownload.aspx',
'Git repo:' + __url__,
'Uses pytest for tests.\n',
'Has optional (recommended) Redis (http://redis.io) caching built in..', sep='\n')
Typo: a ':' had accidentally been deleted; restored it.
from . quote_fields import quote_definitions, quote_dtypes
from io import StringIO
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from requests import Session
# TODO look into read_csv use_cols option for speedups
# TODO Fix doc comment formatting on methods
class ActiveTick:
    """
    Client for the ActiveTick HTTP proxy.

    Builds request URLs against the proxy, reads the CSV responses straight
    into pandas DataFrames, and optionally caches results as msgpack blobs
    in a Redis-like object exposing exists/get/set.
    """

    def __init__(self, host='127.0.0.1', port=5000, cache=False):
        # Active tick HTTP proxy config
        self.host = host
        self.port = port
        self.r = Session()
        self.cache = cache
        # date format used in proxy request URLs
        self._date_fmt = '%Y%m%d%H%M%S'
        # Contains generator for stream once requested
        self.stream_ = None

    def _date_wrap(self, date):
        """Render any pandas-parsable date (datetime, np.datetime64, str)
        in the proxy's URL date format."""
        ts = pd.to_datetime(str(date))
        return ts.strftime(self._date_fmt)

    def _format_symbols(self, symbols):
        """
        Formats list of symbols for ActiveTick URL.

        :param symbols:
            string (returned unchanged) or iterable of symbols joined with '+'
        :return:
            String
        """
        if not isinstance(symbols, str):
            symbols = '+'.join(symbols)
        return symbols

    def _date_parser(self, date_format):
        """
        Date parser function factory for pandas csv parsing of ActiveTick data

        :param date_format:
            Format used for parsing date (string)
        :return:
            function mapping an iterable of date strings to a list of datetimes
        """
        def date_parser(dates):
            return [datetime.strptime(date, date_format) for date in dates]
        return date_parser

    def quoteData(self, symbols, quoteFields):
        """
        Request current quote fields for one or more symbols.

        symbols - Symbol (or iterable of symbols) for contracts,
            ie SPY, AAPL--130308C00440000 (string, iter)
        quoteFields - field name or list of field names of interest;
            see quote_fields.py for the lookup table of available fields
        :return:
            pandas.DataFrame() indexed on the symbol column with the requested
            quoteFields plus per-field status metadata columns; use
            quoteData('SPY', fields)[fields] to get just the values
        """
        names = ['symbol', 'symbol_status']
        def __name_fmt(names, field):
            # each requested field contributes four CSV columns
            names += ["{f}_field_id".format(f=field),
                      "{f}_status".format(f=field),
                      "{f}_datatype".format(f=field),
                      "{f}".format(f=field)]
            return names
        if not isinstance(quoteFields, str):
            # Create column names from quoteFields
            for field in quoteFields:
                names = __name_fmt(names, field)
            # TODO: Declare specific dtypes for each column in names
            # Translate from human readable quoteFields to IDs
            quoteFields = map(lambda field: quote_definitions[field], quoteFields)
            quoteFields = '+'.join(quoteFields)
        else:
            # Only one quoteField as string
            names = __name_fmt(names, quoteFields)
            quoteFields = quote_definitions[quoteFields]
        url = "http://{host}:{port}/quoteData?symbol={symbols}&field={quoteFields}".format(
            host=self.host,
            port=self.port,
            symbols=self._format_symbols(symbols),
            quoteFields=quoteFields
        )
        # GET request is made and the CSV is read into a Pandas DataFrame
        df = pd.read_csv(url, header=None, names=names, index_col='symbol')
        return df

    def quoteStream(self, symbols, timeout=None):
        """
        Open a streaming quote connection and lazily parse each tick.

        symbols - string or iter of symbols
        :param timeout:
            integer, how many seconds to keep connection open
        :return:
            lazy iterator (wrapping requests iter_lines()) yielding one
            single-row DataFrame per streamed tick
        """
        # TODO: Start, pause, stop quote stream
        def __tickParse(tick):
            # Each streamed line starts with 'Q' (quote) or 'T' (trade) and
            # is parsed with the matching column names and dtypes.
            tick = tick.decode('utf-8')
            # BUGFIX: was `tick[0] is 'Q'` — identity comparison on a str is
            # implementation-dependent; use equality.
            if tick[0] == 'Q':
                names = ['type', 'symbol', 'cond', 'bid_ex', 'ask_ex', 'bid', 'ask', 'bidz', 'askz', 'datetime']
                dtype = {'type': object,
                         'symbol': object,
                         'cond': np.uint8,
                         'bid_ex': object,
                         'ask_ex': object,
                         'bid': np.float32,
                         'ask': np.float32,
                         'bidz': np.uint32,
                         'askz': np.uint32,
                         'datetime': object}
            else:
                names = ['type', 'symbol', 'flags', 'cond1', 'cond2', 'cond3', 'cond4', 'last_ex', 'last', 'lastz',
                         'datetime']
                dtype = {
                    'type': object,
                    'symbol': object,
                    'flags': object,
                    'cond1': np.int8,
                    'cond2': np.int8,
                    'cond3': np.int8,
                    'cond4': np.int8,
                    'last_ex': object,
                    'last': np.float32,
                    'lastz': np.uint32
                }
            date_format = '%Y%m%d%H%M%S%f'
            parse_date = self._date_parser(date_format)
            return pd.read_csv(StringIO(tick), names=names, index_col='type', dtype=dtype,
                               parse_dates=['datetime'], date_parser=parse_date)
        url = 'http://{host}:{port}/quoteStream?symbol={symbols}'.format(
            host=self.host,
            port=self.port,
            symbols=self._format_symbols(symbols)
        )
        self.stream_ = self.r.get(url, stream=True, timeout=timeout)
        pandas_stream = map(__tickParse, self.stream_.iter_lines())
        # NOTE(review): the first parsed line is consumed and discarded —
        # presumably a priming/header line; confirm this is intended.
        first_line = next(pandas_stream)
        return pandas_stream

    def barData(self, symbol, historyType='I', intradayMinutes=60,
                beginTime=None, endTime=None):
        """
        :param symbol:
            Takes only one symbol, string
        :param historyType:
            Takes 'I', 'D' or 'W' as a string (Intraday, Daily or Weekly)
        :param intradayMinutes:
            If historyType is 'I' select a bar size: 0 to 60 minutes (int)
        :param beginTime:
            Beginning date for query (datetime); defaults to the start of
            the current month
        :param endTime:
            Ending date for query (datetime); defaults to now
        :return:
            Pandas DataFrame OHLCV indexed on the datetime
        """
        # BUGFIX: the original defaults called datetime.now() in the
        # signature, so they were frozen at import time; compute per call.
        if beginTime is None:
            now = datetime.now()
            beginTime = datetime(now.year, now.month, 1)
        if endTime is None:
            endTime = datetime.now()
        history_lookup = {
            'I': 0,
            'D': 1,
            'W': 2
        }
        def __getIntradayMinutesAttr():
            # Returns URL segment for intraday minutes if needed
            # BUGFIX: was `historyType is not 'I'` — identity comparison on str
            if historyType != 'I':
                attr_str = ''
            else:
                attr_str = 'intradayMinutes={intradayMinutes}&'.format(intradayMinutes=str(intradayMinutes))
            return attr_str
        beginTime_s = self._date_wrap(beginTime)
        endTime_s = self._date_wrap(endTime)
        cache_key = "AT:BARDATA:{symbol}:{historyType}:{intradayMinutes}:{beginTime}:{endTime}"
        cache_key = cache_key.format(
            symbol=symbol,
            historyType=history_lookup[historyType],
            intradayMinutes=intradayMinutes,
            beginTime=beginTime_s,
            endTime=endTime_s)
        # If the data is cached
        if self.cache and self.cache.exists(cache_key):
            return pd.read_msgpack(self.cache.get(cache_key))
        url = 'http://{host}:{port}/barData?symbol={symbol}&historyType={historyType}' \
              '&{intradayMintuesAttr}beginTime={beginTime}&endTime={endTime}'
        url = url.format(
            host=self.host,
            port=self.port,
            symbol=symbol,
            historyType=history_lookup[historyType],
            intradayMintuesAttr=__getIntradayMinutesAttr(),
            beginTime=beginTime_s,
            endTime=endTime_s)
        dtypes = {'datetime': object,
                  'open': np.float32,
                  'high': np.float32,
                  'low': np.float32,
                  'close': np.float32,
                  'volume': np.uint32}
        df = pd.read_csv(url, header=None, names=['datetime', 'open', 'high', 'low', 'close', 'volume'],
                         index_col='datetime', parse_dates=['datetime'], dtype=dtypes)
        # Cache the data
        if self.cache:
            self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
        return df

    def tickData(self, symbol, trades=False, quotes=True,
                 beginTime=None, endTime=None):
        """
        Gets tick level data in between a time range, limited to returning
        100,000 quotes/trades at a time

        :param symbol:
            String, ticker for symbol in ActiveTick format
        :param trades:
            Boolean, whether to return trade ticks
        :param quotes:
            Boolean whether to return quote ticks
        :param beginTime:
            datetime beginning of date range (defaults to 15 minutes ago)
        :param endTime:
            datetime end of date range (defaults to now)
        :return:
            pandas DataFrame of ticks indexed on the datetime column
        """
        # BUGFIX: the original defaults called datetime.now() in the
        # signature, so they were frozen at import time; compute per call.
        if beginTime is None:
            beginTime = datetime.now() - timedelta(minutes=15)
        if endTime is None:
            endTime = datetime.now()
        tick_date_fmt = '%Y%m%d%H%M%S%f'
        date_parser = self._date_parser(tick_date_fmt)
        q_names = ['type',
                   'datetime',
                   'bid',
                   'ask',
                   'bidz',
                   'askz',
                   'bidx',
                   'askx',
                   'cond']
        t_names = ['type',
                   'datetime',
                   'last',
                   'lastz',
                   'lastx',
                   'cond1',
                   'cond2',
                   'cond3',
                   'cond4']
        def __get_trades(df):
            # Select 'T' rows and cast columns to their numeric dtypes
            trades_df = df[df[0] == 'T'].copy()
            trades_df.columns = t_names
            trades_df.loc[:, 'last'] = trades_df.loc[:, 'last'].astype(np.float32)
            trades_df.loc[:, 'lastz'] = trades_df.loc[:, 'lastz'].astype(np.uint32)
            trades_df.loc[:, ['cond1', 'cond2', 'cond3', 'cond4']] = trades_df.loc[:, ['cond1',
                                                                                       'cond2',
                                                                                       'cond3',
                                                                                       'cond4']].astype(np.uint8)
            return trades_df
        def __get_quotes(df):
            # Select 'Q' rows and cast columns to their numeric dtypes
            quotes_df = df[df[0] == 'Q'].copy()
            quotes_df.columns = q_names
            quotes_df.loc[:, ['bid', 'ask']] = quotes_df.loc[:, ['bid', 'ask']].astype(np.float32)
            quotes_df.loc[:, ['bidz', 'askz']] = quotes_df.loc[:, ['bidz', 'askz']].astype(np.uint32)
            quotes_df.loc[:, 'cond'] = quotes_df.loc[:, 'cond'].astype(np.uint8)
            return quotes_df
        def __at_request(url, names):
            # names=None means a mixed trade+quote response: address the date
            # column positionally and drop 'datetime' from the per-kind name
            # lists (it becomes the index before columns are reassigned).
            if(names):
                date_col = 'datetime'
            else:
                date_col = 1
                del q_names[1]
                del t_names[1]
            try:
                df = pd.read_csv(url, header=None,
                                 engine='c',
                                 index_col=date_col,
                                 parse_dates=[date_col],
                                 names=names,
                                 date_parser=date_parser)
                return df
            except Exception as e:
                # best effort: empty/malformed responses yield an empty frame
                print('caught exception:', e)
                print('No or malformed data: ', url)
                return pd.DataFrame()
        if not trades and not quotes:
            return pd.DataFrame()
        beginTime_s = self._date_wrap(beginTime)
        endTime_s = self._date_wrap(endTime)
        cache_key = 'AT:TICKDATA:{symbol}:{trades}:{quotes}:{beginTime}:{endTime}'
        cache_key = cache_key.format(
            symbol=symbol,
            trades=int(trades),
            quotes=int(quotes),
            beginTime=beginTime_s,
            endTime=endTime_s
        )
        # Return cached data
        if self.cache and self.cache.exists(cache_key):
            return pd.read_msgpack(self.cache.get(cache_key))
        # Retrieve data not found in cache
        else:
            # BUGFIX: the query string previously read '"es={quotes}' — an
            # HTML-unescape corruption of '&quotes={quotes}'.
            url = 'http://{host}:{port}/tickData?symbol={symbol}&trades={trades}' \
                  '&quotes={quotes}&beginTime={beginTime}&endTime={endTime}'
            url = url.format(
                host=self.host,
                port=self.port,
                symbol=symbol,
                trades=int(trades),
                quotes=int(quotes),
                beginTime=beginTime_s,
                endTime=endTime_s
            )
            # Quote column names
            if quotes and not trades:
                df = __at_request(url, q_names)
            # Trade columns names
            if trades and not quotes:
                df = __at_request(url, t_names)
            if trades and quotes:
                df = __at_request(url, None)
                if not df.empty:
                    df = __get_trades(df).append(__get_quotes(df)).sort_index(axis=0)
            if self.cache:
                self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
            return df

    def optionChain(self, symbol):
        """
        Returns unnamed pandas dataframe of option symbols currently listed
        for underlying symbol

        :param symbol:
            String, ticker symbol for underlying
        :return:
            Raw unnamed dataframe from ActiveTick
        """
        url = 'http://{host}:{port}/optionChain?symbol={symbol}'.format(
            host=self.host,
            port=self.port,
            symbol=symbol)
        df = pd.read_csv(url)
        return df
__version__ = '0.12.1'
__url__ = 'https://github.com/uberscientist/activetick_http'
if __name__ == '__main__':
print('ActiveTick Python Module' + __version__ +
', attaches to ActiveTick HTTP Proxy, returns Pandas DataFrames.\n'
'http://www.activetick.com/activetick/contents/PersonalServicesDataAPIDownload.aspx',
'Git repo:' + __url__,
'Uses pytest for tests.\n',
'Has optional (recommended) Redis (http://redis.io) caching built in..', sep='\n')
|
from django.core.management.base import BaseCommand
from morphgnt_api.models import Word
def u(s):
    """Decode a UTF-8 byte string into text."""
    decoded = s.decode("utf-8")
    return decoded
class Command(BaseCommand):
    """Import a whitespace-delimited MorphGNT analysis file into Word rows."""

    def add_arguments(self, parser):
        parser.add_argument("filename", help="filename to import")

    def handle(self, *args, **options):
        filename = options["filename"]
        last_book = None
        with open(filename, "rb") as f:
            for line in f:
                # each line carries exactly 13 whitespace-separated columns
                word_id, verse_id, paragraph_id, sentence_id, \
                    pos, parse, \
                    crit_text, text, word, norm, lemma, \
                    dep_type, head = line.strip().split()
                Word(
                    word_id=word_id,
                    verse_id=verse_id,
                    paragraph_id=paragraph_id,
                    sentence_id=sentence_id,
                    pos=pos,
                    parse=parse,
                    crit_text=u(crit_text),
                    text=u(text),
                    word=u(word),
                    norm=u(norm),
                    lemma=u(lemma),
                    dep_type=dep_type,
                    # BUGFIX: a literal "None" token marks a word without a
                    # dependency head; store NULL instead of the string
                    head=head if head != "None" else None,
                ).save()
                # progress output: the first two characters of verse_id
                # identify the book
                if verse_id[:2] != last_book:
                    # parenthesized form works as a statement on Python 2 and
                    # as a call on Python 3
                    print(verse_id[:2])
                    last_book = verse_id[:2]
Handle a null `head` value better on import.
from django.core.management.base import BaseCommand
from morphgnt_api.models import Word
def u(s):
    """Decode a UTF-8 byte string into text."""
    return s.decode("utf-8")
class Command(BaseCommand):
    """Import a whitespace-delimited MorphGNT analysis file into Word rows.

    NOTE: Python 2 module (uses the ``print`` statement below).
    """

    def add_arguments(self, parser):
        parser.add_argument("filename", help="filename to import")

    def handle(self, *args, **options):
        filename = options["filename"]
        last_book = None
        with open(filename, "rb") as f:
            for line in f:
                # each line carries exactly 13 whitespace-separated columns
                word_id, verse_id, paragraph_id, sentence_id, \
                    pos, parse, \
                    crit_text, text, word, norm, lemma, \
                    dep_type, head = line.strip().split()
                Word(
                    word_id=word_id,
                    verse_id=verse_id,
                    paragraph_id=paragraph_id,
                    sentence_id=sentence_id,
                    pos=pos,
                    parse=parse,
                    crit_text=u(crit_text),
                    text=u(text),
                    word=u(word),
                    norm=u(norm),
                    lemma=u(lemma),
                    dep_type=dep_type,
                    # a literal "None" token marks a word without a dependency
                    # head; store NULL instead of the string
                    head=head if head != "None" else None,
                ).save()
                # progress output: the first two characters of verse_id
                # identify the book
                if verse_id[:2] != last_book:
                    print verse_id[:2]
                    last_book = verse_id[:2]
|
#!/usr/bin/env python
import rospy
import math
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class WallAvoid(object):
    """ROS node that drives forward while steering away from nearby walls,
    using 181-point (-90..+90 degree) LIDAR scans."""

    def __init__(self, timeout=None):
        rospy.init_node("WallAvoid")
        # Quadratic weights per scan angle: turnCoef is antisymmetric (steer
        # away from the closer side), speedCoef peaks straight ahead.
        self.turnCoef = [(x ** 2 - 8100) / 10000000.0 for x in range(-90, 0)] + [(-x ** 2 + 8100) / 10000000.0 for x in range(0, 91)]
        self.speedCoef = [(-x ** 2 + 8100) / 10000000.0 for x in range(-90, 91)]
        self.last_speed = 0
        self.last_turn = 0
        rospy.Subscriber("/scan", LaserScan, self._latestScan)
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        rospy.loginfo("Ready to get out there and avoid some walls!")
        rospy.logdebug(self.turnCoef)
        rospy.logdebug(self.speedCoef)
        self.timeout = None
        if timeout:
            self.timeout = time.time() + timeout
        rospy.spin()

    def isTimedout(self):
        """Return True once the optional execution timer has expired."""
        # BUGFIX: guard against timeout=None — the original compared
        # None <= float, a TypeError on Python 3.
        return self.timeout is not None and self.timeout <= time.time()

    def _latestScan(self, data):
        # Scan callback: convert the latest ranges into a Twist command.
        if self.timeout and self.timeout <= time.time():
            rospy.signal_shutdown("Execution timer expired")
        turnVal = 0
        speedVal = 0
        right_zone = data.ranges[0:65]
        front_zone = data.ranges[65:115]
        left_zone = data.ranges[115:180]
        front_zone_avg = sum(front_zone) / len(front_zone)
        right_zone_avg = sum(right_zone) / len(right_zone)
        left_zone_avg = sum(left_zone) / len(left_zone)
        # If average is really REALLY close, might want to back up instead
        if front_zone_avg < 1.5 or min(front_zone) < 0.8:
            speedVal = -0.1
            # BUGFIX: was `if last_turn > 0` — missing the ':' and reading an
            # undefined bare name instead of the stored previous turn value.
            if self.last_turn > 0:
                rospy.loginfo("Backing up to the left...")
                turnVal = 0.5
            else:
                rospy.loginfo("Backing up to the right...")
                turnVal = -0.3
        else:
            rospy.loginfo("Normal hallway")
            for p in range(0, 181):
                # Inf range return means its over 10m from the LIDAR
                if math.isinf(data.ranges[p]) or math.isnan(data.ranges[p]):
                    speedVal = speedVal + (self.speedCoef[p] * 10)
                    # Don't account long ranges into turn calcs
                else:
                    speedVal = speedVal + (self.speedCoef[p] * data.ranges[p])
                    # Turn away from walls
                    turnVal = turnVal + (self.turnCoef[p] * data.ranges[p])
            speedVal = min(speedVal * 1.2, 0.4)  # sets max speed
            turnVal = turnVal * 1.4
            if front_zone_avg < 2.0:
                turnVal = turnVal * 2.0
                speedVal = speedVal * 1.1
        cmd = Twist()
        cmd.linear.x = speedVal
        cmd.angular.z = turnVal
        self.last_speed = speedVal
        self.last_turn = turnVal
        rospy.loginfo(cmd)
        self.pub.publish(cmd)
# standard ros boilerplate
# Instantiating the node blocks inside rospy.spin() until shutdown;
# ROSInterruptException on Ctrl-C is expected and ignored.
if __name__ == "__main__":
    try:
        run = WallAvoid()  # optionally pass a timeout in seconds
    except rospy.ROSInterruptException:
        pass
Forgot a colon...
#!/usr/bin/env python
import rospy
import math
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class WallAvoid(object):
    """ROS node that drives forward while steering away from nearby walls,
    using 181-point (-90..+90 degree) LIDAR scans."""

    def __init__(self, timeout=None):
        rospy.init_node("WallAvoid")
        # Quadratic weights per scan angle: turnCoef is antisymmetric (steer
        # away from the closer side), speedCoef peaks straight ahead.
        self.turnCoef = [(x ** 2 - 8100) / 10000000.0 for x in range(-90, 0)] + [(-x ** 2 + 8100) / 10000000.0 for x in range(0, 91)]
        self.speedCoef = [(-x ** 2 + 8100) / 10000000.0 for x in range(-90, 91)]
        self.last_speed = 0
        self.last_turn = 0
        rospy.Subscriber("/scan", LaserScan, self._latestScan)
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        rospy.loginfo("Ready to get out there and avoid some walls!")
        rospy.logdebug(self.turnCoef)
        rospy.logdebug(self.speedCoef)
        self.timeout = None
        if timeout:
            self.timeout = time.time() + timeout
        rospy.spin()

    def isTimedout(self):
        """Return True once the optional execution timer has expired."""
        # BUGFIX: guard against timeout=None — the original compared
        # None <= float, a TypeError on Python 3.
        return self.timeout is not None and self.timeout <= time.time()

    def _latestScan(self, data):
        # Scan callback: convert the latest ranges into a Twist command.
        if self.timeout and self.timeout <= time.time():
            rospy.signal_shutdown("Execution timer expired")
        turnVal = 0
        speedVal = 0
        right_zone = data.ranges[0:65]
        front_zone = data.ranges[65:115]
        left_zone = data.ranges[115:180]
        front_zone_avg = sum(front_zone) / len(front_zone)
        right_zone_avg = sum(right_zone) / len(right_zone)
        left_zone_avg = sum(left_zone) / len(left_zone)
        # If average is really REALLY close, might want to back up instead
        if front_zone_avg < 1.5 or min(front_zone) < 0.8:
            speedVal = -0.1
            # BUGFIX: was bare `last_turn` (NameError at runtime) — use the
            # stored previous turn value.
            if self.last_turn > 0:
                rospy.loginfo("Backing up to the left...")
                turnVal = 0.5
            else:
                rospy.loginfo("Backing up to the right...")
                turnVal = -0.3
        else:
            rospy.loginfo("Normal hallway")
            for p in range(0, 181):
                # Inf range return means its over 10m from the LIDAR
                if math.isinf(data.ranges[p]) or math.isnan(data.ranges[p]):
                    speedVal = speedVal + (self.speedCoef[p] * 10)
                    # Don't account long ranges into turn calcs
                else:
                    speedVal = speedVal + (self.speedCoef[p] * data.ranges[p])
                    # Turn away from walls
                    turnVal = turnVal + (self.turnCoef[p] * data.ranges[p])
            speedVal = min(speedVal * 1.2, 0.4)  # sets max speed
            turnVal = turnVal * 1.4
            if front_zone_avg < 2.0:
                turnVal = turnVal * 2.0
                speedVal = speedVal * 1.1
        cmd = Twist()
        cmd.linear.x = speedVal
        cmd.angular.z = turnVal
        self.last_speed = speedVal
        self.last_turn = turnVal
        rospy.loginfo(cmd)
        self.pub.publish(cmd)
# standard ros boilerplate
# Instantiating the node blocks inside rospy.spin() until shutdown;
# ROSInterruptException on Ctrl-C is expected and ignored.
if __name__ == "__main__":
    try:
        run = WallAvoid()  # optionally pass a timeout in seconds
    except rospy.ROSInterruptException:
        pass
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import datetime
import time
from django.db.models.query import RawQuerySet, RawQuerySet
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404, HttpResponseServerError, HttpResponseRedirect, HttpResponse
from django import forms
from django.contrib.auth.models import User
from generic.models import Dashboard, Module, ModuleParams, StaticModuleContent
from django.db.models import Count
from django.views.decorators.cache import cache_control
from .utils import copy_dashboard, get_dates, set_default_dates
def generic_row(request, model=None, pk=None, partial_row='generic/partials/partial_row.html', selectable=True):
    """
    Render the table-row partial for a single object.

    :param model: model class to look the object up on (required)
    :param pk: primary key of the object (required)
    :param partial_row: template used to render the row
    :param selectable: whether the row renders a selection checkbox
    :return: rendered row, or a 500 response when model/pk are missing
    """
    if not (model and pk):
        # BUGFIX: return an HttpResponseServerError *instance*; the class
        # object itself is not a valid Django response.
        return HttpResponseServerError()
    object = get_object_or_404(model, pk=pk)
    return render_to_response(partial_row, {'object':object, 'selectable':selectable},
                              context_instance=RequestContext(request))
def generic(request,
            model=None,
            queryset=None,
            template_object_name='object_list',
            base_template='generic/base.html',
            partial_base='generic/partials/partial_base.html',
            partial_header='generic/partials/partial_header.html',
            partial_row='generic/partials/partial_row.html',
            paginator_template='generic/partials/pagination.html',
            results_title='Results',
            paginated=True,
            selectable=True,
            objects_per_page=25,
            columns=[('object', False, '')],
            sort_column='',
            sort_ascending=True,
            filter_forms=[],
            action_forms=[],
            needs_date=False,
            dates=get_dates,
            **kwargs):
    """
    Generic list view: renders a sortable, filterable, paginated table of
    `model` objects, with optional bulk-action and filter forms.

    POSTs drive sorting, paging, filtering and bulk actions; GETs render the
    initial full page. NOTE(review): Python 2 module — list arithmetic on
    `range(...)` results below relies on py2 semantics. Also note the
    `columns` default holds 3-tuples while the sort loops unpack 4 values;
    presumably callers always pass 4-tuples — confirm.
    """
    # model parameter is required
    if not model:
        # NOTE(review): this returns the class, not an instance
        # (HttpResponseServerError()) — confirm intended.
        return HttpResponseServerError
    # querysets can be calls to a function for dynamic, run-time retrieval
    if callable(queryset):
        if needs_date:
            queryset = queryset(request=request, dates=dates)
        else:
            queryset = queryset(request=request)
    # the default list is either a queryset parameter, or all
    # objects from the model parameter
    object_list = queryset if queryset is not None else model.objects.all()
    # dynamically create a form class to represent the list of selected results,
    # for performing actions
    class ResultsForm(forms.Form):
        results = forms.ModelMultipleChoiceField(queryset=object_list, widget=forms.CheckboxSelectMultiple())
    class_dict = {}
    action_form_instances = []
    for action_class in action_forms:
        form_instance = action_class(**{'request':request})
        fully_qualified_class_name = "%s.%s" % (form_instance.__module__, form_instance.__class__.__name__)
        # we need both a dictionary of action forms (for looking up actions performed)
        # and a list of tuple for rendering within the template in a particular order
        class_dict[fully_qualified_class_name] = action_class
        action_form_instances.append((fully_qualified_class_name, form_instance,))
    filter_form_instances = []
    for filter_class in filter_forms:
        form_instance = filter_class(**{'request':request})
        filter_form_instances.append(form_instance)
    # define some defaults
    response_template = base_template
    page = 1
    selected = False
    status_message = ''
    status_message_type = ''
    # session key under which the last filter POST for this URL is remembered
    FILTER_REQUEST_KEY = "%s_filter_request" % request.path
    filtered_list = object_list
    if request.method == 'POST':
        # check for previous filters in the case of a post,
        # as other actions will be driven from this
        # filtered list
        filter_request_post = request.session.setdefault(FILTER_REQUEST_KEY, None)
        if filter_request_post:
            for form_class in filter_forms:
                form_instance = form_class(filter_request_post, request=request)
                if form_instance.is_valid():
                    filtered_list = form_instance.filter(request, filtered_list)
        page_action = request.POST.get('page_action', '')
        sort_action = request.POST.get('sort_action', '')
        sort_column = request.POST.get('sort_column', '')
        sort_ascending = request.POST.get('sort_ascending', 'True')
        sort_ascending = (sort_ascending == 'True')
        action_taken = request.POST.get('action', '')
        # apply the requested sort regardless of which action was posted
        for column_name, sortable, sort_param, sorter in columns:
            if sortable and sort_param == sort_column:
                filtered_list = sorter.sort(sort_column, filtered_list, sort_ascending)
        if sort_action:
            # we already have to sort regardless, but
            # if this action was just a sort, we're done
            # (and should avoid re-filtering)
            pass
        elif page_action:
            try:
                page = int(request.POST.get('page_num', '1'))
            except ValueError:
                # bad page number: silently stay on page 1
                pass
        elif action_taken:
            everything_selected = request.POST.get('select_everything', None)
            results = []
            if everything_selected:
                # act on the whole filtered list, not just the checked rows
                results = filtered_list
            else:
                resultsform = ResultsForm(request.POST)
                if resultsform.is_valid():
                    results = resultsform.cleaned_data['results']
            action_class = class_dict[action_taken]
            action_instance = action_class(request.POST, request=request)
            if action_instance.is_valid():
                status_message, status_message_type = action_instance.perform(request, results)
        else:
            # it's a new filter, re-start from the object list
            # and filter down on the new set of forms
            filtered_list = object_list
            for form_class in filter_forms:
                form_instance = form_class(request.POST, request=request)
                if form_instance.is_valid():
                    filtered_list = form_instance.filter(request, filtered_list)
                else:
                    # NOTE(review): `action_instance` is not defined in this
                    # branch, so an invalid filter form raises NameError here;
                    # this line probably belongs to the
                    # `action_instance.is_valid()` check above — confirm.
                    status_message,status_message_type=action_instance.errors,'error'
        selected = True
        # store the request filters in the session
        request.session[FILTER_REQUEST_KEY] = request.POST
        response_template = partial_base
    else:
        # reset the filter key, if there was a previous one it should be
        # cleared out
        request.session[FILTER_REQUEST_KEY] = None
        # calls to this view can define a default sorting order,
        # if it's an initial GET request we should perform this sort here
        if sort_column:
            for column_name, sortable, sort_param, sorter in columns:
                if sortable and sort_param == sort_column:
                    filtered_list = sorter.sort(sort_column, filtered_list, sort_ascending)
    if hasattr(filtered_list, 'count') and callable(filtered_list.count):
        try:
            # QuerySet.count() hits the database; plain lists fall through
            total = filtered_list.count()
        except TypeError:
            total = len(filtered_list)
    else:
        total = len(filtered_list)
    paginator = None
    ranges = []
    if paginated:
        paginator = Paginator(filtered_list, objects_per_page)
        # If page request is out of range, deliver last page of results.
        try:
            filtered_list = paginator.page(page).object_list
        except (EmptyPage, InvalidPage):
            filtered_list = paginator.page(paginator.num_pages).object_list
            page = paginator.num_pages
        if paginator.num_pages > 10:
            # long result sets: build low/mid/high page-number ranges so the
            # template can render an abbreviated pager
            low_range = []
            mid_range = []
            high_range = []
            low_range = range(1, 6)
            high_range = range(paginator.num_pages - 4, paginator.num_pages + 1)
            if page < 10:
                # Python 2: range() returns a list, so += concatenates
                low_range += range(6, min(paginator.num_pages, page + 5))
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            elif page > paginator.num_pages - 10:
                high_range = range(max(0, page - 5), paginator.num_pages - 4) + high_range
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            else:
                # current page somewhere in the middle: window around it
                ranges.append(low_range)
                ranges.append(range(10, max(0, page - 2), 10))
                ranges.append(range(max(0, page - 2), min(paginator.num_pages, page + 3)))
                ranges.append(range((round(min(paginator.num_pages, page + 3) / 10) + 1) * 10, paginator.num_pages - 10, 10))
                ranges.append(high_range)
        else:
            ranges.append(paginator.page_range)
    context_vars = {
        'partial_base':partial_base,
        'partial_header':partial_header,
        'partial_row':partial_row,
        'paginator_template':paginator_template,
        'results_title':results_title,
        template_object_name:filtered_list, # for custom templates
        'object_list':filtered_list, # allow generic templates to still
                                     # access the object list in the same way
        'paginator':paginator,
        'filter_forms':filter_form_instances,
        'action_forms':action_form_instances,
        'paginated':paginated,
        'total':total,
        'selectable':selectable,
        'columns':columns,
        'sort_column':sort_column,
        'sort_ascending':sort_ascending,
        'page':page,
        'ranges':ranges,
        'selected':selected,
        'status_message':status_message,
        'status_message_type':status_message_type,
        'base_template':'layout.html',
    }
    # For pages that not only have tables, but also need a time range slider
    if needs_date:
        set_default_dates(dates, request, context_vars)
        context_vars['timeslider_update'] = 'filter(this)'
    context_vars.update(kwargs)
    return render_to_response(response_template, context_vars, context_instance=RequestContext(request))
@cache_control(no_cache=True, max_age=0)
def generic_dashboard(request,
                      slug,
                      module_types=[],
                      base_template='generic/dashboard_base.html',
                      module_header_partial_template='generic/partials/module_header.html',
                      module_partial_template='generic/partials/module.html',
                      title='Dashboard',
                      num_columns=2, **kwargs):
    """
    Render (and handle AJAX updates to) a user-customizable dashboard.

    GET renders the dashboard; a user visiting for the first time gets a
    copy of the anonymous (default) dashboard for this ``slug``.
    POST handles three actions:
      * ``createmodule`` -- validate the posted module form and return the
        rendered partial for the newly created module.
      * ``publish``      -- copy this dashboard to another user (-2/-3 are
        anonymous-user sentinels; -3 also copies to every other user).
      * anything else    -- persist a new column/ordering layout posted as
        ``column -> [module pk, ...]`` lists.

    ``module_types`` is a list of (view_name, module_form_class,
    module_title) tuples describing the available module kinds.
    """
    module_dict = {}
    module_title_dict = {}
    user = (not request.user.is_anonymous() and request.user) or None
    dashboard, created = Dashboard.objects.get_or_create(user=user, slug=slug)
    # Create mapping of module names to module forms and default titles
    for view_name, module_form, module_title in module_types:
        module_dict[view_name] = module_form
        module_title_dict[view_name] = module_title
    module_instances = [(view_name, module_form(), module_title) for view_name, module_form, module_title in module_types]
    if request.method == 'POST':
        page_action = request.POST.get('action', None)
        if page_action == 'createmodule':
            form_type = request.POST.get('module_type', None)
            # Allow the client to override the default title for the module
            # being created.  (This previously keyed off variables leaked
            # from the loop above, so it only ever affected the *last*
            # module type and raised NameError for empty module_types.)
            module_title_dict[form_type] = request.POST.get('title', module_title_dict.get(form_type, ''))
            form = module_dict[form_type](request.POST)
            if form.is_valid():
                module = form.setModuleParams(dashboard, title=module_title_dict[form_type])
                return render_to_response(module_partial_template,
                                          {'mod': module,
                                           'module_header_partial_template': module_header_partial_template},
                                          context_instance=RequestContext(request))
        elif page_action == 'publish':
            user_pk = int(request.POST.get('user', -1))
            if user_pk == -2 or user_pk == -3:  # anonymous user
                copydashboard, created = Dashboard.objects.get_or_create(user=None, slug=slug)
                copy_dashboard(dashboard, copydashboard)
            if user_pk == -3:  # all users
                for u in User.objects.exclude(pk=request.user.pk):
                    copydashboard, created = Dashboard.objects.get_or_create(user=u, slug=slug)
                    copy_dashboard(dashboard, copydashboard)
            elif user_pk >= 0:  # any other single user
                try:
                    user = User.objects.exclude(pk=request.user.pk).get(pk=user_pk)
                    copydashboard, created = Dashboard.objects.get_or_create(user=user, slug=slug)
                    copy_dashboard(dashboard, copydashboard)
                except User.DoesNotExist:
                    # Ignore publish requests naming an unknown user.
                    # (Was a bare except, which hid real errors too.)
                    pass
        else:
            # Layout update: each POST key is a column, each value the
            # ordered list of module pks now in that column.
            data = request.POST.lists()
            old_user_modules = dashboard.modules.values_list('pk', flat=True).distinct()
            new_user_modules = []
            for col_val, offset_list in data:
                offset = 0
                column = int(col_val)
                for mod_pk in offset_list:
                    mod_pk = int(mod_pk)
                    new_user_modules.append(mod_pk)
                    module = Module.objects.get(pk=mod_pk)
                    module.offset = offset
                    module.column = column
                    module.save()
                    offset += 1
            # Modules missing from the new layout were deleted client-side.
            for mod in old_user_modules:
                if not mod in new_user_modules:
                    dashboard.modules.get(pk=mod).delete()
        return HttpResponse(status=200)
    if created:
        # First visit: seed this user's dashboard from the anonymous default.
        default_dash, created = Dashboard.objects.get_or_create(slug=slug, user=None)
        copy_dashboard(default_dash, dashboard)
    modules = [{'col': i, 'modules': []} for i in range(0, num_columns)]
    columns = dashboard.modules.values_list('column', flat=True).distinct()
    for col in columns:
        modules[col]['modules'] = list(dashboard.modules.filter(column=col).order_by('offset'))
    user_list = []
    for u in User.objects.order_by('username'):
        if Dashboard.objects.filter(user=u, slug=slug).count():
            user_list.append((u, Dashboard.objects.get(user=u, slug=slug),))
        else:
            user_list.append((u, None,))
    return render_to_response(base_template,
                              {
                                  'modules': modules,
                                  'title': title,
                                  'module_types': module_instances,
                                  'module_header_partial_template': module_header_partial_template,
                                  'module_partial_template': module_partial_template,
                                  'user_list': user_list,
                              }, context_instance=RequestContext(request))
@cache_control(no_cache=True, max_age=0)
def generic_map(request,
                base_template='generic/map_base.html',
                map_layers=[],
                dates=get_dates,
                display_autoload=True):
    """
    Render a map page composed of the given layers.

    If at least one layer declares a truthy ``needs_date``, default dates
    are filled into the context so the time-range slider can be shown.
    """
    needs_date = any(layer.get('needs_date') for layer in map_layers)
    context = {
        'map_layers': map_layers,
        'needs_date': needs_date,
        'display_autoload': display_autoload,
        'timeslider_update': 'update_date_layers();',
    }
    if needs_date:
        set_default_dates(dates, request, context)
    return render_to_response(base_template, context,
                              context_instance=RequestContext(request))
def static_module(request, content_id):
    """Render the body of one StaticModuleContent as a static module partial (404 if missing)."""
    static_content = get_object_or_404(StaticModuleContent, pk=content_id)
    context = {'content': static_content.content}
    return render_to_response("generic/partials/static_module.html", context,
                              context_instance=RequestContext(request))
typo fix
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import datetime
import time
from django.db.models.query import RawQuerySet, RawQuerySet
from django.template import RequestContext
from django.shortcuts import redirect, get_object_or_404, render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import Http404, HttpResponseServerError, HttpResponseRedirect, HttpResponse
from django import forms
from django.contrib.auth.models import User
from generic.models import Dashboard, Module, ModuleParams, StaticModuleContent
from django.db.models import Count
from django.views.decorators.cache import cache_control
from .utils import copy_dashboard, get_dates, set_default_dates
def generic_row(request, model=None, pk=None, partial_row='generic/partials/partial_row.html', selectable=True):
    """
    Render a single table row for the *model* instance with primary key *pk*.

    Used by AJAX callers to refresh one row of a generic table.  Returns a
    500 response when ``model`` or ``pk`` is missing and 404 when the object
    does not exist.
    """
    if not (model and pk):
        # Bug fix: the response *class* was returned instead of an instance,
        # which is not a valid Django view return value.
        return HttpResponseServerError()
    object = get_object_or_404(model, pk=pk)
    return render_to_response(partial_row, {'object': object, 'selectable': selectable},
                              context_instance=RequestContext(request))
def generic(request,
            model=None,
            queryset=None,
            template_object_name='object_list',
            base_template='generic/base.html',
            partial_base='generic/partials/partial_base.html',
            partial_header='generic/partials/partial_header.html',
            partial_row='generic/partials/partial_row.html',
            paginator_template='generic/partials/pagination.html',
            results_title='Results',
            paginated=True,
            selectable=True,
            objects_per_page=25,
            # Bug fix: columns entries are unpacked as 4-tuples
            # (title, sortable, sort_param, sorter) by every sort loop below,
            # so the old 3-tuple default crashed on any POST.  The added None
            # sorter is never used because sortable is False.
            columns=[('object', False, '', None)],
            sort_column='',
            sort_ascending=True,
            filter_forms=[],
            action_forms=[],
            needs_date=False,
            dates=get_dates,
            **kwargs):
    """
    Generic filterable/sortable/paginated table view over *model*.

    GET renders the full page (and applies the default ``sort_column`` if
    given); POST renders only the table partial and handles sorting, paging,
    bulk actions on selected rows, and (re)filtering.  The active filter
    POST is remembered in the session, keyed by request path.  Extra
    ``kwargs`` are passed through to the template context.
    """
    # model parameter is required
    if not model:
        # Bug fix: return a response instance, not the class itself.
        return HttpResponseServerError()
    # querysets can be calls to a function for dynamic, run-time retrieval
    if callable(queryset):
        if needs_date:
            queryset = queryset(request=request, dates=dates)
        else:
            queryset = queryset(request=request)
    # the default list is either a queryset parameter, or all
    # objects from the model parameter
    object_list = queryset if queryset is not None else model.objects.all()
    # dynamically create a form class to represent the list of selected results,
    # for performing actions
    class ResultsForm(forms.Form):
        results = forms.ModelMultipleChoiceField(queryset=object_list, widget=forms.CheckboxSelectMultiple())
    class_dict = {}
    action_form_instances = []
    for action_class in action_forms:
        form_instance = action_class(**{'request': request})
        fully_qualified_class_name = "%s.%s" % (form_instance.__module__, form_instance.__class__.__name__)
        # we need both a dictionary of action forms (for looking up actions performed)
        # and a list of tuples for rendering within the template in a particular order
        class_dict[fully_qualified_class_name] = action_class
        action_form_instances.append((fully_qualified_class_name, form_instance,))
    filter_form_instances = []
    for filter_class in filter_forms:
        form_instance = filter_class(**{'request': request})
        filter_form_instances.append(form_instance)
    # define some defaults
    response_template = base_template
    page = 1
    selected = False
    status_message = ''
    status_message_type = ''
    FILTER_REQUEST_KEY = "%s_filter_request" % request.path
    filtered_list = object_list
    if request.method == 'POST':
        # check for previous filters in the case of a post,
        # as other actions will be driven from this
        # filtered list
        filter_request_post = request.session.setdefault(FILTER_REQUEST_KEY, None)
        if filter_request_post:
            for form_class in filter_forms:
                form_instance = form_class(filter_request_post, request=request)
                if form_instance.is_valid():
                    filtered_list = form_instance.filter(request, filtered_list)
        page_action = request.POST.get('page_action', '')
        sort_action = request.POST.get('sort_action', '')
        sort_column = request.POST.get('sort_column', '')
        sort_ascending = request.POST.get('sort_ascending', 'True')
        sort_ascending = (sort_ascending == 'True')
        action_taken = request.POST.get('action', '')
        for column_name, sortable, sort_param, sorter in columns:
            if sortable and sort_param == sort_column:
                filtered_list = sorter.sort(sort_column, filtered_list, sort_ascending)
        if sort_action:
            # we already have to sort regardless, but
            # if this action was just a sort, we're done
            # (and should avoid re-filtering)
            pass
        elif page_action:
            try:
                page = int(request.POST.get('page_num', '1'))
            except ValueError:
                pass
        elif action_taken:
            everything_selected = request.POST.get('select_everything', None)
            results = []
            if everything_selected:
                results = filtered_list
            else:
                resultsform = ResultsForm(request.POST)
                if resultsform.is_valid():
                    results = resultsform.cleaned_data['results']
            action_class = class_dict[action_taken]
            action_instance = action_class(request.POST, request=request)
            if action_instance.is_valid():
                status_message, status_message_type = action_instance.perform(request, results)
        else:
            # it's a new filter, re-start from the object list
            # and filter down on the new set of forms
            filtered_list = object_list
            for form_class in filter_forms:
                form_instance = form_class(request.POST, request=request)
                if form_instance.is_valid():
                    filtered_list = form_instance.filter(request, filtered_list)
                else:
                    status_message, status_message_type = form_instance.errors, 'error'
            selected = True
            # store the request filters in the session
            request.session[FILTER_REQUEST_KEY] = request.POST
        response_template = partial_base
    else:
        # reset the filter key, if there was a previous one it should be
        # cleared out
        request.session[FILTER_REQUEST_KEY] = None
        # calls to this view can define a default sorting order,
        # if it's an initial GET request we should perform this sort here
        if sort_column:
            for column_name, sortable, sort_param, sorter in columns:
                if sortable and sort_param == sort_column:
                    filtered_list = sorter.sort(sort_column, filtered_list, sort_ascending)
    # count() is cheaper than len() on querysets; fall back for plain lists
    # or sorters that returned something count() can't handle
    if hasattr(filtered_list, 'count') and callable(filtered_list.count):
        try:
            total = filtered_list.count()
        except TypeError:
            total = len(filtered_list)
    else:
        total = len(filtered_list)
    paginator = None
    ranges = []
    if paginated:
        paginator = Paginator(filtered_list, objects_per_page)
        # If page request is out of range, deliver last page of results.
        try:
            filtered_list = paginator.page(page).object_list
        except (EmptyPage, InvalidPage):
            filtered_list = paginator.page(paginator.num_pages).object_list
            page = paginator.num_pages
        # Build the page-link ranges shown by the paginator template: a low
        # range, decade-spaced middle ranges, and a high range.  NOTE: this
        # relies on Python 2 range() returning concatenable lists.
        if paginator.num_pages > 10:
            low_range = []
            mid_range = []
            high_range = []
            low_range = range(1, 6)
            high_range = range(paginator.num_pages - 4, paginator.num_pages + 1)
            if page < 10:
                low_range += range(6, min(paginator.num_pages, page + 5))
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            elif page > paginator.num_pages - 10:
                high_range = range(max(0, page - 5), paginator.num_pages - 4) + high_range
                mid_range = range(10, paginator.num_pages - 10, 10)
                ranges.append(low_range)
                ranges.append(mid_range)
                ranges.append(high_range)
            else:
                ranges.append(low_range)
                ranges.append(range(10, max(0, page - 2), 10))
                ranges.append(range(max(0, page - 2), min(paginator.num_pages, page + 3)))
                ranges.append(range((round(min(paginator.num_pages, page + 3) / 10) + 1) * 10, paginator.num_pages - 10, 10))
                ranges.append(high_range)
        else:
            ranges.append(paginator.page_range)
    context_vars = {
        'partial_base': partial_base,
        'partial_header': partial_header,
        'partial_row': partial_row,
        'paginator_template': paginator_template,
        'results_title': results_title,
        template_object_name: filtered_list,  # for custom templates
        'object_list': filtered_list,  # allow generic templates to still
                                       # access the object list in the same way
        'paginator': paginator,
        'filter_forms': filter_form_instances,
        'action_forms': action_form_instances,
        'paginated': paginated,
        'total': total,
        'selectable': selectable,
        'columns': columns,
        'sort_column': sort_column,
        'sort_ascending': sort_ascending,
        'page': page,
        'ranges': ranges,
        'selected': selected,
        'status_message': status_message,
        'status_message_type': status_message_type,
        # NOTE(review): this is hard-coded rather than using the
        # base_template parameter -- confirm that is intentional.
        'base_template': 'layout.html',
    }
    # For pages that not only have tables, but also need a time range slider
    if needs_date:
        set_default_dates(dates, request, context_vars)
        context_vars['timeslider_update'] = 'filter(this)'
    context_vars.update(kwargs)
    return render_to_response(response_template, context_vars, context_instance=RequestContext(request))
@cache_control(no_cache=True, max_age=0)
def generic_dashboard(request,
                      slug,
                      module_types=[],
                      base_template='generic/dashboard_base.html',
                      module_header_partial_template='generic/partials/module_header.html',
                      module_partial_template='generic/partials/module.html',
                      title='Dashboard',
                      num_columns=2, **kwargs):
    """
    Render (and handle AJAX updates to) a user-customizable dashboard.

    GET renders the dashboard; a user visiting for the first time gets a
    copy of the anonymous (default) dashboard for this ``slug``.
    POST handles three actions:
      * ``createmodule`` -- validate the posted module form and return the
        rendered partial for the newly created module.
      * ``publish``      -- copy this dashboard to another user (-2/-3 are
        anonymous-user sentinels; -3 also copies to every other user).
      * anything else    -- persist a new column/ordering layout posted as
        ``column -> [module pk, ...]`` lists.

    ``module_types`` is a list of (view_name, module_form_class,
    module_title) tuples describing the available module kinds.
    """
    module_dict = {}
    module_title_dict = {}
    user = (not request.user.is_anonymous() and request.user) or None
    dashboard, created = Dashboard.objects.get_or_create(user=user, slug=slug)
    # Create mapping of module names to module forms and default titles
    for view_name, module_form, module_title in module_types:
        module_dict[view_name] = module_form
        module_title_dict[view_name] = module_title
    module_instances = [(view_name, module_form(), module_title) for view_name, module_form, module_title in module_types]
    if request.method == 'POST':
        page_action = request.POST.get('action', None)
        if page_action == 'createmodule':
            form_type = request.POST.get('module_type', None)
            # Allow the client to override the default title for the module
            # being created.  (This previously keyed off variables leaked
            # from the loop above, so it only ever affected the *last*
            # module type and raised NameError for empty module_types.)
            module_title_dict[form_type] = request.POST.get('title', module_title_dict.get(form_type, ''))
            form = module_dict[form_type](request.POST)
            if form.is_valid():
                module = form.setModuleParams(dashboard, title=module_title_dict[form_type])
                return render_to_response(module_partial_template,
                                          {'mod': module,
                                           'module_header_partial_template': module_header_partial_template},
                                          context_instance=RequestContext(request))
        elif page_action == 'publish':
            user_pk = int(request.POST.get('user', -1))
            if user_pk == -2 or user_pk == -3:  # anonymous user
                copydashboard, created = Dashboard.objects.get_or_create(user=None, slug=slug)
                copy_dashboard(dashboard, copydashboard)
            if user_pk == -3:  # all users
                for u in User.objects.exclude(pk=request.user.pk):
                    copydashboard, created = Dashboard.objects.get_or_create(user=u, slug=slug)
                    copy_dashboard(dashboard, copydashboard)
            elif user_pk >= 0:  # any other single user
                try:
                    user = User.objects.exclude(pk=request.user.pk).get(pk=user_pk)
                    copydashboard, created = Dashboard.objects.get_or_create(user=user, slug=slug)
                    copy_dashboard(dashboard, copydashboard)
                except User.DoesNotExist:
                    # Ignore publish requests naming an unknown user.
                    # (Was a bare except, which hid real errors too.)
                    pass
        else:
            # Layout update: each POST key is a column, each value the
            # ordered list of module pks now in that column.
            data = request.POST.lists()
            old_user_modules = dashboard.modules.values_list('pk', flat=True).distinct()
            new_user_modules = []
            for col_val, offset_list in data:
                offset = 0
                column = int(col_val)
                for mod_pk in offset_list:
                    mod_pk = int(mod_pk)
                    new_user_modules.append(mod_pk)
                    module = Module.objects.get(pk=mod_pk)
                    module.offset = offset
                    module.column = column
                    module.save()
                    offset += 1
            # Modules missing from the new layout were deleted client-side.
            for mod in old_user_modules:
                if not mod in new_user_modules:
                    dashboard.modules.get(pk=mod).delete()
        return HttpResponse(status=200)
    if created:
        # First visit: seed this user's dashboard from the anonymous default.
        default_dash, created = Dashboard.objects.get_or_create(slug=slug, user=None)
        copy_dashboard(default_dash, dashboard)
    modules = [{'col': i, 'modules': []} for i in range(0, num_columns)]
    columns = dashboard.modules.values_list('column', flat=True).distinct()
    for col in columns:
        modules[col]['modules'] = list(dashboard.modules.filter(column=col).order_by('offset'))
    user_list = []
    for u in User.objects.order_by('username'):
        if Dashboard.objects.filter(user=u, slug=slug).count():
            user_list.append((u, Dashboard.objects.get(user=u, slug=slug),))
        else:
            user_list.append((u, None,))
    return render_to_response(base_template,
                              {
                                  'modules': modules,
                                  'title': title,
                                  'module_types': module_instances,
                                  'module_header_partial_template': module_header_partial_template,
                                  'module_partial_template': module_partial_template,
                                  'user_list': user_list,
                              }, context_instance=RequestContext(request))
@cache_control(no_cache=True, max_age=0)
def generic_map(request,
                base_template='generic/map_base.html',
                map_layers=[],
                dates=get_dates,
                display_autoload=True):
    """
    Render a map page composed of the given layers.

    If at least one layer declares a truthy ``needs_date``, default dates
    are filled into the context so the time-range slider can be shown.
    """
    needs_date = any(layer.get('needs_date') for layer in map_layers)
    context = {
        'map_layers': map_layers,
        'needs_date': needs_date,
        'display_autoload': display_autoload,
        'timeslider_update': 'update_date_layers();',
    }
    if needs_date:
        set_default_dates(dates, request, context)
    return render_to_response(base_template, context,
                              context_instance=RequestContext(request))
def static_module(request, content_id):
    """Render the body of one StaticModuleContent as a static module partial (404 if missing)."""
    static_content = get_object_or_404(StaticModuleContent, pk=content_id)
    context = {'content': static_content.content}
    return render_to_response("generic/partials/static_module.html", context,
                              context_instance=RequestContext(request))
|
#!/usr/bin/env python
import rospy
import math
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class WallAvoid(object):
    """
    Reactive wall-avoidance node.

    Subscribes to /scan (LaserScan; the callback indexes 181 rays, assumed
    to span -90..+90 degrees -- TODO confirm against the LIDAR config) and
    publishes Twist commands on /cmd_vel.  Quadratic per-ray weights favor
    driving toward open space and steering away from nearby walls.
    """

    def __init__(self, timeout=None):
        """Start the node and spin; *timeout* seconds (optional) limits its lifetime."""
        rospy.init_node("WallAvoid")
        # Quadratic weights per scan index: turn contribution is signed
        # (one sign per half of the scan), speed contribution peaks at the
        # center ray.
        self.turnCoef = [(x ** 2 - 8100) / 10000000.0 for x in range(-90, 0)] + [(-x ** 2 + 8100) / 10000000.0 for x in range(0, 91)]
        self.speedCoef = [(-x ** 2 + 8100) / 10000000.0 for x in range(-90, 91)]
        # Fix: set the shutdown deadline and create the publisher *before*
        # subscribing -- the callback can fire as soon as the subscription
        # exists and it reads both self.timeout and self.pub.
        self.timeout = None
        if timeout:
            self.timeout = time.time() + timeout
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        rospy.Subscriber("/scan", LaserScan, self._latestScan)
        rospy.loginfo("Ready to get out there and avoid some walls!")
        rospy.logdebug(self.turnCoef)
        rospy.logdebug(self.speedCoef)
        rospy.spin()

    def _latestScan(self, data):
        """Compute and publish one Twist command from the latest scan."""
        # Fix: only honor the timer when one was configured.  Comparing None
        # against time.time() shut the node down immediately on Python 2
        # (None sorts below all numbers) and raises TypeError on Python 3.
        if self.timeout is not None and self.timeout <= time.time():
            rospy.signal_shutdown("Execution timer expired")
            return
        turnVal = 0
        speedVal = 0
        right_zone = data.ranges[0:65]
        front_zone = data.ranges[65:115]
        left_zone = data.ranges[115:180]
        front_zone_avg = sum(front_zone) / len(front_zone)
        right_zone_avg = sum(right_zone) / len(right_zone)
        left_zone_avg = sum(left_zone) / len(left_zone)
        # If average is really REALLY close, might want to back up instead
        if front_zone_avg < 1.5 or min(front_zone) < 0.8:
            speedVal = -0.1
            if left_zone_avg > right_zone_avg:
                rospy.loginfo("Backing up to the left...")
                turnVal = 0.5
            else:
                rospy.loginfo("Backing up to the right...")
                turnVal = -0.3
        else:
            rospy.loginfo("Normal hallway")
            for p in range(0, 181):
                # Inf range return means its over 10m from the LIDAR
                if math.isinf(data.ranges[p]) or math.isnan(data.ranges[p]):
                    speedVal = speedVal + (self.speedCoef[p] * 10)
                    # Don't account long ranges into turn calcs
                else:
                    speedVal = speedVal + (self.speedCoef[p] * data.ranges[p])
                    # Turn away from walls
                    turnVal = turnVal + (self.turnCoef[p] * data.ranges[p])
        speedVal = min(speedVal * 1.2, 0.45)  # sets max speed
        turnVal = turnVal * 1.2
        cmd = Twist()
        cmd.linear.x = speedVal
        cmd.angular.z = turnVal
        rospy.loginfo(cmd)
        self.pub.publish(cmd)
Adding in further turning decisions.
#!/usr/bin/env python
import rospy
import math
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class WallAvoid(object):
    """
    Reactive wall-avoidance node (revision with min-distance backing check).

    Subscribes to /scan (LaserScan; the callback indexes 181 rays, assumed
    to span -90..+90 degrees -- TODO confirm against the LIDAR config) and
    publishes Twist commands on /cmd_vel.  Quadratic per-ray weights favor
    driving toward open space and steering away from nearby walls.
    """

    def __init__(self, timeout=None):
        """Start the node and spin; *timeout* seconds (optional) limits its lifetime."""
        rospy.init_node("WallAvoid")
        # Quadratic weights per scan index: turn contribution is signed
        # (one sign per half of the scan), speed contribution peaks at the
        # center ray.
        self.turnCoef = [(x ** 2 - 8100) / 10000000.0 for x in range(-90, 0)] + [(-x ** 2 + 8100) / 10000000.0 for x in range(0, 91)]
        self.speedCoef = [(-x ** 2 + 8100) / 10000000.0 for x in range(-90, 91)]
        # Fix: set the shutdown deadline and create the publisher *before*
        # subscribing -- the callback can fire as soon as the subscription
        # exists and it reads both self.timeout and self.pub.
        self.timeout = None
        if timeout:
            self.timeout = time.time() + timeout
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        rospy.Subscriber("/scan", LaserScan, self._latestScan)
        rospy.loginfo("Ready to get out there and avoid some walls!")
        rospy.logdebug(self.turnCoef)
        rospy.logdebug(self.speedCoef)
        rospy.spin()

    def _latestScan(self, data):
        """Compute and publish one Twist command from the latest scan."""
        # Fix: only honor the timer when one was configured.  Comparing None
        # against time.time() shut the node down immediately on Python 2
        # (None sorts below all numbers) and raises TypeError on Python 3.
        if self.timeout is not None and self.timeout <= time.time():
            rospy.signal_shutdown("Execution timer expired")
            return
        turnVal = 0
        speedVal = 0
        right_zone = data.ranges[0:65]
        front_zone = data.ranges[65:115]
        left_zone = data.ranges[115:180]
        front_zone_avg = sum(front_zone) / len(front_zone)
        right_zone_avg = sum(right_zone) / len(right_zone)
        left_zone_avg = sum(left_zone) / len(left_zone)
        # If average is really REALLY close, might want to back up instead
        if front_zone_avg < 1.5 or min(front_zone) < 0.8:
            speedVal = -0.1
            # Back toward the side that is clearer both on average and at
            # its closest point.
            if left_zone_avg > right_zone_avg and min(left_zone) > min(right_zone):
                rospy.loginfo("Backing up to the left...")
                turnVal = 0.5
            else:
                rospy.loginfo("Backing up to the right...")
                turnVal = -0.3
        else:
            rospy.loginfo("Normal hallway")
            for p in range(0, 181):
                # Inf range return means its over 10m from the LIDAR
                if math.isinf(data.ranges[p]) or math.isnan(data.ranges[p]):
                    speedVal = speedVal + (self.speedCoef[p] * 10)
                    # Don't account long ranges into turn calcs
                else:
                    speedVal = speedVal + (self.speedCoef[p] * data.ranges[p])
                    # Turn away from walls
                    turnVal = turnVal + (self.turnCoef[p] * data.ranges[p])
        speedVal = min(speedVal * 1.2, 0.4)  # sets max speed
        turnVal = turnVal * 1.4
        cmd = Twist()
        cmd.linear.x = speedVal
        cmd.angular.z = turnVal
        rospy.loginfo(cmd)
        self.pub.publish(cmd)
|
import os, os.path as path
import re
import SCons.Variables
import SCons.Environment
class BuildConfig(object):
def __init__(self, **kwargs):
if not hasattr(os, 'uname') or self.matches('CYGWIN'):
self.os = 'win32'
elif self.matches('Darwin'):
self.os = 'osx'
elif self.matches('Linux'):
self.os = 'linux'
vars = SCons.Variables.Variables()
vars.Add('PRODUCT_NAME', 'The underlying product name that Kroll will display (default: "Kroll")', kwargs['PRODUCT_NAME'])
vars.Add('INSTALL_PREFIX', 'The install prefix of binaries in the system (default: /usr/local)', kwargs['INSTALL_PREFIX'])
vars.Add('GLOBAL_NS_VARNAME','The name of the Kroll global variable', kwargs['GLOBAL_NS_VARNAME'])
vars.Add('CONFIG_FILENAME','The name of the Kroll config file', kwargs['CONFIG_FILENAME'])
self.env = SCons.Environment.Environment(variables = vars)
self.env.Append(CPPDEFINES = {
'OS_' + self.os.upper(): 1,
'_INSTALL_PREFIX': '${INSTALL_PREFIX}',
'_PRODUCT_NAME': '${PRODUCT_NAME}',
'_GLOBAL_NS_VARNAME': '${GLOBAL_NS_VARNAME}',
'_CONFIG_FILENAME' : '${CONFIG_FILENAME}'
})
self.dir = kwargs['BUILD_DIR'] + '/' + self.os
self.third_party = kwargs['THIRD_PARTY_DIR'] + '/' + self.os
self.init_thirdparty_libs()
def init_thirdparty_libs(self):
self.thirdparty_libs = {
'poco': {
'win32': {
'cpp_path': [path.join(self.third_party, 'poco', 'include')],
'lib_path': [path.join(self.third_party, 'poco', 'lib')],
'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL']
},
'linux': {
'cpp_path': [path.join(self.third_party, 'poco', 'include')],
'lib_path': [path.join(self.third_party, 'poco', 'lib')],
'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL', 'PocoUtil', 'PocoXML']
},
'osx': {
'cpp_path': [path.join(self.third_party, 'poco', 'headers')],
'lib_path': [path.join(self.third_party, 'poco', 'lib')],
'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL']
}
}
}
def matches(self, n): return bool(re.match(os.uname()[0], n))
def is_linux(self): return self.os == 'linux'
def is_osx(self): return self.os == 'osx'
def is_win32(self): return self.os == 'win32'
def add_thirdparty(self, env, name):
print "adding %s lib" % name
print self.thirdparty_libs[name][self.os]['libs']
env.Append(LIBPATH=[self.thirdparty_libs[name][self.os]['lib_path']])
env.Append(CPPPATH=[self.thirdparty_libs[name][self.os]['cpp_path']])
env.Append(LIBS=[self.thirdparty_libs[name][self.os]['libs']])
Removed debug
import os, os.path as path
import re
import SCons.Variables
import SCons.Environment
class BuildConfig(object):
    """
    Detects the host platform and assembles the base SCons build
    environment for Kroll, including per-OS lookup tables for the bundled
    third-party (Poco) libraries.
    """

    def __init__(self, **kwargs):
        # Platform detection: win32 has no os.uname; Cygwin reports
        # 'CYGWIN_NT-*' from uname but builds as win32.
        if not hasattr(os, 'uname') or self.matches('CYGWIN'):
            self.os = 'win32'
        elif self.matches('Darwin'):
            self.os = 'osx'
        elif self.matches('Linux'):
            self.os = 'linux'
        vars = SCons.Variables.Variables()
        vars.Add('PRODUCT_NAME', 'The underlying product name that Kroll will display (default: "Kroll")', kwargs['PRODUCT_NAME'])
        vars.Add('INSTALL_PREFIX', 'The install prefix of binaries in the system (default: /usr/local)', kwargs['INSTALL_PREFIX'])
        vars.Add('GLOBAL_NS_VARNAME','The name of the Kroll global variable', kwargs['GLOBAL_NS_VARNAME'])
        vars.Add('CONFIG_FILENAME','The name of the Kroll config file', kwargs['CONFIG_FILENAME'])
        self.env = SCons.Environment.Environment(variables = vars)
        self.env.Append(CPPDEFINES = {
            'OS_' + self.os.upper(): 1,
            '_INSTALL_PREFIX': '${INSTALL_PREFIX}',
            '_PRODUCT_NAME': '${PRODUCT_NAME}',
            '_GLOBAL_NS_VARNAME': '${GLOBAL_NS_VARNAME}',
            '_CONFIG_FILENAME' : '${CONFIG_FILENAME}'
        })
        self.dir = kwargs['BUILD_DIR'] + '/' + self.os
        self.third_party = kwargs['THIRD_PARTY_DIR'] + '/' + self.os
        self.init_thirdparty_libs()

    def init_thirdparty_libs(self):
        # Per-OS include paths, library paths and library names for the
        # bundled third-party dependencies.
        self.thirdparty_libs = {
            'poco': {
                'win32': {
                    'cpp_path': [path.join(self.third_party, 'poco', 'include')],
                    'lib_path': [path.join(self.third_party, 'poco', 'lib')],
                    'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL']
                },
                'linux': {
                    'cpp_path': [path.join(self.third_party, 'poco', 'include')],
                    'lib_path': [path.join(self.third_party, 'poco', 'lib')],
                    'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL', 'PocoUtil', 'PocoXML']
                },
                'osx': {
                    'cpp_path': [path.join(self.third_party, 'poco', 'headers')],
                    'lib_path': [path.join(self.third_party, 'poco', 'lib')],
                    'libs': ['PocoFoundation', 'PocoNet', 'PocoNetSSL']
                }
            }
        }

    def matches(self, n):
        # True when the OS name reported by uname starts with *n*.
        # Bug fix: the re.match arguments were reversed -- the uname string
        # was used as the *pattern*, so matches('CYGWIN') never matched a
        # uname of 'CYGWIN_NT-6.1'.
        return bool(re.match(n, os.uname()[0]))

    def is_linux(self): return self.os == 'linux'
    def is_osx(self): return self.os == 'osx'
    def is_win32(self): return self.os == 'win32'

    def add_thirdparty(self, env, name):
        # Wire the named third-party library's paths and libs into env.
        env.Append(LIBPATH=[self.thirdparty_libs[name][self.os]['lib_path']])
        env.Append(CPPPATH=[self.thirdparty_libs[name][self.os]['cpp_path']])
        env.Append(LIBS=[self.thirdparty_libs[name][self.os]['libs']])
|
##
# Registry is a GeniServer that implements the Registry interface
import tempfile
import os
import time
import sys
from geni.util.credential import Credential
from geni.util.hierarchy import Hierarchy
from geni.util.trustedroot import TrustedRootList
from geni.util.cert import Keypair, Certificate
from geni.util.gid import GID, create_uuid
from geni.util.geniserver import GeniServer
from geni.util.geniclient import GeniClient
from geni.util.record import GeniRecord
from geni.util.rights import RightList
from geni.util.genitable import GeniTable
from geni.util.geniticket import Ticket
from geni.util.excep import *
from geni.util.misc import *
from geni.util.config import *
from geni.util.storage import *
##
# Convert geni fields to PLC fields for use when registering or updating
# registry record in the PLC database
#
# @param type type of record (user, slice, ...)
# @param hrn human readable name
# @param geni_fields dictionary of geni fields
# @param pl_fields dictionary of PLC fields (output)
def geni_fields_to_pl_fields(type, hrn, geni_fields, pl_fields):
    """
    Fill in the PLC fields required to register or update a record of the
    given type, never overwriting values already present in *pl_fields*.

    Raises MissingGeniInfo when a required geni field ("email" for users,
    "dns" for nodes) is absent and no PLC value was supplied.
    """
    if type == "user":
        if "email" not in pl_fields:
            if "email" not in geni_fields:
                raise MissingGeniInfo("email")
            pl_fields["email"] = geni_fields["email"]
        pl_fields.setdefault("first_name", "geni")
        pl_fields.setdefault("last_name", hrn)
    elif type == "slice":
        pl_fields.setdefault("instantiation", "delegated")  # "plc-instantiated"
        if "name" not in pl_fields:
            pl_fields["name"] = hrn_to_pl_slicename(hrn)
        pl_fields.setdefault("max_nodes", 10)
    elif type == "node":
        if "hostname" not in pl_fields:
            if "dns" not in geni_fields:
                raise MissingGeniInfo("dns")
            pl_fields["hostname"] = geni_fields["dns"]
        pl_fields.setdefault("model", "geni")
    elif type == "sa":
        # login_base is always derived from the hrn, even if one was given.
        pl_fields["login_base"] = hrn_to_pl_login_base(hrn)
        pl_fields.setdefault("name", hrn)
        pl_fields.setdefault("abbreviated_name", hrn)
        pl_fields.setdefault("enabled", True)
        pl_fields.setdefault("is_public", True)
##
# Registry is a GeniServer that serves registry and slice operations at PLC.
class Registry(GeniServer):
##
# Create a new registry object.
#
# @param ip the ip address to listen on
# @param port the port to listen on
# @param key_file private key filename of registry
# @param cert_file certificate filename containing public key (could be a GID file)
def __init__(self, ip, port, key_file, cert_file, config = '/usr/share/geniwrapper/geni/util/geni_config'):
    """
    Create a new registry server listening on ip:port with the given
    private key and certificate, reading settings from *config*, then
    connect to PLC, to our own registry, and to all trusted peer registries.
    """
    GeniServer.__init__(self, ip, port, key_file, cert_file)
    # get PL account settings from config module
    self.pl_auth = get_pl_auth()
    # connect to planetlab: a "Url" entry means a remote PLC API reached
    # over XMLRPC, otherwise use the in-process local API shell
    if "Url" in self.pl_auth:
        self.connect_remote_shell()
    else:
        self.connect_local_shell()
    self.key_file = key_file
    self.cert_file = cert_file
    self.config = Config(config)
    # basedir already ends with os.sep here...
    self.basedir = self.config.GENI_BASE_DIR + os.sep
    # ...so this produces a doubled separator ("<base>//geni/").
    # NOTE(review): harmless on POSIX, but confirm it is intentional.
    self.server_basedir = self.basedir + os.sep + "geni" + os.sep
    self.hrn = self.config.GENI_INTERFACE_HRN
    # get peer registry information (connection details per peer)
    registries_file = self.server_basedir + os.sep + 'registries.xml'
    connection_dict = {'hrn': '', 'addr': '', 'port': ''}
    self.registry_info = XmlStorage(registries_file, {'registries': {'registry': [connection_dict]}})
    self.registry_info.load()
    self.connectRegistry()
    self.loadCredential()
    self.connectRegistries()
##
# Connect to a remote shell via XMLRPC
def connect_remote_shell(self):
    """Talk to a remote PLC API via an XMLRPC-backed shell."""
    import geni.util.remoteshell
    self.shell = geni.util.remoteshell.RemoteShell()
##
# Connect to a local shell via local API functions
def connect_local_shell(self):
    """Talk to the local PLC API through in-process function calls."""
    import PLC.Shell
    local_shell = PLC.Shell.Shell(globals = globals())
    self.shell = local_shell
##
# Register the server RPCs for the registry
def register_functions(self):
    """Expose the registry-interface RPCs on top of the base GeniServer set."""
    GeniServer.register_functions(self)
    # registry interface
    for rpc in (self.create_gid,
                self.get_self_credential,
                self.get_credential,
                self.get_gid,
                self.get_ticket,
                self.register,
                self.remove,
                self.update,
                self.list,
                self.resolve):
        self.server.register_function(rpc)
def loadCredential(self):
    """
    Attempt to load our credential from a cached file; if the file is
    missing, fetch the credential from the registry instead.
    """
    # see if this file exists
    # NOTE(review): this reads "reg.<hrn>.sa.cred", but
    # getCredentialFromRegistry() saves to "smgr.<hrn>.sa.cred", so the
    # cached file is never found on later startups -- confirm which file
    # name prefix is intended.
    ma_cred_filename = self.server_basedir + os.sep + "reg." + self.hrn + ".sa.cred"
    try:
        self.credential = Credential(filename = ma_cred_filename)
    except IOError:
        self.credential = self.getCredentialFromRegistry()
def getCredentialFromRegistry(self):
    """
    Fetch our current credential from the registry.

    First obtains a self ('ma') credential, then uses it to request the
    'sa' credential; both are cached to disk.  Returns the 'sa' credential.
    """
    # get self credential
    # NOTE(review): files are saved with an "smgr." prefix here, while
    # loadCredential() looks for "reg.<hrn>.sa.cred" -- the mismatch means
    # the cache written here is never read back; confirm intended prefix.
    self_cred_filename = self.server_basedir + os.sep + "smgr." + self.hrn + ".cred"
    self_cred = self.registry.get_credential(None, 'ma', self.hrn)
    self_cred.save_to_file(self_cred_filename, save_parents=True)
    # get ma credential
    ma_cred_filename = self.server_basedir + os.sep + "smgr." + self.hrn + ".sa.cred"
    ma_cred = self.registry.get_credential(self_cred, 'sa', self.hrn)
    ma_cred.save_to_file(ma_cred_filename, save_parents=True)
    return ma_cred
def connectRegistry(self):
    """Open a GeniClient connection to our own registry, per the config."""
    host = self.config.GENI_REGISTRY_HOSTNAME
    port = self.config.GENI_REGISTRY_PORT
    url = 'http://%s:%s' % (host, port)
    self.registry = GeniClient(url, self.key_file, self.cert_file)
def connectRegistries(self):
    """
    Read connection details for the trusted peer registries from the
    loaded registries.xml data and open a GeniClient to each, keyed by hrn.
    """
    self.registries = {}
    registries = self.registry_info['registries']['registry']
    # a single <registry> entry parses as a dict rather than a list
    if isinstance(registries, dict):
        registries = [registries]
    if isinstance(registries, list):
        for entry in registries:
            # create xmlrpc connection using GeniClient
            url = 'http://%s:%s' % (entry['addr'], entry['port'])
            self.registries[entry['hrn']] = GeniClient(url, self.key_file, self.cert_file)
    ##
    # Given an authority name, return the information for that authority. This
    # is basically a stub that calls the hierarchy module.
    #
    # @param auth_hrn human readable name of authority
    #
    # @return the hierarchy's auth-info object (the hierarchy raises if the
    #     authority does not exist)
    def get_auth_info(self, auth_hrn):
        return self.hierarchy.get_auth_info(auth_hrn)
    ##
    # Given an authority name, return the database table for that authority. If
    # the database table does not exist, then one will be automatically
    # created.
    #
    # @param auth_name human readable name of authority
    #
    # @return a GeniTable bound to the authority's database
    def get_auth_table(self, auth_name):
        auth_info = self.get_auth_info(auth_name)
        table = GeniTable(hrn=auth_name,
                          cninfo=auth_info.get_dbinfo())
        # if the table doesn't exist, then it means we haven't put any records
        # into this authority yet.
        if not table.exists():
            print "Registry: creating table for authority", auth_name
            table.create()
        return table
    ##
    # Verify that an authority belongs to this registry. This is basically left
    # up to the implementation of the hierarchy module. If the specified name
    # does not belong to this registry, an exception is thrown indicating the
    # caller should contact someone else.
    #
    # @param name human readable name of authority
    def verify_auth_belongs_to_me(self, name):
        # get_auth_info will throw an exception if the authority does not
        # exist; we discard the result, only the check matters
        self.get_auth_info(name)
    ##
    # Verify that an object belongs to this registry. By extension, this implies
    # that the authority that owns the object belongs to this registry. If the
    # object does not belong to this registry, then an exception is thrown.
    #
    # @param name human readable name of object
    def verify_object_belongs_to_me(self, name):
        auth_name = get_authority(name)
        if not auth_name:
            # the root authority belongs to the registry by default?
            # TODO: is this true?
            return
        self.verify_auth_belongs_to_me(auth_name)
##
# Verify that the object_gid that was specified in the credential allows
# permission to the object 'name'. This is done by a simple prefix test.
# For example, an object_gid for planetlab.us.arizona would match the
# objects planetlab.us.arizona.slice1 and planetlab.us.arizona.
#
# @param name human readable name to test
def verify_object_permission(self, name):
object_hrn = self.object_gid.get_hrn()
if object_hrn == name:
return
if name.startswith(object_hrn + "."):
return
raise PermissionError(name)
    ##
    # Fill in the planetlab-specific fields of a Geni record. This involves
    # calling the appropriate PLC methods to retrieve the database record for
    # the object.
    #
    # PLC data is filled into the pl_info field of the record.
    #
    # @param record record to fill in fields (in/out param)
    #
    # Raises UnknownGeniType for an unrecognized record type, and
    # PlanetLabRecordDoesNotExist when PLC no longer has the object.
    def fill_record_pl_info(self, record):
        type = record.get_type()
        pointer = record.get_pointer()

        # records with pointer==-1 do not have plc info associated with them.
        # for example, the top level authority records which are
        # authorities, but not PL "sites"
        if pointer == -1:
            record.set_pl_info({})
            return

        # sa and ma both map to PLC sites; the other types map one-to-one
        if (type == "sa") or (type == "ma"):
            pl_res = self.shell.GetSites(self.pl_auth, [pointer])
        elif (type == "slice"):
            pl_res = self.shell.GetSlices(self.pl_auth, [pointer])
        elif (type == "user"):
            pl_res = self.shell.GetPersons(self.pl_auth, [pointer])
        elif (type == "node"):
            pl_res = self.shell.GetNodes(self.pl_auth, [pointer])
        else:
            raise UnknownGeniType(type)

        if not pl_res:
            # the planetlab record no longer exists
            # TODO: delete the geni record ?
            raise PlanetLabRecordDoesNotExist(record.get_name())
        record.set_pl_info(pl_res[0])
    ##
    # Look up user records given PLC user-ids. This is used as part of the
    # process for reverse-mapping PLC records into Geni records.
    #
    # @param auth_table database table for the authority that holds the user records
    # @param user_id_list list of user ids
    # @param role either "*" or a string describing the role to look for ("pi", "user", ...)
    #
    # @return list of matching user hrns (names, not record objects)
    #
    # TODO: This function currently only searches one authority because it would
    # be inefficient to brute-force search all authorities for a user id. The
    # solution would likely be to implement a reverse mapping of user-id to
    # (type, hrn) pairs.
    def lookup_users(self, auth_table, user_id_list, role="*"):
        record_list = []
        for person_id in user_id_list:
            user_records = auth_table.find("user", person_id, "pointer")
            for user_record in user_records:
                self.fill_record_info(user_record)
                # match against the roles PLC reports for this person
                user_roles = user_record.get_pl_info().get("roles")
                if (role=="*") or (role in user_roles):
                    record_list.append(user_record.get_name())
        return record_list
##
# Fill in the geni-specific fields of the record.
#
# Note: It is assumed the fill_record_pl_info() has already been performed
# on the record.
def fill_record_geni_info(self, record):
geni_info = {}
type = record.get_type()
if (type == "slice"):
auth_table = self.get_auth_table(get_authority(record.get_name()))
person_ids = record.pl_info.get("person_ids", [])
researchers = self.lookup_users(auth_table, person_ids)
geni_info['researcher'] = researchers
elif (type == "sa"):
auth_table = self.get_auth_table(record.get_name())
person_ids = record.pl_info.get("person_ids", [])
pis = self.lookup_users(auth_table, person_ids, "pi")
geni_info['pi'] = pis
# TODO: OrganizationName
elif (type == "ma"):
auth_table = self.get_auth_table(record.get_name())
person_ids = record.pl_info.get("person_ids", [])
operators = self.lookup_users(auth_table, person_ids, "tech")
geni_info['operator'] = operators
# TODO: OrganizationName
auth_table = self.get_auth_table(record.get_name())
person_ids = record.pl_info.get("person_ids", [])
owners = self.lookup_users(auth_table, person_ids, "admin")
geni_info['owner'] = owners
elif (type == "node"):
geni_info['dns'] = record.pl_info.get("hostname", "")
# TODO: URI, LatLong, IP, DNS
elif (type == "user"):
geni_info['email'] = record.pl_info.get("email", "")
# TODO: PostalAddress, Phone
record.set_geni_info(geni_info)
    ##
    # Given a Geni record, fill in the PLC-specific and Geni-specific fields
    # in the record.
    #
    # @param record record to fill in (in/out param). pl_info is filled
    #     first because fill_record_geni_info() reads it.
    def fill_record_info(self, record):
        self.fill_record_pl_info(record)
        self.fill_record_geni_info(record)
    def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
        """
        Reconcile one membership list (e.g. 'researcher') between an old and
        a new record: addFunc is called for members present only in the new
        list, delFunc for members present only in the old list.
        oldRecord==None means a fresh Register, so every new member is added.
        """
        # get a list of the HRNs that are members of the old and new records
        if oldRecord:
            if oldRecord.pl_info == None:
                oldRecord.pl_info = {}
            oldList = oldRecord.get_geni_info().get(listName, [])
        else:
            oldList = []
        newList = record.get_geni_info().get(listName, [])

        # if the lists are the same, then we don't have to update anything
        if (oldList == newList):
            return

        # build a list of the new person ids, by looking up each person to get
        # their pointer
        newIdList = []
        for hrn in newList:
            userRecord = self.resolve_raw("user", hrn)[0]
            newIdList.append(userRecord.get_pointer())

        # build a list of the old person ids from the person_ids field of the
        # pl_info
        if oldRecord:
            oldIdList = oldRecord.pl_info.get("person_ids", [])
            containerId = oldRecord.get_pointer()
        else:
            # if oldRecord==None, then we are doing a Register, instead of an
            # update.
            oldIdList = []
            containerId = record.get_pointer()

        # add people who are in the new list, but not the oldList
        for personId in newIdList:
            if not (personId in oldIdList):
                print "adding id", personId, "to", record.get_name()
                addFunc(self.pl_auth, personId, containerId)

        # remove people who are in the old list, but not the new list
        for personId in oldIdList:
            if not (personId in newIdList):
                print "removing id", personId, "from", record.get_name()
                delFunc(self.pl_auth, personId, containerId)
def update_membership(self, oldRecord, record):
if record.type == "slice":
self.update_membership_list(oldRecord, record, 'researcher',
self.shell.AddPersonToSlice,
self.shell.DeletePersonFromSlice)
elif record.type == "sa":
# TODO
pass
elif record.type == "ma":
# TODO
pass
##
# GENI API: register
#
# Register an object with the registry. In addition to being stored in the
# Geni database, the appropriate records will also be created in the
# PLC databases
#
# @param cred credential string
# @param record_dict dictionary containing record fields
def register(self, cred, record_dict):
self.decode_authentication(cred, "register")
record = GeniRecord(dict = record_dict)
type = record.get_type()
name = record.get_name()
auth_name = get_authority(name)
self.verify_object_permission(auth_name)
auth_info = self.get_auth_info(auth_name)
table = self.get_auth_table(auth_name)
pkey = None
# check if record already exists
existing_records = table.resolve(type, name)
if existing_records:
raise ExistingRecord(name)
if (type == "sa") or (type=="ma"):
# update the tree
if not self.hierarchy.auth_exists(name):
self.hierarchy.create_auth(name)
# authorities are special since they are managed by the registry
# rather than by the caller. We create our own GID for the
# authority rather than relying on the caller to supply one.
# get the GID from the newly created authority
child_auth_info = self.get_auth_info(name)
gid = auth_info.get_gid_object()
record.set_gid(gid.save_to_string(save_parents=True))
geni_fields = record.get_geni_info()
site_fields = record.get_pl_info()
# if registering a sa, see if a ma already exists
# if registering a ma, see if a sa already exists
if (type == "sa"):
other_rec = table.resolve("ma", record.get_name())
elif (type == "ma"):
other_rec = table.resolve("sa", record.get_name())
if other_rec:
print "linking ma and sa to the same plc site"
pointer = other_rec[0].get_pointer()
else:
geni_fields_to_pl_fields(type, name, geni_fields, site_fields)
print "adding site with fields", site_fields
pointer = self.shell.AddSite(self.pl_auth, site_fields)
record.set_pointer(pointer)
elif (type == "slice"):
geni_fields = record.get_geni_info()
slice_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, slice_fields)
pointer = self.shell.AddSlice(self.pl_auth, slice_fields)
record.set_pointer(pointer)
elif (type == "user"):
geni_fields = record.get_geni_info()
user_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, user_fields)
pointer = self.shell.AddPerson(self.pl_auth, user_fields)
record.set_pointer(pointer)
elif (type == "node"):
geni_fields = record.get_geni_info()
node_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, node_fields)
login_base = hrn_to_pl_login_base(auth_name)
print "calling addnode with", login_base, node_fields
pointer = self.shell.AddNode(self.pl_auth, login_base, node_fields)
record.set_pointer(pointer)
else:
raise UnknownGeniType(type)
table.insert(record)
# update membership for researchers, pis, owners, operators
self.update_membership(None, record)
return record.get_gid_object().save_to_string(save_parents=True)
    ##
    # GENI API: remove
    #
    # Remove an object from the registry. If the object represents a PLC object,
    # then the PLC records will also be removed.
    #
    # @param cred credential string
    # @param type type of the record (user | slice | node | sa | ma); used with
    #     hrn to look up the current copy of the record in the Geni database,
    #     to make sure that the appropriate record is removed
    # @param hrn human readable name of the object to remove
    #
    # @return True on success; raises RecordNotFound / UnknownGeniType otherwise
    def remove(self, cred, type, hrn):
        self.decode_authentication(cred, "remove")

        self.verify_object_permission(hrn)

        auth_name = get_authority(hrn)
        table = self.get_auth_table(auth_name)

        record_list = table.resolve(type, hrn)
        if not record_list:
            raise RecordNotFound(hrn)
        record = record_list[0]

        # TODO: sa, ma
        if type == "user":
            self.shell.DeletePerson(self.pl_auth, record.get_pointer())
        elif type == "slice":
            self.shell.DeleteSlice(self.pl_auth, record.get_pointer())
        elif type == "node":
            self.shell.DeleteNode(self.pl_auth, record.get_pointer())
        elif (type == "sa") or (type == "ma"):
            # check whether the sibling record (the other of sa/ma) exists
            if (type == "sa"):
                other_rec = table.resolve("ma", record.get_name())
            elif (type == "ma"):
                other_rec = table.resolve("sa", record.get_name())

            if other_rec:
                # sa and ma both map to a site, so if we are deleting one
                # but the other still exists, then do not delete the site
                print "not removing site", record.get_name(), "because either sa or ma still exists"
                pass
            else:
                print "removing site", record.get_name()
                self.shell.DeleteSite(self.pl_auth, record.get_pointer())
        else:
            raise UnknownGeniType(type)

        table.remove(record)

        return True
    ##
    # GENI API: Update
    #
    # Update an object in the registry. Currently, this only updates the
    # PLC information associated with the record. The Geni fields (name, type,
    # GID) are fixed.
    #
    # The record is expected to have the pl_info field filled in with the data
    # that should be updated.
    #
    # TODO: The geni_info member of the record should be parsed and the pl_info
    # adjusted as necessary (add/remove users from a slice, etc)
    #
    # @param cred credential string specifying rights of the caller
    # @param record_dict a record dictionary to be updated
    def update(self, cred, record_dict):
        self.decode_authentication(cred, "update")

        record = GeniRecord(dict = record_dict)
        type = record.get_type()

        self.verify_object_permission(record.get_name())

        auth_name = get_authority(record.get_name())
        if not auth_name:
            # a top-level authority is its own authority
            auth_name = record.get_name()
        table = self.get_auth_table(auth_name)

        # make sure the record exists
        existing_record_list = table.resolve(type, record.get_name())
        if not existing_record_list:
            raise RecordNotFound(record.get_name())
        existing_record = existing_record_list[0]

        # Update_membership needs the membership lists in the existing record
        # filled in, so it can see if members were added or removed
        self.fill_record_info(existing_record)

        # Use the pointer from the existing record, not the one that the user
        # gave us. This prevents the user from inserting a forged pointer
        pointer = existing_record.get_pointer()

        # update the PLC information that was specified with the record
        if (type == "sa") or (type == "ma"):
            self.shell.UpdateSite(self.pl_auth, pointer, record.get_pl_info())

        elif type == "slice":
            self.shell.UpdateSlice(self.pl_auth, pointer, record.get_pl_info())

        elif type == "user":
            # SMBAKER: UpdatePerson only allows a limited set of fields to be
            # updated. Ideally we should have a more generic way of doing
            # this. I copied the field names from UpdatePerson.py...
            update_fields = {}
            all_fields = record.get_pl_info()
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                           'enabled']:
                    update_fields[key] = all_fields[key]
            self.shell.UpdatePerson(self.pl_auth, pointer, update_fields)

        elif type == "node":
            self.shell.UpdateNode(self.pl_auth, pointer, record.get_pl_info())

        else:
            raise UnknownGeniType(type)

        # update membership for researchers, pis, owners, operators
        self.update_membership(existing_record, record)
##
# List the records in an authority. The objectGID in the supplied credential
# should name the authority that will be listed.
#
# TODO: List doesn't take an hrn and uses the hrn contained in the
# objectGid of the credential. Does this mean the only way to list an
# authority is by having a credential for that authority?
#
# @param cred credential string specifying rights of the caller
#
# @return list of record dictionaries
def list(self, cred, auth_hrn):
self.decode_authentication(cred, "list")
if not self.hierarchy.auth_exists(auth_hrn):
raise MissingAuthority(auth_hrn)
table = self.get_auth_table(auth_hrn)
records = table.list()
good_records = []
for record in records:
try:
self.fill_record_info(record)
good_records.append(record)
except PlanetLabRecordDoesNotExist:
# silently drop the ones that are missing in PL.
# is this the right thing to do?
print "ignoring geni record ", record.get_name(), " because pl record does not exist"
table.remove(record)
dicts = []
for record in good_records:
dicts.append(record.as_dict())
return dicts
return dict_list
    ##
    # Resolve a record. This is an internal version of the Resolve API call
    # and returns records in record object format rather than dictionaries
    # that may be sent over XMLRPC.
    #
    # Side effect: records whose PLC counterpart has disappeared are removed
    # from the table rather than returned.
    #
    # @param type type of record to resolve (user | sa | ma | slice | node), or "*"
    # @param name human readable name of object
    # @param must_exist if True, throw an exception if no records are found
    #
    # @return a list of record objects, or an empty list []
    def resolve_raw(self, type, name, must_exist=True):
        auth_name = get_authority(name)
        if not auth_name:
            # a top-level authority is its own authority
            auth_name = name

        table = self.get_auth_table(auth_name)

        records = table.resolve(type, name)

        if (not records) and must_exist:
            raise RecordNotFound(name)

        good_records = []
        for record in records:
            try:
                self.fill_record_info(record)
                good_records.append(record)
            except PlanetLabRecordDoesNotExist:
                # silently drop the ones that are missing in PL.
                # is this the right thing to do?
                print "ignoring geni record ", record.get_name(), "because pl record does not exist"
                table.remove(record)

        return good_records
##
# GENI API: Resolve
#
# This is a wrapper around resolve_raw that converts records objects into
# dictionaries before returning them to the user.
#
# @param cred credential string authorizing the caller
# @param name human readable name to resolve
#
# @return a list of record dictionaries, or an empty list
def resolve(self, cred, name):
self.decode_authentication(cred, "resolve")
records = self.resolve_raw("*", name)
dicts = []
for record in records:
dicts.append(record.as_dict())
return dicts
##
# GENI API: get_gid
#
# Retrieve the GID for an object. This function looks up a record in the
# registry and returns the GID of the record if it exists.
# TODO: Is this function needed? It's a shortcut for Resolve()
#
# @param name hrn to look up
#
# @return the string representation of a GID object
def get_gid(self, name):
self.verify_object_belongs_to_me(name)
records = self.resolve_raw("*", name)
gid_string_list = []
for record in records:
gid = record.get_gid_object()
gid_string_list.append(gid.save_to_string(save_parents=True))
return gid_string_list
##
# Determine tje rights that an object should have. The rights are entirely
# dependent on the type of the object. For example, users automatically
# get "refresh", "resolve", and "info".
#
# @param type the type of the object (user | sa | ma | slice | node)
# @param name human readable name of the object (not used at this time)
#
# @return RightList object containing rights
def determine_rights(self, type, name):
rl = RightList()
# rights seem to be somewhat redundant with the type of the credential.
# For example, a "sa" credential implies the authority right, because
# a sa credential cannot be issued to a user who is not an owner of
# the authority
if type == "user":
rl.add("refresh")
rl.add("resolve")
rl.add("info")
elif type == "sa":
rl.add("authority,sa")
elif type == "ma":
rl.add("authority,ma")
elif type == "slice":
rl.add("refresh")
rl.add("embed")
rl.add("bind")
rl.add("control")
rl.add("info")
elif type == "component":
rl.add("operator")
return rl
##
# GENI API: Get_self_credential
#
# Get_self_credential a degenerate version of get_credential used by a
# client to get his initial credential when he doesn't have one. This is
# the same as get_credential(..., cred=None,...).
#
# The registry ensures that the client is the principal that is named by
# (type, name) by comparing the public key in the record's GID to the
# private key used to encrypt the client-side of the HTTPS connection. Thus
# it is impossible for one principal to retrieve another principal's
# credential without having the appropriate private key.
#
# @param type type of object (user | slice | sa | ma | node
# @param name human readable name of object
#
# @return the string representation of a credential object
def get_self_credential(self, type, name):
self.verify_object_belongs_to_me(name)
auth_hrn = get_authority(name)
if not auth_hrn:
auth_hrn = name
auth_info = self.get_auth_info(auth_hrn)
# find a record that matches
records = self.resolve_raw(type, name, must_exist=True)
record = records[0]
gid = record.get_gid_object()
peer_cert = self.server.peer_cert
if not peer_cert.is_pubkey(gid.get_pubkey()):
raise ConnectionKeyGIDMismatch(gid.get_subject())
# create the credential
gid = record.get_gid_object()
cred = Credential(subject = gid.get_subject())
cred.set_gid_caller(gid)
cred.set_gid_object(gid)
cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
cred.set_pubkey(gid.get_pubkey())
rl = self.determine_rights(type, name)
cred.set_privileges(rl)
# determine the type of credential that we want to use as a parent for
# this credential.
if (type == "ma") or (type == "node"):
auth_kind = "authority,ma"
else: # user, slice, sa
auth_kind = "authority,sa"
cred.set_parent(self.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
cred.encode()
cred.sign()
return cred.save_to_string(save_parents=True)
    ##
    # verify_cancreate_credential
    #
    # Verify that a user can retrieve a particular type of credential. For
    # slices, the user must be on the researcher list. For SA and MA the user
    # must be on the pi and operator lists respectively. The root registry
    # authority bypasses these checks entirely.
    #
    # @param src_cred credential of the caller
    # @param record the record the caller wants a credential for
    def verify_cancreate_credential(self, src_cred, record):
        type = record.get_type()
        cred_object_hrn = src_cred.get_gid_object().get_hrn()
        config = Config()
        # the root authority may create any credential
        if cred_object_hrn in [config.GENI_REGISTRY_ROOT_AUTH]:
            return
        if type=="slice":
            researchers = record.get_geni_info().get("researcher", [])
            if not (cred_object_hrn in researchers):
                raise PermissionError(cred_object_hrn + " is not in researcher list for " + record.get_name())
        elif type == "sa":
            pis = record.get_geni_info().get("pi", [])
            if not (cred_object_hrn in pis):
                raise PermissionError(cred_object_hrn + " is not in pi list for " + record.get_name())
        elif type == "ma":
            operators = record.get_geni_info().get("operator", [])
            if not (cred_object_hrn in operators):
                raise PermissionError(cred_object_hrn + " is not in operator list for " + record.get_name())
##
# GENI API: Get_credential
#
# Retrieve a credential for an object.
#
# If cred==None, then the behavior reverts to get_self_credential()
#
# @param cred credential object specifying rights of the caller
# @param type type of object (user | slice | sa | ma | node)
# @param name human readable name of object
#
# @return the string representation of a credental object
def get_credential(self, cred, type, name):
if not cred:
return get_self_credential(self, type, name)
self.decode_authentication(cred, "getcredential")
self.verify_object_belongs_to_me(name)
auth_hrn = get_authority(name)
if not auth_hrn:
auth_hrn = name
auth_info = self.get_auth_info(auth_hrn)
records = self.resolve_raw(type, name, must_exist=True)
record = records[0]
# verify_cancreate_credential requires that the member lists
# (researchers, pis, etc) be filled in
self.fill_record_info(record)
self.verify_cancreate_credential(self.client_cred, record)
# TODO: Check permission that self.client_cred can access the object
object_gid = record.get_gid_object()
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(self.client_gid)
new_cred.set_gid_object(object_gid)
new_cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
new_cred.set_pubkey(object_gid.get_pubkey())
rl = self.determine_rights(type, name)
new_cred.set_privileges(rl)
# determine the type of credential that we want to use as a parent for
# this credential.
if (type == "ma") or (type == "node"):
auth_kind = "authority,ma"
else: # user, slice, sa
auth_kind = "authority,sa"
new_cred.set_parent(self.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
new_cred.encode()
new_cred.sign()
return new_cred.save_to_string(save_parents=True)
##
# GENI API: get_ticket
#
# Retrieve a ticket. This operation is currently implemented on PLC
# only (see SFA, engineering decisions); it is not implemented on
# components.
#
# The ticket is filled in with information from the PLC database. This
# information includes resources, and attributes such as user keys and
# initscripts.
#
# @param cred credential string
# @param name name of the slice to retrieve a ticket for
# @param rspec resource specification dictionary
#
# @return the string representation of a ticket object
def get_ticket(self, cred, name, rspec):
self.decode_authentication(cred, "getticket")
self.verify_object_belongs_to_me(name)
self.verify_object_permission(name)
# XXX much of this code looks like get_credential... are they so similar
# that they should be combined?
auth_hrn = get_authority(name)
if not auth_hrn:
auth_hrn = name
auth_info = self.get_auth_info(auth_hrn)
records = self.resolve_raw("slice", name, must_exist=True)
record = records[0]
object_gid = record.get_gid_object()
new_ticket = Ticket(subject = object_gid.get_subject())
new_ticket.set_gid_caller(self.client_gid)
new_ticket.set_gid_object(object_gid)
new_ticket.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
new_ticket.set_pubkey(object_gid.get_pubkey())
self.fill_record_info(record)
(attributes, rspec) = self.record_to_slice_info(record)
new_ticket.set_attributes(attributes)
new_ticket.set_rspec(rspec)
new_ticket.set_parent(AuthHierarchy.get_auth_ticket(auth_hrn))
new_ticket.encode()
new_ticket.sign()
return new_ticket.save_to_string(save_parents=True)
##
# GENI_API: Create_gid
#
# Create a new GID. For MAs and SAs that are physically located on the
# registry, this allows a owner/operator/PI to create a new GID and have it
# signed by his respective authority.
#
# @param cred credential of caller
# @param name hrn for new GID
# @param uuid unique identifier for new GID
# @param pkey_string public-key string (TODO: why is this a string and not a keypair object?)
#
# @return the string representation of a GID object
def create_gid(self, cred, name, uuid, pubkey_str):
self.decode_authentication(cred, "getcredential")
self.verify_object_belongs_to_me(name)
self.verify_object_permission(name)
if uuid == None:
uuid = create_uuid()
pkey = Keypair()
pkey.load_pubkey_from_string(pubkey_str)
gid = self.hierarchy.create_gid(name, uuid, pkey)
return gid.save_to_string(save_parents=True)
# bug in loadCredential(), commented out for now
##
# Registry is a GeniServer that implements the Registry interface
import tempfile
import os
import time
import sys
from geni.util.credential import Credential
from geni.util.hierarchy import Hierarchy
from geni.util.trustedroot import TrustedRootList
from geni.util.cert import Keypair, Certificate
from geni.util.gid import GID, create_uuid
from geni.util.geniserver import GeniServer
from geni.util.geniclient import GeniClient
from geni.util.record import GeniRecord
from geni.util.rights import RightList
from geni.util.genitable import GeniTable
from geni.util.geniticket import Ticket
from geni.util.excep import *
from geni.util.misc import *
from geni.util.config import *
from geni.util.storage import *
##
# Convert geni fields to PLC fields for use when registering up updating
# registry record in the PLC database
#
# @param type type of record (user, slice, ...)
# @param hrn human readable name
# @param geni_fields dictionary of geni fields
# @param pl_fields dictionary of PLC fields (output)
def geni_fields_to_pl_fields(type, hrn, geni_fields, pl_fields):
    """
    Populate the PLC fields dictionary for a record of the given type,
    filling in defaults for anything the caller did not supply. pl_fields
    is modified in place. Raises MissingGeniInfo when a required geni field
    (user email, node dns) is absent from both dictionaries.
    """
    if type == "user":
        if "email" not in pl_fields:
            if "email" not in geni_fields:
                raise MissingGeniInfo("email")
            pl_fields["email"] = geni_fields["email"]
        pl_fields.setdefault("first_name", "geni")
        pl_fields.setdefault("last_name", hrn)

    elif type == "slice":
        pl_fields.setdefault("instantiation", "delegated")  # "plc-instantiated"
        if "name" not in pl_fields:
            pl_fields["name"] = hrn_to_pl_slicename(hrn)
        pl_fields.setdefault("max_nodes", 10)

    elif type == "node":
        if "hostname" not in pl_fields:
            if "dns" not in geni_fields:
                raise MissingGeniInfo("dns")
            pl_fields["hostname"] = geni_fields["dns"]
        pl_fields.setdefault("model", "geni")

    elif type == "sa":
        pl_fields["login_base"] = hrn_to_pl_login_base(hrn)
        pl_fields.setdefault("name", hrn)
        pl_fields.setdefault("abbreviated_name", hrn)
        pl_fields.setdefault("enabled", True)
        pl_fields.setdefault("is_public", True)
##
# Registry is a GeniServer that serves registry and slice operations at PLC.
class Registry(GeniServer):
    ##
    # Create a new registry object.
    #
    # @param ip the ip address to listen on
    # @param port the port to listen on
    # @param key_file private key filename of registry
    # @param cert_file certificate filename containing public key (could be a GID file)
    # @param config path of the geni config file to load
    def __init__(self, ip, port, key_file, cert_file, config = '/usr/share/geniwrapper/geni/util/geni_config'):
        GeniServer.__init__(self, ip, port, key_file, cert_file)

        # get PL account settings from config module
        self.pl_auth = get_pl_auth()

        # connect to planetlab: a "Url" key means a remote PLCAPI, otherwise
        # use the local API shell
        if "Url" in self.pl_auth:
            self.connect_remote_shell()
        else:
            self.connect_local_shell()

        self.key_file = key_file
        self.cert_file = cert_file
        self.config = Config(config)
        # NOTE(review): basedir already ends in os.sep, so the concatenations
        # below produce doubled separators (harmless on POSIX); consider
        # os.path.join -- confirm before changing paths on disk.
        self.basedir = self.config.GENI_BASE_DIR + os.sep
        self.server_basedir = self.basedir + os.sep + "geni" + os.sep
        self.hrn = self.config.GENI_INTERFACE_HRN

        # get peer registry information
        registries_file = self.server_basedir + os.sep + 'registries.xml'
        connection_dict = {'hrn': '', 'addr': '', 'port': ''}
        self.registry_info = XmlStorage(registries_file, {'registries': {'registry': [connection_dict]}})
        self.registry_info.load()

        self.connectRegistry()
        #self.loadCredential()
        self.connectRegistries()
    ##
    # Connect to a remote shell via XMLRPC
    def connect_remote_shell(self):
        # imported lazily so a local-API deployment need not have it
        from geni.util import remoteshell
        self.shell = remoteshell.RemoteShell()
    ##
    # Connect to a local shell via local API functions
    def connect_local_shell(self):
        # imported lazily so remote deployments need not install PLC
        import PLC.Shell
        self.shell = PLC.Shell.Shell(globals = globals())
##
# Register the server RPCs for the registry
def register_functions(self):
GeniServer.register_functions(self)
# registry interface
self.server.register_function(self.create_gid)
self.server.register_function(self.get_self_credential)
self.server.register_function(self.get_credential)
self.server.register_function(self.get_gid)
self.server.register_function(self.get_ticket)
self.server.register_function(self.register)
self.server.register_function(self.remove)
self.server.register_function(self.update)
self.server.register_function(self.list)
self.server.register_function(self.resolve)
    def loadCredential(self):
        """
        Attempt to load credential from file if it exists; on IOError,
        fetch a fresh credential from the registry instead. Result is
        cached on self.credential.
        """
        # see if this file exists
        # NOTE(review): reads a "reg."-prefixed file while
        # getCredentialFromRegistry() writes "smgr."-prefixed files --
        # confirm which prefix is intended.
        ma_cred_filename = self.server_basedir + os.sep + "reg." + self.hrn + ".sa.cred"
        try:
            self.credential = Credential(filename = ma_cred_filename)
        except IOError:
            self.credential = self.getCredentialFromRegistry()
    def getCredentialFromRegistry(self):
        """
        Get our current credential from the registry: first a self
        credential (cred=None), then the 'sa' credential obtained with it.
        Both are saved to disk under server_basedir.

        @return the 'sa' Credential object
        """
        # get self credential
        self_cred_filename = self.server_basedir + os.sep + "smgr." + self.hrn + ".cred"
        self_cred = self.registry.get_credential(None, 'ma', self.hrn)
        self_cred.save_to_file(self_cred_filename, save_parents=True)
        # get ma credential
        ma_cred_filename = self.server_basedir + os.sep + "smgr." + self.hrn + ".sa.cred"
        ma_cred = self.registry.get_credential(self_cred, 'sa', self.hrn)
        ma_cred.save_to_file(ma_cred_filename, save_parents=True)
        return ma_cred
    def connectRegistry(self):
        """
        Connect to the registry named by GENI_REGISTRY_HOSTNAME /
        GENI_REGISTRY_PORT in the config; the GeniClient is stored on
        self.registry.
        """
        # connect to registry using GeniClient
        address = self.config.GENI_REGISTRY_HOSTNAME
        port = self.config.GENI_REGISTRY_PORT
        url = 'http://%(address)s:%(port)s' % locals()
        self.registry = GeniClient(url, self.key_file, self.cert_file)
def connectRegistries(self):
"""
Get connection details for the trusted peer registries from file and
create an GeniClient connection to each.
"""
self.registries= {}
registries = self.registry_info['registries']['registry']
if isinstance(registries, dict):
registries = [registries]
if isinstance(registries, list):
for registry in registries:
# create xmlrpc connection using GeniClient
hrn, address, port = registry['hrn'], registry['addr'], registry['port']
url = 'http://%(address)s:%(port)s' % locals()
self.registries[hrn] = GeniClient(url, self.key_file, self.cert_file)
##
# Given an authority name, return the information for that authority. This
# is basically a stub that calls the hierarchy module.
#
# @param auth_hrn human readable name of authority
def get_auth_info(self, auth_hrn):
    """Return the authority info for auth_hrn; delegates to the hierarchy."""
    return self.hierarchy.get_auth_info(auth_hrn)
##
# Given an authority name, return the database table for that authority. If
# the database table does not exist, then one will be automatically
# created.
#
# @param auth_name human readable name of authority
def get_auth_table(self, auth_name):
    """
    Return the GeniTable for the authority named auth_name, creating the
    underlying database table on first use.
    """
    auth_info = self.get_auth_info(auth_name)
    table = GeniTable(hrn=auth_name,
                      cninfo=auth_info.get_dbinfo())
    # if the table doesn't exist, then it means we haven't put any records
    # into this authority yet.
    if not table.exists():
        print "Registry: creating table for authority", auth_name
        table.create()
    return table
##
# Verify that an authority belongs to this registry. This is basically left
# up to the implementation of the hierarchy module. If the specified name
# does not belong to this registry, an exception is thrown indicating the
# caller should contact someone else.
#
# @param auth_name human readable name of authority
def verify_auth_belongs_to_me(self, name):
    """
    Verify that the authority 'name' is managed by this registry.

    get_auth_info raises when the authority is unknown, so the lookup
    itself is the check; its return value is deliberately discarded.
    """
    self.get_auth_info(name)
##
# Verify that an object belongs to this registry. By extension, this implies
# that the authority that owns the object belongs to this registry. If the
# object does not belong to this registry, then an exception is thrown.
#
# @param name human readable name of object
def verify_object_belongs_to_me(self, name):
    """
    Verify that object 'name' (via its owning authority) belongs to this
    registry; an exception is raised if it does not.
    """
    owning_auth = get_authority(name)
    if owning_auth:
        self.verify_auth_belongs_to_me(owning_auth)
    # no owning authority means the root, which is assumed to belong to
    # the registry by default
    # TODO: is this true?
##
# Verify that the object_gid that was specified in the credential allows
# permission to the object 'name'. This is done by a simple prefix test.
# For example, an object_gid for planetlab.us.arizona would match the
# objects planetlab.us.arizona.slice1 and planetlab.us.arizona.
#
# @param name human readable name to test
def verify_object_permission(self, name):
    """
    Check that the credential's object GID permits access to 'name' via
    a simple hrn prefix test (exact match or dotted child); raises
    PermissionError otherwise.
    """
    allowed_prefix = self.object_gid.get_hrn()
    if name == allowed_prefix or name.startswith(allowed_prefix + "."):
        return
    raise PermissionError(name)
##
# Fill in the planetlab-specific fields of a Geni record. This involves
# calling the appropriate PLC methods to retrieve the database record for
# the object.
#
# PLC data is filled into the pl_info field of the record.
#
# @param record record to fill in fields (in/out param)
def fill_record_pl_info(self, record):
    """
    Fill in the PLC-specific (pl_info) portion of a Geni record by
    looking up the PLC object that the record's pointer refers to.

    Raises UnknownGeniType for an unrecognized record type and
    PlanetLabRecordDoesNotExist when PLC no longer has the object.
    """
    type = record.get_type()
    pointer = record.get_pointer()

    # records with pointer==-1 do not have plc info associated with them.
    # for example, the top level authority records which are
    # authorities, but not PL "sites"
    if pointer == -1:
        record.set_pl_info({})
        return

    # sa/ma both map to PLC sites; the other types map one-to-one
    if (type == "sa") or (type == "ma"):
        pl_res = self.shell.GetSites(self.pl_auth, [pointer])
    elif (type == "slice"):
        pl_res = self.shell.GetSlices(self.pl_auth, [pointer])
    elif (type == "user"):
        pl_res = self.shell.GetPersons(self.pl_auth, [pointer])
    elif (type == "node"):
        pl_res = self.shell.GetNodes(self.pl_auth, [pointer])
    else:
        raise UnknownGeniType(type)

    if not pl_res:
        # the planetlab record no longer exists
        # TODO: delete the geni record ?
        raise PlanetLabRecordDoesNotExist(record.get_name())

    record.set_pl_info(pl_res[0])
##
# Look up user records given PLC user-ids. This is used as part of the
# process for reverse-mapping PLC records into Geni records.
#
# @param auth_table database table for the authority that holds the user records
# @param user_id_list list of user ids
# @param role either "*" or a string describing the role to look for ("pi", "user", ...)
#
# TODO: This function currently only searches one authority because it would
# be inefficient to brute-force search all authorities for a user id. The
# solution would likely be to implement a reverse mapping of user-id to
# (type, hrn) pairs.
def lookup_users(self, auth_table, user_id_list, role="*"):
    """
    Map PLC user ids to Geni user record names within one authority.

    Only users whose PLC roles include 'role' are returned; role="*"
    matches every user.
    """
    names = []
    for pointer in user_id_list:
        for rec in auth_table.find("user", pointer, "pointer"):
            # membership lists live in pl_info, so fill the record first
            self.fill_record_info(rec)
            roles = rec.get_pl_info().get("roles")
            if role == "*" or role in roles:
                names.append(rec.get_name())
    return names
##
# Fill in the geni-specific fields of the record.
#
# Note: It is assumed the fill_record_pl_info() has already been performed
# on the record.
def fill_record_geni_info(self, record):
    """
    Fill in the geni_info portion of the record from its pl_info.

    Note: fill_record_pl_info() must already have been performed on the
    record, since the membership lists are derived from pl_info.
    """
    geni_info = {}
    type = record.get_type()
    if (type == "slice"):
        # researchers = all users of the slice's authority that are
        # attached to the slice
        auth_table = self.get_auth_table(get_authority(record.get_name()))
        person_ids = record.pl_info.get("person_ids", [])
        researchers = self.lookup_users(auth_table, person_ids)
        geni_info['researcher'] = researchers
    elif (type == "sa"):
        auth_table = self.get_auth_table(record.get_name())
        person_ids = record.pl_info.get("person_ids", [])
        pis = self.lookup_users(auth_table, person_ids, "pi")
        geni_info['pi'] = pis
        # TODO: OrganizationName
    elif (type == "ma"):
        # ma records carry both an operator list (techs) and an owner
        # list (admins)
        auth_table = self.get_auth_table(record.get_name())
        person_ids = record.pl_info.get("person_ids", [])
        operators = self.lookup_users(auth_table, person_ids, "tech")
        geni_info['operator'] = operators
        # TODO: OrganizationName
        auth_table = self.get_auth_table(record.get_name())
        person_ids = record.pl_info.get("person_ids", [])
        owners = self.lookup_users(auth_table, person_ids, "admin")
        geni_info['owner'] = owners
    elif (type == "node"):
        geni_info['dns'] = record.pl_info.get("hostname", "")
        # TODO: URI, LatLong, IP, DNS
    elif (type == "user"):
        geni_info['email'] = record.pl_info.get("email", "")
        # TODO: PostalAddress, Phone
    record.set_geni_info(geni_info)
##
# Given a Geni record, fill in the PLC-specific and Geni-specific fields
# in the record.
def fill_record_info(self, record):
    """Populate both the PLC and Geni portions of 'record' in place."""
    # pl_info must be filled first; the geni fields are derived from it
    self.fill_record_pl_info(record)
    self.fill_record_geni_info(record)
def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
    """
    Diff one membership list (e.g. 'researcher') between oldRecord and
    record, and apply the difference to PLC via addFunc/delFunc.

    oldRecord may be None (Register); then everyone on the new list is
    added and the container id is taken from the new record.
    """
    # get a list of the HRNs that are members of the old and new records
    if oldRecord:
        if oldRecord.pl_info == None:
            oldRecord.pl_info = {}
        oldList = oldRecord.get_geni_info().get(listName, [])
    else:
        oldList = []
    newList = record.get_geni_info().get(listName, [])

    # if the lists are the same, then we don't have to update anything
    if (oldList == newList):
        return

    # build a list of the new person ids, by looking up each person to get
    # their pointer
    newIdList = []
    for hrn in newList:
        userRecord = self.resolve_raw("user", hrn)[0]
        newIdList.append(userRecord.get_pointer())

    # build a list of the old person ids from the person_ids field of the
    # pl_info
    if oldRecord:
        oldIdList = oldRecord.pl_info.get("person_ids", [])
        containerId = oldRecord.get_pointer()
    else:
        # if oldRecord==None, then we are doing a Register, instead of an
        # update.
        oldIdList = []
        containerId = record.get_pointer()

    # add people who are in the new list, but not the oldList
    for personId in newIdList:
        if not (personId in oldIdList):
            print "adding id", personId, "to", record.get_name()
            addFunc(self.pl_auth, personId, containerId)

    # remove people who are in the old list, but not the new list
    for personId in oldIdList:
        if not (personId in newIdList):
            print "removing id", personId, "from", record.get_name()
            delFunc(self.pl_auth, personId, containerId)
def update_membership(self, oldRecord, record):
    """
    Synchronize PLC membership lists after a record is registered or
    updated. Only slices are handled so far; sa/ma are still TODO.
    """
    record_type = record.type
    if record_type == "slice":
        self.update_membership_list(oldRecord, record, 'researcher',
                                    self.shell.AddPersonToSlice,
                                    self.shell.DeletePersonFromSlice)
    elif record_type in ("sa", "ma"):
        # TODO: sync pi/operator lists for authority records
        pass
##
# GENI API: register
#
# Register an object with the registry. In addition to being stored in the
# Geni database, the appropriate records will also be created in the
# PLC databases
#
# @param cred credential string
# @param record_dict dictionary containing record fields
def register(self, cred, record_dict):
self.decode_authentication(cred, "register")
record = GeniRecord(dict = record_dict)
type = record.get_type()
name = record.get_name()
auth_name = get_authority(name)
self.verify_object_permission(auth_name)
auth_info = self.get_auth_info(auth_name)
table = self.get_auth_table(auth_name)
pkey = None
# check if record already exists
existing_records = table.resolve(type, name)
if existing_records:
raise ExistingRecord(name)
if (type == "sa") or (type=="ma"):
# update the tree
if not self.hierarchy.auth_exists(name):
self.hierarchy.create_auth(name)
# authorities are special since they are managed by the registry
# rather than by the caller. We create our own GID for the
# authority rather than relying on the caller to supply one.
# get the GID from the newly created authority
child_auth_info = self.get_auth_info(name)
gid = auth_info.get_gid_object()
record.set_gid(gid.save_to_string(save_parents=True))
geni_fields = record.get_geni_info()
site_fields = record.get_pl_info()
# if registering a sa, see if a ma already exists
# if registering a ma, see if a sa already exists
if (type == "sa"):
other_rec = table.resolve("ma", record.get_name())
elif (type == "ma"):
other_rec = table.resolve("sa", record.get_name())
if other_rec:
print "linking ma and sa to the same plc site"
pointer = other_rec[0].get_pointer()
else:
geni_fields_to_pl_fields(type, name, geni_fields, site_fields)
print "adding site with fields", site_fields
pointer = self.shell.AddSite(self.pl_auth, site_fields)
record.set_pointer(pointer)
elif (type == "slice"):
geni_fields = record.get_geni_info()
slice_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, slice_fields)
pointer = self.shell.AddSlice(self.pl_auth, slice_fields)
record.set_pointer(pointer)
elif (type == "user"):
geni_fields = record.get_geni_info()
user_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, user_fields)
pointer = self.shell.AddPerson(self.pl_auth, user_fields)
record.set_pointer(pointer)
elif (type == "node"):
geni_fields = record.get_geni_info()
node_fields = record.get_pl_info()
geni_fields_to_pl_fields(type, name, geni_fields, node_fields)
login_base = hrn_to_pl_login_base(auth_name)
print "calling addnode with", login_base, node_fields
pointer = self.shell.AddNode(self.pl_auth, login_base, node_fields)
record.set_pointer(pointer)
else:
raise UnknownGeniType(type)
table.insert(record)
# update membership for researchers, pis, owners, operators
self.update_membership(None, record)
return record.get_gid_object().save_to_string(save_parents=True)
##
# GENI API: remove
#
# Remove an object from the registry. If the object represents a PLC object,
# then the PLC records will also be removed.
#
# @param cred credential string
# @param record_dict dictionary containing record fields. The only relevant
# fields of the record are 'name' and 'type', which are used to lookup
# the current copy of the record in the Geni database, to make sure
# that the appropriate record is removed.
def remove(self, cred, type, hrn):
    """
    GENI API: remove

    Remove an object from the registry. If the object represents a PLC
    object, the PLC record is removed too (a site only when neither its
    sa nor ma record remains).

    @param cred credential string
    @param type type of record (user | slice | node | sa | ma)
    @param hrn human readable name of the object
    @return True on success
    """
    self.decode_authentication(cred, "remove")

    self.verify_object_permission(hrn)

    auth_name = get_authority(hrn)
    table = self.get_auth_table(auth_name)

    record_list = table.resolve(type, hrn)
    if not record_list:
        raise RecordNotFound(hrn)
    record = record_list[0]

    # TODO: sa, ma
    if type == "user":
        self.shell.DeletePerson(self.pl_auth, record.get_pointer())
    elif type == "slice":
        self.shell.DeleteSlice(self.pl_auth, record.get_pointer())
    elif type == "node":
        self.shell.DeleteNode(self.pl_auth, record.get_pointer())
    elif (type == "sa") or (type == "ma"):
        if (type == "sa"):
            other_rec = table.resolve("ma", record.get_name())
        elif (type == "ma"):
            other_rec = table.resolve("sa", record.get_name())

        if other_rec:
            # sa and ma both map to a site, so if we are deleting one
            # but the other still exists, then do not delete the site
            print "not removing site", record.get_name(), "because either sa or ma still exists"
            pass
        else:
            print "removing site", record.get_name()
            self.shell.DeleteSite(self.pl_auth, record.get_pointer())
    else:
        raise UnknownGeniType(type)

    table.remove(record)

    return True
##
# GENI API: Update
#
# Update an object in the registry. Currently, this only updates the
# PLC information associated with the record. The Geni fields (name, type,
# GID) are fixed.
#
# The record is expected to have the pl_info field filled in with the data
# that should be updated.
#
# TODO: The geni_info member of the record should be parsed and the pl_info
# adjusted as necessary (add/remove users from a slice, etc)
#
# @param cred credential string specifying rights of the caller
# @param record a record dictionary to be updated
def update(self, cred, record_dict):
    """
    GENI API: Update

    Update an object in the registry. Only the PLC information is
    updated; the Geni fields (name, type, GID) are fixed. Membership
    lists in geni_info are synchronized via update_membership.

    @param cred credential string specifying rights of the caller
    @param record_dict record dictionary; its pl_info holds the new data
    """
    self.decode_authentication(cred, "update")

    record = GeniRecord(dict = record_dict)
    type = record.get_type()

    self.verify_object_permission(record.get_name())

    auth_name = get_authority(record.get_name())
    if not auth_name:
        # record IS an authority; use its own name
        auth_name = record.get_name()
    table = self.get_auth_table(auth_name)

    # make sure the record exists
    existing_record_list = table.resolve(type, record.get_name())
    if not existing_record_list:
        raise RecordNotFound(record.get_name())
    existing_record = existing_record_list[0]

    # Update_membership needs the membership lists in the existing record
    # filled in, so it can see if members were added or removed
    self.fill_record_info(existing_record)

    # Use the pointer from the existing record, not the one that the user
    # gave us. This prevents the user from inserting a forged pointer
    pointer = existing_record.get_pointer()

    # update the PLC information that was specified with the record
    if (type == "sa") or (type == "ma"):
        self.shell.UpdateSite(self.pl_auth, pointer, record.get_pl_info())

    elif type == "slice":
        self.shell.UpdateSlice(self.pl_auth, pointer, record.get_pl_info())

    elif type == "user":
        # SMBAKER: UpdatePerson only allows a limited set of fields to be
        # updated. Ideally we should have a more generic way of doing
        # this. I copied the field names from UpdatePerson.py...
        update_fields = {}
        all_fields = record.get_pl_info()
        for key in all_fields.keys():
            if key in ['first_name', 'last_name', 'title', 'email',
                       'password', 'phone', 'url', 'bio', 'accepted_aup',
                       'enabled']:
                update_fields[key] = all_fields[key]
        self.shell.UpdatePerson(self.pl_auth, pointer, update_fields)

    elif type == "node":
        self.shell.UpdateNode(self.pl_auth, pointer, record.get_pl_info())

    else:
        raise UnknownGeniType(type)

    # update membership for researchers, pis, owners, operators
    self.update_membership(existing_record, record)
##
# List the records in an authority. The objectGID in the supplied credential
# should name the authority that will be listed.
#
# TODO: List doesn't take an hrn and uses the hrn contained in the
# objectGid of the credential. Does this mean the only way to list an
# authority is by having a credential for that authority?
#
# @param cred credential string specifying rights of the caller
#
# @return list of record dictionaries
def list(self, cred, auth_hrn):
self.decode_authentication(cred, "list")
if not self.hierarchy.auth_exists(auth_hrn):
raise MissingAuthority(auth_hrn)
table = self.get_auth_table(auth_hrn)
records = table.list()
good_records = []
for record in records:
try:
self.fill_record_info(record)
good_records.append(record)
except PlanetLabRecordDoesNotExist:
# silently drop the ones that are missing in PL.
# is this the right thing to do?
print "ignoring geni record ", record.get_name(), " because pl record does not exist"
table.remove(record)
dicts = []
for record in good_records:
dicts.append(record.as_dict())
return dicts
return dict_list
##
# Resolve a record. This is an internal version of the Resolve API call
# and returns records in record object format rather than dictionaries
# that may be sent over XMLRPC.
#
# @param type type of record to resolve (user | sa | ma | slice | node)
# @param name human readable name of object
# @param must_exist if True, throw an exception if no records are found
#
# @return a list of record objects, or an empty list []
def resolve_raw(self, type, name, must_exist=True):
    """
    Internal version of the Resolve API call: returns record objects
    rather than XMLRPC-friendly dictionaries.

    @param type type of record to resolve (user | sa | ma | slice | node), "*" for any
    @param name human readable name of object
    @param must_exist if True, raise RecordNotFound when no records match
    @return a list of record objects, or an empty list []
    """
    auth_name = get_authority(name)
    if not auth_name:
        # name IS an authority; look in its own table
        auth_name = name

    table = self.get_auth_table(auth_name)

    records = table.resolve(type, name)

    if (not records) and must_exist:
        raise RecordNotFound(name)

    good_records = []
    for record in records:
        try:
            self.fill_record_info(record)
            good_records.append(record)
        except PlanetLabRecordDoesNotExist:
            # silently drop the ones that are missing in PL.
            # is this the right thing to do?
            print "ignoring geni record ", record.get_name(), "because pl record does not exist"
            table.remove(record)

    return good_records
##
# GENI API: Resolve
#
# This is a wrapper around resolve_raw that converts records objects into
# dictionaries before returning them to the user.
#
# @param cred credential string authorizing the caller
# @param name human readable name to resolve
#
# @return a list of record dictionaries, or an empty list
def resolve(self, cred, name):
    """
    GENI API: Resolve -- wrapper around resolve_raw that returns record
    dictionaries instead of record objects.

    @param cred credential string authorizing the caller
    @param name human readable name to resolve
    @return a list of record dictionaries, or an empty list
    """
    self.decode_authentication(cred, "resolve")
    try:
        records = self.resolve_raw("*", name)
    except RecordNotFound:
        # not one of ours -- try any trusted peer registry whose hrn is a
        # prefix of the requested name
        records = []
        for peer_hrn in self.registries:
            if name.startswith(peer_hrn):
                records = self.registries[peer_hrn].resolve(cred, name)
    return [rec.as_dict() for rec in records]
##
# GENI API: get_gid
#
# Retrieve the GID for an object. This function looks up a record in the
# registry and returns the GID of the record if it exists.
# TODO: Is this function needed? It's a shortcut for Resolve()
#
# @param name hrn to look up
#
# @return the string representation of a GID object
def get_gid(self, name):
    """
    GENI API: get_gid -- return the GID string for every record that
    matches 'name'.
    TODO: Is this function needed? It's a shortcut for Resolve()

    @param name hrn to look up
    @return list of string representations of GID objects
    """
    self.verify_object_belongs_to_me(name)
    matches = self.resolve_raw("*", name)
    return [rec.get_gid_object().save_to_string(save_parents=True)
            for rec in matches]
##
# Determine the rights that an object should have. The rights are entirely
# dependent on the type of the object. For example, users automatically
# get "refresh", "resolve", and "info".
#
# @param type the type of the object (user | sa | ma | slice | node)
# @param name human readable name of the object (not used at this time)
#
# @return RightList object containing rights
def determine_rights(self, type, name):
    """
    Determine the rights that an object should have, based purely on its
    type.

    @param type the type of the object (user | sa | ma | slice | node)
    @param name human readable name of the object (not used at this time)
    @return RightList object containing rights
    """
    # rights seem to be somewhat redundant with the type of the credential.
    # For example, a "sa" credential implies the authority right, because
    # a sa credential cannot be issued to a user who is not an owner of
    # the authority
    rights_by_type = {
        "user": ["refresh", "resolve", "info"],
        "sa": ["authority,sa"],
        "ma": ["authority,ma"],
        "slice": ["refresh", "embed", "bind", "control", "info"],
        "component": ["operator"],
    }
    rl = RightList()
    # unknown types (e.g. "node") get an empty right list
    for right in rights_by_type.get(type, []):
        rl.add(right)
    return rl
##
# GENI API: Get_self_credential
#
# Get_self_credential is a degenerate version of get_credential used by a
# client to get its initial credential when it doesn't have one yet. This is
# the same as get_credential(..., cred=None, ...).
#
# The registry ensures that the client is the principal that is named by
# (type, name) by comparing the public key in the record's GID to the
# private key used to encrypt the client-side of the HTTPS connection. Thus
# it is impossible for one principal to retrieve another principal's
# credential without having the appropriate private key.
#
# @param type type of object (user | slice | sa | ma | node)
# @param name human readable name of object
#
# @return the string representation of a credential object
def get_self_credential(self, type, name):
    """
    GENI API: Get_self_credential

    Degenerate version of get_credential used by a client to get its
    initial credential when it doesn't have one yet; equivalent to
    get_credential(cred=None, ...).

    The registry ensures that the client is the principal named by
    (type, name) by comparing the public key in the record's GID to the
    private key used on the client side of the HTTPS connection.

    @param type type of object (user | slice | sa | ma | node)
    @param name human readable name of object
    @return the string representation of a credential object
    """
    self.verify_object_belongs_to_me(name)

    auth_hrn = get_authority(name)
    if not auth_hrn:
        auth_hrn = name
    auth_info = self.get_auth_info(auth_hrn)

    # find a record that matches
    records = self.resolve_raw(type, name, must_exist=True)
    record = records[0]

    gid = record.get_gid_object()
    peer_cert = self.server.peer_cert
    if not peer_cert.is_pubkey(gid.get_pubkey()):
        raise ConnectionKeyGIDMismatch(gid.get_subject())

    # create the credential
    # (cleanup: removed a redundant second "gid = record.get_gid_object()")
    cred = Credential(subject = gid.get_subject())
    cred.set_gid_caller(gid)
    cred.set_gid_object(gid)
    cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
    cred.set_pubkey(gid.get_pubkey())

    rl = self.determine_rights(type, name)
    cred.set_privileges(rl)

    # determine the type of credential that we want to use as a parent for
    # this credential.
    if (type == "ma") or (type == "node"):
        auth_kind = "authority,ma"
    else: # user, slice, sa
        auth_kind = "authority,sa"

    cred.set_parent(self.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))

    cred.encode()
    cred.sign()

    return cred.save_to_string(save_parents=True)
##
# verify_cancreate_credential
#
# Verify that a user can retrieve a particular type of credential. For
# slices, the user must be on the researcher list. For SA and MA the user
# must be on the pi and operator lists respectively.
def verify_cancreate_credential(self, src_cred, record):
    """
    Verify that the caller named by src_cred's object GID may retrieve a
    credential for 'record': slices require researcher membership, sa
    requires pi, ma requires operator. The registry root authority is
    always permitted.
    """
    type = record.get_type()
    cred_object_hrn = src_cred.get_gid_object().get_hrn()
    config = Config()
    if cred_object_hrn in [config.GENI_REGISTRY_ROOT_AUTH]:
        return
    # map record type -> geni_info membership list that must contain the
    # caller's hrn
    membership_keys = {"slice": "researcher", "sa": "pi", "ma": "operator"}
    key = membership_keys.get(type)
    if key is None:
        # other types carry no membership requirement
        return
    members = record.get_geni_info().get(key, [])
    if cred_object_hrn not in members:
        raise PermissionError(cred_object_hrn + " is not in " + key + " list for " + record.get_name())
##
# GENI API: Get_credential
#
# Retrieve a credential for an object.
#
# If cred==None, then the behavior reverts to get_self_credential()
#
# @param cred credential object specifying rights of the caller
# @param type type of object (user | slice | sa | ma | node)
# @param name human readable name of object
#
# @return the string representation of a credential object
def get_credential(self, cred, type, name):
    """
    GENI API: Get_credential -- retrieve a credential for an object.

    If cred is None, the behavior reverts to get_self_credential().

    @param cred credential object specifying rights of the caller
    @param type type of object (user | slice | sa | ma | node)
    @param name human readable name of object
    @return the string representation of a credential object
    """
    if not cred:
        # bugfix: was "get_self_credential(self, type, name)" -- a call to
        # a nonexistent module-level function instead of the method
        return self.get_self_credential(type, name)

    self.decode_authentication(cred, "getcredential")
    self.verify_object_belongs_to_me(name)

    auth_hrn = get_authority(name)
    if not auth_hrn:
        auth_hrn = name
    auth_info = self.get_auth_info(auth_hrn)

    records = self.resolve_raw(type, name, must_exist=True)
    record = records[0]

    # verify_cancreate_credential requires that the member lists
    # (researchers, pis, etc) be filled in
    self.fill_record_info(record)
    self.verify_cancreate_credential(self.client_cred, record)

    # TODO: Check permission that self.client_cred can access the object

    object_gid = record.get_gid_object()
    new_cred = Credential(subject = object_gid.get_subject())
    new_cred.set_gid_caller(self.client_gid)
    new_cred.set_gid_object(object_gid)
    new_cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
    new_cred.set_pubkey(object_gid.get_pubkey())

    rl = self.determine_rights(type, name)
    new_cred.set_privileges(rl)

    # determine the type of credential that we want to use as a parent for
    # this credential.
    if (type == "ma") or (type == "node"):
        auth_kind = "authority,ma"
    else: # user, slice, sa
        auth_kind = "authority,sa"

    new_cred.set_parent(self.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))

    new_cred.encode()
    new_cred.sign()

    return new_cred.save_to_string(save_parents=True)
##
# GENI API: get_ticket
#
# Retrieve a ticket. This operation is currently implemented on PLC
# only (see SFA, engineering decisions); it is not implemented on
# components.
#
# The ticket is filled in with information from the PLC database. This
# information includes resources, and attributes such as user keys and
# initscripts.
#
# @param cred credential string
# @param name name of the slice to retrieve a ticket for
# @param rspec resource specification dictionary
#
# @return the string representation of a ticket object
def get_ticket(self, cred, name, rspec):
    """
    GENI API: get_ticket -- retrieve a ticket for a slice, filled in
    with attributes and rspec information from the PLC database.

    @param cred credential string
    @param name name of the slice to retrieve a ticket for
    @param rspec resource specification dictionary
    @return the string representation of a ticket object
    """
    self.decode_authentication(cred, "getticket")

    self.verify_object_belongs_to_me(name)

    self.verify_object_permission(name)

    # XXX much of this code looks like get_credential... are they so similar
    # that they should be combined?

    auth_hrn = get_authority(name)
    if not auth_hrn:
        auth_hrn = name
    auth_info = self.get_auth_info(auth_hrn)

    records = self.resolve_raw("slice", name, must_exist=True)
    record = records[0]

    object_gid = record.get_gid_object()
    new_ticket = Ticket(subject = object_gid.get_subject())
    new_ticket.set_gid_caller(self.client_gid)
    new_ticket.set_gid_object(object_gid)
    new_ticket.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
    new_ticket.set_pubkey(object_gid.get_pubkey())

    self.fill_record_info(record)

    (attributes, rspec) = self.record_to_slice_info(record)

    new_ticket.set_attributes(attributes)
    new_ticket.set_rspec(rspec)

    # NOTE(review): every other method goes through self.hierarchy for
    # parent credentials; this class-level AuthHierarchy call looks
    # inconsistent -- confirm it is intentional.
    new_ticket.set_parent(AuthHierarchy.get_auth_ticket(auth_hrn))

    new_ticket.encode()
    new_ticket.sign()

    return new_ticket.save_to_string(save_parents=True)
##
# GENI_API: Create_gid
#
# Create a new GID. For MAs and SAs that are physically located on the
# registry, this allows a owner/operator/PI to create a new GID and have it
# signed by his respective authority.
#
# @param cred credential of caller
# @param name hrn for new GID
# @param uuid unique identifier for new GID
# @param pkey_string public-key string (TODO: why is this a string and not a keypair object?)
#
# @return the string representation of a GID object
def create_gid(self, cred, name, uuid, pubkey_str):
    """
    GENI API: Create_gid -- create and sign a new GID for 'name'.

    @param cred credential of caller
    @param name hrn for new GID
    @param uuid unique identifier for new GID (generated when None)
    @param pubkey_str public-key string
    @return the string representation of a GID object
    """
    self.decode_authentication(cred, "getcredential")

    self.verify_object_belongs_to_me(name)
    self.verify_object_permission(name)

    if uuid is None:
        uuid = create_uuid()

    pkey = Keypair()
    pkey.load_pubkey_from_string(pubkey_str)
    new_gid = self.hierarchy.create_gid(name, uuid, pkey)
    return new_gid.save_to_string(save_parents=True)
|
from Cookie import Cookie
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.utils.translation import ugettext_lazy as _
from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace
from redis import Redis, ConnectionPool
from core.game import registry
# Module-level Redis client shared by all namespaces; the default
# ConnectionPool connects to localhost on the default port.
redis = Redis(connection_pool=ConnectionPool())
class GameNamespace(BaseNamespace, BroadcastMixin):
    """
    Per-user socket.io namespace for event handlers.
    """

    def on_start(self):
        """
        Set up the initial user. We only have access to the
        HTTP environment, so we use the session ID in the cookie
        and look up a user with it. If a valid user is found, we
        add them to the user set in redis, and broadcast their
        join event to everyone else.
        """
        try:
            cookie = Cookie(self.environ["HTTP_COOKIE"])
            session_key = cookie[settings.SESSION_COOKIE_NAME].value
            session = Session.objects.get(session_key=session_key)
            user_id = session.get_decoded().get(SESSION_KEY)
            user = User.objects.get(id=user_id)
        except (KeyError, ObjectDoesNotExist):
            # no cookie / stale session / deleted user: anonymous socket
            self.user = None
        else:
            self.user = {"name": user.username, "id": user.id}
            self.broadcast_event_not_me("join", self.user)
            # NOTE(review): sadd serializes the dict to its string form,
            # so smembers below yields strings, not dicts -- confirm the
            # client-side handler expects that.
            redis.sadd("users", self.user)
        # Send the current set of users to the new socket.
        self.emit("users", list(redis.smembers("users")))
        # Tell the new socket who is in each registered game.
        for game in registry.values():
            self.emit("game_users", game.name, game.players.keys())

    def on_chat(self, message):
        # Relay chat only for authenticated sockets.
        if self.user:
            self.broadcast_event("chat", self.user, message)

    def recv_disconnect(self):
        """
        Socket disconnected - if the user was authenticated, remove
        them from redis and broadcast their leave event.
        """
        self.disconnect()
        if self.user:
            redis.srem("users", self.user)
            self.broadcast_event_not_me("leave", self.user)

    def on_bet(self, game_name, amount, bet_args):
        """
        Takes a bet for a game.
        """
        # Validate cheaply; any failure silently ignores the bet.
        try:
            assert self.user is not None       # Must have a user
            assert str(amount).isdigit()       # Amount must be digit
            assert int(amount) > 0             # Amount must be positive
            assert game_name in registry       # Game must be valid
        except AssertionError:
            return
        amount = int(amount)
        user = User.objects.get(id=self.user["id"])
        user.account.balance -= amount
        if user.account.balance < 0:
            # balance change is in-memory only; nothing was saved
            self.emit("notice", _("You don't have that amount to bet"))
        else:
            game = registry[game_name]
            # only persist the debited balance if the game accepts the bet
            if game.bet(self, amount, bet_args):
                user.account.save()
                self.broadcast_event("game_users", game_name, game.players.keys())
class GameApplication(object):
    """
    Standard socket.io wsgi application.

    Hands /socket.io/ requests to socketio_manage with the game
    namespace; any other path receives a bare 404 page.
    """
    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"]
        if not path.startswith("/socket.io/"):
            start_response('404 Not Found', [])
            return ['<h1>Not Found</h1>']
        socketio_manage(environ, {"": GameNamespace})
Use redis hash for users and add default coords.
from Cookie import Cookie
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.utils.translation import ugettext_lazy as _
from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace
from redis import Redis, ConnectionPool
from core.game import registry
# Module-level Redis client shared by all namespaces; the default
# ConnectionPool connects to localhost on the default port.
redis = Redis(connection_pool=ConnectionPool())
# Redis hash holding each connected user's data, keyed by user id.
USERS_KEY = "users"
class GameNamespace(BaseNamespace, BroadcastMixin):
    """
    Per-user socket.io namespace for event handlers.
    """

    def on_start(self):
        """
        Set up the initial user. We only have access to the
        HTTP environment, so we use the session ID in the cookie
        and look up a user with it. If a valid user is found, we
        add them to the user hash in redis, and broadcast their
        join event to everyone else.
        """
        try:
            cookie = Cookie(self.environ["HTTP_COOKIE"])
            session_key = cookie[settings.SESSION_COOKIE_NAME].value
            session = Session.objects.get(session_key=session_key)
            user_id = session.get_decoded().get(SESSION_KEY)
            user = User.objects.get(id=user_id)
        except (KeyError, ObjectDoesNotExist):
            # no cookie / stale session / deleted user: anonymous socket
            self.user = None
        else:
            # x/y are the default spawn coordinates for a new user
            self.user = {
                "id": user.id,
                "name": user.username,
                "x": 880,
                "y": 200,
            }
            self.broadcast_event_not_me("join", self.user)
            # redis stores the dict's literal string form; it is parsed
            # back with ast.literal_eval below
            redis.hset(USERS_KEY, self.user["id"], self.user)
        # Send the current set of users to the new socket.
        # Security fix: ast.literal_eval only parses Python literals --
        # the previous eval() would execute arbitrary expressions stored
        # in redis.
        from ast import literal_eval
        self.emit("users", [literal_eval(u) for u in redis.hvals(USERS_KEY)])
        # for game in registry.values():
        #     self.emit("game_users", game.name, game.players.keys())

    def on_chat(self, message):
        # Relay chat only for authenticated sockets.
        if self.user:
            self.broadcast_event("chat", self.user, message)

    def on_move(self, pos):
        # Merge the new coordinates into the user dict, persist, and tell
        # everyone else.
        if self.user:
            self.user.update(pos)
            redis.hset(USERS_KEY, self.user["id"], self.user)
            self.broadcast_event_not_me("move", self.user)

    def recv_disconnect(self):
        """
        Socket disconnected - if the user was authenticated, remove
        them from redis and broadcast their leave event.
        """
        self.disconnect()
        if self.user:
            redis.hdel(USERS_KEY, self.user["id"])
            self.broadcast_event_not_me("leave", self.user)

    def on_bet(self, game_name, amount, bet_args):
        """
        Takes a bet for a game.
        """
        # Validate cheaply; any failure silently ignores the bet.
        try:
            assert self.user is not None       # Must have a user
            assert str(amount).isdigit()       # Amount must be digit
            assert int(amount) > 0             # Amount must be positive
            assert game_name in registry       # Game must be valid
        except AssertionError:
            return
        amount = int(amount)
        user = User.objects.get(id=self.user["id"])
        user.account.balance -= amount
        if user.account.balance < 0:
            # balance change is in-memory only; nothing was saved
            self.emit("notice", _("You don't have that amount to bet"))
        else:
            game = registry[game_name]
            # only persist the debited balance if the game accepts the bet
            if game.bet(self, amount, bet_args):
                user.account.save()
                self.broadcast_event("game_users", game_name, game.players.keys())
class GameApplication(object):
    """
    Standard socket.io wsgi application.

    Hands /socket.io/ requests to socketio_manage with the game
    namespace; any other path receives a bare 404 page.
    """
    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"]
        if not path.startswith("/socket.io/"):
            start_response('404 Not Found', [])
            return ['<h1>Not Found</h1>']
        socketio_manage(environ, {"": GameNamespace})
|
import math
import numpy as np
import random
import copy
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
from tools import action as a
from tools import Simulation as Sim
from naoth import math2d as m2d
from tools import tools
from tools import field_info as field
from tools import raw_attack_direction_provider as attack_dir
"""
Visualization of Action Selection Algorithm until a goal is scored, with extra variations not considered in Action Model
definition.
Example:
run without any parameters
$ python simulate_variate_until_goal.py
"""
# TODO update this with all the bugfixes from other scripts
# TODO make it possible to switch between variations and standard
class State:
    """Robot world state: pose, ball position and bookkeeping for the sim."""

    def __init__(self):
        # Robot pose in global field coordinates.
        self.pose = m2d.Pose2D()
        self.pose.translation = m2d.Vector2(-4000, -700)
        self.pose.rotation = math.radians(0)
        # Ball position relative to the robot.
        self.ball_position = m2d.Vector2(100.0, 0.0)
        # Possible options: normal, influence_01, generated
        self.potential_field_function = "normal"
        # Other robots, both lists in global coordinates.
        self.opp_robots = []
        self.own_robots = []
        # Index of the action chosen while in this state.
        self.next_action = 0

    def update_pos(self, glob_pos, rotation):
        """Set a new global position and rotation (rotation in radians)."""
        self.pose.translation = glob_pos
        self.pose.rotation = rotation
def draw_robot_walk_lines(line):
    """Plot the field and, for every recorded state, the robot, ball and heading.

    line: sequence of State snapshots produced by the simulation.
    """
    plt.clf()
    axes = plt.gca()
    tools.draw_field(axes)

    for state in line:
        origin = state.pose.translation
        ball_pos = state.pose * state.ball_position
        # Fixed-length arrow pointing along the robot's heading.
        arrow_head = m2d.Vector2(500, 0).rotate(state.pose.rotation)

        axes.add_artist(Circle(xy=(origin.x, origin.y), radius=100, fill=False, edgecolor='white'))
        axes.add_artist(Circle(xy=(ball_pos.x, ball_pos.y), radius=120, fill=True, edgecolor='blue'))
        axes.arrow(origin.x, origin.y, arrow_head.x, arrow_head.y, head_width=100, head_length=100, fc='k', ec='k')

    # TODO:
    # -- add a counter for moves
    # -- show the executed action
    # -- verify the ball position is correct
    # -- improve readability of clusters (e.g. rotating around the ball)
    plt.show()
def simulate_goal_cycle():
    """
    Run the action-selection loop from the start pose and record every state.

    Returns
    -------
    sim_data: list of State
        deep-copied snapshots, one per decision step, for plotting with
        draw_robot_walk_lines().
    """
    state = State()
    sim_data = [copy.deepcopy(state)]

    # Candidate actions (Action args: name, speed, speed_std, angle, angle_std).
    no_action = a.Action("none", 0, 0, 0, 0)
    kick_short = a.Action("kick_short", 1080, 150, 0, 7)
    sidekick_left = a.Action("sidekick_left", 750, 150, 90, 10)
    sidekick_right = a.Action("sidekick_right", 750, 150, -90, 10)
    action_list = [no_action, kick_short, sidekick_left, sidekick_right]

    num_kicks = 0
    num_turn_degrees = 0
    goal_scored = False
    # while not goal_scored:
    # do a fixed number of steps
    for i in range(2):
        actions_consequences = []
        # Simulate Consequences
        for action in action_list:
            single_consequence = a.ActionResults([])
            actions_consequences.append(Sim.simulate_consequences(action, single_consequence, state, 30))

        # Decide best action
        best_action = Sim.decide_smart(actions_consequences, state)
        # state.next_action = best_action #not next but last action
        sim_data[len(sim_data)-1].next_action = best_action

        # expected_ball_pos should be in local coordinates for rotation calculations
        expected_ball_pos = actions_consequences[best_action].expected_ball_pos_mean

        # Check if expected_ball_pos inside opponent goal
        opp_goal_back_right = m2d.Vector2(field.opponent_goalpost_right.x + field.goal_depth,
                                          field.opponent_goalpost_right.y)
        opp_goal_box = m2d.Rect2d(opp_goal_back_right, field.opponent_goalpost_left)

        goal_scored = opp_goal_box.inside(state.pose * expected_ball_pos)
        inside_field = field.field_rect.inside(state.pose * expected_ball_pos)

        # Assert that expected_ball_pos is inside field or inside opp goal
        if not inside_field and not goal_scored:
            sim_data.append(copy.deepcopy(state))
            print("Error")
            # For histogram -> note that this position doesn't reach a goal
            break

        if not action_list[best_action].name == "none":
            # A kick was chosen: walk to the expected ball position and turn
            # to face the direction the ball travelled.
            # print(str(state.pose * expected_ball_pos) + " Decision: " + str(action_list[best_action].name))
            # update the robots position
            rotation = np.arctan2(expected_ball_pos.y, expected_ball_pos.x)
            # print(math.degrees(rotation))
            state.update_pos(state.pose * expected_ball_pos, state.pose.rotation + rotation)
            sim_data.append(copy.deepcopy(state))
            num_kicks += 1
        elif action_list[best_action].name == "none":
            # No kick was worthwhile: rotate in place towards the attack direction.
            # print(str(state.pose * expected_ball_pos) + " Decision: " + str(action_list[best_action].name))
            attack_direction = attack_dir.get_attack_direction(state)
            # Todo: can run in a deadlock for some reason
            if attack_direction > 0:
                state.update_pos(state.pose.translation, state.pose.rotation + math.radians(10))  # Should be turn right
                # state.pose.rotation += math.radians(10) # Should be turn right
                print("Robot turns right - global rotation turns left")
            else:
                state.update_pos(state.pose.translation, state.pose.rotation - math.radians(10))  # Should be turn left
                # state.pose.rotation -= math.radians(10) # Should be turn left
                # print("Robot turns left - global rotation turns right")
            sim_data.append(copy.deepcopy(state))
            num_turn_degrees += 1

    # print("Num Kicks: " + str(num_kicks))
    # print("Num Turns: " + str(num_turn_degrees))
    return sim_data
def main():
    """Run one simulated goal cycle and plot the resulting walk lines."""
    history = simulate_goal_cycle()
    draw_robot_walk_lines(history)


if __name__ == "__main__":
    main()
Updated the plot-three-steps script.
import math
import numpy as np
import random
import copy
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
from tools import action as a
from tools import Simulation as Sim
from naoth import math2d as m2d
from tools import tools
from tools import field_info as field
from tools import raw_attack_direction_provider as attack_dir
from run_simulation_with_particleFilter import calculate_best_direction as heinrich_test
"""
Visualization of Action Selection Algorithm until a goal is scored, with extra variations not considered in Action Model
definition.
Example:
run without any parameters
$ python simulate_variate_until_goal.py
"""
# TODO update this with all the bugfixes from other scripts
# TODO make it possible to switch between variations and standard
class State:
    """Robot world state for the particle-filter comparison simulation."""

    def __init__(self):
        # Robot pose in global field coordinates.
        self.pose = m2d.Pose2D()
        self.pose.translation = m2d.Vector2(-4000, -700)
        self.pose.rotation = math.radians(0)
        # Ball position relative to the robot.
        self.ball_position = m2d.Vector2(100.0, 0.0)
        # Possible options: normal, influence_01, generated
        self.potential_field_function = "normal"
        # Other robots, both lists in global coordinates.
        self.opp_robots = []
        self.own_robots = []
        # Index of the action chosen while in this state.
        self.next_action = 0

    def update_pos(self, glob_pos, rotation):
        """Set a new global position; *rotation* is given in degrees."""
        self.pose.translation = glob_pos
        self.pose.rotation = math.radians(rotation)
def draw_robot_walk_lines(axes, line, position_color):
    """Draw robot circle, ball circle and heading arrow for each state in *line*.

    axes: matplotlib axes to draw on.
    line: sequence of recorded State snapshots.
    position_color: fill color used for the ball markers.
    """
    for step in line:
        robot_xy = step.pose.translation
        ball_xy = step.pose * step.ball_position
        heading = m2d.Vector2(500, 0).rotate(step.pose.rotation)
        axes.add_artist(Circle(xy=(robot_xy.x, robot_xy.y), radius=100, fill=False, edgecolor='white'))
        axes.add_artist(Circle(xy=(ball_xy.x, ball_xy.y), radius=120, fill=True, color=position_color))
        axes.arrow(robot_xy.x, robot_xy.y, heading.x, heading.y, head_width=100, head_length=100, fc='k', ec='k')
def simulate_goal_cycle_current_impl():
    """Simulate decisions with the current action-selection implementation.

    Returns
    -------
    list of State: deep-copied snapshots, one per decision step.
    """
    state = State()
    sim_data = [copy.deepcopy(state)]

    # Candidate actions (Action args: name, speed, speed_std, angle, angle_std).
    no_action = a.Action("none", 0, 0, 0, 0)
    kick_short = a.Action("kick_short", 1080, 150, 0, 7)
    sidekick_left = a.Action("sidekick_left", 750, 150, 90, 10)
    sidekick_right = a.Action("sidekick_right", 750, 150, -90, 10)
    action_list = [no_action, kick_short, sidekick_left, sidekick_right]

    # Rotation direction the robot committed to while searching for a kick.
    # BUGFIX: initialized before the loop - previously a "none" decision on
    # the first iteration read this name before assignment (UnboundLocalError).
    chosen_rotation = 'none'

    # do a fixed number of steps (a while-not-goal loop can deadlock)
    for i in range(2):
        actions_consequences = []
        # Simulate the consequences of every candidate action.
        for action in action_list:
            single_consequence = a.ActionResults([])
            actions_consequences.append(Sim.simulate_consequences(action, single_consequence, state, 30))

        # Decide the best action and remember it on the last recorded state.
        best_action = Sim.decide_smart(actions_consequences, state)
        sim_data[len(sim_data)-1].next_action = best_action

        # expected_ball_pos should be in local coordinates for rotation calculations
        expected_ball_pos = actions_consequences[best_action].expected_ball_pos_mean

        if not action_list[best_action].name == "none":
            # Kick chosen: walk to the ball and face the kick direction.
            rotation = np.arctan2(expected_ball_pos.y, expected_ball_pos.x)
            # BUGFIX: convert the summed angle once - previously radians from
            # arctan2 were added to a degrees value before passing to
            # update_pos(), which expects degrees.
            state.update_pos(state.pose * expected_ball_pos, math.degrees(state.pose.rotation + rotation))
            # reset the rotation direction
            chosen_rotation = 'none'
            sim_data.append(copy.deepcopy(state))
        else:
            turn_rotation_step = 5
            # Pick a turn direction once from the attack direction, then keep
            # turning the same way until a kick is chosen.
            attack_direction = attack_dir.get_attack_direction(state)
            # BUGFIX: was "(attack_direction > 0 and chosen_rotation) is 'none' or ..."
            # - mis-parenthesized, and string identity ("is") comparisons are
            # implementation dependent; compare by value with the intended grouping.
            if (attack_direction > 0 and chosen_rotation == 'none') or chosen_rotation == 'left':
                state.update_pos(state.pose.translation, math.degrees(state.pose.rotation) + turn_rotation_step)  # Should turn right
                chosen_rotation = 'left'
            elif (attack_direction <= 0 and chosen_rotation == 'none') or chosen_rotation == 'right':
                state.update_pos(state.pose.translation, math.degrees(state.pose.rotation) - turn_rotation_step)  # Should turn left
                chosen_rotation = 'right'
            sim_data.append(copy.deepcopy(state))

    return sim_data
def simulate_goal_cycle_particle():
    """Simulate decisions where the kick direction comes from the particle filter.

    Returns
    -------
    list of State: deep-copied snapshots, one per decision step.
    """
    state = State()
    sim_data = [copy.deepcopy(state)]

    # Candidate actions (Action args: name, speed, speed_std, angle, angle_std).
    no_action = a.Action("none", 0, 0, 0, 0)
    kick_short = a.Action("kick_short", 1080, 150, 0, 7)
    sidekick_left = a.Action("sidekick_left", 750, 150, 90, 10)
    sidekick_right = a.Action("sidekick_right", 750, 150, -90, 10)
    action_list = [no_action, kick_short, sidekick_left, sidekick_right]

    for i in range(2):
        # Change the angle of all actions according to the particle filter;
        # best_dir is the global rotation for that kick.
        best_dir = 360
        best_action = 0
        for ix, action in enumerate(action_list):
            # BUGFIX: was "action.name is 'none'" - string identity comparison
            # is implementation dependent; compare by value.
            if action.name == "none":
                continue
            tmp, _ = heinrich_test(state, action_list[ix], False, iterations=20)
            # print("Best dir: " + str(math.degrees(tmp)) + " for action: " + action_list[ix].name)
            if np.abs(tmp) < np.abs(best_dir):
                best_dir = tmp
                best_action = ix
        # print("Best dir: " + str(math.degrees(best_dir)) + " for action: " + action_list[best_action].name)

        # Rotate the robot so that the shooting angle == best_dir
        state.pose.rotation = state.pose.rotation + best_dir

        # Same kick as the chosen action, but with zero angle deviation.
        new_action = a.Action("new_action", action_list[best_action].speed, action_list[best_action].speed_std,
                              action_list[best_action].angle, 0)

        # after turning, evaluate the chosen action again to get the expected ball position
        actions_consequences = []
        single_consequence = a.ActionResults([])
        actions_consequences.append(Sim.simulate_consequences(new_action, single_consequence, state, num_particles=30))

        # expected_ball_pos should be in local coordinates for rotation calculations
        expected_ball_pos = actions_consequences[0].expected_ball_pos_mean

        # calculate the time needed
        rotation = np.arctan2(expected_ball_pos.y, expected_ball_pos.x)
        # update the robots position (update_pos expects degrees)
        state.update_pos(state.pose * expected_ball_pos, math.degrees(state.pose.rotation + rotation))
        sim_data.append(copy.deepcopy(state))

    return sim_data
def main():
    """Plot the current implementation (blue) against the particle filter (red)."""
    plt.clf()
    axes = plt.gca()
    tools.draw_field(axes)

    current_line = simulate_goal_cycle_current_impl()
    particle_line = simulate_goal_cycle_particle()
    draw_robot_walk_lines(axes, current_line, position_color='blue')
    draw_robot_walk_lines(axes, particle_line, position_color='red')
    plt.show()


if __name__ == "__main__":
    main()
|
# ActivitySim
# See full license in LICENSE.txt.
import os
import logging
import logging.config
import sys
import time
from collections import OrderedDict
import yaml
import numpy as np
import pandas as pd
from activitysim.core import inject
import inject_defaults
import config
# Configurations
ASIM_LOGGER = 'activitysim'  # name of the top-level activitysim logger
CSV_FILE_TYPE = 'csv'  # file suffix used for trace output files
LOGGING_CONF_FILE_NAME = 'logging.yaml'  # default logging config filename looked up in configs_dir

# module-level logger for this tracing module
logger = logging.getLogger(__name__)
def log_file_path(file_name):
    """Return the full path for *file_name* in the log output directory."""
    # FIXME - for compatability with v0.7
    return config.log_file_path(file_name)
def check_for_variability():
    """Return the 'check_for_variability' injectable setting (default False)."""
    return inject.get_injectable('check_for_variability', False)
def extend_trace_label(trace_label, extension):
    """Append *extension* to a dotted trace label; falsy labels pass through unchanged."""
    return "%s.%s" % (trace_label, extension) if trace_label else trace_label
def print_elapsed_time(msg=None, t0=None, debug=False):
    """Return time.time(); when *msg* is given, log the time elapsed since *t0*.

    debug=True logs at DEBUG level instead of INFO.
    """
    t1 = time.time()
    if msg:
        elapsed = t1 - (t0 or t1)
        text = "Time to execute %s : %s seconds (%s minutes)" % \
            (msg, round(elapsed, 3), round(elapsed / 60.0, 1))
        log = logger.debug if debug else logger.info
        log(text)
    return t1
def delete_output_files(file_type, output_dir=None):
    """
    Delete files in output directory of specified type

    Parameters
    ----------
    file_type: str
        file suffix to match (e.g. 'csv')
    output_dir: str, optional
        directory to clean; defaults to the 'output_dir' injectable.
        (Generalized for v0.7 compatibility; existing single-arg callers
        are unaffected.)

    Returns
    -------
    Nothing
    """
    if output_dir is None:
        output_dir = inject.get_injectable('output_dir')

    logger.debug("Deleting %s files in output_dir %s" % (file_type, output_dir))

    for the_file in os.listdir(output_dir):
        if the_file.endswith(file_type):
            file_path = os.path.join(output_dir, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # best effort - report and keep deleting the remaining files
                print(e)
def delete_csv_files():
    """
    Delete CSV files in the output directory (the 'output_dir' injectable).

    Returns
    -------
    Nothing
    """
    delete_output_files(CSV_FILE_TYPE)
def config_logger(custom_config_file=None, basic=False):
    """
    Configure logger

    if log_config_file is not supplied then look for conf file in configs_dir

    if not found use basicConfig

    Parameters
    ----------
    custom_config_file: str
        custom config filename
    basic: boolean
        basic setup

    Returns
    -------
    Nothing
    """
    # decide which yaml config file (if any) to load
    log_config_file = None

    if custom_config_file and os.path.isfile(custom_config_file):
        log_config_file = custom_config_file
    elif not basic:
        # look for conf file in configs_dir
        configs_dir = inject.get_injectable('configs_dir')
        default_config_file = os.path.join(configs_dir, LOGGING_CONF_FILE_NAME)
        if os.path.isfile(default_config_file):
            log_config_file = default_config_file

    if log_config_file:
        with open(log_config_file) as f:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # in PyYAML >= 5.1 and unsafe on untrusted files - confirm the
            # logging config is trusted or switch to yaml.safe_load
            config_dict = yaml.load(f)
            config_dict = config_dict['logging']
            config_dict.setdefault('version', 1)
            logging.config.dictConfig(config_dict)
    else:
        logging.basicConfig(level=logging.INFO, stream=sys.stdout)

    logger = logging.getLogger(ASIM_LOGGER)

    if custom_config_file and not os.path.isfile(custom_config_file):
        logger.error("#\n#\n#\nconfig_logger could not find conf file '%s'" % custom_config_file)

    if log_config_file:
        logger.info("Read logging configuration from: %s" % log_config_file)
    else:
        # NOTE: Python 2 print statement (this module is py2-era)
        print "Configured logging using basicConfig"
        logger.info("Configured logging using basicConfig")
def print_summary(label, df, describe=False, value_counts=False):
    """
    Log a value_counts() and/or describe() summary of *df* under *label*.

    Parameters
    ----------
    label: str
        tracer name
    df: pandas.DataFrame
        traced dataframe
    describe: boolean
        print describe?
    value_counts: boolean
        print value counts?

    Returns
    -------
    Nothing
    """
    # (flag, message template, lazy producer) - producers are only invoked
    # when the corresponding flag is set.
    reports = [
        (value_counts, "%s value counts:\n%s", lambda: df.value_counts()),
        (describe, "%s summary:\n%s", lambda: df.describe()),
    ]

    if not any(wanted for wanted, _, _ in reports):
        logger.error("print_summary neither value_counts nor describe")

    for wanted, template, producer in reports:
        if wanted:
            logger.info(template % (label, producer()))
def register_traceable_table(table_name, df):
    """
    Register traceable table

    Resolves which rows of *df* are trace targets (directly by trace_hh_id for
    the households table, otherwise by slicing on a previously registered
    table's id column) and records the traced ids and the table's index name
    as injectables for later slicing.

    Parameters
    ----------
    table_name: str
        name of the table being registered (must be in 'traceable_tables')
    df: pandas.DataFrame
        traced dataframe

    Returns
    -------
    Nothing
    """
    trace_hh_id = inject.get_injectable("trace_hh_id", None)
    trace_injectable = 'trace_%s' % table_name
    new_traced_ids = []

    # tracing is disabled entirely when no household id is being traced
    if trace_hh_id is None:
        return

    traceable_tables = inject.get_injectable('traceable_tables', [])
    if table_name not in traceable_tables:
        logger.error("table '%s' not in traceable_tables" % table_name)
        return

    idx_name = df.index.name
    if idx_name is None:
        logger.error("Can't register table '%s' without index name" % table_name)
        return

    traceable_table_refs = inject.get_injectable('traceable_table_refs', None)
    # traceable_table_refs is OrderedDict so we can find first registered table to slice by ref_con
    if traceable_table_refs is None:
        traceable_table_refs = OrderedDict()

    if idx_name in traceable_table_refs and traceable_table_refs[idx_name] != table_name:
        logger.error("table '%s' index name '%s' already registered for table '%s'" %
                     (table_name, idx_name, traceable_table_refs[idx_name]))
        return

    if table_name == 'households':
        # households are traced directly by the configured trace_hh_id
        if trace_hh_id not in df.index:
            logger.warn("trace_hh_id %s not in dataframe" % trace_hh_id)
            new_traced_ids = []
        else:
            logger.info("tracing household id %s in %s households" % (trace_hh_id, len(df.index)))
            new_traced_ids = [trace_hh_id]
    else:
        # find first already registered ref_col we can use to slice this table
        ref_con = next((c for c in traceable_table_refs if c in df.columns), None)
        if ref_con is None:
            logger.error("can't find a registered table to slice table '%s' index name '%s'"
                         " in traceable_table_refs: %s" %
                         (table_name, idx_name, traceable_table_refs))
            return

        # get traceable_ids for ref_con table
        ref_con_trace_injectable = 'trace_%s' % traceable_table_refs[ref_con]
        ref_con_traced_ids = inject.get_injectable(ref_con_trace_injectable, [])

        # inject list of ids in table we are tracing
        # this allows us to slice by id without requiring presence of a household id column
        traced_df = df[df[ref_con].isin(ref_con_traced_ids)]
        new_traced_ids = traced_df.index.tolist()
        if len(new_traced_ids) == 0:
            logger.warn("register %s: no rows with %s in %s." %
                        (table_name, ref_con, ref_con_traced_ids))

    # update traceable_table_refs with this traceable_table's ref_con
    if idx_name not in traceable_table_refs:
        traceable_table_refs[idx_name] = table_name
        # NOTE: Python 2 print statement (this module is py2-era)
        print "adding table %s.%s to traceable_table_refs" % (table_name, idx_name)
        inject.add_injectable('traceable_table_refs', traceable_table_refs)

    # update the list of trace_ids for this table
    prior_traced_ids = inject.get_injectable(trace_injectable, [])

    if prior_traced_ids:
        logger.info("register %s: adding %s ids to %s existing trace ids" %
                    (table_name, len(new_traced_ids), len(prior_traced_ids)))

    traced_ids = prior_traced_ids + new_traced_ids

    logger.info("register %s: tracing ids %s in %s %s" %
                (table_name, traced_ids, len(df.index), table_name))

    # only persist when this registration actually contributed new ids
    if new_traced_ids:
        inject.add_injectable(trace_injectable, traced_ids)
def write_df_csv(df, file_path, index_label=None, columns=None, column_labels=None, transpose=True):
    """
    Write (append) *df* to file_path as CSV, optionally transposed for legibility.

    Parameters
    ----------
    df: pandas.DataFrame
        dataframe to write
    file_path: str
        destination csv path (appended to if it already exists)
    index_label: str
        index name to use when df has none (transpose mode only)
    columns: list
        subset of columns to write
    column_labels: [str, str]
        labels for the label/value header row in transpose mode
        (defaults to ['label', 'value'])
    transpose: bool
        transpose the frame before writing

    Returns
    -------
    Nothing
    """
    mode = 'a' if os.path.isfile(file_path) else 'w'

    if columns:
        df = df[columns]

    if not transpose:
        # simple append; header row repeats if the file already exists
        df.to_csv(file_path, mode="a", index=df.index.name is not None, header=True)
        return

    df_t = df.transpose()
    if df.index.name is not None:
        df_t.index.name = df.index.name
    elif index_label:
        df_t.index.name = index_label

    with open(file_path, mode=mode) as f:
        if column_labels is None:
            column_labels = [None, None]
        if column_labels[0] is None:
            column_labels[0] = 'label'
        if column_labels[1] is None:
            column_labels[1] = 'value'

        if len(df_t.columns) == len(column_labels) - 1:
            column_label_row = ','.join(column_labels)
        else:
            # more value columns than labels: number them value_1, value_2, ...
            column_label_row = \
                column_labels[0] + ',' \
                + ','.join([column_labels[1] + '_' + str(i+1) for i in range(len(df_t.columns))])

        if mode == 'a':
            # comment out the repeated header row on append
            column_label_row = '# ' + column_label_row
        f.write(column_label_row + '\n')

    df_t.to_csv(file_path, mode='a', index=True, header=True)
def write_series_csv(series, file_path, index_label=None, columns=None, column_labels=None):
    """
    Append *series* to file_path as CSV.

    columns may be a str (name for the values column) or a
    [index_name, values_name] pair. index_label is used only when
    the series index is still unnamed. column_labels is accepted
    for signature parity with write_df_csv but unused.
    """
    if isinstance(columns, str):
        # a single name renames the values column
        series = series.rename(columns)
    elif isinstance(columns, list):
        index_name, value_name = columns[0], columns[1]
        if index_name:
            series.index.name = index_name
        series = series.rename(value_name)

    if index_label and series.index.name is None:
        series.index.name = index_label

    series.to_csv(file_path, mode='a', index=True, header=True)
def write_csv(df, file_name, index_label=None, columns=None, column_labels=None, transpose=True):
    """
    Write a DataFrame, Series or dict to a trace CSV file.

    Parameters
    ----------
    df: pandas.DataFrame or pandas.Series or dict
        traced dataframe
    file_name: str
        output file name
    index_label: str
        index name
    columns: list
        columns to write
    transpose: bool
        whether to transpose dataframe (ignored for series)

    Returns
    -------
    Nothing
    """
    # NOTE(review): py2-era idiom - under py3 encode() returns bytes and the
    # endswith() check below would fail; confirm before porting
    file_name = file_name.encode('ascii', 'ignore')

    assert len(file_name) > 0

    if not file_name.endswith(".%s" % CSV_FILE_TYPE):
        file_name = '%s.%s' % (file_name, CSV_FILE_TYPE)

    file_path = config.trace_file_path(file_name)

    # existing file will be appended to, so warn about it
    if os.path.isfile(file_path):
        logger.error("write_csv file exists %s %s" % (type(df).__name__, file_name))

    if isinstance(df, pd.DataFrame):
        # logger.debug("dumping %s dataframe to %s" % (df.shape, file_name))
        write_df_csv(df, file_path, index_label, columns, column_labels, transpose=transpose)
    elif isinstance(df, pd.Series):
        # logger.debug("dumping %s element series to %s" % (df.shape[0], file_name))
        write_series_csv(df, file_path, index_label, columns, column_labels)
    elif isinstance(df, dict):
        df = pd.Series(data=df)
        # logger.debug("dumping %s element dict to %s" % (df.shape[0], file_name))
        write_series_csv(df, file_path, index_label, columns, column_labels)
    else:
        logger.error("write_csv object for file_name '%s' of unexpected type: %s" %
                     (file_name, type(df)))
def slice_ids(df, ids, column=None):
    """
    slice a dataframe to select only records with the specified ids

    Parameters
    ----------
    df: pandas.DataFrame
        traced dataframe
    ids: int or list of ints
        slice ids
    column: str
        column to slice (slice using index if None)

    Returns
    -------
    df: pandas.DataFrame
        sliced dataframe

    Raises
    ------
    RuntimeError
        when the specified slicer column is not in df
    """
    if np.isscalar(ids):
        ids = [ids]
    try:
        mask = df.index.isin(ids) if column is None else df[column].isin(ids)
    except KeyError:
        # this happens if specified slicer column is not in df
        # df = df[0:0]
        raise RuntimeError("slice_ids slicer column '%s' not in dataframe" % column)
    return df[mask]
def get_trace_target(df, slicer):
    """
    get target ids and column or index to identify target trace rows in df

    Parameters
    ----------
    df: pandas.DataFrame
        dataframe to slice
    slicer: str
        name of column or index to use for slicing ('NONE' disables slicing)

    Returns
    -------
    (target, column) tuple

    target : int or list of ints
        id or ids that identify tracer target rows (None means do not slice)
    column : str
        name of column to search for targets or None to search index

    Raises
    ------
    RuntimeError
        when slicer does not match a column, a registered table ref or 'TAZ'
    """
    target_ids = None  # id or ids to slice by (e.g. hh_id or person_ids or tour_ids)
    column = None  # column name to slice on or None to slice on index

    # special do-not-slice code for dumping entire df
    if slicer == 'NONE':
        return target_ids, column

    if slicer is None:
        slicer = df.index.name

    if isinstance(df, pd.DataFrame):
        # always slice by household id if we can
        if 'household_id' in df.columns:
            slicer = 'household_id'
        if slicer in df.columns:
            column = slicer

    if column is None and df.index.name != slicer:
        raise RuntimeError("bad slicer '%s' for df with index '%s'" % (slicer, df.index.name))

    table_refs = inject.get_injectable('traceable_table_refs', {})

    if df.empty:
        target_ids = None
    elif slicer in table_refs:
        # maps 'person_id' to 'persons', etc
        table_name = table_refs[slicer]
        target_ids = inject.get_injectable('trace_%s' % table_name, [])
    elif slicer == 'TAZ':
        target_ids = inject.get_injectable('trace_od', [])
    else:
        # BUGFIX: error message previously blamed slice_canonically(), but
        # this function is also called directly (trace_targets et al.)
        raise RuntimeError("get_trace_target: bad slicer '%s'" % (slicer, ))

    return target_ids, column
def slice_canonically(df, slicer, label, warn_if_empty=False):
    """
    Slice *df* down to the registered trace targets for *slicer*.

    Parameters
    ----------
    df: pandas.DataFrame
        dataframe to slice
    slicer: str
        name of column or index to use for slicing
    label: str
        tracer name - only used in the empty-slice warning
    warn_if_empty: boolean
        log a warning when the slice comes back empty

    Returns
    -------
    sliced subset of dataframe
    """
    target_ids, column = get_trace_target(df, slicer)

    if target_ids is not None:
        df = slice_ids(df, target_ids, column)

    if warn_if_empty and df.shape[0] == 0:
        logger.warn("slice_canonically: no rows in %s with %s == %s"
                    % (label, column or slicer, target_ids))

    return df
def trace_targets(df, slicer=None):
    """Return a boolean mask flagging trace-target rows of *df* (None if untraced)."""
    target_ids, column = get_trace_target(df, slicer)
    if target_ids is None:
        return None
    values = df.index if column is None else df[column]
    return values.isin(target_ids)
def has_trace_targets(df, slicer=None):
    """Return True when *df* contains at least one trace-target row."""
    target_ids, column = get_trace_target(df, slicer)
    if target_ids is None:
        return False
    values = df.index if column is None else df[column]
    return values.isin(target_ids).any()
def hh_id_for_chooser(id, choosers):
    """
    Map chooser id(s) to household id(s).

    Parameters
    ----------
    id - scalar id (or list of ids) from chooser index
    choosers - pandas dataframe whose index contains ids

    Returns
    -------
    scalar household_id, series of household_ids, or None when choosers
    carries no household id information at all
    """
    if choosers.index.name == 'household_id':
        # choosers are already keyed by household id
        return id
    if 'household_id' in choosers.columns:
        return choosers.loc[id]['household_id']
    return None
def dump_df(dump_switch, df, trace_label, fname):
    """When *dump_switch* is set, dump the entire *df* under a DUMP label."""
    if not dump_switch:
        return
    dump_label = extend_trace_label(trace_label, 'DUMP.%s' % fname)
    trace_df(df, dump_label, index_label=df.index.name, slicer='NONE', transpose=False)
def trace_df(df, label, slicer=None, columns=None,
             index_label=None, column_labels=None, transpose=True, warn_if_empty=False):
    """
    Slice *df* down to traced households/persons and write the result as CSV.

    Parameters
    ----------
    df: pandas.DataFrame
        traced dataframe
    label: str
        tracer name (also the output file name)
    slicer: Object
        slicer for subsetting
    columns: list
        columns to write
    index_label: str
        index name (defaults to slicer)
    column_labels: [str, str]
        labels for columns in csv
    transpose: boolean
        whether to transpose file for legibility
    warn_if_empty: boolean
        write warning if sliced df is empty

    Returns
    -------
    Nothing
    """
    sliced = slice_canonically(df, slicer, label, warn_if_empty)

    # nothing to write when no traced rows survive the slice
    if sliced.shape[0] == 0:
        return

    write_csv(sliced, file_name=label, index_label=(index_label or slicer),
              columns=columns, column_labels=column_labels, transpose=transpose)
def interaction_trace_rows(interaction_df, choosers, sample_size=None):
    """
    Trace model design for interaction_simulate

    Parameters
    ----------
    interaction_df: pandas.DataFrame
        traced model_design dataframe
    choosers: pandas.DataFrame
        interaction_simulate choosers
        (needed to filter the model_design dataframe by traced hh or person id)
    sample_size int or None
        int for constant sample size, or None if choosers have different numbers of alternatives

    Returns
    -------
    trace_rows : numpy.ndarray
        array of booleans to flag which rows in interaction_df to trace

    trace_ids : tuple (str, numpy.ndarray)
        column name and array of trace_ids mapping trace_rows to their target_id
        for use by trace_interaction_eval_results which needs to know target_id
        so it can create separate tables for each distinct target for readability
    """
    # slicer column name and id targets to use for chooser id added to model_design dataframe
    # currently we only ever slice by person_id, but that could change, so we check here...
    # NOTE(review): table_refs is fetched but never used below - confirm intent
    table_refs = inject.get_injectable('traceable_table_refs', {})

    if choosers.index.name == 'person_id' and inject.get_injectable('trace_persons', False):
        slicer_column_name = choosers.index.name
        targets = inject.get_injectable('trace_persons', [])
    elif 'household_id' in choosers.columns and inject.get_injectable('trace_hh_id', False):
        slicer_column_name = 'household_id'
        targets = inject.get_injectable('trace_hh_id', [])
    elif 'person_id' in choosers.columns and inject.get_injectable('trace_persons', False):
        slicer_column_name = 'person_id'
        targets = inject.get_injectable('trace_persons', [])
    else:
        # NOTE: Python 2 print statement (this module is py2-era)
        print choosers.columns
        raise RuntimeError("interaction_trace_rows don't know how to slice index '%s'"
                           % choosers.index.name)

    if sample_size is None:
        # if sample size not constant, we count on either
        # slicer column being in interaction_df
        # or index of interaction_df being same as choosers
        if slicer_column_name in interaction_df.columns:
            trace_rows = np.in1d(interaction_df[slicer_column_name], targets)
            trace_ids = interaction_df.loc[trace_rows, slicer_column_name].values
        else:
            assert interaction_df.index.name == choosers.index.name
            trace_rows = np.in1d(interaction_df.index, targets)
            trace_ids = interaction_df[trace_rows].index.values
    else:
        # flag the chooser rows first, then repeat per alternative below
        if slicer_column_name == choosers.index.name:
            trace_rows = np.in1d(choosers.index, targets)
            trace_ids = np.asanyarray(choosers[trace_rows].index)
        elif slicer_column_name == 'person_id':
            trace_rows = np.in1d(choosers['person_id'], targets)
            trace_ids = np.asanyarray(choosers[trace_rows].person_id)
        elif slicer_column_name == 'household_id':
            trace_rows = np.in1d(choosers['household_id'], targets)
            trace_ids = np.asanyarray(choosers[trace_rows].household_id)
        else:
            assert False

        # simply repeat if sample size is constant across choosers
        assert sample_size == len(interaction_df.index) / len(choosers.index)
        trace_rows = np.repeat(trace_rows, sample_size)
        trace_ids = np.repeat(trace_ids, sample_size)

    assert type(trace_rows) == np.ndarray
    assert type(trace_ids) == np.ndarray

    trace_ids = (slicer_column_name, trace_ids)

    return trace_rows, trace_ids
def trace_interaction_eval_results(trace_results, trace_ids, label):
    """
    Trace model design eval results for interaction_simulate

    Parameters
    ----------
    trace_results: pandas.DataFrame
        traced model_design dataframe
    trace_ids : tuple (str, numpy.ndarray)
        column name and array of trace_ids from interaction_trace_rows()
        used to filter the trace_results dataframe by traced hh or person id
    label: str
        tracer name

    Returns
    -------
    Nothing
    """
    assert type(trace_ids[1]) == np.ndarray

    slicer_column_name = trace_ids[0]

    # NOTE(review): this adds a column to the caller's frame in place
    trace_results[slicer_column_name] = trace_ids[1]

    targets = np.unique(trace_ids[1])

    if len(trace_results.index) == 0:
        return

    # write out the raw dataframe
    file_path = config.trace_file_path('%s.raw.csv' % label)
    trace_results.to_csv(file_path, mode="a", index=True, header=True)

    # if there are multiple targets, we want them in separate tables for readability
    for target in targets:

        df_target = trace_results[trace_results[slicer_column_name] == target]

        # we want the transposed columns in predictable order
        df_target.sort_index(inplace=True)

        # # remove the slicer (person_id or hh_id) column?
        # del df_target[slicer_column_name]

        target_label = '%s.%s.%s' % (label, slicer_column_name, target)
        trace_df(df_target,
                 label=target_label,
                 slicer="NONE",
                 transpose=True,
                 column_labels=['expression', None],
                 warn_if_empty=False)
def no_results(trace_label):
    """
    standard no-op to write tracing when a model produces no results
    """
    # lazy %-style args: same message, formatting deferred to the handler
    logger.info("Skipping %s: no_results", trace_label)
Accept an output_dir argument to tracing.delete_csv_files for 0.7 compatibility.
# ActivitySim
# See full license in LICENSE.txt.
import os
import logging
import logging.config
import sys
import time
from collections import OrderedDict
import yaml
import numpy as np
import pandas as pd
from activitysim.core import inject
import inject_defaults
import config
# Configurations
ASIM_LOGGER = 'activitysim'  # name of the top-level activitysim logger
CSV_FILE_TYPE = 'csv'  # file suffix used for trace output files
LOGGING_CONF_FILE_NAME = 'logging.yaml'  # default logging config filename looked up in configs_dir

# module-level logger for this tracing module
logger = logging.getLogger(__name__)
def log_file_path(file_name):
    """Return the full path for *file_name* in the log output directory."""
    # FIXME - for compatability with v0.7
    return config.log_file_path(file_name)
def check_for_variability():
    """Return the 'check_for_variability' injectable setting (default False)."""
    return inject.get_injectable('check_for_variability', False)
def extend_trace_label(trace_label, extension):
    """Append *extension* to a dotted trace label; falsy labels pass through unchanged."""
    if not trace_label:
        return trace_label
    return "%s.%s" % (trace_label, extension)
def print_elapsed_time(msg=None, t0=None, debug=False):
    """Return time.time(); when *msg* is given, log the time elapsed since *t0*.

    debug=True logs at DEBUG level instead of INFO.
    """
    now = time.time()
    if not msg:
        return now
    seconds = now - (t0 or now)
    line = "Time to execute %s : %s seconds (%s minutes)" % \
        (msg, round(seconds, 3), round(seconds / 60.0, 1))
    if debug:
        logger.debug(line)
    else:
        logger.info(line)
    return now
def delete_output_files(file_type, output_dir=None):
    """
    Delete files in output directory of specified type

    Parameters
    ----------
    file_type: str
        file suffix to match (e.g. 'csv')
    output_dir: str, optional
        directory to clean; defaults to the 'output_dir' injectable

    Returns
    -------
    Nothing
    """
    target_dir = output_dir if output_dir is not None else inject.get_injectable('output_dir')

    logger.debug("Deleting %s files in output_dir %s" % (file_type, target_dir))

    for entry in os.listdir(target_dir):
        if not entry.endswith(file_type):
            continue
        path = os.path.join(target_dir, entry)
        try:
            if os.path.isfile(path):
                os.unlink(path)
        except Exception as e:
            # best effort: report and continue with remaining files
            print(e)
def delete_csv_files(output_dir=None):
    """
    Delete CSV files in output_dir (defaults to the 'output_dir' injectable).

    Returns
    -------
    Nothing
    """
    delete_output_files(CSV_FILE_TYPE, output_dir)
def config_logger(custom_config_file=None, basic=False):
    """
    Configure logger

    if log_config_file is not supplied then look for conf file in configs_dir

    if not found use basicConfig

    Parameters
    ----------
    custom_config_file: str
        custom config filename
    basic: boolean
        basic setup

    Returns
    -------
    Nothing
    """
    # decide which yaml config file (if any) to load
    log_config_file = None

    if custom_config_file and os.path.isfile(custom_config_file):
        log_config_file = custom_config_file
    elif not basic:
        # look for conf file in configs_dir
        configs_dir = inject.get_injectable('configs_dir')
        default_config_file = os.path.join(configs_dir, LOGGING_CONF_FILE_NAME)
        if os.path.isfile(default_config_file):
            log_config_file = default_config_file

    if log_config_file:
        with open(log_config_file) as f:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # in PyYAML >= 5.1 and unsafe on untrusted files - confirm the
            # logging config is trusted or switch to yaml.safe_load
            config_dict = yaml.load(f)
            config_dict = config_dict['logging']
            config_dict.setdefault('version', 1)
            logging.config.dictConfig(config_dict)
    else:
        logging.basicConfig(level=logging.INFO, stream=sys.stdout)

    logger = logging.getLogger(ASIM_LOGGER)

    if custom_config_file and not os.path.isfile(custom_config_file):
        logger.error("#\n#\n#\nconfig_logger could not find conf file '%s'" % custom_config_file)

    if log_config_file:
        logger.info("Read logging configuration from: %s" % log_config_file)
    else:
        # NOTE: Python 2 print statement (this module is py2-era)
        print "Configured logging using basicConfig"
        logger.info("Configured logging using basicConfig")
def print_summary(label, df, describe=False, value_counts=False):
    """
    Print summary

    Parameters
    ----------
    label: str
        tracer name
    df: pandas.DataFrame
        traced dataframe
    describe: boolean
        print describe?
    value_counts: boolean
        print value counts?

    Returns
    -------
    Nothing
    """
    # warn on a pointless call, but fall through (nothing will be logged below)
    if not value_counts and not describe:
        logger.error("print_summary neither value_counts nor describe")

    if value_counts:
        logger.info("%s value counts:\n%s" % (label, df.value_counts()))

    if describe:
        logger.info("%s summary:\n%s" % (label, df.describe()))
def register_traceable_table(table_name, df):
    """
    Register traceable table

    Records the table's index name in traceable_table_refs and injects
    the list of traced row ids for the table as 'trace_<table_name>'.
    Households are sliced directly by trace_hh_id; any other table is
    sliced via a ref column of a previously registered table.

    Parameters
    ----------
    table_name: str
        name of the table to register (must appear in traceable_tables)
    df: pandas.DataFrame
        traced dataframe

    Returns
    -------
    Nothing
    """

    trace_hh_id = inject.get_injectable("trace_hh_id", None)
    trace_injectable = 'trace_%s' % table_name
    new_traced_ids = []

    # tracing is disabled entirely if no household is being traced
    if trace_hh_id is None:
        return

    traceable_tables = inject.get_injectable('traceable_tables', [])
    if table_name not in traceable_tables:
        logger.error("table '%s' not in traceable_tables" % table_name)
        return

    idx_name = df.index.name
    if idx_name is None:
        logger.error("Can't register table '%s' without index name" % table_name)
        return

    traceable_table_refs = inject.get_injectable('traceable_table_refs', None)

    # traceable_table_refs is OrderedDict so we can find first registered table to slice by ref_con
    if traceable_table_refs is None:
        traceable_table_refs = OrderedDict()

    if idx_name in traceable_table_refs and traceable_table_refs[idx_name] != table_name:
        logger.error("table '%s' index name '%s' already registered for table '%s'" %
                     (table_name, idx_name, traceable_table_refs[idx_name]))
        return

    if table_name == 'households':
        if trace_hh_id not in df.index:
            logger.warn("trace_hh_id %s not in dataframe" % trace_hh_id)
            new_traced_ids = []
        else:
            logger.info("tracing household id %s in %s households" % (trace_hh_id, len(df.index)))
            new_traced_ids = [trace_hh_id]
    else:
        # find first already registered ref_col we can use to slice this table
        ref_con = next((c for c in traceable_table_refs if c in df.columns), None)
        if ref_con is None:
            logger.error("can't find a registered table to slice table '%s' index name '%s'"
                         " in traceable_table_refs: %s" %
                         (table_name, idx_name, traceable_table_refs))
            return

        # get traceable_ids for ref_con table
        ref_con_trace_injectable = 'trace_%s' % traceable_table_refs[ref_con]
        ref_con_traced_ids = inject.get_injectable(ref_con_trace_injectable, [])

        # inject list of ids in table we are tracing
        # this allows us to slice by id without requiring presence of a household id column
        traced_df = df[df[ref_con].isin(ref_con_traced_ids)]
        new_traced_ids = traced_df.index.tolist()
        if len(new_traced_ids) == 0:
            logger.warn("register %s: no rows with %s in %s." %
                        (table_name, ref_con, ref_con_traced_ids))

    # update traceable_table_refs with this traceable_table's ref_con
    if idx_name not in traceable_table_refs:
        traceable_table_refs[idx_name] = table_name
        # (was a python 2 print statement - use the module logger instead)
        logger.debug("adding table %s.%s to traceable_table_refs" % (table_name, idx_name))
        inject.add_injectable('traceable_table_refs', traceable_table_refs)

    # update the list of trace_ids for this table
    prior_traced_ids = inject.get_injectable(trace_injectable, [])

    if prior_traced_ids:
        logger.info("register %s: adding %s ids to %s existing trace ids" %
                    (table_name, len(new_traced_ids), len(prior_traced_ids)))

    traced_ids = prior_traced_ids + new_traced_ids

    logger.info("register %s: tracing ids %s in %s %s" %
                (table_name, traced_ids, len(df.index), table_name))

    if new_traced_ids:
        inject.add_injectable(trace_injectable, traced_ids)
def write_df_csv(df, file_path, index_label=None, columns=None, column_labels=None, transpose=True):
    """
    Write a dataframe to file_path as csv, optionally transposed for legibility.

    If the file already exists the data is appended, and the transposed
    header row is written as a '#' comment so sections are distinguishable.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe to write
    file_path : str
        destination csv file path
    index_label : str
        name to give the (transposed) index column if the df index is unnamed
    columns : list
        subset of columns to write (all columns if None)
    column_labels : [str, str]
        labels for the label and value columns of the transposed csv
        (None entries default to 'label' and 'value')
    transpose : bool
        transpose the dataframe so rows become columns

    Returns
    -------
    Nothing
    """
    mode = 'a' if os.path.isfile(file_path) else 'w'

    if columns:
        df = df[columns]

    if not transpose:
        df.to_csv(file_path, mode="a", index=df.index.name is not None, header=True)
        return

    df_t = df.transpose()
    if df.index.name is not None:
        df_t.index.name = df.index.name
    elif index_label:
        df_t.index.name = index_label

    # work on a local copy so we don't mutate the caller's column_labels list
    labels = list(column_labels) if column_labels is not None else [None, None]
    if labels[0] is None:
        labels[0] = 'label'
    if labels[1] is None:
        labels[1] = 'value'

    with open(file_path, mode=mode) as f:
        if len(df_t.columns) == len(labels) - 1:
            column_label_row = ','.join(labels)
        else:
            # more than one value column: number them value_1, value_2, ...
            column_label_row = \
                labels[0] + ',' \
                + ','.join([labels[1] + '_' + str(i + 1) for i in range(len(df_t.columns))])

        if mode == 'a':
            # comment the header row of appended sections
            column_label_row = '# ' + column_label_row
        f.write(column_label_row + '\n')
    df_t.to_csv(file_path, mode='a', index=True, header=True)
def write_series_csv(series, file_path, index_label=None, columns=None, column_labels=None):
    """
    Append a pandas Series to file_path as a two-column csv.

    columns may be a single str (renames the series) or an
    [index_name, series_name] pair; index_label names an unnamed index.
    column_labels is accepted for signature symmetry with write_df_csv
    but unused here.
    """
    if isinstance(columns, str):
        series = series.rename(columns)
    elif isinstance(columns, list):
        index_name, series_name = columns[0], columns[1]
        if index_name:
            series.index.name = index_name
        series = series.rename(series_name)

    if index_label and series.index.name is None:
        series.index.name = index_label

    series.to_csv(file_path, mode='a', index=True, header=True)
def write_csv(df, file_name, index_label=None, columns=None, column_labels=None, transpose=True):
    """
    Write a DataFrame, Series, or dict to a csv trace file.

    Parameters
    ----------
    df: pandas.DataFrame, pandas.Series or dict
        traced data (a dict is converted to a Series)
    file_name: str
        output file name ('.csv' appended if missing)
    index_label: str
        index name
    columns: list
        columns to write
    column_labels: [str, str]
        labels for columns in csv
    transpose: bool
        whether to transpose dataframe (ignored for series)

    Returns
    -------
    Nothing
    """
    # strip any non-ascii characters from the file name
    # (decode back to text so the str operations below also work on python 3,
    # where encode() returns bytes)
    file_name = file_name.encode('ascii', 'ignore').decode('ascii')
    assert len(file_name) > 0

    if not file_name.endswith(".%s" % CSV_FILE_TYPE):
        file_name = '%s.%s' % (file_name, CSV_FILE_TYPE)

    file_path = config.trace_file_path(file_name)

    if os.path.isfile(file_path):
        logger.error("write_csv file exists %s %s" % (type(df).__name__, file_name))

    if isinstance(df, pd.DataFrame):
        write_df_csv(df, file_path, index_label, columns, column_labels, transpose=transpose)
    elif isinstance(df, pd.Series):
        write_series_csv(df, file_path, index_label, columns, column_labels)
    elif isinstance(df, dict):
        df = pd.Series(data=df)
        write_series_csv(df, file_path, index_label, columns, column_labels)
    else:
        logger.error("write_csv object for file_name '%s' of unexpected type: %s" %
                     (file_name, type(df)))
def slice_ids(df, ids, column=None):
    """
    slice a dataframe to select only records with the specified ids

    Parameters
    ----------
    df: pandas.DataFrame
        traced dataframe
    ids: int or list of ints
        slice ids
    column: str
        column to slice (slice using index if None)

    Returns
    -------
    df: pandas.DataFrame
        sliced dataframe
    """
    if np.isscalar(ids):
        ids = [ids]

    try:
        # slice on the index unless an explicit slicer column was given
        keys = df.index if column is None else df[column]
    except KeyError:
        # this happens if specified slicer column is not in df
        raise RuntimeError("slice_ids slicer column '%s' not in dataframe" % column)

    return df[keys.isin(ids)]
def get_trace_target(df, slicer):
    """
    get target ids and column or index to identify target trace rows in df

    Parameters
    ----------
    df: pandas.DataFrame
        dataframe to slice
    slicer: str
        name of column or index to use for slicing ('NONE' means do not slice)

    Returns
    -------
    (target, column) tuple

    target : int or list of ints
        id or ids that identify tracer target rows (None if no slicing)
    column : str
        name of column to search for targets or None to search index
    """
    # special do-not-slice code for dumping entire df
    if slicer == 'NONE':
        return None, None

    if slicer is None:
        slicer = df.index.name

    column = None  # None means slice on the index
    if isinstance(df, pd.DataFrame):
        # always slice by household id if we can
        if 'household_id' in df.columns:
            slicer = 'household_id'
        if slicer in df.columns:
            column = slicer

    if column is None and df.index.name != slicer:
        raise RuntimeError("bad slicer '%s' for df with index '%s'" % (slicer, df.index.name))

    table_refs = inject.get_injectable('traceable_table_refs', {})

    if df.empty:
        return None, column
    if slicer in table_refs:
        # maps 'person_id' to 'persons', etc
        return inject.get_injectable('trace_%s' % table_refs[slicer], []), column
    if slicer == 'TAZ':
        return inject.get_injectable('trace_od', []), column

    raise RuntimeError("slice_canonically: bad slicer '%s'" % (slicer, ))
def slice_canonically(df, slicer, label, warn_if_empty=False):
    """
    Slice dataframe by traced household or person id dataframe and write to CSV

    Parameters
    ----------
    df: pandas.DataFrame
        dataframe to slice
    slicer: str
        name of column or index to use for slicing
    label: str
        tracer name - only used to report bad slicer
    warn_if_empty: boolean
        log a warning if the sliced result has no rows

    Returns
    -------
    sliced subset of dataframe
    """
    target_ids, column = get_trace_target(df, slicer)

    # no targets means no slicing - pass the dataframe through unchanged
    sliced = df if target_ids is None else slice_ids(df, target_ids, column)

    if warn_if_empty and sliced.shape[0] == 0:
        column_name = column or slicer
        logger.warn("slice_canonically: no rows in %s with %s == %s"
                    % (label, column_name, target_ids))

    return sliced
def trace_targets(df, slicer=None):
    """
    Return a boolean array flagging the trace-target rows of df
    (or None if there are no trace targets).
    """
    ids, col = get_trace_target(df, slicer)

    if ids is None:
        return None
    if col is None:
        return df.index.isin(ids)
    return df[col].isin(ids)
def has_trace_targets(df, slicer=None):
    """
    Return True if df contains any trace-target rows.
    """
    ids, col = get_trace_target(df, slicer)

    if ids is None:
        return False

    values = df.index if col is None else df[col]
    return values.isin(ids).any()
def hh_id_for_chooser(id, choosers):
    """
    Map chooser id(s) to household id(s).

    Parameters
    ----------
    id - scalar id (or list of ids) from chooser index
    choosers - pandas dataframe whose index contains ids

    Returns
    -------
    scalar household_id or series of household_ids
    (None if choosers carry no household information)
    """
    if choosers.index.name == 'household_id':
        # choosers are households, so the chooser id is already a household id
        return id
    if 'household_id' in choosers.columns:
        return choosers.loc[id]['household_id']
    return None
def dump_df(dump_switch, df, trace_label, fname):
    """
    Write the entire dataframe (unsliced, untransposed) when dump_switch is set.
    """
    if not dump_switch:
        return

    dump_label = extend_trace_label(trace_label, 'DUMP.%s' % fname)
    trace_df(df, dump_label, index_label=df.index.name, slicer='NONE', transpose=False)
def trace_df(df, label, slicer=None, columns=None,
             index_label=None, column_labels=None, transpose=True, warn_if_empty=False):
    """
    Slice dataframe by traced household or person id dataframe and write to CSV

    Parameters
    ----------
    df: pandas.DataFrame
        traced dataframe
    label: str
        tracer name
    slicer: Object
        slicer for subsetting
    columns: list
        columns to write
    index_label: str
        index name
    column_labels: [str, str]
        labels for columns in csv
    transpose: boolean
        whether to transpose file for legibility
    warn_if_empty: boolean
        write warning if sliced df is empty

    Returns
    -------
    Nothing
    """
    sliced = slice_canonically(df, slicer, label, warn_if_empty)

    # nothing to write if the slice is empty
    if len(sliced.index) > 0:
        write_csv(sliced, file_name=label, index_label=(index_label or slicer),
                  columns=columns, column_labels=column_labels, transpose=transpose)
def interaction_trace_rows(interaction_df, choosers, sample_size=None):
    """
    Trace model design for interaction_simulate

    Parameters
    ----------
    interaction_df: pandas.DataFrame
        traced model_design dataframe
    choosers: pandas.DataFrame
        interaction_simulate choosers
        (needed to filter the model_design dataframe by traced hh or person id)
    sample_size int or None
        int for constant sample size, or None if choosers have different numbers of alternatives

    Returns
    -------
    trace_rows : numpy.ndarray
        array of booleans to flag which rows in interaction_df to trace
    trace_ids : tuple (str, numpy.ndarray)
        column name and array of trace_ids mapping trace_rows to their target_id
        for use by trace_interaction_eval_results which needs to know target_id
        so it can create separate tables for each distinct target for readability
    """

    # slicer column name and id targets to use for chooser id added to model_design dataframe
    # currently we only ever slice by person_id, but that could change, so we check here...
    if choosers.index.name == 'person_id' and inject.get_injectable('trace_persons', False):
        slicer_column_name = choosers.index.name
        targets = inject.get_injectable('trace_persons', [])
    elif 'household_id' in choosers.columns and inject.get_injectable('trace_hh_id', False):
        slicer_column_name = 'household_id'
        targets = inject.get_injectable('trace_hh_id', [])
    elif 'person_id' in choosers.columns and inject.get_injectable('trace_persons', False):
        slicer_column_name = 'person_id'
        targets = inject.get_injectable('trace_persons', [])
    else:
        # log the columns to aid debugging before bailing out
        # (was a python 2 print statement - a syntax error under python 3)
        logger.error("interaction_trace_rows choosers columns: %s" % (choosers.columns,))
        raise RuntimeError("interaction_trace_rows don't know how to slice index '%s'"
                           % choosers.index.name)

    if sample_size is None:
        # if sample size not constant, we count on either
        # slicer column being in interaction_df
        # or index of interaction_df being same as choosers
        if slicer_column_name in interaction_df.columns:
            trace_rows = np.in1d(interaction_df[slicer_column_name], targets)
            trace_ids = interaction_df.loc[trace_rows, slicer_column_name].values
        else:
            assert interaction_df.index.name == choosers.index.name
            trace_rows = np.in1d(interaction_df.index, targets)
            trace_ids = interaction_df[trace_rows].index.values
    else:
        if slicer_column_name == choosers.index.name:
            trace_rows = np.in1d(choosers.index, targets)
            trace_ids = np.asanyarray(choosers[trace_rows].index)
        elif slicer_column_name == 'person_id':
            trace_rows = np.in1d(choosers['person_id'], targets)
            trace_ids = np.asanyarray(choosers[trace_rows].person_id)
        elif slicer_column_name == 'household_id':
            trace_rows = np.in1d(choosers['household_id'], targets)
            trace_ids = np.asanyarray(choosers[trace_rows].household_id)
        else:
            assert False

        # simply repeat if sample size is constant across choosers
        assert sample_size == len(interaction_df.index) / len(choosers.index)
        trace_rows = np.repeat(trace_rows, sample_size)
        trace_ids = np.repeat(trace_ids, sample_size)

    assert type(trace_rows) == np.ndarray
    assert type(trace_ids) == np.ndarray

    trace_ids = (slicer_column_name, trace_ids)

    return trace_rows, trace_ids
def trace_interaction_eval_results(trace_results, trace_ids, label):
    """
    Trace model design eval results for interaction_simulate

    Parameters
    ----------
    trace_results: pandas.DataFrame
        traced model_design dataframe (the slicer column is added in place)
    trace_ids : tuple (str, numpy.ndarray)
        column name and array of trace_ids from interaction_trace_rows()
        used to filter the trace_results dataframe by traced hh or person id
    label: str
        tracer name

    Returns
    -------
    Nothing
    """
    assert type(trace_ids[1]) == np.ndarray

    slicer_column_name = trace_ids[0]

    # NOTE: annotates the caller's dataframe in place
    trace_results[slicer_column_name] = trace_ids[1]

    targets = np.unique(trace_ids[1])

    if len(trace_results.index) == 0:
        return

    # write out the raw dataframe
    file_path = config.trace_file_path('%s.raw.csv' % label)
    trace_results.to_csv(file_path, mode="a", index=True, header=True)

    # if there are multiple targets, we want them in separate tables for readability
    for target in targets:

        df_target = trace_results[trace_results[slicer_column_name] == target]

        # we want the transposed columns in predictable order;
        # sort a copy - sort_index(inplace=True) on a boolean-mask slice
        # triggers pandas SettingWithCopyWarning and may not stick
        df_target = df_target.sort_index()

        target_label = '%s.%s.%s' % (label, slicer_column_name, target)
        trace_df(df_target,
                 label=target_label,
                 slicer="NONE",
                 transpose=True,
                 column_labels=['expression', None],
                 warn_if_empty=False)
def no_results(trace_label):
    """
    standard no-op to write tracing when a model produces no results
    """
    message = "Skipping %s: no_results" % trace_label
    logger.info(message)
|
from datetime import datetime
import requests
import json
from auth import TTRAuth
from exceptions import raise_on_error
class TTRClient(object):
    """
    This is the actual client interface to Tiny Tiny RSS.

    This object retains a http session with the needed session cookies. From
    the client you can fetch categories, feeds, headlines and articles, all
    represented by Python objects. You can also update modify articles and
    feeds on the server.
    """
    def __init__(self, url, user=None, password=None, auto_login=False):
        """
        Instantiate a new client.

        :param url: The full URL to the Tiny Tiny RSS server, *without* the
            /api/ suffix.
        :param user: The username to use when logging in.
        :param password: The password for the user.
        :param auto_login: *Optional* Automatically login upon instantiation,
            and re-login when a session cookie expires.
        """
        self.url = url + '/api/'
        self.user = user
        self.password = password
        self._session = requests.Session()
        if auto_login:
            auth = TTRAuth(user, password)
            self._session.auth = auth

    def login(self):
        """
        Manually log in (i.e. request a session cookie)

        This method must be used if the client was not instantiated with
        ``auto_login=True``
        """
        self._get_json({
            'op': 'login',
            'user': self.user,
            'password': self.password
        })

    def logout(self):
        """
        Log out.

        After logging out, ``login()`` must be used to gain a valid session
        again. Please note that logging out invalidates any automatic
        re-login even after logging back in.
        """
        self._get_json({'op': 'logout'})
        self._session.auth = None

    def logged_in(self):
        """Return the login status reported by the server."""
        r = self._get_json({'op': 'isLoggedIn'})
        return r['content']['status']

    def _get_json(self, post_data):
        """POST ``post_data`` to the API and return the decoded response.

        ``raise_on_error`` raises if the server reports an API error.
        """
        r = self._session.post(self.url, data=json.dumps(post_data))
        raise_on_error(r)
        return json.loads(r.content)

    def get_unread_count(self):
        """Get total number of unread articles"""
        r = self._get_json({'op': 'getUnread'})
        return int(r['content']['unread'])

    def get_feed_count(self):
        """Get total number of subscribed feeds."""
        r = self._get_json({'op': 'getCounters'})
        for c in r['content']:
            if c['id'] == u'subscribed-feeds':
                return int(c['counter'])
        return None

    def get_categories(self, unread_only=False, enable_nested=False,
                       include_empty=False):
        """
        Get a list of all available categories

        :param unread_only: *Optional* Only return categories containing
            unread articles. Defaults to ``False``.
        :param enable_nested: *Optional* When enabled, traverse through
            sub-categories and return only the **topmost** categories in a
            flat list. Defaults to ``False``.
        :param include_empty: *Optional* Include categories not containing
            any feeds. Defaults to ``False``. *Requires server version 1.7.6*
        """
        r = self._get_json({
            'op': 'getCategories',
            'unread_only': unread_only,
            'enable_nested': enable_nested,
            'include_empty': include_empty
        })
        return [Category(cat, self) for cat in r['content']]

    def get_feeds(
            self,
            cat_id=-1,
            unread_only=False,
            limit=0,
            offset=0,
            include_nested=False):
        """
        Get a list of feeds in a category.

        :param cat_id: Category id. This is available as the ``id`` property
            of a Category object.
        :param unread_only: *Optional* Include only feeds containing unread
            articles. Default is ``False``.
        :param limit: *Optional* Limit number of included feeds to ``limit``.
            Default is 0 (unlimited).
        :param offset: *Optional* Skip this number of feeds. Useful for
            pagination. Default is 0.
        :param include_nested: *Optional* Include child categories. Default
            is ``False``.
        """
        # pass all documented parameters to the API
        # (they were previously accepted but silently ignored)
        r = self._get_json({
            'op': 'getFeeds',
            'cat_id': cat_id,
            'unread_only': unread_only,
            'limit': limit,
            'offset': offset,
            'include_nested': include_nested
        })
        return [Feed(feed, self) for feed in r['content']]

    def get_headlines(self, feed_id=0):
        """
        Get a list of headlines from a specified feed.

        :param feed_id: Feed id. This is available as the ``id`` property of
            a Feed object.
        """
        r = self._get_json({'op': 'getHeadlines', 'feed_id': feed_id})
        return [Headline(hl, self) for hl in r['content']]

    def get_articles(self, article_id):
        """
        Get a list of articles from article ids.

        :param article_id: A comma separated string of article ids to fetch.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article_id})
        return [Article(article, self) for article in r['content']]

    def refresh_article(self, article):
        """
        Update all properties of an article object with fresh information from
        the server.

        Please note that this method alters the original object and does not
        return a new one.

        :param article: The article to refresh.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article.id})
        article.__init__(r['content'][0], client=self)

    def share_to_published(self, title, url, content):
        """
        Share an article to the *published* feed.

        :param title: Article title.
        :param url: Article url.
        :param content: Article content.
        """
        self._get_json({
            'op': 'shareToPublished',
            'title': title,
            'url': url,
            'content': content
        })

    def mark_unread(self, article_id):
        """
        Mark an article as unread.

        :param article_id: ID of article to mark as unread.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 1,
            'field': 2
        })

    def mark_read(self, article_id):
        """
        Mark an article as read.

        :param article_id: ID of article to mark as read.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 0,
            'field': 2
        })

    def toggle_unread(self, article_id):
        """
        Toggle the unread status of an article.

        :param article_id: ID of the article to toggle.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 2,
            'field': 2
        })
class RemoteObject(object):
    """Base class mapping an API response dict onto object attributes."""
    def __init__(self, attr, client=None):
        # keep a handle on the owning client for follow-up requests
        self._client = client
        for name, val in attr.items():
            # ids arrive as strings from the API; store them as ints
            setattr(self, name, int(val) if name == 'id' else val)
class Category(RemoteObject):
    """A feed category, as returned by getCategories."""
    def feeds(self, **kwargs):
        """Fetch the feeds contained in this category."""
        client = self._client
        return client.get_feeds(cat_id=self.id, **kwargs)
class Feed(RemoteObject):
    """A single feed subscription."""
    def __init__(self, attr, client):
        super(Feed, self).__init__(attr, client)
        # the server sends last_updated as a unix timestamp when present
        if hasattr(self, 'last_updated'):
            self.last_updated = datetime.fromtimestamp(self.last_updated)

    def headlines(self):
        """Fetch the headlines of this feed."""
        return self._client.get_headlines(feed_id=self.id)
class Headline(RemoteObject):
    """A headline entry that can resolve its full article."""
    def full_article(self):
        """Fetch and return the complete Article for this headline."""
        articles = self._client.get_articles(self.id)
        return articles[0]
class Article(RemoteObject):
    """A full article."""
    def publish(self):
        """Share this article to the published feed."""
        self._client.share_to_published(self.title, self.link, self.content)

    def refresh_status(self):
        """Re-fetch this article's properties from the server."""
        self._client.refresh_article(self)

    def toggle_unread(self):
        """Toggle this article's unread flag on the server."""
        self._client.toggle_unread(self.id)
Revision note: add `unread_only`, `enable_nested` and `include_empty` parameters to get_categories()
from datetime import datetime
import requests
import json
from auth import TTRAuth
from exceptions import raise_on_error
class TTRClient(object):
    """
    This is the actual client interface to Tiny Tiny RSS.

    This object retains a http session with the needed session cookies. From
    the client you can fetch categories, feeds, headlines and articles, all
    represented by Python objects. You can also update modify articles and
    feeds on the server.
    """
    def __init__(self, url, user=None, password=None, auto_login=False):
        """
        Instantiate a new client.

        :param url: The full URL to the Tiny Tiny RSS server, *without* the
            /api/ suffix.
        :param user: The username to use when logging in.
        :param password: The password for the user.
        :param auto_login: *Optional* Automatically login upon instantiation,
            and re-login when a session cookie expires.
        """
        self.url = url + '/api/'
        self.user = user
        self.password = password
        self._session = requests.Session()
        if auto_login:
            auth = TTRAuth(user, password)
            self._session.auth = auth

    def login(self):
        """
        Manually log in (i.e. request a session cookie)

        This method must be used if the client was not instantiated with
        ``auto_login=True``
        """
        self._get_json({
            'op': 'login',
            'user': self.user,
            'password': self.password
        })

    def logout(self):
        """
        Log out.

        After logging out, ``login()`` must be used to gain a valid session
        again. Please note that logging out invalidates any automatic
        re-login even after logging back in.
        """
        self._get_json({'op': 'logout'})
        self._session.auth = None

    def logged_in(self):
        """Return the login status reported by the server."""
        r = self._get_json({'op': 'isLoggedIn'})
        return r['content']['status']

    def _get_json(self, post_data):
        """POST ``post_data`` to the API and return the decoded response.

        ``raise_on_error`` raises if the server reports an API error.
        """
        r = self._session.post(self.url, data=json.dumps(post_data))
        raise_on_error(r)
        return json.loads(r.content)

    def get_unread_count(self):
        """Get total number of unread articles"""
        r = self._get_json({'op': 'getUnread'})
        return int(r['content']['unread'])

    def get_feed_count(self):
        """Get total number of subscribed feeds."""
        r = self._get_json({'op': 'getCounters'})
        for c in r['content']:
            if c['id'] == u'subscribed-feeds':
                return int(c['counter'])
        return None

    def get_categories(
            self,
            unread_only=False,
            enable_nested=False,
            include_empty=False
    ):
        """
        Get a list of all available categories

        :param unread_only: Only return categories containing unread articles.
            Defaults to ``False``.
        :param enable_nested: When enabled, traverse through sub-categories
            and return only the **topmost** categories in a flat list.
            Defaults to ``False``.
        :param include_empty: Include categories not containing any feeds.
            Defaults to ``False``. *Requires server version 1.7.6*
        """
        r = self._get_json({
            'op': 'getCategories',
            'unread_only': unread_only,
            'enable_nested': enable_nested,
            'include_empty': include_empty
        })
        return [Category(cat, self) for cat in r['content']]

    def get_feeds(
            self,
            cat_id=-1,
            unread_only=False,
            limit=0,
            offset=0,
            include_nested=False):
        """
        Get a list of feeds in a category.

        :param cat_id: Category id. This is available as the ``id`` property
            of a Category object.
        :param unread_only: *Optional* Include only feeds containing unread
            articles. Default is ``False``.
        :param limit: *Optional* Limit number of included feeds to ``limit``.
            Default is 0 (unlimited).
        :param offset: *Optional* Skip this number of feeds. Useful for
            pagination. Default is 0.
        :param include_nested: *Optional* Include child categories. Default
            is ``False``.
        """
        # pass all documented parameters to the API
        # (they were previously accepted but silently ignored)
        r = self._get_json({
            'op': 'getFeeds',
            'cat_id': cat_id,
            'unread_only': unread_only,
            'limit': limit,
            'offset': offset,
            'include_nested': include_nested
        })
        return [Feed(feed, self) for feed in r['content']]

    def get_headlines(self, feed_id=0):
        """
        Get a list of headlines from a specified feed.

        :param feed_id: Feed id. This is available as the ``id`` property of
            a Feed object.
        """
        r = self._get_json({'op': 'getHeadlines', 'feed_id': feed_id})
        return [Headline(hl, self) for hl in r['content']]

    def get_articles(self, article_id):
        """
        Get a list of articles from article ids.

        :param article_id: A comma separated string of article ids to fetch.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article_id})
        return [Article(article, self) for article in r['content']]

    def refresh_article(self, article):
        """
        Update all properties of an article object with fresh information from
        the server.

        Please note that this method alters the original object and does not
        return a new one.

        :param article: The article to refresh.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article.id})
        article.__init__(r['content'][0], client=self)

    def share_to_published(self, title, url, content):
        """
        Share an article to the *published* feed.

        :param title: Article title.
        :param url: Article url.
        :param content: Article content.
        """
        self._get_json({
            'op': 'shareToPublished',
            'title': title,
            'url': url,
            'content': content
        })

    def mark_unread(self, article_id):
        """
        Mark an article as unread.

        :param article_id: ID of article to mark as unread.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 1,
            'field': 2
        })

    def mark_read(self, article_id):
        """
        Mark an article as read.

        :param article_id: ID of article to mark as read.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 0,
            'field': 2
        })

    def toggle_unread(self, article_id):
        """
        Toggle the unread status of an article.

        :param article_id: ID of the article to toggle.
        """
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 2,
            'field': 2
        })
class RemoteObject(object):
    """Base class turning an API response dict into object attributes."""
    def __init__(self, attr, client=None):
        # the owning client, used by subclasses for follow-up requests
        self._client = client
        for key in attr:
            value = attr[key]
            if key == 'id':
                # the API sends ids as strings; normalize to int
                value = int(value)
            setattr(self, key, value)
class Category(RemoteObject):
    """A feed category, as returned by getCategories."""
    def feeds(self, **kwargs):
        """Fetch the feeds contained in this category."""
        return self._client.get_feeds(cat_id=self.id, **kwargs)
class Feed(RemoteObject):
    """A single feed subscription."""
    def __init__(self, attr, client):
        super(Feed, self).__init__(attr, client)
        # server sends last_updated as a unix timestamp when present;
        # feeds without that attribute are left untouched
        try:
            stamp = self.last_updated
        except AttributeError:
            pass
        else:
            self.last_updated = datetime.fromtimestamp(stamp)

    def headlines(self):
        """Fetch the headlines of this feed."""
        return self._client.get_headlines(feed_id=self.id)
class Headline(RemoteObject):
    """A headline entry that can resolve its full article."""
    def full_article(self):
        """Fetch and return the complete Article for this headline."""
        return self._client.get_articles(self.id)[0]
class Article(RemoteObject):
    """A full article."""
    def publish(self):
        """Share this article to the published feed."""
        client = self._client
        client.share_to_published(self.title, self.link, self.content)

    def refresh_status(self):
        """Re-fetch this article's properties from the server."""
        self._client.refresh_article(self)

    def toggle_unread(self):
        """Toggle this article's unread flag on the server."""
        self._client.toggle_unread(self.id)
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
import random
import sys
import traceback
import numpy as np
import time
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from htmresearch.support.expsuite import PyExperimentSuite
from htmresearch.frameworks.pytorch.image_transforms import RandomNoise
from htmresearch.frameworks.pytorch.sparse_net import SparseNet
from htmresearch.frameworks.pytorch.duty_cycle_metrics import plotDutyCycles
from htmresearch.frameworks.pytorch.dataset_utils import createValidationDataSampler
from htmresearch.frameworks.pytorch.model_utils import (
evaluateModel, trainModel)
class MNISTSparseExperiment(PyExperimentSuite):
"""
Allows running multiple sparse MNIST experiments in parallel
"""
def parse_cfg(self):
super(MNISTSparseExperiment, self).parse_cfg()
# Change the current working directory to be relative to 'experiments.cfg'
projectDir = os.path.dirname(self.options.config)
projectDir = os.path.abspath(projectDir)
os.chdir(projectDir)
  def reset(self, params, repetition):
    """
    Called once at the beginning of each experiment.

    Seeds all RNGs from params["seed"] + repetition, builds the MNIST
    train/validation/test data loaders, constructs the SparseNet model
    (optionally with a CNN front end), and creates the optimizer and
    learning rate scheduler.
    """
    self.startTime = time.time()
    print(params)
    # distinct, reproducible seed per repetition
    seed = params["seed"] + repetition
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # Get our directories correct
    self.dataDir = params["datadir"]
    self.resultsDir = os.path.join(params["path"], params["name"], "plots")

    if not os.path.exists(self.resultsDir):
      os.makedirs(self.resultsDir)

    self.use_cuda = not params["no_cuda"] and torch.cuda.is_available()
    self.device = torch.device("cuda" if self.use_cuda else "cpu")

    # standard MNIST mean/std normalization constants
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.1307,), (0.3081,))])

    train_dataset = datasets.MNIST(self.dataDir, train=True, download=True,
                                   transform=transform)

    # Create training and validation sampler from MNIST dataset by training on
    # random X% of the training set and validating on the remaining (1-X)%,
    # where X can be tuned via the "validation" parameter
    validation = params.get("validation", 50000.0 / 60000.0)

    if validation < 1.0:
      self.train_sampler, self.validation_sampler = createValidationDataSampler(
        train_dataset, validation)

      self.train_loader = torch.utils.data.DataLoader(train_dataset,
                                                      batch_size=params["batch_size"],
                                                      sampler=self.train_sampler)

      self.validation_loader = torch.utils.data.DataLoader(train_dataset,
                                                           batch_size=params["batch_size"],
                                                           sampler=self.validation_sampler)
    else:
      # No validation. Normal training dataset
      self.validation_loader = None
      self.validation_sampler = None
      self.train_sampler = None
      self.train_loader = torch.utils.data.DataLoader(train_dataset,
                                                      batch_size=params["batch_size"],
                                                      shuffle=True)

    self.test_loader = torch.utils.data.DataLoader(
      datasets.MNIST(self.dataDir, train=False, transform=transform),
      batch_size=params["test_batch_size"], shuffle=True)

    # Parse 'n' and 'k' parameters
    # NOTE(review): basestring and a list-returning map() are python 2 only -
    # confirm this module still targets python 2; under python 3 basestring
    # raises NameError and map() yields a lazy iterator
    n = params["n"]
    k = params["k"]
    if isinstance(n, basestring):
      # e.g. "500_200" -> [500, 200] for multi-layer configurations
      n = map(int, n.split("_"))
    if isinstance(k, basestring):
      k = map(int, k.split("_"))

    if params["use_cnn"]:
      c1_out_channels = params["c1_out_channels"]
      c1_k = params["c1_k"]
      if isinstance(c1_out_channels, basestring):
        c1_out_channels = map(int, c1_out_channels.split("_"))
      if isinstance(c1_k, basestring):
        c1_k = map(int, c1_k.split("_"))

      # Parse 'c1_input_shape' parameter
      if "c1_input_shape" in params:
        c1_input_shape = map(int, params["c1_input_shape"].split("_"))
      else:
        # default MNIST input shape: 1 channel, 28x28 pixels
        c1_input_shape = (1, 28, 28)

      sp_model = SparseNet(
        inputSize=c1_input_shape,
        outChannels=c1_out_channels,
        c_k=c1_k,
        kernelSize=5,
        stride=1,
        dropout=params["dropout"],
        n=n,
        k=k,
        boostStrength=params["boost_strength"],
        weightSparsity=params["weight_sparsity"],
        boostStrengthFactor=params["boost_strength_factor"],
        kInferenceFactor=params["k_inference_factor"],
        useBatchNorm=params["use_batch_norm"],
        normalizeWeights=params.get("normalize_weights", False)
      )
      print("c1OutputLength=", sp_model.cnnSdr[0].outputLength)
    else:
      # linear-only SparseNet (no CNN front end)
      sp_model = SparseNet(
        n=n,
        k=k,
        boostStrength=params["boost_strength"],
        weightSparsity=params["weight_sparsity"],
        boostStrengthFactor=params["boost_strength_factor"],
        kInferenceFactor=params["k_inference_factor"],
        dropout=params["dropout"],
        useBatchNorm=params["use_batch_norm"],
        normalizeWeights=params.get("normalize_weights", False)
      )
    if torch.cuda.device_count() > 1:
      print("Using", torch.cuda.device_count(), "GPUs")
      sp_model = torch.nn.DataParallel(sp_model)

    self.model = sp_model.to(self.device)
    self.learningRate = params["learning_rate"]
    self.optimizer = self.createOptimizer(params, self.model)
    self.lr_scheduler = self.createLearningRateScheduler(params, self.optimizer)
def iterate(self, params, repetition, iteration):
    """
    Called once for each training iteration (== epoch here).

    Trains one epoch, optionally runs validation and noise tests, and
    returns a dict of metrics for this iteration. Any exception is
    re-raised as RuntimeError after printing its traceback.
    """
    try:
        print("\nStarting iteration", iteration)
        t1 = time.time()
        ret = {}

        # Update learning rate using learning rate scheduler if configured
        if self.lr_scheduler is not None:
            # ReduceLROnPlateau lr_scheduler step should be called after validation,
            # all other lr_schedulers should be called before training
            if params["lr_scheduler"] != "ReduceLROnPlateau":
                self.lr_scheduler.step()

        self.train(params, epoch=iteration)

        # Run validation test
        if self.validation_loader is not None:
            validation = self.test(params, self.validation_loader)

            # ReduceLROnPlateau step should be called after validation
            # NOTE(review): assumes the "lr_scheduler" key exists whenever
            # validation runs — raises KeyError otherwise; confirm configs.
            if params["lr_scheduler"] == "ReduceLROnPlateau":
                self.lr_scheduler.step(validation["test_loss"])

            ret["validation"] = validation
            print("Validation: Test error=", validation["testerror"],
                  "entropy=", validation["entropy"])

        # Run noise test (every epoch if configured, always on last epoch)
        if (params["test_noise_every_epoch"] or
                iteration == params["iterations"] - 1):
            ret.update(self.runNoiseTests(params))
            print("Noise test results: totalCorrect=", ret["totalCorrect"],
                  "Test error=", ret["testerror"], ", entropy=", ret["entropy"])
            # Flag parameter sets clearing both aggregate thresholds
            if ret["totalCorrect"] > 100000 and ret["testerror"] > 98.3:
                print("*******")
                print(params)

        ret.update({"elapsedTime": time.time() - self.startTime})
        ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
                    else self.lr_scheduler.get_lr()})

        print("Iteration time= {0:.3f} secs, "
              "total elapsed time= {1:.3f} mins".format(
                  time.time() - t1, ret["elapsedTime"]/60.0))

    except Exception as e:
        # Tracebacks are not printed if using multiprocessing so we do it here
        tb = sys.exc_info()[2]
        traceback.print_tb(tb)
        raise RuntimeError("Something went wrong in iterate")

    return ret
def finalize(self, params, rep):
    """
    Called once we are done. Persists the trained model to
    <path>/<name>/model.pt unless "saveNet" is disabled in params.
    """
    if not params.get("saveNet", True):
        return
    # Save the full model once we are done.
    target_path = os.path.join(params["path"], params["name"], "model.pt")
    torch.save(self.model, target_path)
def createLearningRateScheduler(self, params, optimizer):
    """
    Create the learning rate scheduler named by params["lr_scheduler"]
    and attach it to the optimizer.

    Returns None when no scheduler is configured. "StepLR" gets a
    built-in parameterization (decay once per epoch by
    "learning_rate_factor"); every other scheduler requires
    "lr_scheduler_params" in the config.

    Raises ValueError for an unknown scheduler name or missing params.
    """
    lr_scheduler = params.get("lr_scheduler", None)
    if lr_scheduler is None:
        return None

    # Look the scheduler class up on the module instead of eval()-ing
    # the class name from config text.
    clazz = getattr(torch.optim.lr_scheduler, lr_scheduler, None)
    if clazz is None:
        raise ValueError("Unknown lr_scheduler: {}".format(lr_scheduler))

    if lr_scheduler == "StepLR":
        # Build the kwargs directly rather than eval-ing a built string.
        lr_scheduler_params = {"step_size": 1,
                               "gamma": params["learning_rate_factor"]}
    else:
        config_text = params.get("lr_scheduler_params", None)
        if config_text is None:
            raise ValueError("Missing 'lr_scheduler_params' for {}".format(lr_scheduler))
        # FIXME: eval() of config-supplied text — only run trusted
        # experiment configs through this path.
        lr_scheduler_params = eval(config_text)

    return clazz(optimizer, **lr_scheduler_params)
def createOptimizer(self, params, model):
    """
    Create a new instance of the optimizer named in params ("SGD" or
    "Adam") for the given model's parameters.

    Raises LookupError for any other optimizer name.
    """
    lr = params["learning_rate"]
    print("Creating optimizer with learning rate=", lr)
    chosen = params["optimizer"]
    if chosen == "SGD":
        return optim.SGD(model.parameters(), lr=lr,
                         momentum=params["momentum"])
    if chosen == "Adam":
        return optim.Adam(model.parameters(), lr=lr)
    raise LookupError("Incorrect optimizer value")
def train(self, params, epoch):
    """
    Train one epoch of this model by iterating through mini batches. An epoch
    ends after one pass through the training set, or if the number of mini
    batches exceeds the parameter "batches_in_epoch".
    """
    log_interval = params["log_interval"]

    # Callback invoked on every batch; reports entropy periodically and
    # optionally saves a duty-cycle plot.
    def on_batch(model, batch_idx):
        if batch_idx % log_interval != 0:
            return
        print("logging: {} learning iterations, entropy: {} / {}".format(
            model.getLearningIterations(), float(model.entropy()),
            model.maxEntropy()))
        if params["create_plots"]:
            figure_path = (self.resultsDir + "/figure_" + str(epoch) + "_" +
                           str(model.getLearningIterations()))
            plotDutyCycles(model.dutyCycle, figure_path)

    trainModel(model=self.model, loader=self.train_loader,
               optimizer=self.optimizer, device=self.device,
               batches_in_epoch=params["batches_in_epoch"],
               batch_callback=on_batch)
    self.model.postEpoch()
def test(self, params, test_loader):
    """
    Test the model using the given loader and return test metrics as a
    dict with keys "num_correct", "test_loss", "testerror" (percent
    accuracy), and "entropy".
    """
    eval_results = evaluateModel(model=self.model, device=self.device,
                                 loader=test_loader)
    return {
        "num_correct": eval_results["total_correct"],
        "test_loss": eval_results["loss"],
        "testerror": eval_results["accuracy"] * 100,
        "entropy": float(self.model.entropy()),
    }
def runNoiseTests(self, params):
    """
    Test the model with different noise values and return test metrics.

    Returns a dict keyed by noise level (0.0 to 0.5 in 0.05 steps) plus
    aggregates: "totalCorrect" summed over all noise levels, and
    "testerror"/"entropy" taken from the zero-noise run. When a
    validation sampler exists, "validation" holds the same structure for
    the held-out training subset.
    """
    ret = {}

    # Noise on validation data
    validation = {} if self.validation_sampler is not None else None

    # Test with noise
    total_correct = 0
    validation_total_correct = 0
    for noise in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]:
        # Noise is injected after ToTensor and before Normalize;
        # whiteValue 0.1307 + 2*0.3081 matches the Normalize mean/std.
        transform = transforms.Compose([
            transforms.ToTensor(),
            RandomNoise(noise, whiteValue=0.1307 + 2*0.3081),
            transforms.Normalize((0.1307,), (0.3081,))
        ])

        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(self.dataDir, train=False, transform=transform),
            batch_size=params["test_batch_size"], shuffle=True)
        testResult = self.test(params, test_loader)
        total_correct += testResult["num_correct"]
        ret[noise] = testResult

        if validation is not None:
            # Same noisy transform on the validation subset of the train set
            validation_loader = torch.utils.data.DataLoader(
                datasets.MNIST(self.dataDir, train=True, transform=transform),
                sampler=self.validation_sampler,
                batch_size=params["test_batch_size"])
            validationResult = self.test(params, validation_loader)
            validation_total_correct += validationResult["num_correct"]
            validation[noise] = validationResult

    ret["totalCorrect"] = total_correct
    ret["testerror"] = ret[0.0]["testerror"]
    ret["entropy"] = ret[0.0]["entropy"]
    if "nonzeros" in ret[0.0]:
        ret["nonzeros"] = ret[0.0]["nonzeros"]

    if validation is not None:
        validation["totalCorrect"] = validation_total_correct
        validation["testerror"] = validation[0.0]["testerror"]
        validation["entropy"] = validation[0.0]["entropy"]
        ret["validation"] = validation

    return ret
if __name__ == '__main__':
    # Run the experiment suite when invoked as a script.
    MNISTSparseExperiment().start()
Add a "first_epoch_batch_size" parameter to control the batch size used during the first training epoch.
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
import random
import sys
import traceback
import numpy as np
import time
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from htmresearch.support.expsuite import PyExperimentSuite
from htmresearch.frameworks.pytorch.image_transforms import RandomNoise
from htmresearch.frameworks.pytorch.sparse_net import SparseNet
from htmresearch.frameworks.pytorch.duty_cycle_metrics import plotDutyCycles
from htmresearch.frameworks.pytorch.dataset_utils import createValidationDataSampler
from htmresearch.frameworks.pytorch.model_utils import (
evaluateModel, trainModel)
class MNISTSparseExperiment(PyExperimentSuite):
    """
    Allows running multiple sparse MNIST experiments in parallel
    """

    def parse_cfg(self):
        """Parse the suite config, then chdir next to 'experiments.cfg'
        so that relative paths in the config resolve correctly."""
        super(MNISTSparseExperiment, self).parse_cfg()
        # Change the current working directory to be relative to 'experiments.cfg'
        projectDir = os.path.dirname(self.options.config)
        projectDir = os.path.abspath(projectDir)
        os.chdir(projectDir)

    def reset(self, params, repetition):
        """
        Called once at the beginning of each experiment.

        Seeds all RNGs, builds the MNIST data loaders (with an optional
        train/validation split), instantiates the SparseNet model, and
        creates the optimizer plus optional LR scheduler.
        """
        self.startTime = time.time()
        print(params)
        # Seed every RNG in play so each repetition is reproducible
        seed = params["seed"] + repetition
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)

        # Get our directories correct
        self.dataDir = params["datadir"]
        self.resultsDir = os.path.join(params["path"], params["name"], "plots")

        if not os.path.exists(self.resultsDir):
            os.makedirs(self.resultsDir)

        self.use_cuda = not params["no_cuda"] and torch.cuda.is_available()
        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.1307,), (0.3081,))])

        train_dataset = datasets.MNIST(self.dataDir, train=True, download=True,
                                       transform=transform)

        # Create training and validation sampler from MNIST dataset by training on
        # random X% of the training set and validating on the remaining (1-X)%,
        # where X can be tuned via the "validation" parameter
        validation = params.get("validation", 50000.0 / 60000.0)
        if validation < 1.0:
            self.train_sampler, self.validation_sampler = createValidationDataSampler(
                train_dataset, validation)
            self.train_loader = torch.utils.data.DataLoader(
                train_dataset,
                batch_size=params["batch_size"],
                sampler=self.train_sampler)
            self.validation_loader = torch.utils.data.DataLoader(
                train_dataset,
                batch_size=params["batch_size"],
                sampler=self.validation_sampler)
        else:
            # No validation. Normal training dataset
            self.validation_loader = None
            self.validation_sampler = None
            self.train_sampler = None
            self.train_loader = torch.utils.data.DataLoader(
                train_dataset,
                batch_size=params["batch_size"],
                shuffle=True)

        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(self.dataDir, train=False, transform=transform),
            batch_size=params["test_batch_size"], shuffle=True)

        # Parse 'n' and 'k' parameters: underscore-separated strings become
        # per-layer lists. NOTE(review): 'basestring' and list-returning
        # 'map' make this module Python 2 only.
        n = params["n"]
        k = params["k"]
        if isinstance(n, basestring):
            n = map(int, n.split("_"))
        if isinstance(k, basestring):
            k = map(int, k.split("_"))

        if params["use_cnn"]:
            c1_out_channels = params["c1_out_channels"]
            c1_k = params["c1_k"]
            if isinstance(c1_out_channels, basestring):
                c1_out_channels = map(int, c1_out_channels.split("_"))
            if isinstance(c1_k, basestring):
                c1_k = map(int, c1_k.split("_"))

            # Parse 'c1_input_shape' parameter
            if "c1_input_shape" in params:
                c1_input_shape = map(int, params["c1_input_shape"].split("_"))
            else:
                # Default MNIST shape: 1 channel, 28x28
                c1_input_shape = (1, 28, 28)

            sp_model = SparseNet(
                inputSize=c1_input_shape,
                outChannels=c1_out_channels,
                c_k=c1_k,
                kernelSize=5,
                stride=1,
                dropout=params["dropout"],
                n=n,
                k=k,
                boostStrength=params["boost_strength"],
                weightSparsity=params["weight_sparsity"],
                boostStrengthFactor=params["boost_strength_factor"],
                kInferenceFactor=params["k_inference_factor"],
                useBatchNorm=params["use_batch_norm"],
                normalizeWeights=params.get("normalize_weights", False)
            )
            print("c1OutputLength=", sp_model.cnnSdr[0].outputLength)
        else:
            sp_model = SparseNet(
                n=n,
                k=k,
                boostStrength=params["boost_strength"],
                weightSparsity=params["weight_sparsity"],
                boostStrengthFactor=params["boost_strength_factor"],
                kInferenceFactor=params["k_inference_factor"],
                dropout=params["dropout"],
                useBatchNorm=params["use_batch_norm"],
                normalizeWeights=params.get("normalize_weights", False)
            )
        if torch.cuda.device_count() > 1:
            print("Using", torch.cuda.device_count(), "GPUs")
            sp_model = torch.nn.DataParallel(sp_model)

        self.model = sp_model.to(self.device)
        self.learningRate = params["learning_rate"]
        self.optimizer = self.createOptimizer(params, self.model)
        self.lr_scheduler = self.createLearningRateScheduler(params, self.optimizer)

    def iterate(self, params, repetition, iteration):
        """
        Called once for each training iteration (== epoch here).

        Trains one epoch, optionally runs validation and noise tests, and
        returns a dict of metrics; exceptions are re-raised as RuntimeError
        after printing their traceback.
        """
        try:
            print("\nStarting iteration", iteration)
            t1 = time.time()
            ret = {}

            # Update learning rate using learning rate scheduler if configured
            if self.lr_scheduler is not None:
                # ReduceLROnPlateau lr_scheduler step should be called after validation,
                # all other lr_schedulers should be called before training
                if params["lr_scheduler"] != "ReduceLROnPlateau":
                    self.lr_scheduler.step()

            self.train(params, epoch=iteration)

            # Run validation test
            if self.validation_loader is not None:
                validation = self.test(params, self.validation_loader)

                # ReduceLROnPlateau step should be called after validation
                # NOTE(review): assumes "lr_scheduler" key exists whenever
                # validation runs — KeyError otherwise; confirm configs.
                if params["lr_scheduler"] == "ReduceLROnPlateau":
                    self.lr_scheduler.step(validation["test_loss"])

                ret["validation"] = validation
                print("Validation: Test error=", validation["testerror"],
                      "entropy=", validation["entropy"])

            # Run noise test (every epoch if configured, always on last epoch)
            if (params["test_noise_every_epoch"] or
                    iteration == params["iterations"] - 1):
                ret.update(self.runNoiseTests(params))
                print("Noise test results: totalCorrect=", ret["totalCorrect"],
                      "Test error=", ret["testerror"], ", entropy=", ret["entropy"])
                # Flag parameter sets clearing both aggregate thresholds
                if ret["totalCorrect"] > 100000 and ret["testerror"] > 98.3:
                    print("*******")
                    print(params)

            ret.update({"elapsedTime": time.time() - self.startTime})
            ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
                        else self.lr_scheduler.get_lr()})

            print("Iteration time= {0:.3f} secs, "
                  "total elapsed time= {1:.3f} mins".format(
                      time.time() - t1, ret["elapsedTime"]/60.0))

        except Exception as e:
            # Tracebacks are not printed if using multiprocessing so we do it here
            tb = sys.exc_info()[2]
            traceback.print_tb(tb)
            raise RuntimeError("Something went wrong in iterate")

        return ret

    def finalize(self, params, rep):
        """
        Called once we are done. Saves the full model to
        <path>/<name>/model.pt unless "saveNet" is disabled.
        """
        if params.get("saveNet", True):
            # Save the full model once we are done.
            saveDir = os.path.join(params["path"], params["name"], "model.pt")
            torch.save(self.model, saveDir)

    def createLearningRateScheduler(self, params, optimizer):
        """
        Creates the learning rate scheduler and attach the optimizer.

        Returns None when no "lr_scheduler" is configured. "StepLR" gets
        a built-in parameterization; everything else needs
        "lr_scheduler_params" in the config.
        """
        lr_scheduler = params.get("lr_scheduler", None)
        if lr_scheduler is None:
            return None

        if lr_scheduler == "StepLR":
            # Decay once per epoch by 'learning_rate_factor'
            lr_scheduler_params = "{'step_size': 1, 'gamma':" + str(params["learning_rate_factor"]) + "}"
        else:
            lr_scheduler_params = params.get("lr_scheduler_params", None)
            if lr_scheduler_params is None:
                raise ValueError("Missing 'lr_scheduler_params' for {}".format(lr_scheduler))

        # Get lr_scheduler class by name
        # NOTE(review): eval() of config-derived text — only run trusted
        # configs; getattr(torch.optim.lr_scheduler, name) would be safer.
        clazz = eval("torch.optim.lr_scheduler.{}".format(lr_scheduler))

        # Parse scheduler parameters from config
        lr_scheduler_params = eval(lr_scheduler_params)

        return clazz(optimizer, **lr_scheduler_params)

    def createOptimizer(self, params, model):
        """
        Create a new instance of the optimizer ("SGD" or "Adam");
        raises LookupError for any other value.
        """
        lr = params["learning_rate"]
        print("Creating optimizer with learning rate=", lr)
        if params["optimizer"] == "SGD":
            optimizer = optim.SGD(model.parameters(), lr=lr,
                                  momentum=params["momentum"])
        elif params["optimizer"] == "Adam":
            optimizer = optim.Adam(model.parameters(), lr=lr)
        else:
            raise LookupError("Incorrect optimizer value")

        return optimizer

    def train(self, params, epoch):
        """
        Train one epoch of this model by iterating through mini batches. An epoch
        ends after one pass through the training set, or if the number of mini
        batches exceeds the parameter "batches_in_epoch".

        When "first_epoch_batch_size" is configured, epoch 0 uses a
        dedicated loader with that batch size (and optionally
        "batches_in_first_epoch" batches).
        """
        # Callback used to log information on every batch
        def log(model, batch_idx):
            if batch_idx % params["log_interval"] == 0:
                entropy = model.entropy()
                print("logging: {} learning iterations, entropy: {} / {}".format(
                    model.getLearningIterations(), float(entropy), model.maxEntropy()))
                if params["create_plots"]:
                    plotDutyCycles(model.dutyCycle,
                                   self.resultsDir + "/figure_" + str(epoch) + "_" +
                                   str(model.getLearningIterations()))

        # Adjust first epoch batch size to stabilize the dutycycles at the
        # beginning of the training
        loader = self.train_loader
        batches_in_epoch = params["batches_in_epoch"]
        if "first_epoch_batch_size" in params:
            if epoch == 0:
                batches_in_epoch = params.get("batches_in_first_epoch", batches_in_epoch)
                # Rebuild the loader over the same dataset/sampler with the
                # first-epoch batch size
                loader = torch.utils.data.DataLoader(
                    self.train_loader.dataset,
                    batch_size=params["first_epoch_batch_size"],
                    sampler=self.train_loader.sampler)

        trainModel(model=self.model, loader=loader,
                   optimizer=self.optimizer, device=self.device,
                   batches_in_epoch=batches_in_epoch,
                   batch_callback=log)
        self.model.postEpoch()

    def test(self, params, test_loader):
        """
        Test the model using the given loader and return test metrics:
        "num_correct", "test_loss", "testerror" (percent accuracy), and
        "entropy".
        """
        results = evaluateModel(model=self.model, device=self.device,
                                loader=test_loader)
        entropy = self.model.entropy()
        ret = {"num_correct": results["total_correct"],
               "test_loss": results["loss"],
               "testerror": results["accuracy"] * 100,
               "entropy": float(entropy)}
        return ret

    def runNoiseTests(self, params):
        """
        Test the model with different noise values and return test metrics.

        Returns a dict keyed by noise level (0.0-0.5 in 0.05 steps) plus
        aggregates "totalCorrect", "testerror"/"entropy" (from the
        zero-noise run), and optionally "validation" with the same
        structure for the validation sampler.
        """
        ret = {}

        # Noise on validation data
        validation = {} if self.validation_sampler is not None else None

        # Test with noise
        total_correct = 0
        validation_total_correct = 0
        for noise in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]:
            # Noise is injected after ToTensor and before Normalize;
            # whiteValue 0.1307 + 2*0.3081 matches the Normalize mean/std.
            transform = transforms.Compose([
                transforms.ToTensor(),
                RandomNoise(noise, whiteValue=0.1307 + 2*0.3081),
                transforms.Normalize((0.1307,), (0.3081,))
            ])

            test_loader = torch.utils.data.DataLoader(
                datasets.MNIST(self.dataDir, train=False, transform=transform),
                batch_size=params["test_batch_size"], shuffle=True)
            testResult = self.test(params, test_loader)
            total_correct += testResult["num_correct"]
            ret[noise] = testResult

            if validation is not None:
                # Same noisy transform on the validation subset of the train set
                validation_loader = torch.utils.data.DataLoader(
                    datasets.MNIST(self.dataDir, train=True, transform=transform),
                    sampler=self.validation_sampler,
                    batch_size=params["test_batch_size"])
                validationResult = self.test(params, validation_loader)
                validation_total_correct += validationResult["num_correct"]
                validation[noise] = validationResult

        ret["totalCorrect"] = total_correct
        ret["testerror"] = ret[0.0]["testerror"]
        ret["entropy"] = ret[0.0]["entropy"]
        if "nonzeros" in ret[0.0]:
            ret["nonzeros"] = ret[0.0]["nonzeros"]

        if validation is not None:
            validation["totalCorrect"] = validation_total_correct
            validation["testerror"] = validation[0.0]["testerror"]
            validation["entropy"] = validation[0.0]["entropy"]
            ret["validation"] = validation

        return ret
if __name__ == '__main__':
    # Run the experiment suite when invoked as a script.
    MNISTSparseExperiment().start()
|
'''
This script splits the dataset of songs into training/validation/test
splits. It is done at the song granularity in order to prevent leaking
information within each song (compared to splitting at block level).
Also this approach is invariant to the block/hop size of features like
chromagram. This allows to compare various feature types.
The output is a TSV file containing information on which song is in which split
and its relative order within the split.
'''
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
data_dir = '../data/beatles'
songs_file = data_dir + '/isophonic-songs.txt'

df = pd.read_csv(songs_file, sep='\t', header=None, names=['path'])

# Paths have the form artist/album/song; split into separate columns.
songs = np.array([p.split('/') for p in df['path']])
df['artist'] = songs[:, 0]
df['album'] = songs[:, 1]
df['song'] = songs[:, 2]

def split_dataset(index):
    """
    Split the song indexes into 60% train / 20% validation / 20% test.

    Both splits are seeded so the partition is reproducible across runs.
    """
    index = list(index)
    ix_train, ix_test = train_test_split(index, test_size=0.2, random_state=42)
    # BUG FIX: this second split was previously unseeded, so train/valid
    # membership changed on every run; pin random_state for reproducibility.
    ix_train, ix_valid = train_test_split(ix_train, test_size=0.2 / (1 - 0.2),
                                          random_state=42)
    return {'train': ix_train, 'valid': ix_valid, 'test': ix_test}

split_indices = split_dataset(df.index)

df['split'] = ''
for name in split_indices:
    # Label each row with the name of the split it belongs to.
    df.loc[split_indices[name], 'split'] = name
df['order'] = np.hstack([split_indices['train'], split_indices['valid'], split_indices['test']])

df.to_csv(data_dir + '/songs-dataset-split.tsv', sep='\t', index=False)

with open(data_dir + '/dataset-split-indexes.tsv', 'w') as fh:
    for name in split_indices:
        print(name + '\t' + ','.join([str(i) for i in split_indices[name]]), file=fh)
Fix a bug in the splitting script: pin the random seed of the second split.
This makes the experiments reproducible.
NOTE: we cannot reuse the previously split dataset since we don't know the seed that produced it.
'''
This script allows to split the dataset of songs into training/validation/test
splits. It is done at the song granularity in order to prevent leaking
information within each song (compared to splitting at block level).
Also this approach is invariant to the block/hop size of features like
chromagram. This allows to compare various feature types.
The output is a TSV file containing information on which song is in which split
and its relative order within the split.
'''
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
data_dir = '../data/beatles'
songs_file = data_dir + '/isophonic-songs.txt'

df = pd.read_csv(songs_file, sep='\t', header=None, names=['path'])

# Paths have the form artist/album/song; split into separate columns.
songs = np.array([p.split('/') for p in df['path']])
df['artist'] = songs[:, 0]
df['album'] = songs[:, 1]
df['song'] = songs[:, 2]

def split_dataset(index):
    """
    Split the song indexes into 60% train / 20% validation / 20% test.

    Both splits are seeded, making the partition reproducible.
    """
    index = list(index)
    ix_train, ix_test = train_test_split(index, test_size=0.2, random_state=42)
    ix_train, ix_valid = train_test_split(ix_train, test_size=0.2 / (1 - 0.2),
                                          random_state=42)
    return {'train': ix_train, 'valid': ix_valid, 'test': ix_test}

split_indices = split_dataset(df.index)

df['split'] = ''
for name in split_indices:
    # BUG FIX: Series.ix is deprecated/removed in modern pandas, and the
    # chained df['split'].ix[...] assignment may not write back; use
    # DataFrame.loc with a row/column pair instead.
    df.loc[split_indices[name], 'split'] = name
df['order'] = np.hstack([split_indices['train'], split_indices['valid'], split_indices['test']])

# index=False (was index=None) — do not write the row index column.
df.to_csv(data_dir + '/songs-dataset-split.tsv', sep='\t', index=False)

with open(data_dir + '/dataset-split-indexes.tsv', 'w') as fh:
    for name in split_indices:
        print(name + '\t' + ','.join([str(i) for i in split_indices[name]]), file=fh)
|
"""Pump simulated OpenStack notificationss into RabbitMQ.
You need to install rabbitqm-server and
pip install librabbitmq
pip install --pre notabene
pip install --pre notification_utils
"""
import datetime
from notabene import kombu_driver as driver
import notification_utils
import notigen
connection = driver.create_connection("localhost", 5672, 'guest', 'guest',
"librabbitmq", "/")
exchange = driver.create_exchange("monitor", "topic")
g = notigen.EventGenerator(100) # Number of operations per minute
now = datetime.datetime.utcnow()
start = now
nevents = 0
while nevents < 10000:
e = g.generate(now)
if e:
nevents += len(e)
for event in e:
driver.send_notification(event, "monitor.info", connection, exchange)
now = datetime.datetime.utcnow()
Now declares a notabene queue, routes events to it, and prints progress.
"""Pump simulated OpenStack notificationss into RabbitMQ.
You need to install rabbitqm-server and
pip install librabbitmq
pip install --pre notabene
pip install --pre notification_utils
"""
import datetime
from notabene import kombu_driver as driver
import notification_utils
import notigen
connection = driver.create_connection("localhost", 5672, 'guest', 'guest',
"librabbitmq", "/")
exchange = driver.create_exchange("monitor", "topic")
queue_name = "monitor.info"
queue = driver.create_queue(queue_name, exchange, queue_name, channel=connection.channel())
queue.declare()
g = notigen.EventGenerator(100) # Number of operations per minute
now = datetime.datetime.utcnow()
start = now
nevents = 0
while nevents < 10000:
e = g.generate(now)
if e:
nevents += len(e)
for event in e:
driver.send_notification(event, queue_name, connection, exchange)
print nevents, event['when'], event['event']
now = datetime.datetime.utcnow()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Address(dict):
    """
    Dictionary class that provides some convenience wrappers for accessing
    commonly used data elements on an Address.
    """

    def __init__(self, address_dict, order='lat'):
        super(Address, self).__init__(address_dict)
        # Coordinate ordering: 'lat' -> (lat, lng); anything else -> (lng, lat)
        self.order = order

    @property
    def coords(self):
        """
        Returns a tuple representing the location of the address, ordered
        according to self.order; None when location data is missing.
        """
        x, y = ("lat", "lng") if self.order == "lat" else ("lng", "lat")
        try:
            return (self["location"][x], self["location"][y])
        except KeyError:
            return None

    @property
    def accuracy(self):
        """
        Returns the accuracy integer or None of the geocoded address.
        """
        try:
            return self["accuracy"]
        except KeyError:
            return None

    @property
    def formatted_address(self):
        """
        Returns the formatted address string, or "" when not present.
        """
        return self.get("formatted_address", "")
class Location(dict):
    """
    Dictionary class that provides some convenience accessors to commonly used
    data elements.
    """

    def __init__(self, result_dict, order='lat'):
        super(Location, self).__init__(result_dict)
        try:
            self.best_match = Address(self["results"][0], order=order)
        # A KeyError would be raised if an address could not be parsed or
        # geocoded, i.e. from a batch address geocoding process. An index error
        # would be raised under similar circumstances, e.g. the 'results' key
        # just refers to an empty list.
        except (KeyError, IndexError):
            self.best_match = Address({})
        self.order = order

    @property
    def coords(self):
        """
        Returns a tuple representing the location of the first result,
        ordered according to self.order; None when unavailable.
        """
        return self.best_match.coords

    @property
    def accuracy(self):
        """
        Returns the accuracy integer or None of the geocoded address.
        """
        return self.best_match.accuracy

    @property
    def formatted_address(self):
        """
        Returns the formatted address string of the best match.
        """
        return self.best_match.formatted_address
class LocationCollection(list):
    """
    A list of Location objects, with dictionary lookup by address.
    """

    # Kept for backward compatibility with any code reading the class
    # attribute; instances get their own dict in __init__ (see below).
    lookups = {}

    def __init__(self, results_list, order='lat'):
        """
        Loads the individual responses into an internal list and uses the query
        values as lookup keys.
        """
        # BUG FIX: 'lookups' used to be only a class attribute, so every
        # LocationCollection shared one dict and newer collections clobbered
        # the lookup indexes of older ones. Bind a fresh dict per instance.
        self.lookups = {}
        results = []
        for index, result in enumerate(results_list):
            results.append(Location(result['response'], order=order))
            self.lookups[result['query']] = index
        super(LocationCollection, self).__init__(results)
        self.order = order

    def get(self, key):
        """
        Returns an individual Location by query lookup, e.g. address or point.

        Raises ValueError for a malformed coordinate tuple; KeyError when
        the query is not part of this collection.
        """
        if isinstance(key, tuple):
            # TODO handle different ordering
            try:
                x, y = float(key[0]), float(key[1])
            except IndexError:
                raise ValueError("Two values are required for a coordinate pair")
            except ValueError:
                raise ValueError("Only float or float-coercable values can be passed")
            key = "{0}, {1}".format(x, y)
        return self[self.lookups[key]]

    @property
    def coords(self):
        """
        Returns a list of tuples for the best matched coordinates.
        """
        return [l.coords for l in self]

    @property
    def formatted_addresses(self):
        """
        Returns a list of formatted addresses from the Location list
        """
        return [l.formatted_address for l in self]
Fixes some typos in documentation.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Address(dict):
    """
    Dictionary wrapper exposing convenience accessors for commonly used
    fields of a geocoded address.
    """

    def __init__(self, address_dict, order='lat'):
        super(Address, self).__init__(address_dict)
        # 'lat' -> coords are (lat, lng); anything else -> (lng, lat)
        self.order = order

    @property
    def coords(self):
        """
        Return the address location as a coordinate pair ordered according
        to self.order, or None when location data is missing.
        """
        if self.order == "lat":
            first, second = "lat", "lng"
        else:
            first, second = "lng", "lat"
        try:
            location = self["location"]
            return (location[first], location[second])
        except KeyError:
            return None

    @property
    def accuracy(self):
        """
        Return the accuracy integer of the geocoded address, or None.
        """
        return self.get("accuracy")

    @property
    def formatted_address(self):
        """
        Return the formatted address string, or "" when not present.
        """
        if "formatted_address" in self:
            return self["formatted_address"]
        return ""
class Location(dict):
    """
    Dictionary wrapper exposing convenience accessors for a geocoding
    response; delegates to the first (best) result.
    """

    def __init__(self, result_dict, order='lat'):
        super(Location, self).__init__(result_dict)
        # KeyError: the address could not be parsed or geocoded (e.g. in a
        # batch geocoding response). IndexError: 'results' is an empty
        # list. Either way, fall back to an empty Address.
        try:
            best = Address(self["results"][0], order=order)
        except (KeyError, IndexError):
            best = Address({})
        self.best_match = best
        self.order = order

    @property
    def coords(self):
        """Coordinate pair of the best result, or None."""
        return self.best_match.coords

    @property
    def accuracy(self):
        """Accuracy integer of the geocoded address, or None."""
        return self.best_match.accuracy

    @property
    def formatted_address(self):
        """Formatted address string of the best result."""
        return self.best_match.formatted_address
class LocationCollection(list):
    """
    A list of Location objects, with dictionary lookup by address.
    """

    # Kept for backward compatibility with any code reading the class
    # attribute; instances get their own dict in __init__ (see below).
    lookups = {}

    def __init__(self, results_list, order='lat'):
        """
        Loads the individual responses into an internal list and uses the query
        values as lookup keys.
        """
        # BUG FIX: 'lookups' used to be only a class attribute, so every
        # LocationCollection shared one dict and newer collections clobbered
        # the lookup indexes of older ones. Bind a fresh dict per instance.
        self.lookups = {}
        results = []
        for index, result in enumerate(results_list):
            results.append(Location(result['response'], order=order))
            self.lookups[result['query']] = index
        super(LocationCollection, self).__init__(results)
        self.order = order

    def get(self, key):
        """
        Returns an individual Location by query lookup, e.g. address or point.

        Raises ValueError for a malformed coordinate tuple; KeyError when
        the query is not part of this collection.
        """
        if isinstance(key, tuple):
            # TODO handle different ordering
            try:
                x, y = float(key[0]), float(key[1])
            except IndexError:
                raise ValueError("Two values are required for a coordinate pair")
            except ValueError:
                raise ValueError("Only float or float-coercable values can be passed")
            key = "{0}, {1}".format(x, y)
        return self[self.lookups[key]]

    @property
    def coords(self):
        """
        Returns a list of tuples for the best matched coordinates.
        """
        return [l.coords for l in self]

    @property
    def formatted_addresses(self):
        """
        Returns a list of formatted addresses from the Location list
        """
        return [l.formatted_address for l in self]
|
#
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Module implementing the "macro" dictionary class
"""
from lib import util
class Macros(dict):
    """
    Dictionary of macro definitions supporting %(name)s self-expansion,
    change tracking, command-line overrides, and cheap copy-on-read
    "shadowing" of a parent Macros instance.
    """

    def __init__(self, macros={}, shadow=False):
        # NOTE: attribute assignment here is routed through __setattr__ ->
        # __setitem__; the name-mangled '_Macros...' keys bypass macro
        # processing and are stored directly in the dict.
        self.__tracked = {}
        self.__track = False
        self.__overrides = {}
        if shadow:
            # Shadowing keeps a reference to the parent; values are copied
            # in lazily on first access (see __getitem__).
            self.__macros = macros
        else:
            self.__macros = {}
            self.update(macros)

    def update(self, other):
        # Route every assignment through __setitem__ so expansion and
        # tracking rules apply.
        for key, item in other.iteritems():
            self[key] = item

    def __setitem__(self, name, value):
        # Internal (name-mangled) attributes bypass macro handling.
        if name.startswith('_Macros'):
            dict.__setitem__(self, name, value)
            return
        # '.' in name reserved for getting alternative representations
        if '.' in name:
            raise MacroError, 'name "%s" contains illegal character: "."' % name
        if self.__track:
            self.__tracked[name] = 1
        # only expand references to ourself
        d = {name: self.get(name)}
        # escape any macros in the new value
        value = value.replace('%', '%%')
        # unescape references to ourself
        value = value.replace('%%%%(%s)s' %name, '%%(%s)s'%name)
        # expand our old value when defining the new value
        dict.__setitem__(self, name, value % d)

    # overrides allow you to set a macro value at the command line
    # or in a config file and use it despite the value being
    # set subsequently within the recipe
    def _override(self, key, value):
        self.__overrides[key] = value

    def __setattr__(self, name, value):
        # Attribute assignment is item assignment: macros.foo = 'bar'
        self.__setitem__(name, value)

    def __getitem__(self, name):
        if name.startswith('_Macros'):
            return dict.__getitem__(self, name)
        # 'name.repmethod' requests an alternative representation of the
        # value (e.g. 'foo.literalRegex'); see __repmethod below.
        repmethod = None
        parts = name.split('.', 1)
        if len(parts) > 1:
            repmethod = parts[1]
            name = parts[0]
        # Overrides win over any value set later in the recipe
        if name in self.__overrides:
            return self.__repmethod(self.__overrides[name], repmethod)
        if not name in self:
            # update on access
            # okay for this to fail bc of no __macros
            # -- equivalent to missing dict value
            value = self.__macros[name]
            self[name] = value
            return self.__repmethod(value, repmethod)
        else:
            # Stored values may still reference other macros, hence the
            # '% self' expansion on read.
            return self.__repmethod(dict.__getitem__(self, name) % self, repmethod)

    def __repmethod(self, name, repmethod):
        # Apply the requested alternative representation, if any.
        if repmethod is None:
            return name
        if repmethod == 'literalRegex':
            return util.literalRegex(name)
        # should not be reached
        raise MacroError, 'unknown representation method %s for %s' %(repmethod, name)

    def __getattr__(self, name):
        # Attribute access is item access: macros.foo == macros['foo']
        return self.__getitem__(name)

    def trackChanges(self, flag=True):
        # When tracking is on, each subsequent assignment records its key
        # in __tracked (see __setitem__).
        self.__track = flag

    def getTrackedChanges(self):
        return self.__tracked.keys()

    def copy(self, shadow=True):
        # shadow saves initial copying cost for a higher search cost
        return Macros(self, shadow)

    # occasionally it may be desirable to switch from shadowing
    # to a flattened representation
    def _flatten(self):
        if self.__macros:
            # just accessing the element will copy it to this
            # macro
            for key in self.__macros.keys():
                dummy = self[key]
            self.__macros = {}

    def __iter__(self):
        # since we are accessing every element in the parent anyway
        # just flatten hierarchy first, which greatly simplifies iterating
        self._flatten()
        # iter over self and parents
        for key in dict.__iter__(self):
            if not key.startswith('_Macros'):
                yield key

    def iterkeys(self):
        for key in self.__iter__():
            yield key

    def iteritems(self):
        for key in self.__iter__():
            yield (key, self[key])

    def keys(self):
        return [ x for x in self.__iter__() ]
class MacroError(Exception):
    """Raised for illegal macro names and unknown representation methods."""

    def __init__(self, msg):
        # keep the raw message; both str() and repr() surface it verbatim
        self.msg = msg

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return self.msg
fixed indentation
#
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Module implementing the "macro" dictionary class
"""
from lib import util
class Macros(dict):
    """A dictionary of macros supporting ``%(name)s`` self-interpolation.

    NOTE(review): this module is Python 2 code (``raise X, msg`` syntax,
    ``iteritems``).  Instance attributes are themselves stored as dict
    entries under their name-mangled ``_Macros...`` keys, because
    ``__setattr__`` routes through ``__setitem__``; the
    ``startswith('_Macros')`` tests below filter those bookkeeping entries
    out of the public view.
    """
    def __init__(self, macros={}, shadow=False):
        # NOTE(review): mutable default argument; it is only read, never
        # mutated, below, so it appears benign -- but a None sentinel
        # would be safer.
        self.__tracked = {}
        self.__track = False
        self.__overrides = {}
        if shadow:
            # defer copying; parent entries are pulled over lazily on access
            self.__macros = macros
        else:
            self.__macros = {}
            self.update(macros)
    def update(self, other):
        """Define every macro from ``other`` in this object."""
        # NOTE(review): requires other.iteritems() -- Python 2 dicts and
        # Macros instances provide it; Python 3 dicts would not.
        for key, item in other.iteritems():
            self[key] = item
    def __setitem__(self, name, value):
        # internal bookkeeping attributes bypass all macro processing
        if name.startswith('_Macros'):
            dict.__setitem__(self, name, value)
            return
        # '.' in name reserved for getting alternative representations
        if '.' in name:
            raise MacroError, 'name "%s" contains illegal character: "."' % name
        if self.__track:
            self.__tracked[name] = 1
        # only expand references to ourself
        d = {name: self.get(name)}
        # escape any macros in the new value
        value = value.replace('%', '%%')
        # unescape references to ourself (the literal is itself %-formatted,
        # so the search string is '%%(name)s' and the replacement '%(name)s')
        value = value.replace('%%%%(%s)s' %name, '%%(%s)s'%name)
        # expand our old value when defining the new value
        dict.__setitem__(self, name, value % d)
    # overrides allow you to set a macro value at the command line
    # or in a config file and use it despite the value being
    # set subsequently within the recipe
    def _override(self, key, value):
        self.__overrides[key] = value
    def __setattr__(self, name, value):
        # attribute assignment is an alias for item assignment
        self.__setitem__(name, value)
    def __getitem__(self, name):
        if name.startswith('_Macros'):
            return dict.__getitem__(self, name)
        repmethod = None
        parts = name.split('.', 1)
        if len(parts) > 1:
            # "name.method" requests an alternative representation
            repmethod = parts[1]
            name = parts[0]
        if name in self.__overrides:
            return self.__repmethod(self.__overrides[name], repmethod)
        if not name in self:
            # update on access
            # okay for this to fail bc of no __macros
            # -- equivalent to missing dict value
            value = self.__macros[name]
            self[name] = value
            return self.__repmethod(value, repmethod)
        else:
            # expand any remaining %(...)s references against ourself
            return self.__repmethod(dict.__getitem__(self, name) % self, repmethod)
    def __repmethod(self, name, repmethod):
        """Apply an alternative-representation method, if requested."""
        if repmethod is None:
            return name
        if repmethod == 'literalRegex':
            return util.literalRegex(name)
        # should not be reached
        raise MacroError, 'unknown representation method %s for %s' %(repmethod, name)
    def __getattr__(self, name):
        # attribute access is an alias for item access
        return self.__getitem__(name)
    def trackChanges(self, flag=True):
        """Start (or stop) recording which macro names get (re)defined."""
        self.__track = flag
    def getTrackedChanges(self):
        """Return the names recorded while tracking was enabled."""
        return self.__tracked.keys()
    def copy(self, shadow=True):
        # shadow saves initial copying cost for a higher search cost
        return Macros(self, shadow)
    # occasionally it may be desirable to switch from shadowing
    # to a flattened representation
    def _flatten(self):
        if self.__macros:
            # just accessing the element will copy it to this
            # macro
            for key in self.__macros.keys():
                dummy = self[key]
            self.__macros = {}
    def __iter__(self):
        # since we are accessing every element in the parent anyway
        # just flatten hierarchy first, which greatly simplifies iterating
        self._flatten()
        # iter over self and parents, skipping bookkeeping entries
        for key in dict.__iter__(self):
            if not key.startswith('_Macros'):
                yield key
    def iterkeys(self):
        for key in self.__iter__():
            yield key
    def iteritems(self):
        for key in self.__iter__():
            yield (key, self[key])
    def keys(self):
        return [ x for x in self.__iter__() ]
class MacroError(Exception):
    """Raised for illegal macro names and unknown representation methods."""
    def __init__(self, msg):
        # msg: human-readable description; surfaced verbatim by str()/repr()
        self.msg = msg
    def __repr__(self):
        return self.msg
    def __str__(self):
        return repr(self)
|
from datetime import datetime
import requests
import json
from auth import TTRAuth
from exceptions import raise_on_error
class TTRClient(object):
    """
    This is the actual client interface to Tiny Tiny RSS.

    This object retains a http session with the needed session cookies. From
    the client you can fetch categories, feeds, headlines and articles, all
    represented by Python objects. You can also update and modify articles
    and feeds on the server.
    """

    def __init__(self, url, user=None, password=None, auto_login=False):
        """
        Instantiate a new client.

        :param url: The full URL to the Tiny Tiny RSS server, *without* the
            /api/ suffix.
        :param user: The username to use when logging in.
        :param password: The password for the user.
        :param auto_login: *Optional* Automatically login upon instantiation,
            and re-login when a session cookie expires.
        """
        self.url = url + '/api/'
        self.user = user
        self.password = password
        self._session = requests.Session()
        if auto_login:
            # TTRAuth transparently re-authenticates when the session
            # cookie expires, so no explicit login() call is needed.
            self._session.auth = TTRAuth(user, password)

    def login(self):
        """
        Manually log in (i.e. request a session cookie)

        This method must be used if the client was not instantiated with
        ``auto_login=True``
        """
        self._get_json({
            'op': 'login',
            'user': self.user,
            'password': self.password
        })

    def logout(self):
        """
        Log out.

        After logging out, ``login()`` must be used to gain a valid session
        again. Please note that logging out invalidates any automatic
        re-login even after logging back in.
        """
        self._get_json({'op': 'logout'})
        # Drop the auth handler so automatic re-login stops after an
        # explicit logout.
        self._session.auth = None

    def _get_json(self, post_data):
        """POST ``post_data`` as JSON to the API and return the decoded reply.

        Raises whatever ``raise_on_error`` raises for API-level errors.
        """
        r = self._session.post(self.url, data=json.dumps(post_data))
        raise_on_error(r)
        return json.loads(r.content)

    def get_categories(self):
        """Get a list of all available categories"""
        r = self._get_json({'op': 'getCategories'})
        return [Category(cat, self) for cat in r['content']]

    def get_feeds(
            self,
            cat_id=-1,
            unread_only=False,
            limit=0,
            offset=0,
            include_nested=False):
        """
        Get a list of feeds in a category.

        :param cat_id: Category id. This is available as the ``id`` property
            of a Category object.
        :param unread_only: *Optional* Include only feeds containing unread
            articles. Default is ``False``.
        :param limit: *Optional* Limit number of included feeds to ``limit``.
            Default is 0 (unlimited).
        :param offset: *Optional* Skip this number of feeds. Useful for
            pagination. Default is 0.
        :param include_nested: *Optional* Include child categories. Default
            is ``False``.
        """
        # Bug fix: the optional parameters were previously accepted but
        # never sent to the server, so filtering and pagination were
        # silently ignored.
        r = self._get_json({
            'op': 'getFeeds',
            'cat_id': cat_id,
            'unread_only': unread_only,
            'limit': limit,
            'offset': offset,
            'include_nested': include_nested,
        })
        return [Feed(feed, self) for feed in r['content']]

    def get_headlines(self, feed_id=0):
        """
        Get a list of headlines from a specified feed.

        :param feed_id: Feed id. This is available as the ``id`` property of
            a Feed object.
        """
        r = self._get_json({'op': 'getHeadlines', 'feed_id': feed_id})
        return [Headline(hl, self) for hl in r['content']]

    def get_articles(self, article_id):
        """
        Get a list of articles from article ids.

        :param article_id: A comma separated string of article ids to fetch.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article_id})
        return [Article(article, self) for article in r['content']]

    def refresh_article(self, article):
        """
        Update all properties of an article object with fresh information from
        the server.

        Please note that this method alters the original object and does not
        return a new one.

        :param article: The article to refresh.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article.id})
        # Re-run __init__ on the existing object so it is updated in place.
        article.__init__(r['content'][0], client=self)

    def share_to_published(self, title, url, content):
        """
        Share an article to the *published* feed.

        :param title: Article title.
        :param url: Article url.
        :param content: Article content.
        """
        self._get_json({
            'op': 'shareToPublished',
            'title': title,
            'url': url,
            'content': content
        })

    def mark_unread(self, article_id):
        """
        Mark an article as unread.

        :param article_id: ID of article to mark as unread.
        """
        # mode 1 sets the flag; field 2 is the unread flag (cf. mark_read
        # and toggle_unread below).
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 1,
            'field': 2
        })

    def mark_read(self, article_id):
        """
        Mark an article as read.

        :param article_id: ID of article to mark as read.
        """
        # mode 0 clears the unread flag.  (Stray trailing ``pass`` removed.)
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 0,
            'field': 2
        })

    def toggle_unread(self, article_id):
        """
        Toggle the unread status of an article.

        :param article_id: ID of the article to toggle.
        """
        # mode 2 toggles the unread flag.
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 2,
            'field': 2
        })
class RemoteObject(object):
    """Base class for API objects: mirrors a response dict as attributes."""

    def __init__(self, attr, client=None):
        # Keep a handle on the owning client so helper methods can call home.
        self._client = client
        for name, val in attr.items():
            # The server returns ids as strings; normalize them to int.
            setattr(self, name, int(val) if name == 'id' else val)
class Category(RemoteObject):
    """A feed category; ``feeds()`` lists the feeds it contains."""

    def feeds(self, **kwargs):
        """Fetch this category's feeds; kwargs pass through to the client."""
        return self._client.get_feeds(cat_id=self.id, **kwargs)
class Feed(RemoteObject):
    """A single feed; converts ``last_updated`` to a ``datetime`` if present."""

    def __init__(self, attr, client):
        super(Feed, self).__init__(attr, client)
        try:
            # The API reports last_updated as a unix timestamp, but some
            # responses omit the field entirely.
            ts = self.last_updated
        except AttributeError:
            pass
        else:
            self.last_updated = datetime.fromtimestamp(ts)

    def headlines(self):
        """Return the headlines currently in this feed."""
        return self._client.get_headlines(feed_id=self.id)
class Headline(RemoteObject):
    """One headline row; can be expanded into a full Article."""

    def full_article(self):
        """Fetch and return the complete Article for this headline."""
        # getArticle returns a one-element list for a single id.
        articles = self._client.get_articles(self.id)
        return articles[0]
class Article(RemoteObject):
    """A full article with publish/refresh/toggle helpers."""

    def publish(self):
        """Share this article to the *published* feed."""
        self._client.share_to_published(self.title, self.link, self.content)

    def refresh_status(self):
        """Re-fetch all attributes of this article from the server."""
        self._client.refresh_article(self)

    def toggle_unread(self):
        """Flip this article's unread flag on the server."""
        self._client.toggle_unread(self.id)
Fix broken docs
from datetime import datetime
import requests
import json
from auth import TTRAuth
from exceptions import raise_on_error
class TTRClient(object):
    """
    This is the actual client interface to Tiny Tiny RSS.

    This object retains a http session with the needed session cookies. From
    the client you can fetch categories, feeds, headlines and articles, all
    represented by Python objects. You can also update and modify articles
    and feeds on the server.
    """

    def __init__(self, url, user=None, password=None, auto_login=False):
        """
        Instantiate a new client.

        :param url: The full URL to the Tiny Tiny RSS server, *without* the
            /api/ suffix.
        :param user: The username to use when logging in.
        :param password: The password for the user.
        :param auto_login: *Optional* Automatically login upon instantiation,
            and re-login when a session cookie expires.
        """
        self.url = url + '/api/'
        self.user = user
        self.password = password
        self._session = requests.Session()
        if auto_login:
            # TTRAuth transparently re-authenticates when the session
            # cookie expires, so no explicit login() call is needed.
            self._session.auth = TTRAuth(user, password)

    def login(self):
        """
        Manually log in (i.e. request a session cookie)

        This method must be used if the client was not instantiated with
        ``auto_login=True``
        """
        self._get_json({
            'op': 'login',
            'user': self.user,
            'password': self.password
        })

    def logout(self):
        """
        Log out.

        After logging out, ``login()`` must be used to gain a valid session
        again. Please note that logging out invalidates any automatic
        re-login even after logging back in.
        """
        self._get_json({'op': 'logout'})
        # Drop the auth handler so automatic re-login stops after an
        # explicit logout.
        self._session.auth = None

    def _get_json(self, post_data):
        """POST ``post_data`` as JSON to the API and return the decoded reply.

        Raises whatever ``raise_on_error`` raises for API-level errors.
        """
        r = self._session.post(self.url, data=json.dumps(post_data))
        raise_on_error(r)
        return json.loads(r.content)

    def get_categories(self):
        """Get a list of all available categories"""
        r = self._get_json({'op': 'getCategories'})
        return [Category(cat, self) for cat in r['content']]

    def get_feeds(
            self,
            cat_id=-1,
            unread_only=False,
            limit=0,
            offset=0,
            include_nested=False):
        """
        Get a list of feeds in a category.

        :param cat_id: Category id. This is available as the ``id`` property
            of a Category object.
        :param unread_only: *Optional* Include only feeds containing unread
            articles. Default is ``False``.
        :param limit: *Optional* Limit number of included feeds to ``limit``.
            Default is 0 (unlimited).
        :param offset: *Optional* Skip this number of feeds. Useful for
            pagination. Default is 0.
        :param include_nested: *Optional* Include child categories. Default
            is ``False``.
        """
        # Bug fix: the optional parameters were previously accepted but
        # never sent to the server, so filtering and pagination were
        # silently ignored.
        r = self._get_json({
            'op': 'getFeeds',
            'cat_id': cat_id,
            'unread_only': unread_only,
            'limit': limit,
            'offset': offset,
            'include_nested': include_nested,
        })
        return [Feed(feed, self) for feed in r['content']]

    def get_headlines(self, feed_id=0):
        """
        Get a list of headlines from a specified feed.

        :param feed_id: Feed id. This is available as the ``id`` property of
            a Feed object.
        """
        r = self._get_json({'op': 'getHeadlines', 'feed_id': feed_id})
        return [Headline(hl, self) for hl in r['content']]

    def get_articles(self, article_id):
        """
        Get a list of articles from article ids.

        :param article_id: A comma separated string of article ids to fetch.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article_id})
        return [Article(article, self) for article in r['content']]

    def refresh_article(self, article):
        """
        Update all properties of an article object with fresh information from
        the server.

        Please note that this method alters the original object and does not
        return a new one.

        :param article: The article to refresh.
        """
        r = self._get_json({'op': 'getArticle', 'article_id': article.id})
        # Re-run __init__ on the existing object so it is updated in place.
        article.__init__(r['content'][0], client=self)

    def share_to_published(self, title, url, content):
        """
        Share an article to the *published* feed.

        :param title: Article title.
        :param url: Article url.
        :param content: Article content.
        """
        self._get_json({
            'op': 'shareToPublished',
            'title': title,
            'url': url,
            'content': content
        })

    def mark_unread(self, article_id):
        """
        Mark an article as unread.

        :param article_id: ID of article to mark as unread.
        """
        # mode 1 sets the flag; field 2 is the unread flag (cf. mark_read
        # and toggle_unread below).
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 1,
            'field': 2
        })

    def mark_read(self, article_id):
        """
        Mark an article as read.

        :param article_id: ID of article to mark as read.
        """
        # mode 0 clears the unread flag.  (Stray trailing ``pass`` removed.)
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 0,
            'field': 2
        })

    def toggle_unread(self, article_id):
        """
        Toggle the unread status of an article.

        :param article_id: ID of the article to toggle.
        """
        # mode 2 toggles the unread flag.
        self._get_json({
            'op': 'updateArticle',
            'article_ids': article_id,
            'mode': 2,
            'field': 2
        })
class RemoteObject(object):
    """Generic remote entity; copies a server attribute dict onto self."""

    def __init__(self, attr, client=None):
        self._client = client
        for key in attr:
            value = attr[key]
            if key == 'id':
                # ids arrive from the API as strings
                value = int(value)
            setattr(self, key, value)
class Category(RemoteObject):
    """A category of feeds on the server."""

    def feeds(self, **kwargs):
        """List the feeds in this category; kwargs reach ``get_feeds``."""
        client = self._client
        return client.get_feeds(cat_id=self.id, **kwargs)
class Feed(RemoteObject):
    """A single feed known to the server."""

    def __init__(self, attr, client):
        super(Feed, self).__init__(attr, client)
        # last_updated, when supplied, is a unix timestamp; expose it as a
        # datetime instead.
        if hasattr(self, 'last_updated'):
            self.last_updated = datetime.fromtimestamp(self.last_updated)

    def headlines(self):
        """Fetch this feed's headlines from the server."""
        return self._client.get_headlines(feed_id=self.id)
class Headline(RemoteObject):
    """A headline entry from a feed listing."""

    def full_article(self):
        """Return the complete Article object for this headline."""
        return self._client.get_articles(self.id)[0]
class Article(RemoteObject):
    """A complete article, including its content."""

    def publish(self):
        """Push this article to the shared *published* feed."""
        client = self._client
        client.share_to_published(self.title, self.link, self.content)

    def refresh_status(self):
        """Reload this article's attributes from the server, in place."""
        self._client.refresh_article(self)

    def toggle_unread(self):
        """Invert this article's unread flag on the server."""
        self._client.toggle_unread(self.id)
|
"""
@package mi.instrument.sunburst.sami2_pco2.pco2a.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2a/driver.py
@author Christopher Wingard
@brief Test cases for pco2a driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Pco2waConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
# Register the driver under test with the IDK framework; every test class
# in this module picks up this shared configuration.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.sunburst.sami2_pco2.pco2a.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id='V7HE4T',
    instrument_agent_name='sunburst_sami2_pco2_pco2a',
    instrument_agent_packet_config=DataParticleType(),
    ##driver_startup_config={}
    # Startup applies BIT_SWITCHES=0x01; other parameters keep their
    # driver defaults (see _driver_parameters in the mixin below).
    driver_startup_config={
        DriverStartupConfigKey.PARAMETERS: {
            Parameter.BIT_SWITCHES: 0x01,
        },
    }
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C010038' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00, REQUIRED: True},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00, REQUIRED: True},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x01E13380, VALUE: 0x01E13380, REQUIRED: True},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x0A, VALUE: 0x0A, REQUIRED: True},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x04, VALUE: 0x04, REQUIRED: True},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x02, VALUE: 0x02, REQUIRED: True},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x0B, VALUE: 0x0B, REQUIRED: True},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x0D, VALUE: 0x00, REQUIRED: True},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
DEFAULT: 0x07, VALUE: 0x07, REQUIRED: True},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x10, VALUE: 0x10, REQUIRED: True},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x20, VALUE: 0x20, REQUIRED: True},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0xFF, VALUE: 0xFF, REQUIRED: True},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x54, VALUE: 0x54, REQUIRED: True},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x18, VALUE: 0x18, REQUIRED: True},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x1C, VALUE: 0x1C, REQUIRED: True},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
DEFAULT: 0x38, VALUE: 0x38, REQUIRED: True},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x38, VALUE: 3600, REQUIRED: True},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2waConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
Pco2waConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2waConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2waConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
#self.assert_parameters(current_parameters, self._driver_parameters,
# False)
def assert_particle_sami_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particles (Type 4 and 5). Used in INT test where type doesn't matter.
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particle (Type 4)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
required_record_type = 4
log.debug('assert_particle_sami_data_sample(): record_type:required_record_type = ' +
str(record_type) + ":" +
str(required_record_type))
self.assertEquals(record_type, required_record_type)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
"""
Verify sami_blank_sample particle (Type 5)
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_blank_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_blank_sample_parameters,
verify_values)
sample_dict = self.get_data_particle_values_as_dict(data_particle)
record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
required_record_type = 5
log.debug('assert_particle_sami_blank_sample(): record_type:required_record_type = ' +
str(record_type) + ":" +
str(required_record_type))
self.assertEquals(record_type, required_record_type)
def assert_particle_configuration(self, data_particle, verify_values=False):
"""
Verify configuration particle
@param data_particle: Pco2wConfigurationDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2waConfigurationDataParticleKey,
self._configuration_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.CONFIGURATION)
self.assert_data_particle_parameters(data_particle,
self._configuration_parameters,
verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
    """
    Unit tests: exercise the protocol enums, FSM capabilities, chunker and
    particle generation with Mock objects; no port agent or driver process
    is started.
    """

    # Expected capabilities (protocol events) for each protocol state,
    # verified against the FSM in test_capabilities().
    capabilities_test_dict = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'DRIVER_EVENT_START_DIRECT',
                                'DRIVER_EVENT_ACQUIRE_STATUS',
                                'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                'DRIVER_EVENT_START_AUTOSAMPLE',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
                                'DRIVER_EVENT_REAGENT_FLUSH',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
                                'DRIVER_EVENT_REAGENT_FLUSH_100ML'],
        ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                              'PROTOCOL_EVENT_SUCCESS',
                                              'PROTOCOL_EVENT_TIMEOUT',
                                              'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT',
                                      'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                                    'PROTOCOL_EVENT_SUCCESS',
                                                    'PROTOCOL_EVENT_TIMEOUT',
                                                    'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT',
                                            'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                   'DRIVER_EVENT_STOP_AUTOSAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
                                      'DRIVER_EVENT_STOP_DIRECT'],
        ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT',
                                      'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT',
                                            'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                         'PROTOCOL_EVENT_SUCCESS',
                                         'PROTOCOL_EVENT_TIMEOUT',
                                         'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                               'PROTOCOL_EVENT_SUCCESS',
                                               'PROTOCOL_EVENT_TIMEOUT',
                                               'DRIVER_EVENT_ACQUIRE_STATUS'],
    }

    def test_base_driver_enums(self):
        """
        Verify that all the SAMI Instrument driver enumerations have no
        duplicate values that might cause confusion.  Also do a little
        extra validation for the Capabilities.

        Extra enumeration tests are done in a specific subclass.
        """
        # Test Enums defined in the base SAMI driver
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())

        # Test capabilities for duplicates, then verify that capabilities
        # is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_driver_schema(self):
        """
        Get the driver schema and verify it is configured properly.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_driver_enums(self):
        """
        Verify that all driver enumerations have no duplicate values that
        might cause confusion.
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(InstrumentCommand())

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)

        # Run each known raw sample through the same four chunker checks
        # (whole, with noise, fragmented, combined).  This replaces the
        # original 20-line copy/paste block; order and calls are unchanged.
        for raw_sample in (self.VALID_STATUS_MESSAGE,
                           self.VALID_CONTROL_RECORD,
                           self.VALID_R0_BLANK_SAMPLE,
                           self.VALID_R0_DATA_SAMPLE,
                           self.VALID_CONFIG_STRING):
            self.assert_chunker_sample(chunker, raw_sample)
            self.assert_chunker_sample_with_noise(chunker, raw_sample)
            self.assert_chunker_fragmented_sample(chunker, raw_sample)
            self.assert_chunker_combined_sample(chunker, raw_sample)

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the
        correct data particles.
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)

        self.assert_raw_particle_published(driver, True)

        # Start validating data particles
        self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
                                       self.assert_particle_regular_status, True)
        self.assert_particle_published(driver, self.VALID_CONTROL_RECORD,
                                       self.assert_particle_control_record, True)
        self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
                                       self.assert_particle_sami_blank_sample, True)
        self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
                                       self.assert_particle_sami_data_sample, True)
        self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
                                       self.assert_particle_configuration, True)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.  Iterate through available
        capabilities, and verify that they can pass successfully through the
        filter.  Test silly made up capabilities to verify they are blocked
        by the filter.
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()

        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")

        # Verify "BOGUS_CAPABILITY" was filtered out
        # (assertEqual replaces the deprecated assertEquals alias)
        self.assertEqual(sorted(driver_capabilities),
                         sorted(protocol._filter_capabilities(test_capabilities)))

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected.  All states defined
        in this dict must also be defined in the protocol FSM.  Note, the
        EXIT and ENTER DRIVER_EVENTS don't need to be listed here.
        """
        # capabilities defined in base class test_driver.
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, self.capabilities_test_dict)

    def test_pump_commands(self):
        """
        Verify the pump commands via the base-class assertion helper.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_commands(driver)

    def test_pump_timing(self):
        """
        Verify pump timing via the base-class assertion helper.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_timing(driver)

    def test_waiting_discover(self):
        """
        Verify discovery from the WAITING state via the base-class helper.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_waiting_discover(driver)

    def test_autosample_timing(self):
        """
        Verify autosample timing via the base-class assertion helper.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
    """
    Integration Tests:

    test_startup_params: Verify that driver startup parameters are set properly.
    test_set: In command state, test configuration particle generation.
        Parameter.PUMP_PULSE
        Parameter.PUMP_DURATION
        Parameter.SAMPLES_PER_MEASUREMENT
        Parameter.CYCLES_BETWEEN_BLANKS
        Parameter.NUMBER_REAGENT_CYCLES
        Parameter.NUMBER_BLANK_CYCLES
        Parameter.FLUSH_PUMP_INTERVAL
        Parameter.BIT_SWITCHES
        Parameter.NUMBER_EXTRA_PUMP_CYCLES
        Parameter.AUTO_SAMPLE_INTERVAL
        Negative Set Tests:
            START_TIME_FROM_LAUNCH
            STOP_TIME_FROM_START
            MODE_BITS
            SAMI_SAMPLE_INTERVAL
    test_commands: In autosample and command states, test particle generation.
        ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
        ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
        ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
    test_autosample: Test autosample particle generation.
        START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
        STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    test_flush_pump: Test flush pump commands
    """

    def test_startup_params(self):
        """
        Verify startup parameters are applied at initialization, can be
        bulk-set to different values, and are restored by
        apply_startup_params().
        """
        startup_values = {
            Parameter.PUMP_PULSE: 0x10,
            Parameter.PUMP_DURATION: 0x20,
            Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
            Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
            Parameter.NUMBER_REAGENT_CYCLES: 0x18,
            Parameter.NUMBER_BLANK_CYCLES: 0x1C,
            Parameter.FLUSH_PUMP_INTERVAL: 0x01,
            Parameter.BIT_SWITCHES: 0x01,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
            Parameter.AUTO_SAMPLE_INTERVAL: 3600,
            Parameter.REAGENT_FLUSH_DURATION: 0x08,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
            Parameter.PUMP_100ML_CYCLES: 1
        }

        new_values = {
            Parameter.PUMP_PULSE: 0x11,
            Parameter.PUMP_DURATION: 0x21,
            Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
            Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
            Parameter.NUMBER_REAGENT_CYCLES: 0x19,
            Parameter.NUMBER_BLANK_CYCLES: 0x1D,
            Parameter.FLUSH_PUMP_INTERVAL: 0x02,
            Parameter.BIT_SWITCHES: 0x02,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
            Parameter.AUTO_SAMPLE_INTERVAL: 600,
            Parameter.REAGENT_FLUSH_DURATION: 0x01,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
            Parameter.PUMP_100ML_CYCLES: 14
        }

        self.assert_initialize_driver()

        # iteritems() is kept deliberately: this file targets the Python 2
        # mi/pyon framework.
        for (key, val) in startup_values.iteritems():
            self.assert_get(key, val)

        self.assert_set_bulk(new_values)

        # Re-applying startup params must restore the original values.
        # (The original bound the reply to an unused variable.)
        self.driver_client.cmd_dvr('apply_startup_params')

        for (key, val) in startup_values.iteritems():
            self.assert_get(key, val)

    def test_set(self):
        """
        In command state, set each writable parameter and verify that the
        read-only parameters reject sets.
        """
        self.assert_initialize_driver()

        self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
        self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
        self.assert_set(Parameter.PUMP_PULSE, 20)
        self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
        self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
        self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
        self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
        self.assert_set(Parameter.BIT_SWITCHES, 1)
        self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
        self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
        self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
        self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)

        self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
        self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
        self.assert_set_readonly(Parameter.MODE_BITS, 10)
        self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)

    def test_bulk_set(self):
        """
        Set all writable parameters in one bulk operation.
        """
        self.assert_initialize_driver()

        new_values = {
            Parameter.AUTO_SAMPLE_INTERVAL: 77,
            Parameter.CYCLES_BETWEEN_BLANKS: 7,
            Parameter.PUMP_PULSE: 20,
            Parameter.SAMPLES_PER_MEASUREMENT: 239,
            Parameter.NUMBER_REAGENT_CYCLES: 26,
            Parameter.NUMBER_BLANK_CYCLES: 30,
            Parameter.FLUSH_PUMP_INTERVAL: 2,
            Parameter.BIT_SWITCHES: 1,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
            # NOTE(review): the 4/16 pairing below is the reverse of the
            # 16/4 pairing used in test_set() - confirm this is intentional
            # and not a copy/paste swap.
            Parameter.REAGENT_FLUSH_DURATION: 4,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
            Parameter.PUMP_100ML_CYCLES: 14
        }
        self.assert_set_bulk(new_values)

    def test_bad_parameters(self):
        """
        Verify that float values are rejected for integer parameters.
        """
        self.assert_initialize_driver()

        # All calls use the value= keyword for consistency (the original
        # mixed keyword and positional forms).
        self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, value=7.0)
        self.assert_set_exception(Parameter.PUMP_PULSE, value=20.0)
        self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, value=239.0)
        self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, value=26.0)
        self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, value=30.0)
        self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, value=2.0)
        self.assert_set_exception(Parameter.BIT_SWITCHES, value=1.0)
        self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, value=88.0)

    def test_acquire_sample(self):
        """
        Verify ACQUIRE_SAMPLE produces a regular (type 4) sample particle.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)

    def test_acquire_blank_sample(self):
        """
        Verify ACQUIRE_BLANK_SAMPLE produces a blank (type 5) sample particle.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
                                              timeout=160)

    def test_auto_sample(self):
        """
        Verify autosample generates multiple sample particles at the
        configured interval and can be stopped.
        """
        self.assert_initialize_driver()
        self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 60)

        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              particle_count=4, timeout=320)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_polled_sample_state(self):
        """
        Verify ACQUIRE_SAMPLE transitions through POLLED_SAMPLE state.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)

    def test_polled_blank_sample_state(self):
        """
        Verify ACQUIRE_BLANK_SAMPLE transitions through POLLED_BLANK_SAMPLE
        state.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
                                              timeout=160)

    def test_scheduled_sample_state(self):
        """
        Verify autosample transitions through SCHEDULED_SAMPLE state.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_scheduled_blank_sample_state(self):
        """
        Verify a blank sample can be polled while autosampling, passing
        through SCHEDULED_BLANK_SAMPLE state.
        """
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)
        self.clear_events()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
                                   delay=5)
        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
                                              timeout=160)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_queued_command(self):
        """
        Verify status is queued while samples are being taken.
        """
        self.assert_initialize_driver()

        # Queue status
        self.clear_events()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)

        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              particle_count=1, timeout=180)
        self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
                                              timeout=180)
        self.assert_current_state(ProtocolState.COMMAND)

    def test_queued_autosample(self):
        """
        Verify status is queued while autosample samples are being taken.
        """
        self.assert_initialize_driver()
        self.clear_events()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)

        # Queue sample and status
        self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)

        self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              particle_count=1, timeout=180)
        self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
                                              timeout=180)
        self.assert_current_state(ProtocolState.AUTOSAMPLE)

    def test_acquire_status(self):
        """
        Verify ACQUIRE_STATUS generates the regular status, configuration,
        battery voltage and thermistor voltage particles.
        """
        self.assert_initialize_driver()
        self.clear_events()
        self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.REGULAR_STATUS,
                                        self.assert_particle_regular_status)
        self.assert_async_particle_generation(DataParticleType.CONFIGURATION, self.assert_particle_configuration)
        self.assert_async_particle_generation(DataParticleType.BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
        self.assert_async_particle_generation(DataParticleType.THERMISTOR_VOLTAGE,
                                              self.assert_particle_thermistor_voltage)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
    """
    Qualification tests: drive the instrument through the instrument agent
    for final ION-integration testing.  These talk to real hardware; the
    exact ordering of commands and sleeps below is significant.
    """

    def test_queued_command(self):
        # Queue an ACQUIRE_STATUS while a polled sample is in progress and
        # verify the status particle is still produced.
        self.assert_enter_command_mode()

        self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4, resource_state=ProtocolState.POLLED_SAMPLE)
        self.assert_resource_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_particle_async(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status, timeout=60)

    def test_queued_autosample(self):
        # Same as test_queued_command, but with autosample running.
        self.assert_enter_command_mode()
        self.assert_start_autosample(timeout=200)

        self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4,
                                     resource_state=ProtocolState.SCHEDULED_SAMPLE)
        self.assert_resource_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_particle_async(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status, timeout=60)

        self.assert_stop_autosample()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    @unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
    def test_overnight(self):
        """
        Verify autosample at default rate
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE,
                                      timeout=14400)

    def test_direct_access_telnet_mode(self):
        """
        @brief This test manually tests that the Instrument Driver properly
        supports direct access to the physical instrument. (telnet mode)
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)

        # Full configuration string to write to the instrument over direct
        # access.
        configuration_string = 'CF889C9C02C7EA0001E1338002000E10040200000000000000000000000000000000000000000' + \
                               '71020FFA8181C0100380000000000000000000000000000000000000000000000000000000000' + \
                               '00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
                               '0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'

        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)

        # Erase memory
        self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)

        time.sleep(1)

        # Load a new configuration string (L5A command, then the string
        # itself on the next line)
        self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)

        time.sleep(1)

        self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))

        time.sleep(1)

        # Check that configuration was changed
        self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
        return_value = self.tcp_client.expect(configuration_string)
        self.assertTrue(return_value)

        ###
        # Add instrument specific code here.
        ###

        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

        # After leaving direct access the parameter set before DA must still
        # read back as 7.
        self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)

    def test_command_poll(self):
        # Poll every particle type from command mode, then run each flush
        # command and verify the driver returns to COMMAND state.
        self.assert_enter_command_mode()

        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)

        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
                                     resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)

        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample_poll(self):
        # Poll every particle type while autosample is running, then stop
        # autosample and verify return to COMMAND state.
        self.assert_enter_command_mode()
        self.assert_start_autosample(timeout=200)

        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)

        self.assert_stop_autosample()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample(self):
        """
        Verify autosample works and data particles are created
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 60)

        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE)

    def test_get_capabilities(self):
        """
        @brief Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()

        ##################
        #  Command Mode
        ##################

        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.ACQUIRE_SAMPLE,
                ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
                ProtocolEvent.DEIONIZED_WATER_FLUSH,
                ProtocolEvent.REAGENT_FLUSH,
                ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
                ProtocolEvent.REAGENT_FLUSH_100ML
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }

        self.assert_capabilities(capabilities)

        ##################
        #  DA Mode
        ##################

        # In direct access only GO_COMMAND is available and no resource
        # commands are exposed.
        da_capabilities = copy.deepcopy(capabilities)
        da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
        da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []

        # Test direct access disconnect
        self.assert_direct_access_start_telnet(timeout=10)
        self.assertTrue(self.tcp_client)

        self.assert_capabilities(da_capabilities)
        self.tcp_client.disconnect()

        # Now do it again, but use the event to stop DA
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_direct_access_start_telnet(timeout=10)
        self.assert_capabilities(da_capabilities)
        self.assert_direct_access_stop_telnet()

        ##################
        #  Command Mode
        ##################

        # We should be back in command mode from DA.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)

        ##################
        #  Streaming Mode
        ##################

        # While streaming, STOP_AUTOSAMPLE replaces START_AUTOSAMPLE and the
        # flush commands are not available.
        st_capabilities = copy.deepcopy(capabilities)
        st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.STOP_AUTOSAMPLE,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.ACQUIRE_SAMPLE,
            ProtocolEvent.ACQUIRE_BLANK_SAMPLE
        ]

        self.assert_start_autosample(timeout=200)
        self.assert_capabilities(st_capabilities)
        self.assert_stop_autosample()

        ##################
        #  Command Mode
        ##################

        # We should be back in command mode from DA.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)

        #######################
        #  Uninitialized Mode
        #######################

        # After reset, no resource commands/interface/parameters remain.
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []

        self.assert_reset()
        self.assert_capabilities(capabilities)
# stale code removed
"""
@package mi.instrument.sunburst.sami2_pco2.pco2a.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2a/test/test_driver.py
@author Christopher Wingard
@brief Test cases for pco2a driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Pco2waConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
# Register the driver under test and its agent configuration with the IDK
# test harness.  The startup config sets BIT_SWITCHES to a non-default value
# (the driver default is 0x00) so that test_startup_params can observe that
# startup parameters are actually applied.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.sunburst.sami2_pco2.pco2a.driver',
    driver_class="InstrumentDriver",

    instrument_agent_resource_id='V7HE4T',
    instrument_agent_name='sunburst_sami2_pco2_pco2a',
    instrument_agent_packet_config=DataParticleType(),

    driver_startup_config={
        DriverStartupConfigKey.PARAMETERS: {
            Parameter.BIT_SWITCHES: 0x01,
        },
    }
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(Pco2DriverTestMixinSub):
    """
    Mixin class used for storing data particle constants and common data
    assertion methods for the PCO2-A driver tests.
    """
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
                                             ProtocolState.AUTOSAMPLE]},
        Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
        Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
                                               ProtocolState.AUTOSAMPLE]},
        Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
                                              ProtocolState.COMMAND]},
        Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
        Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
        Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
        Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]}
    }

    ###
    # Instrument output (driver input) Definitions
    ###
    # Configuration string received from the instrument via the L command
    # (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
    # (~18 months) later and stop 365 days after that. SAMI and Device1
    # (external SBE pump) are set to run every 60 minutes, but will be polled
    # on a regular schedule rather than autosampled. Device1 is not configured
    # to run after the SAMI and will run for 10 seconds. To configure the
    # instrument using this string, add a null byte (00) to the end of the
    # string.
    VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
                          '000000000D000000000D000000000D07' + \
                          '1020FF54181C010038' + \
                          '000000000000000000000000000000000000000000000000000' + \
                          '000000000000000000000000000000000000000000000000000' + \
                          '000000000000000000000000000000' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                          'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE

    # Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
    # commands, respectively)
    VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
                            '74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
    VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
                           'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE

    ###
    # Parameter and Type Definitions
    ###
    _driver_parameters = {
        # Parameters defined in the IOS
        Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                DEFAULT: 0x00000000, VALUE: 0xCEE90B00, REQUIRED: True},
        Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00, REQUIRED: True},
        Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                         DEFAULT: 0x01E13380, VALUE: 0x01E13380, REQUIRED: True},
        Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                              DEFAULT: 0x0A, VALUE: 0x0A, REQUIRED: True},
        Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                         DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
        Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                        DEFAULT: 0x04, VALUE: 0x04, REQUIRED: True},
        Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                        DEFAULT: 0x02, VALUE: 0x02, REQUIRED: True},
        Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                            DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
        Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
        Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x0B, VALUE: 0x0B, REQUIRED: True},
        Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                            DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
        Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
        Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
        Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                            DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
        Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
        Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                           DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
        Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                             DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
        Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                            DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
        # NOTE(review): VALUE 0x00 differs from DEFAULT 0x0D here, unlike every
        # other entry, and the config string above encodes 0x0D -- confirm this
        # is intentional.
        Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                            DEFAULT: 0x0D, VALUE: 0x00, REQUIRED: True},
        Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: False,
                                         DEFAULT: 0x07, VALUE: 0x07, REQUIRED: True},
        Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                               DEFAULT: 0x10, VALUE: 0x10, REQUIRED: True},
        Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                  DEFAULT: 0x20, VALUE: 0x20, REQUIRED: True},
        Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                            DEFAULT: 0xFF, VALUE: 0xFF, REQUIRED: True},
        Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                          DEFAULT: 0x54, VALUE: 0x54, REQUIRED: True},
        Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                          DEFAULT: 0x18, VALUE: 0x18, REQUIRED: True},
        Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                        DEFAULT: 0x1C, VALUE: 0x1C, REQUIRED: True},
        Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                        DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
        Parameter.BIT_SWITCHES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                 DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
        Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: False,
                                             DEFAULT: 0x38, VALUE: 0x38, REQUIRED: True},
        Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
                                         DEFAULT: 0x38, VALUE: 3600, REQUIRED: True},
        Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
                                           DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
        Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
                                                   DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
        Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
                                      DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
    }

    _sami_data_sample_parameters = {
        # SAMI Type 4/5 sample (in this case it is a Type 4)
        Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
                                                                                0x0730, 0x03E9, 0x08A1, 0x232D,
                                                                                0x0043, 0x001A, 0x0962, 0x0154,
                                                                                0x072F, 0x03EA], REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
    }

    _sami_blank_sample_parameters = {
        # SAMI Type 4/5 sample (in this case it is a Type 5)
        Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
                                                                                0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
                                                                                0x0961, 0x0680, 0x0732, 0x074E],
                                                            REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
        Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
    }

    _configuration_parameters = {
        # Configuration settings
        Pco2waConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: True, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: bool, VALUE: True, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: bool, VALUE: True, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: bool, VALUE: True, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: bool, VALUE: False, REQUIRED: True},
        Pco2waConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
    }

    ###
    # Driver Parameter Methods
    ###
    def assert_driver_parameters(self, current_parameters, verify_values=False):
        """
        Verify that all driver parameters are correct and potentially verify
        values.
        @param current_parameters: driver parameters read from the driver
        instance
        @param verify_values: should we verify values against definition?
        """
        self.assert_parameters(current_parameters, self._driver_parameters,
                               verify_values)

    def assert_particle_sami_sample(self, data_particle, verify_values=False):
        """
        Verify sami_data_sample particles (Type 4 and 5). Used in INT test where type doesn't matter.
        @param data_particle: Pco2wSamiSampleDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
                                       self._sami_data_sample_parameters)
        self.assert_data_particle_header(data_particle,
                                         DataParticleType.SAMI_SAMPLE)
        self.assert_data_particle_parameters(data_particle,
                                             self._sami_data_sample_parameters,
                                             verify_values)

    def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
        """
        Verify sami_data_sample particle (Type 4)
        @param data_particle: Pco2wSamiSampleDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        # Check the record type first so a blank sample fails with a clear
        # message before the generic particle assertions run.
        sample_dict = self.get_data_particle_values_as_dict(data_particle)
        record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
        self.assertEqual(record_type, 4, msg="Not a regular sample, record_type = %d" % record_type)
        self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
                                       self._sami_data_sample_parameters)
        self.assert_data_particle_header(data_particle,
                                         DataParticleType.SAMI_SAMPLE)
        self.assert_data_particle_parameters(data_particle,
                                             self._sami_data_sample_parameters,
                                             verify_values)

    def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
        """
        Verify sami_blank_sample particle (Type 5)
        @param data_particle: Pco2wSamiSampleDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        # Check the record type first so a regular sample fails with a clear
        # message before the generic particle assertions run.
        sample_dict = self.get_data_particle_values_as_dict(data_particle)
        record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
        self.assertEqual(record_type, 5, msg="Not a blank sample, record_type = %d" % record_type)
        self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
                                       self._sami_blank_sample_parameters)
        self.assert_data_particle_header(data_particle,
                                         DataParticleType.SAMI_SAMPLE)
        self.assert_data_particle_parameters(data_particle,
                                             self._sami_blank_sample_parameters,
                                             verify_values)

    def assert_particle_configuration(self, data_particle, verify_values=False):
        """
        Verify configuration particle
        @param data_particle: Pco2wConfigurationDataParticle data particle
        @param verify_values: bool, should we verify parameter values
        """
        self.assert_data_particle_keys(Pco2waConfigurationDataParticleKey,
                                       self._configuration_parameters)
        self.assert_data_particle_header(data_particle,
                                         DataParticleType.CONFIGURATION)
        self.assert_data_particle_parameters(data_particle,
                                             self._configuration_parameters,
                                             verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
    # Expected FSM capabilities per protocol state; verified by
    # test_capabilities against the driver's state machine.
    capabilities_test_dict = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.WAITING: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'DRIVER_EVENT_START_DIRECT',
                                'DRIVER_EVENT_ACQUIRE_STATUS',
                                'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                'DRIVER_EVENT_START_AUTOSAMPLE',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
                                'DRIVER_EVENT_REAGENT_FLUSH',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
                                'DRIVER_EVENT_REAGENT_FLUSH_100ML'],
        ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                              'PROTOCOL_EVENT_SUCCESS',
                                              'PROTOCOL_EVENT_TIMEOUT',
                                              'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT',
                                      'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                                    'PROTOCOL_EVENT_SUCCESS',
                                                    'PROTOCOL_EVENT_TIMEOUT',
                                                    'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT',
                                            'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                   'DRIVER_EVENT_STOP_AUTOSAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
                                      'DRIVER_EVENT_STOP_DIRECT'],
        ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT',
                                      'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT',
                                            'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                         'PROTOCOL_EVENT_SUCCESS',
                                         'PROTOCOL_EVENT_TIMEOUT',
                                         'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                               'PROTOCOL_EVENT_SUCCESS',
                                               'PROTOCOL_EVENT_TIMEOUT',
                                               'DRIVER_EVENT_ACQUIRE_STATUS'],
    }

    def test_base_driver_enums(self):
        """
        Verify that all the SAMI Instrument driver enumerations have no
        duplicate values that might cause confusion. Also do a little
        extra validation for the Capabilites
        Extra enumeration tests are done in a specific subclass
        """
        # Test Enums defined in the base SAMI driver
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())

        # Test capabilites for duplicates, then verify that capabilities
        # is a subset of proto events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might
        cause confusion.
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(InstrumentCommand())

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.
        """
        chunker = StringChunker(Protocol.sieve_function)

        self.assert_chunker_sample(chunker, self.VALID_STATUS_MESSAGE)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_STATUS_MESSAGE)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_STATUS_MESSAGE)
        self.assert_chunker_combined_sample(chunker, self.VALID_STATUS_MESSAGE)

        self.assert_chunker_sample(chunker, self.VALID_CONTROL_RECORD)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_CONTROL_RECORD)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_CONTROL_RECORD)
        self.assert_chunker_combined_sample(chunker, self.VALID_CONTROL_RECORD)

        self.assert_chunker_sample(chunker, self.VALID_R0_BLANK_SAMPLE)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_R0_BLANK_SAMPLE)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_R0_BLANK_SAMPLE)
        self.assert_chunker_combined_sample(chunker, self.VALID_R0_BLANK_SAMPLE)

        self.assert_chunker_sample(chunker, self.VALID_R0_DATA_SAMPLE)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_R0_DATA_SAMPLE)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_R0_DATA_SAMPLE)
        self.assert_chunker_combined_sample(chunker, self.VALID_R0_DATA_SAMPLE)

        self.assert_chunker_sample(chunker, self.VALID_CONFIG_STRING)
        self.assert_chunker_sample_with_noise(chunker, self.VALID_CONFIG_STRING)
        self.assert_chunker_fragmented_sample(chunker, self.VALID_CONFIG_STRING)
        self.assert_chunker_combined_sample(chunker, self.VALID_CONFIG_STRING)

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the
        correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)

        self.assert_raw_particle_published(driver, True)

        # Start validating data particles
        self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
                                       self.assert_particle_regular_status, True)
        self.assert_particle_published(driver, self.VALID_CONTROL_RECORD,
                                       self.assert_particle_control_record, True)
        self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
                                       self.assert_particle_sami_blank_sample, True)
        self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
                                       self.assert_particle_sami_data_sample, True)
        self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
                                       self.assert_particle_configuration, True)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities. Iterate through available
        capabilities, and verify that they can pass successfully through the
        filter. Test silly made up capabilities to verify they are blocked by
        filter.
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()

        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")

        # Verify "BOGUS_CAPABILITY was filtered out
        # (assertEqual: assertEquals is a deprecated alias)
        self.assertEqual(sorted(driver_capabilities),
                         sorted(protocol._filter_capabilities(test_capabilities)))

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in
        this dict must also be defined in the protocol FSM. Note, the EXIT and
        ENTER DRIVER_EVENTS don't need to be listed here.
        """
        # capabilities defined in base class test_driver.
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, self.capabilities_test_dict)

    def test_pump_commands(self):
        """Verify pump command generation (asserted in the Pco2 base class)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_commands(driver)

    def test_pump_timing(self):
        """Verify pump command timing (asserted in the Pco2 base class)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_timing(driver)

    def test_waiting_discover(self):
        """Verify discover behavior from the WAITING state (base-class assert)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_waiting_discover(driver)

    def test_autosample_timing(self):
        """Verify autosample scheduling timing (base-class assert)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
"""
Integration Tests:
test_startup_params: Verify that driver startup parameters are set properly.
test_set: In command state, test configuration particle generation.
Parameter.PUMP_PULSE
Parameter.PUMP_DURATION
Parameter.SAMPLES_PER_MEASUREMENT
Parameter.CYCLES_BETWEEN_BLANKS
Parameter.NUMBER_REAGENT_CYCLES
Parameter.NUMBER_BLANK_CYCLES
Parameter.FLUSH_PUMP_INTERVAL
Parameter.BIT_SWITCHES
Parameter.NUMBER_EXTRA_PUMP_CYCLES
Parameter.AUTO_SAMPLE_INTERVAL
Negative Set Tests:
START_TIME_FROM_LAUNCH
STOP_TIME_FROM_START
MODE_BITS
SAMI_SAMPLE_INTERVAL
test_commands: In autosample and command states, test particle generation.
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
test_autosample: Test autosample particle generation.
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
test_flush_pump: Test flush pump commands
"""
# def test_initialize_driver(self):
# self.assert_initialize_driver()
def test_startup_params(self):
startup_values = {
Parameter.PUMP_PULSE: 0x10,
Parameter.PUMP_DURATION: 0x20,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
Parameter.NUMBER_REAGENT_CYCLES: 0x18,
Parameter.NUMBER_BLANK_CYCLES: 0x1C,
Parameter.FLUSH_PUMP_INTERVAL: 0x01,
Parameter.BIT_SWITCHES: 0x01,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
Parameter.AUTO_SAMPLE_INTERVAL: 3600,
Parameter.REAGENT_FLUSH_DURATION: 0x08,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
Parameter.PUMP_100ML_CYCLES: 1
}
new_values = {
Parameter.PUMP_PULSE: 0x11,
Parameter.PUMP_DURATION: 0x21,
Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
Parameter.NUMBER_REAGENT_CYCLES: 0x19,
Parameter.NUMBER_BLANK_CYCLES: 0x1D,
Parameter.FLUSH_PUMP_INTERVAL: 0x02,
Parameter.BIT_SWITCHES: 0x02,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
Parameter.AUTO_SAMPLE_INTERVAL: 600,
Parameter.REAGENT_FLUSH_DURATION: 0x01,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_initialize_driver()
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
self.assert_set_bulk(new_values)
reply = self.driver_client.cmd_dvr('apply_startup_params')
for (key, val) in startup_values.iteritems():
self.assert_get(key, val)
def test_set(self):
self.assert_initialize_driver()
self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
self.assert_set(Parameter.PUMP_PULSE, 20)
self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
self.assert_set(Parameter.BIT_SWITCHES, 1)
self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)
self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
self.assert_set_readonly(Parameter.MODE_BITS, 10)
self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)
def test_bulk_set(self):
self.assert_initialize_driver()
new_values = {
Parameter.AUTO_SAMPLE_INTERVAL: 77,
Parameter.CYCLES_BETWEEN_BLANKS: 7,
Parameter.PUMP_PULSE: 20,
Parameter.SAMPLES_PER_MEASUREMENT: 239,
Parameter.NUMBER_REAGENT_CYCLES: 26,
Parameter.NUMBER_BLANK_CYCLES: 30,
Parameter.FLUSH_PUMP_INTERVAL: 2,
Parameter.BIT_SWITCHES: 1,
Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
Parameter.REAGENT_FLUSH_DURATION: 4,
Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
Parameter.PUMP_100ML_CYCLES: 14
}
self.assert_set_bulk(new_values)
def test_bad_parameters(self):
self.assert_initialize_driver()
self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, value=7.0)
self.assert_set_exception(Parameter.PUMP_PULSE, value=20.0)
self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
self.assert_set_exception(Parameter.BIT_SWITCHES, 1.0)
self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)
def test_acquire_sample(self):
    """Poll one SAMI sample and verify the generated data particle."""
    self.assert_initialize_driver()
    sample_timeout = 160
    self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_data_sample,
        timeout=sample_timeout)
def test_acquire_blank_sample(self):
    """Poll one blank sample and verify the generated blank particle."""
    self.assert_initialize_driver()
    sample_timeout = 160
    self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_blank_sample,
        timeout=sample_timeout)
def test_auto_sample(self):
    """Autosample at a 60-second interval, collect four samples, then stop."""
    self.assert_initialize_driver()
    self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 60)
    self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE,
                               state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_data_sample,
        particle_count=4,
        timeout=320)
    self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE,
                               state=ProtocolState.COMMAND, delay=5)
def test_polled_sample_state(self):
    """Verify ACQUIRE_SAMPLE enters POLLED_SAMPLE and produces a particle."""
    self.assert_initialize_driver()
    self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE,
                               state=ProtocolState.POLLED_SAMPLE, delay=5)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_data_sample,
        timeout=160)
def test_polled_blank_sample_state(self):
    """Verify ACQUIRE_BLANK_SAMPLE enters POLLED_BLANK_SAMPLE and produces a blank particle."""
    self.assert_initialize_driver()
    self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
                               state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_blank_sample,
        timeout=160)
def test_scheduled_sample_state(self):
    """Verify autosample enters SCHEDULED_SAMPLE, produces a particle, and stops cleanly."""
    self.assert_initialize_driver()
    self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE,
                               state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
    self.assert_async_particle_generation(
        DataParticleType.SAMI_SAMPLE,
        self.assert_particle_sami_data_sample,
        timeout=160)
    self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE,
                               state=ProtocolState.COMMAND, delay=5)
def test_scheduled_blank_sample_state(self):
    """Request a blank sample while autosampling and verify the state transitions."""
    self.assert_initialize_driver()
    # Start autosampling and wait for a regular scheduled sample first.
    self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
    self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                          timeout=160)
    self.clear_events()
    # A blank sample requested during autosample moves to SCHEDULED_BLANK_SAMPLE.
    self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
                               delay=5)
    self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_blank_sample,
                                          timeout=160)
    self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
def test_queued_command(self):
    """
    Verify status is queued while samples are being taken
    """
    self.assert_initialize_driver()
    ## Queue status: ACQUIRE_STATUS is issued while the sample is in
    ## progress and must execute after the sample completes.
    self.clear_events()
    self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
    self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
    self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                          particle_count=1, timeout=180)
    self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
                                          timeout=180)
    # The driver must return to COMMAND after servicing the queued status.
    self.assert_current_state(ProtocolState.COMMAND)
def test_queued_autosample(self):
    """
    Verify status is queued while samples are being taken
    """
    self.assert_initialize_driver()
    self.clear_events()
    self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
    ## Queue sample and status: ACQUIRE_STATUS issued mid-autosample must
    ## run after the scheduled sample finishes.
    self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS)
    self.assert_async_particle_generation(DataParticleType.SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                          particle_count=1, timeout=180)
    self.assert_async_particle_generation(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status,
                                          timeout=180)
    # Still autosampling after the queued status has been serviced.
    self.assert_current_state(ProtocolState.AUTOSAMPLE)
def test_acquire_status(self):
    """Verify ACQUIRE_STATUS produces all four status particle types."""
    self.assert_initialize_driver()
    self.clear_events()
    # One ACQUIRE_STATUS generates regular status, configuration, battery
    # voltage and thermistor voltage particles.
    self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.REGULAR_STATUS,
                                    self.assert_particle_regular_status)
    self.assert_async_particle_generation(DataParticleType.CONFIGURATION, self.assert_particle_configuration)
    self.assert_async_particle_generation(DataParticleType.BATTERY_VOLTAGE, self.assert_particle_battery_voltage)
    self.assert_async_particle_generation(DataParticleType.THERMISTOR_VOLTAGE,
                                          self.assert_particle_thermistor_voltage)
###############################################################################
#                            QUALIFICATION TESTS                              #
# Device specific qualification tests are for doing final testing of ion      #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete.               #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
    """Qualification tests exercising the driver through the resource agent."""

    def test_queued_command(self):
        """Verify ACQUIRE_STATUS issued during a polled sample is queued and then serviced."""
        self.assert_enter_command_mode()
        self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4, resource_state=ProtocolState.POLLED_SAMPLE)
        self.assert_resource_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_particle_async(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status, timeout=60)

    def test_queued_autosample(self):
        """Verify commands issued during autosample are queued and then serviced."""
        self.assert_enter_command_mode()
        self.assert_start_autosample(timeout=200)
        self.assert_resource_command(ProtocolEvent.ACQUIRE_SAMPLE, delay=4,
                                     resource_state=ProtocolState.SCHEDULED_SAMPLE)
        self.assert_resource_command(ProtocolEvent.ACQUIRE_STATUS)
        self.assert_particle_async(DataParticleType.REGULAR_STATUS, self.assert_particle_regular_status, timeout=60)
        self.assert_stop_autosample()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    @unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
    def test_overnight(self):
        """
        Verify autosample at default rate
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.BIT_SWITCHES, 0x00)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE,
                                      timeout=14400)

    def test_direct_access_telnet_mode(self):
        """
        @brief This test manually tests that the Instrument Driver properly
        supports direct access to the physical instrument. (telnet mode)
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)
        configuration_string = 'CF889C9C02C7EA0001E1338002000E10040200000000000000000000000000000000000000000' + \
                               '71020FFA8181C0100380000000000000000000000000000000000000000000000000000000000' + \
                               '00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
                               '0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)
        # Erase memory
        self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
        time.sleep(1)
        # Enter configuration-load mode (L5A), then send the new configuration string
        self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
        time.sleep(1)
        self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
        time.sleep(1)
        # Check that configuration was changed
        self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
        return_value = self.tcp_client.expect(configuration_string)
        self.assertTrue(return_value)
        ###
        # Add instrument specific code here.
        ###
        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        # Parameter set before direct access must survive the DA session.
        self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)

    def test_command_poll(self):
        """Poll samples, blanks, all status particles and flush commands from COMMAND state."""
        self.assert_enter_command_mode()
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
        # All flush commands must leave the driver in COMMAND state.
        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
                                     resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample_poll(self):
        """Poll samples, blanks and status particles while autosampling."""
        self.assert_enter_command_mode()
        self.assert_start_autosample(timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.THERMISTOR_VOLTAGE, sample_count=1, timeout=10)
        self.assert_stop_autosample()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample(self):
        """
        Verify autosample works and data particles are created
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 60)
        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.SAMI_SAMPLE)

    def test_get_capabilities(self):
        """
        @brief Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()
        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.ACQUIRE_SAMPLE,
                ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
                ProtocolEvent.DEIONIZED_WATER_FLUSH,
                ProtocolEvent.REAGENT_FLUSH,
                ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
                ProtocolEvent.REAGENT_FLUSH_100ML
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)
        ##################
        #  DA Mode
        ##################
        # Direct access only offers GO_COMMAND; no resource commands.
        da_capabilities = copy.deepcopy(capabilities)
        da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
        da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        # Test direct access disconnect
        self.assert_direct_access_start_telnet(timeout=10)
        self.assertTrue(self.tcp_client)
        self.assert_capabilities(da_capabilities)
        self.tcp_client.disconnect()
        # Now do it again, but use the event to stop DA
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_direct_access_start_telnet(timeout=10)
        self.assert_capabilities(da_capabilities)
        self.assert_direct_access_stop_telnet()
        ##################
        #  Command Mode
        ##################
        # We should be back in command mode from DA.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)
        ##################
        #  Streaming Mode
        ##################
        # While streaming, only stop/status/sample commands are available.
        st_capabilities = copy.deepcopy(capabilities)
        st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.STOP_AUTOSAMPLE,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.ACQUIRE_SAMPLE,
            ProtocolEvent.ACQUIRE_BLANK_SAMPLE
        ]
        self.assert_start_autosample(timeout=200)
        self.assert_capabilities(st_capabilities)
        self.assert_stop_autosample()
        ##################
        #  Command Mode
        ##################
        # We should be back in command mode from streaming.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)
        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import subprocess
from import_abstr import ImportAbstraction
class ImportSTM( ImportAbstraction ):
    """Import a Structural Topic Model (STM) saved as RData into a Termite
    Data Server (web2py) app by generating and running an R export script."""

    # R script template; {DATA_PATH} and {MODEL_FILENAME} are substituted by
    # GenerateR via str.format before the script is written and executed.
    SCRIPT = """# Load an STM model and save its content as Termite Data Server files
# Load required libraries
library(stm)
library(lda)
# Define output filenames
app.path = "{DATA_PATH}"
app.path.TermTopicMatrix = "{DATA_PATH}/term-topic-matrix.txt"
app.path.DocTopicMatrix = "{DATA_PATH}/doc-topic-matrix.txt"
app.path.TermIndex = "{DATA_PATH}/term-index.json"
app.path.DocIndex = "{DATA_PATH}/doc-index.json"
app.path.TopicIndex = "{DATA_PATH}/topic-index.json"
# Load input data
load( file = "{MODEL_FILENAME}" )
model = mod.out
library( jsonlite )
data.DocTopicMatrix = model$theta
data.TermTopicMatrix = exp( t( model$beta$logbeta[[1]] ) )
# Document Index
temp.DocCount <- nrow(model$theta)
temp.DocIDs <- paste( "Document #", 1:temp.DocCount, sep = "" )
temp.DocIndex <- 1:temp.DocCount
temp.DocIndexValues <- cbind( temp.DocIndex, temp.DocIDs )
temp.DocIndexHeader <- c( "index", "docID" )
colnames( temp.DocIndexValues ) <- temp.DocIndexHeader
data.DocIndexJSON <- toJSON( as.data.frame( temp.DocIndexValues ), pretty = TRUE )
write( data.DocIndexJSON, file = app.path.DocIndex )
# Term Index
temp.TermCount <- nrow( data.TermTopicMatrix )
temp.TermFreq <- apply( data.TermTopicMatrix, 1, sum )
temp.TermText <- model$vocab
temp.TermIndex <- 1:temp.TermCount
temp.TermIndexValues = cbind( temp.TermIndex, temp.TermFreq, temp.TermText )
temp.TermIndexHeader = c( "index", "freq", "text" )
colnames( temp.TermIndexValues ) <- temp.TermIndexHeader
data.TermIndexJSON <- toJSON( as.data.frame( temp.TermIndexValues ), pretty = TRUE )
write( data.TermIndexJSON, file = app.path.TermIndex )
# Topic Index
temp.TopicCount <- ncol( data.TermTopicMatrix )
temp.TopicFreq <- apply( data.TermTopicMatrix, 2, sum )
temp.TopicIndex <- 1:temp.TopicCount
temp.TopicIndexValues = cbind( temp.TopicIndex, temp.TopicFreq )
temp.TopicIndexHeader = c( "index", "freq" )
colnames( temp.TopicIndexValues ) <- temp.TopicIndexHeader
data.TopicIndexJSON <- toJSON( as.data.frame( temp.TopicIndexValues ), pretty = TRUE )
write( data.TopicIndexJSON, file = app.path.TopicIndex )
# Doc-Topic Matrix
# Tab-separated with no headers. Theta (D by K)
rownames( data.DocTopicMatrix ) <- temp.DocIDs
colnames( data.DocTopicMatrix ) <- temp.TopicIndex
data.DocTopicMatrixJSON <- toJSON( data.DocTopicMatrix, pretty = TRUE )
write( data.DocTopicMatrixJSON, file = app.path.DocTopicMatrix )
# Term-Topic Matrix
# Tab-separated with no headers. Beta (V by K)
rownames( data.TermTopicMatrix ) <- temp.TermText
colnames( data.TermTopicMatrix ) <- temp.TopicIndex
data.TermTopicMatrixJSON <- toJSON( data.TermTopicMatrix, pretty = TRUE )
write( data.TermTopicMatrixJSON, file = app.path.TermTopicMatrix )
"""

    def __init__( self, app_name, app_model = 'lda', app_desc = 'Structural Topic Model' ):
        # Delegate app-folder setup to the shared importer base class.
        ImportAbstraction.__init__( self, app_name, app_model, app_desc )

    def ImportLDA( self, model_filename ):
        """Import the STM model stored in *model_filename* (RData)."""
        self.GenerateR( model_filename )

    def GenerateR( self, model_filename ):
        """Write the R export script into the app data folder and run it with RScript."""
        r = ImportSTM.SCRIPT.format( DATA_PATH = self.data_path, MODEL_FILENAME = model_filename )
        script_filename = '{}/import.r'.format( self.data_path )
        with open( script_filename, 'w' ) as f:
            f.write( r.encode( 'utf-8' ) )
        command = [ 'RScript', script_filename ]
        print ' '.join(command)
        # NOTE(review): stdout is piped but never read — if RScript writes
        # enough to stdout the pipe can fill and deadlock; stderr buffered
        # after the process exits is also dropped by this poll loop.
        process = subprocess.Popen( command, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
        while process.poll() is None:
            line = process.stderr.readline()
            print line[:-1]
def main():
    """Command-line entry point: import an STM RData file as a web2py app."""
    parser = argparse.ArgumentParser( description = 'Import a STM topic model as a web2py application.' )
    parser.add_argument( 'app_name' , type = str, help = 'Web2py application identifier' )
    parser.add_argument( 'model'    , type = str, help = 'File containing a STM model (RData)' )
    args = parser.parse_args()
    importer = ImportSTM( args.app_name )
    importer.ImportLDA( args.model )
    importer.AddToWeb2py()

if __name__ == '__main__':
    main()
Specified the number of significant figures (digits = 10) for the JSON output in import_stm.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import subprocess
from import_abstr import ImportAbstraction
class ImportSTM( ImportAbstraction ):
SCRIPT = """# Load an STM model and save its content as Termite Data Server files
# Load required libraries
library(stm)
library(lda)
# Define output filenames
app.path = "{DATA_PATH}"
app.path.TermTopicMatrix = "{DATA_PATH}/term-topic-matrix.txt"
app.path.DocTopicMatrix = "{DATA_PATH}/doc-topic-matrix.txt"
app.path.TermIndex = "{DATA_PATH}/term-index.json"
app.path.DocIndex = "{DATA_PATH}/doc-index.json"
app.path.TopicIndex = "{DATA_PATH}/topic-index.json"
# Load input data
load( file = "{MODEL_FILENAME}" )
model = mod.out
library( jsonlite )
data.DocTopicMatrix = model$theta
data.TermTopicMatrix = exp( t( model$beta$logbeta[[1]] ) )
# Document Index
temp.DocCount <- nrow(model$theta)
temp.DocIDs <- paste( "Document #", 1:temp.DocCount, sep = "" )
temp.DocIndex <- 1:temp.DocCount
temp.DocIndexValues <- cbind( temp.DocIndex, temp.DocIDs )
temp.DocIndexHeader <- c( "index", "docID" )
colnames( temp.DocIndexValues ) <- temp.DocIndexHeader
data.DocIndexJSON <- toJSON( as.data.frame( temp.DocIndexValues ), pretty = TRUE, digits = 10 )
write( data.DocIndexJSON, file = app.path.DocIndex )
# Term Index
temp.TermCount <- nrow( data.TermTopicMatrix )
temp.TermFreq <- apply( data.TermTopicMatrix, 1, sum )
temp.TermText <- model$vocab
temp.TermIndex <- 1:temp.TermCount
temp.TermIndexValues = cbind( temp.TermIndex, temp.TermFreq, temp.TermText )
temp.TermIndexHeader = c( "index", "freq", "text" )
colnames( temp.TermIndexValues ) <- temp.TermIndexHeader
data.TermIndexJSON <- toJSON( as.data.frame( temp.TermIndexValues ), pretty = TRUE, digits = 10 )
write( data.TermIndexJSON, file = app.path.TermIndex )
# Topic Index
temp.TopicCount <- ncol( data.TermTopicMatrix )
temp.TopicFreq <- apply( data.TermTopicMatrix, 2, sum )
temp.TopicIndex <- 1:temp.TopicCount
temp.TopicIndexValues = cbind( temp.TopicIndex, temp.TopicFreq )
temp.TopicIndexHeader = c( "index", "freq" )
colnames( temp.TopicIndexValues ) <- temp.TopicIndexHeader
data.TopicIndexJSON <- toJSON( as.data.frame( temp.TopicIndexValues ), pretty = TRUE, digits = 10 )
write( data.TopicIndexJSON, file = app.path.TopicIndex )
# Doc-Topic Matrix
# Tab-separated with no headers. Theta (D by K)
rownames( data.DocTopicMatrix ) <- temp.DocIDs
colnames( data.DocTopicMatrix ) <- temp.TopicIndex
data.DocTopicMatrixJSON <- toJSON( data.DocTopicMatrix, pretty = TRUE, digits = 10 )
write( data.DocTopicMatrixJSON, file = app.path.DocTopicMatrix )
# Term-Topic Matrix
# Tab-separated with no headers. Beta (V by K)
rownames( data.TermTopicMatrix ) <- temp.TermText
colnames( data.TermTopicMatrix ) <- temp.TopicIndex
data.TermTopicMatrixJSON <- toJSON( data.TermTopicMatrix, pretty = TRUE, digits = 10 )
write( data.TermTopicMatrixJSON, file = app.path.TermTopicMatrix )
"""
def __init__( self, app_name, app_model = 'lda', app_desc = 'Structural Topic Model' ):
ImportAbstraction.__init__( self, app_name, app_model, app_desc )
def ImportLDA( self, model_filename ):
self.GenerateR( model_filename )
def GenerateR( self, model_filename ):
r = ImportSTM.SCRIPT.format( DATA_PATH = self.data_path, MODEL_FILENAME = model_filename )
script_filename = '{}/import.r'.format( self.data_path )
with open( script_filename, 'w' ) as f:
f.write( r.encode( 'utf-8' ) )
command = [ 'RScript', script_filename ]
print ' '.join(command)
process = subprocess.Popen( command, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
while process.poll() is None:
line = process.stderr.readline()
print line[:-1]
def main():
    """Parse command-line arguments and run the STM import into web2py."""
    arg_parser = argparse.ArgumentParser( description = 'Import a STM topic model as a web2py application.' )
    arg_parser.add_argument( 'app_name' , type = str, help = 'Web2py application identifier' )
    arg_parser.add_argument( 'model'    , type = str, help = 'File containing a STM model (RData)' )
    options = arg_parser.parse_args()
    stm_importer = ImportSTM( options.app_name )
    stm_importer.ImportLDA( options.model )
    stm_importer.AddToWeb2py()

if __name__ == '__main__':
    main()
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import threading
import os
import math
import time
import mock
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import temporary_file
from botocore.credentials import EnvProvider, ContainerProvider
from botocore.credentials import InstanceMetadataProvider
from botocore.credentials import Credentials, ReadOnlyCredentials
from botocore.credentials import AssumeRoleProvider
from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.credentials import DeferredRefreshableCredentials
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
class TestCredentialRefreshRaces(unittest.TestCase):
def assert_consistent_credentials_seen(self, creds, func):
collected = []
self._run_threads(20, func, collected)
for creds in collected:
# During testing, the refresher uses it's current
# refresh count as the values for the access, secret, and
# token value. This means that at any given point in time,
# the credentials should be something like:
#
# ReadOnlyCredentials('1', '1', '1')
# ReadOnlyCredentials('2', '2', '2')
# ...
# ReadOnlyCredentials('30', '30', '30')
#
# This makes it really easy to verify we see a consistent
# set of credentials from the same time period. We just
# check if all the credential values are the same. If
# we ever see something like:
#
# ReadOnlyCredentials('1', '2', '1')
#
# We fail. This is because we're using the access_key
# from the first refresh ('1'), the secret key from
# the second refresh ('2'), and the token from the
# first refresh ('1').
self.assertTrue(creds[0] == creds[1] == creds[2], creds)
def assert_non_none_retrieved_credentials(self, func):
collected = []
self._run_threads(50, func, collected)
for cred in collected:
self.assertIsNotNone(cred)
def _run_threads(self, num_threads, func, collected):
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=func, args=(collected,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_has_no_race_conditions(self):
creds = IntegerRefresher(
creds_last_for=2,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(4000):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
start = time.time()
self.assert_consistent_credentials_seen(creds, _run_in_thread)
end = time.time()
# creds_last_for = 2 seconds (from above)
# So, for example, if execution time took 6.1 seconds, then
# we should see a maximum number of refreshes being (6 / 2.0) + 1 = 4
max_calls_allowed = math.ceil((end - start) / 2.0) + 1
self.assertTrue(creds.refresh_counter <= max_calls_allowed,
"Too many cred refreshes, max: %s, actual: %s, "
"time_delta: %.4f" % (max_calls_allowed,
creds.refresh_counter,
(end - start)))
def test_no_race_for_immediate_advisory_expiration(self):
creds = IntegerRefresher(
creds_last_for=1,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(100):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
self.assert_consistent_credentials_seen(creds, _run_in_thread)
def test_no_race_for_initial_refresh_of_deferred_refreshable(self):
def get_credentials():
expiry_time = (
datetime.now(tzlocal()) + timedelta(hours=24)).isoformat()
return {
'access_key': 'my-access-key',
'secret_key': 'my-secret-key',
'token': 'my-token',
'expiry_time': expiry_time
}
deferred_creds = DeferredRefreshableCredentials(
get_credentials, 'fixed')
def _run_in_thread(collected):
frozen = deferred_creds.get_frozen_credentials()
collected.append(frozen)
self.assert_non_none_retrieved_credentials(_run_in_thread)
class TestAssumeRole(BaseEnvVar):
def setUp(self):
    """Point AWS config at a temp file, seed env credentials, and build
    mocked env/container/instance-metadata providers."""
    super(TestAssumeRole, self).setUp()
    self.tempdir = tempfile.mkdtemp()
    self.config_file = os.path.join(self.tempdir, 'config')
    self.environ['AWS_CONFIG_FILE'] = self.config_file
    self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
    self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
    self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
    self.env_provider = self.mock_provider(EnvProvider)
    self.container_provider = self.mock_provider(ContainerProvider)
def mock_provider(self, provider_cls):
    """Return a mock of *provider_cls* whose load() yields None but which
    keeps the real METHOD and CANONICAL_NAME identifiers."""
    provider = mock.Mock(spec=provider_cls)
    provider.load.return_value = None
    provider.METHOD = provider_cls.METHOD
    provider.CANONICAL_NAME = provider_cls.CANONICAL_NAME
    return provider
def tearDown(self):
    # Remove the temp dir holding the AWS config file created in setUp.
    shutil.rmtree(self.tempdir)
def create_session(self, profile=None):
    """Build a Session whose credential resolver uses the mocked providers.

    Returns (session, stubber): the stubber wraps the STS client handed to
    the AssumeRoleProvider, so tests can queue assume_role responses.
    """
    session = Session(profile=profile)
    # We have to set bogus credentials here or otherwise we'll trigger
    # an early credential chain resolution.
    sts = session.create_client(
        'sts',
        aws_access_key_id='spam',
        aws_secret_access_key='eggs',
    )
    stubber = Stubber(sts)
    stubber.activate()
    assume_role_provider = AssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=lambda *args, **kwargs: sts,
        cache={},
        profile_name=profile,
        credential_sourcer=CanonicalNameCredentialSourcer([
            self.env_provider, self.container_provider,
            self.metadata_provider
        ])
    )
    component_name = 'credential_provider'
    resolver = session.get_component(component_name)
    available_methods = [p.METHOD for p in resolver.providers]
    # Swap the real providers for the mocks/stub, preserving resolver order.
    replacements = {
        'env': self.env_provider,
        'iam-role': self.metadata_provider,
        'container-role': self.container_provider,
        'assume-role': assume_role_provider
    }
    for name, provider in replacements.items():
        try:
            index = available_methods.index(name)
        except ValueError:
            # The provider isn't in the session
            continue
        resolver.providers[index] = provider
    session.register_component(
        'credential_provider', resolver
    )
    return session, stubber
def create_assume_role_response(self, credentials, expiration=None):
    """Build a stubbed STS AssumeRole response carrying *credentials*;
    expiration defaults to 24 hours from now."""
    if expiration is None:
        expiration = self.some_future_time()
    return {
        'Credentials': {
            'AccessKeyId': credentials.access_key,
            'SecretAccessKey': credentials.secret_key,
            'SessionToken': credentials.token,
            'Expiration': expiration
        },
        'AssumedRoleUser': {
            'AssumedRoleId': 'myroleid',
            'Arn': 'arn:aws:iam::1234567890:user/myuser'
        }
    }
def create_random_credentials(self):
    """Return a Credentials object with unique, recognizable fake values."""
    access_key = 'fake-%s' % random_chars(15)
    secret_key = 'fake-%s' % random_chars(35)
    token = 'fake-%s' % random_chars(45)
    return Credentials(access_key, secret_key, token)
def some_future_time(self):
    """Return a timezone-aware datetime 24 hours in the future."""
    return datetime.now(tzlocal()) + timedelta(hours=24)
def write_config(self, config):
    # Overwrite the temp AWS config file (pointed to by AWS_CONFIG_FILE).
    with open(self.config_file, 'w') as f:
        f.write(config)
def assert_creds_equal(self, c1, c2):
    """Assert two credential objects are equal after normalizing each to
    ReadOnlyCredentials."""
    def _frozen(creds):
        if isinstance(creds, ReadOnlyCredentials):
            return creds
        return creds.get_frozen_credentials()
    self.assertEqual(_frozen(c1), _frozen(c2))
def test_assume_role(self):
    """Assume a role whose source profile holds static credentials."""
    config = (
        '[profile A]\n'
        'role_arn = arn:aws:iam::123456789:role/RoleA\n'
        'source_profile = B\n\n'
        '[profile B]\n'
        'aws_access_key_id = abc123\n'
        'aws_secret_access_key = def456\n'
    )
    self.write_config(config)
    expected_creds = self.create_random_credentials()
    response = self.create_assume_role_response(expected_creds)
    session, stubber = self.create_session(profile='A')
    stubber.add_response('assume_role', response)
    actual_creds = session.get_credentials()
    self.assert_creds_equal(actual_creds, expected_creds)
    # Exactly one assume_role call should have been consumed.
    stubber.assert_no_pending_responses()
def test_environment_credential_source(self):
    """Assume a role sourcing base credentials from the Environment provider."""
    config = (
        '[profile A]\n'
        'role_arn = arn:aws:iam::123456789:role/RoleA\n'
        'credential_source = Environment\n'
    )
    self.write_config(config)
    environment_creds = self.create_random_credentials()
    self.env_provider.load.return_value = environment_creds
    expected_creds = self.create_random_credentials()
    response = self.create_assume_role_response(expected_creds)
    session, stubber = self.create_session(profile='A')
    stubber.add_response('assume_role', response)
    actual_creds = session.get_credentials()
    self.assert_creds_equal(actual_creds, expected_creds)
    stubber.assert_no_pending_responses()
    # The environment provider must have been consulted exactly once.
    self.assertEqual(self.env_provider.load.call_count, 1)
def test_instance_metadata_credential_source(self):
    """Assume a role sourcing base credentials from EC2 instance metadata."""
    config = (
        '[profile A]\n'
        'role_arn = arn:aws:iam::123456789:role/RoleA\n'
        'credential_source = Ec2InstanceMetadata\n'
    )
    self.write_config(config)
    metadata_creds = self.create_random_credentials()
    self.metadata_provider.load.return_value = metadata_creds
    expected_creds = self.create_random_credentials()
    response = self.create_assume_role_response(expected_creds)
    session, stubber = self.create_session(profile='A')
    stubber.add_response('assume_role', response)
    actual_creds = session.get_credentials()
    self.assert_creds_equal(actual_creds, expected_creds)
    stubber.assert_no_pending_responses()
    # The metadata provider must have been consulted exactly once.
    self.assertEqual(self.metadata_provider.load.call_count, 1)
def test_container_credential_source(self):
    """Assume a role sourcing base credentials from the ECS container provider."""
    config = (
        '[profile A]\n'
        'role_arn = arn:aws:iam::123456789:role/RoleA\n'
        'credential_source = EcsContainer\n'
    )
    self.write_config(config)
    container_creds = self.create_random_credentials()
    self.container_provider.load.return_value = container_creds
    expected_creds = self.create_random_credentials()
    response = self.create_assume_role_response(expected_creds)
    session, stubber = self.create_session(profile='A')
    stubber.add_response('assume_role', response)
    actual_creds = session.get_credentials()
    self.assert_creds_equal(actual_creds, expected_creds)
    stubber.assert_no_pending_responses()
    # The container provider must have been consulted exactly once.
    self.assertEqual(self.container_provider.load.call_count, 1)
    def test_invalid_credential_source(self):
        """An unknown credential_source value raises InvalidConfigError
        at credential-resolution time."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = CustomInvalidProvider\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()
    def test_misconfigured_source_profile(self):
        """A source_profile whose credentials cannot feed AssumeRole
        (here: credential_process) raises InvalidConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'credential_process = command\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()
    def test_recursive_assume_role(self):
        """Chained source_profiles (A -> B -> C) perform two AssumeRole
        calls, innermost first, and yield profile A's credentials."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_b_creds = self.create_random_credentials()
        profile_b_response = self.create_assume_role_response(profile_b_creds)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        # Responses are consumed in order: B's role is assumed first.
        stubber.add_response('assume_role', profile_b_response)
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()
    def test_recursive_assume_role_stops_at_static_creds(self):
        """When an intermediate profile has static keys, the chain stops
        there: only one AssumeRole call is made (profile C is ignored)."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()
    def test_infinitely_recursive_assume_role(self):
        """A profile sourcing itself with no static keys raises
        InfiniteLoopConfigError instead of recursing forever."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
        )
        self.write_config(config)
        with self.assertRaises(InfiniteLoopConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()
    def test_self_referential_profile(self):
        """A self-referencing profile is legal when it also carries static
        keys: those keys seed a single AssumeRole call."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
class TestProcessProvider(unittest.TestCase):
    """Tests for ``credential_process``-based credential resolution.

    The external process is the repository's ``utils/credentialprocess.py``
    helper, launched through the current Python interpreter.
    """

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        credential_process = os.path.join(
            current_dir, 'utils', 'credentialprocess.py'
        )
        # Run the helper via sys.executable so the test does not depend on
        # the script being executable or discoverable on PATH.
        self.credential_process = '%s %s' % (
            sys.executable, credential_process
        )
        self.environ = os.environ.copy()
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()

    def tearDown(self):
        self.environ_patch.stop()

    def test_credential_process(self):
        """The JSON emitted by the configured process becomes the session
        credentials."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            credentials = Session(profile='processcreds').get_credentials()
            self.assertEqual(credentials.access_key, 'spam')
            self.assertEqual(credentials.secret_key, 'eggs')

    def test_credential_process_returns_error(self):
        """A failing process raises CredentialRetrievalError whose message
        contains decoded stderr text, not a bytes repr."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s --raise-error\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            session = Session(profile='processcreds')

            # This regex validates that there is no substring: b'
            # The reason why we want to validate that is that we want to
            # make sure that stderr is actually decoded so that in
            # exceptional cases the error is properly formatted.
            # As for how the regex works:
            # `(?!b').` is a negative lookahead, meaning that it will only
            # match if it is not followed by the pattern `b'`. Since it is
            # followed by a `.` it will match any character not followed by
            # that pattern. `((?!hede).)*` does that zero or more times. The
            # final pattern adds `^` and `$` to anchor the beginning and end
            # of the string so we can know the whole string is consumed.
            # Finally `(?s)` at the beginning makes dots match newlines so
            # we can handle a multi-line string.
            # NOTE(review): assertRaisesRegexp is the deprecated py2-era
            # spelling of assertRaisesRegex — kept for compatibility.
            reg = r"(?s)^((?!b').)*$"
            with self.assertRaisesRegexp(CredentialRetrievalError, reg):
                session.get_credentials()
Add test to verify correct region in assume role provider
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import uuid
import threading
import os
import math
import time
import mock
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import ClientHTTPStubber
from tests import temporary_file
from botocore.credentials import EnvProvider, ContainerProvider
from botocore.credentials import InstanceMetadataProvider
from botocore.credentials import Credentials, ReadOnlyCredentials
from botocore.credentials import AssumeRoleProvider
from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.credentials import DeferredRefreshableCredentials
from botocore.credentials import create_credential_resolver
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
class TestCredentialRefreshRaces(unittest.TestCase):
    """Thread-safety tests for refreshable credentials.

    Many threads hammer a refresher concurrently; no thread may ever
    observe a half-refreshed (mixed) credential triple or a None result.
    """

    def assert_consistent_credentials_seen(self, creds, func):
        """Run ``func`` in 20 threads; every collected triple must come
        from a single refresh generation."""
        collected = []
        self._run_threads(20, func, collected)
        # NOTE: the loop variable shadows the ``creds`` parameter; the
        # parameter is not used past this point, so this is harmless.
        for creds in collected:
            # During testing, the refresher uses its current
            # refresh count as the values for the access, secret, and
            # token value. This means that at any given point in time,
            # the credentials should be something like:
            #
            # ReadOnlyCredentials('1', '1', '1')
            # ReadOnlyCredentials('2', '2', '2')
            # ...
            # ReadOnlyCredentials('30', '30', '30')
            #
            # This makes it really easy to verify we see a consistent
            # set of credentials from the same time period. We just
            # check if all the credential values are the same. If
            # we ever see something like:
            #
            # ReadOnlyCredentials('1', '2', '1')
            #
            # We fail. This is because we're using the access_key
            # from the first refresh ('1'), the secret key from
            # the second refresh ('2'), and the token from the
            # first refresh ('1').
            self.assertTrue(creds[0] == creds[1] == creds[2], creds)

    def assert_non_none_retrieved_credentials(self, func):
        """Run ``func`` in 50 threads; every collected value must be
        non-None (i.e. the initial refresh is visible to all threads)."""
        collected = []
        self._run_threads(50, func, collected)
        for cred in collected:
            self.assertIsNotNone(cred)

    def _run_threads(self, num_threads, func, collected):
        # Start all threads, then join them all; ``collected`` is the
        # shared accumulator each thread appends to.
        threads = []
        for _ in range(num_threads):
            threads.append(threading.Thread(target=func, args=(collected,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    def test_has_no_race_conditions(self):
        """Concurrent get_frozen_credentials() never yields mixed triples,
        and the refresher is not called more often than time allows."""
        creds = IntegerRefresher(
            creds_last_for=2,
            advisory_refresh=1,
            mandatory_refresh=0
        )
        def _run_in_thread(collected):
            for _ in range(4000):
                frozen = creds.get_frozen_credentials()
                collected.append((frozen.access_key,
                                  frozen.secret_key,
                                  frozen.token))
        start = time.time()
        self.assert_consistent_credentials_seen(creds, _run_in_thread)
        end = time.time()
        # creds_last_for = 2 seconds (from above)
        # So, for example, if execution time took 6.1 seconds, then
        # we should see a maximum number of refreshes being (6 / 2.0) + 1 = 4
        max_calls_allowed = math.ceil((end - start) / 2.0) + 1
        self.assertTrue(creds.refresh_counter <= max_calls_allowed,
                        "Too many cred refreshes, max: %s, actual: %s, "
                        "time_delta: %.4f" % (max_calls_allowed,
                                              creds.refresh_counter,
                                              (end - start)))

    def test_no_race_for_immediate_advisory_expiration(self):
        """Advisory refresh triggering on every access must still never
        expose a mixed credential triple."""
        creds = IntegerRefresher(
            creds_last_for=1,
            advisory_refresh=1,
            mandatory_refresh=0
        )
        def _run_in_thread(collected):
            for _ in range(100):
                frozen = creds.get_frozen_credentials()
                collected.append((frozen.access_key,
                                  frozen.secret_key,
                                  frozen.token))
        self.assert_consistent_credentials_seen(creds, _run_in_thread)

    def test_no_race_for_initial_refresh_of_deferred_refreshable(self):
        """The very first (deferred) refresh must be visible to all
        threads: no thread may read None credentials."""
        def get_credentials():
            expiry_time = (
                datetime.now(tzlocal()) + timedelta(hours=24)).isoformat()
            return {
                'access_key': 'my-access-key',
                'secret_key': 'my-secret-key',
                'token': 'my-token',
                'expiry_time': expiry_time
            }
        deferred_creds = DeferredRefreshableCredentials(
            get_credentials, 'fixed')
        def _run_in_thread(collected):
            frozen = deferred_creds.get_frozen_credentials()
            collected.append(frozen)
        self.assert_non_none_retrieved_credentials(_run_in_thread)
class TestAssumeRole(BaseEnvVar):
    """End-to-end tests for the assume-role credential provider.

    Each test writes a temp config file, wires a stubbed STS client plus
    mocked env/container/instance-metadata providers into the session's
    credential resolver, and then asserts which credentials get resolved
    and which providers were consulted.
    """

    def setUp(self):
        super(TestAssumeRole, self).setUp()
        self.tempdir = tempfile.mkdtemp()
        self.config_file = os.path.join(self.tempdir, 'config')
        self.environ['AWS_CONFIG_FILE'] = self.config_file
        # Point the shared credentials file at a random nonexistent path
        # so credentials on the host machine cannot leak into the tests.
        self.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid.uuid4())
        self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
        self.env_provider = self.mock_provider(EnvProvider)
        self.container_provider = self.mock_provider(ContainerProvider)
        # Recorded by create_stubbed_sts_client for the region assertion.
        self.actual_client_region = None

    def mock_provider(self, provider_cls):
        """Return a Mock for ``provider_cls`` that loads no credentials by
        default but keeps the class's real METHOD/CANONICAL_NAME."""
        mock_instance = mock.Mock(spec=provider_cls)
        mock_instance.load.return_value = None
        mock_instance.METHOD = provider_cls.METHOD
        mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME
        return mock_instance

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def create_session(self, profile=None):
        """Build a Session whose credential resolver uses a stubbed STS
        client and the mocked providers; returns (session, stubber)."""
        session = Session(profile=profile)
        # We have to set bogus credentials here or otherwise we'll trigger
        # an early credential chain resolution.
        sts = session.create_client(
            'sts',
            aws_access_key_id='spam',
            aws_secret_access_key='eggs',
        )
        stubber = Stubber(sts)
        stubber.activate()
        assume_role_provider = AssumeRoleProvider(
            load_config=lambda: session.full_config,
            client_creator=lambda *args, **kwargs: sts,
            cache={},
            profile_name=profile,
            credential_sourcer=CanonicalNameCredentialSourcer([
                self.env_provider, self.container_provider,
                self.metadata_provider
            ])
        )
        component_name = 'credential_provider'
        resolver = session.get_component(component_name)
        available_methods = [p.METHOD for p in resolver.providers]
        # Swap the real providers in the resolver chain for our mocks,
        # keeping each provider's original position.
        replacements = {
            'env': self.env_provider,
            'iam-role': self.metadata_provider,
            'container-role': self.container_provider,
            'assume-role': assume_role_provider
        }
        for name, provider in replacements.items():
            try:
                index = available_methods.index(name)
            except ValueError:
                # The provider isn't in the session
                continue
            resolver.providers[index] = provider
        session.register_component(
            'credential_provider', resolver
        )
        return session, stubber

    def create_assume_role_response(self, credentials, expiration=None):
        """Build a stub AssumeRole response carrying ``credentials``;
        expiration defaults to 24 hours from now."""
        if expiration is None:
            expiration = self.some_future_time()
        response = {
            'Credentials': {
                'AccessKeyId': credentials.access_key,
                'SecretAccessKey': credentials.secret_key,
                'SessionToken': credentials.token,
                'Expiration': expiration
            },
            'AssumedRoleUser': {
                'AssumedRoleId': 'myroleid',
                'Arn': 'arn:aws:iam::1234567890:user/myuser'
            }
        }
        return response

    def create_random_credentials(self):
        """Return Credentials with random key/secret/token values."""
        return Credentials(
            'fake-%s' % random_chars(15),
            'fake-%s' % random_chars(35),
            'fake-%s' % random_chars(45)
        )

    def some_future_time(self):
        # Far enough ahead that no refresh triggers during a test run.
        timeobj = datetime.now(tzlocal())
        return timeobj + timedelta(hours=24)

    def write_config(self, config):
        with open(self.config_file, 'w') as f:
            f.write(config)

    def assert_creds_equal(self, c1, c2):
        """Compare two credential objects by their frozen
        (access, secret, token) values."""
        c1_frozen = c1
        if not isinstance(c1_frozen, ReadOnlyCredentials):
            c1_frozen = c1.get_frozen_credentials()
        c2_frozen = c2
        if not isinstance(c2_frozen, ReadOnlyCredentials):
            c2_frozen = c2.get_frozen_credentials()
        self.assertEqual(c1_frozen, c2_frozen)

    def test_assume_role(self):
        """Basic source_profile chain: one AssumeRole call yields the
        stubbed credentials."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()

    def test_environment_credential_source(self):
        """credential_source = Environment sources the base credentials
        from the env provider before AssumeRole."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = Environment\n'
        )
        self.write_config(config)
        environment_creds = self.create_random_credentials()
        self.env_provider.load.return_value = environment_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.env_provider.load.call_count, 1)

    def test_instance_metadata_credential_source(self):
        """credential_source = Ec2InstanceMetadata sources the base
        credentials from the instance-metadata provider."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = Ec2InstanceMetadata\n'
        )
        self.write_config(config)
        metadata_creds = self.create_random_credentials()
        self.metadata_provider.load.return_value = metadata_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.metadata_provider.load.call_count, 1)

    def test_container_credential_source(self):
        """credential_source = EcsContainer sources the base credentials
        from the container provider."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = EcsContainer\n'
        )
        self.write_config(config)
        container_creds = self.create_random_credentials()
        self.container_provider.load.return_value = container_creds
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()
        self.assertEqual(self.container_provider.load.call_count, 1)

    def test_invalid_credential_source(self):
        """An unknown credential_source raises InvalidConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'credential_source = CustomInvalidProvider\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()

    def test_misconfigured_source_profile(self):
        """A source_profile that cannot feed AssumeRole (here:
        credential_process) raises InvalidConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n'
            '[profile B]\n'
            'credential_process = command\n'
        )
        self.write_config(config)
        with self.assertRaises(InvalidConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()

    def test_recursive_assume_role(self):
        """Chained source_profiles (A -> B -> C) perform two AssumeRole
        calls, innermost first."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_b_creds = self.create_random_credentials()
        profile_b_response = self.create_assume_role_response(profile_b_creds)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        # Responses are consumed in order: B's role is assumed first.
        stubber.add_response('assume_role', profile_b_response)
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()

    def test_recursive_assume_role_stops_at_static_creds(self):
        """A profile with static keys ends the chain: only one
        AssumeRole call happens (profile C is never consulted)."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
            'role_arn = arn:aws:iam::123456789:role/RoleB\n'
            'source_profile = C\n\n'
            '[profile C]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        profile_a_creds = self.create_random_credentials()
        profile_a_response = self.create_assume_role_response(profile_a_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', profile_a_response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, profile_a_creds)
        stubber.assert_no_pending_responses()

    def test_infinitely_recursive_assume_role(self):
        """A profile sourcing itself with no static keys raises
        InfiniteLoopConfigError."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
        )
        self.write_config(config)
        with self.assertRaises(InfiniteLoopConfigError):
            session, _ = self.create_session(profile='A')
            session.get_credentials()

    def test_self_referential_profile(self):
        """Self-reference is legal when the profile also has static keys;
        they seed a single AssumeRole call."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = A\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        expected_creds = self.create_random_credentials()
        response = self.create_assume_role_response(expected_creds)
        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', response)
        actual_creds = session.get_credentials()
        self.assert_creds_equal(actual_creds, expected_creds)
        stubber.assert_no_pending_responses()

    def create_stubbed_sts_client(self, session):
        """Wrap session.create_client so every created client is stubbed
        with one AssumeRole response and its region is recorded in
        self.actual_client_region; returns (factory, expected_creds)."""
        expected_creds = self.create_random_credentials()
        _original_create_client = session.create_client
        def create_client_sts_stub(service, *args, **kwargs):
            client = _original_create_client(service, *args, **kwargs)
            stub = Stubber(client)
            response = self.create_assume_role_response(expected_creds)
            self.actual_client_region = client.meta.region_name
            stub.add_response('assume_role', response)
            stub.activate()
            return client
        return create_client_sts_stub, expected_creds

    def test_assume_role_uses_correct_region(self):
        """The STS client created for AssumeRole must use the session's
        configured region."""
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        session = Session(profile='A')
        # Verify that when we configure the session with a specific region
        # that we use that region when creating the sts client.
        session.set_config_variable('region', 'cn-north-1')
        create_client, expected_creds = self.create_stubbed_sts_client(session)
        session.create_client = create_client
        resolver = create_credential_resolver(session)
        provider = resolver.get_provider('assume-role')
        creds = provider.load()
        self.assert_creds_equal(creds, expected_creds)
        self.assertEqual(self.actual_client_region, 'cn-north-1')
class TestProcessProvider(unittest.TestCase):
    """Tests for ``credential_process``-based credential resolution.

    The external process is the repository's ``utils/credentialprocess.py``
    helper, launched through the current Python interpreter.
    """

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        credential_process = os.path.join(
            current_dir, 'utils', 'credentialprocess.py'
        )
        # Run the helper via sys.executable so the test does not depend on
        # the script being executable or discoverable on PATH.
        self.credential_process = '%s %s' % (
            sys.executable, credential_process
        )
        self.environ = os.environ.copy()
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()

    def tearDown(self):
        self.environ_patch.stop()

    def test_credential_process(self):
        """The JSON emitted by the configured process becomes the session
        credentials."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            credentials = Session(profile='processcreds').get_credentials()
            self.assertEqual(credentials.access_key, 'spam')
            self.assertEqual(credentials.secret_key, 'eggs')

    def test_credential_process_returns_error(self):
        """A failing process raises CredentialRetrievalError whose message
        contains decoded stderr text, not a bytes repr."""
        config = (
            '[profile processcreds]\n'
            'credential_process = %s --raise-error\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name
            session = Session(profile='processcreds')

            # This regex validates that there is no substring: b'
            # The reason why we want to validate that is that we want to
            # make sure that stderr is actually decoded so that in
            # exceptional cases the error is properly formatted.
            # As for how the regex works:
            # `(?!b').` is a negative lookahead, meaning that it will only
            # match if it is not followed by the pattern `b'`. Since it is
            # followed by a `.` it will match any character not followed by
            # that pattern. `((?!hede).)*` does that zero or more times. The
            # final pattern adds `^` and `$` to anchor the beginning and end
            # of the string so we can know the whole string is consumed.
            # Finally `(?s)` at the beginning makes dots match newlines so
            # we can handle a multi-line string.
            # NOTE(review): assertRaisesRegexp is the deprecated py2-era
            # spelling of assertRaisesRegex — kept for compatibility.
            reg = r"(?s)^((?!b').)*$"
            with self.assertRaisesRegexp(CredentialRetrievalError, reg):
                session.get_credentials()
|
import json
def boundingBoxAroundPolyCoords(coords):
    """Return the bounding box of the first ring of *coords*.

    Each vertex is a two-element pair; index 1 feeds the first extent and
    index 0 the second, mirroring the (lat, lon) ordering used by the
    other helpers in this module.  Result: [min1, min0, max1, max0].
    """
    firsts = [vertex[1] for vertex in coords[0]]
    seconds = [vertex[0] for vertex in coords[0]]
    return [min(firsts), min(seconds), max(firsts), max(seconds)]
def pointInBoundingBox(point, bounds):
    """Return True when the GeoJSON *point* lies inside *bounds*.

    *bounds* is [min1, min0, max1, max0] as produced by
    boundingBoxAroundPolyCoords; coordinate 1 of the point is checked
    against the first extent and coordinate 0 against the second.
    """
    second = point['coordinates'][1]
    first = point['coordinates'][0]
    outside = (second < bounds[0] or second > bounds[2]
               or first < bounds[1] or first > bounds[3])
    return not outside
def pnpoly(x, y, coords):
    """
    Point-in-polygon (ray casting / crossing number) algorithm.
    reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation

    ``coords`` is a list of rings; each ring is a list of [a, b] vertices
    where ``a`` is compared against ``y`` and ``b`` against ``x``.
    Returns True when an odd number of polygon edges lie to the right of
    the point along the horizontal ray.
    """
    vert = [[0, 0]]
    for coord in coords:
        for node in coord:
            vert.append(node)
        vert.append(coord[0])
        vert.append([0, 0])
    inside = False
    i = 0
    j = len(vert) - 1
    while i < len(vert):
        if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1]) * (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
            inside = not inside
        # BUG FIX: ``j`` must trail ``i`` by one vertex so each iteration
        # tests a real edge. The original advanced ``i`` first and then set
        # ``j = i``, making j == i on every pass, so no edge ever toggled
        # ``inside`` and the function always returned False.
        j = i
        i += 1
    return inside
def pointInPolygon(p, poly):
    """
    Main point-in-polygon entry point for GeoJSON geometries.

    ``p`` is a GeoJSON Point; ``poly`` a Polygon or MultiPolygon. First a
    cheap bounding-box rejection runs, then the exact pnpoly test.
    """
    coords = [poly['coordinates']] if poly['type'] == 'Polygon' else poly['coordinates']
    inside_box = False
    for coord in coords:
        if pointInBoundingBox(p, boundingBoxAroundPolyCoords(coord)):
            inside_box = True
    if not inside_box:
        return False
    # BUG FIX: start from False — the point is inside only if pnpoly
    # confirms it for at least one polygon. Initializing to True made
    # every point that merely passed the bounding-box test count as
    # inside, regardless of the pnpoly result.
    inside_poly = False
    for coord in coords:
        if pnpoly(p['coordinates'][1], p['coordinates'][0], coord):
            inside_poly = True
    return inside_poly
def test():
    """Smoke test: one point inside and one outside a 10x10 square."""
    in_str = '{"type": "Point", "coordinates": [5, 5]}'
    out_str = '{"type": "Point", "coordinates": [15, 15]}'
    box_str = '{"type": "Polygon","coordinates": [[ [0, 0], [10, 0], [10, 10], [0, 10] ]]}'
    in_box = json.loads(in_str)
    out_box = json.loads(out_str)
    box = json.loads(box_str)
    # BUG FIX: the bare `print x` statement is a SyntaxError on Python 3;
    # `print(x)` with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3 (function call).
    print(pointInPolygon(in_box, box))
    print(pointInPolygon(out_box, box))

if __name__ == '__main__':
    test()
fix bug
import json
def boundingBoxAroundPolyCoords(coords):
    """Compute the axis-aligned bounding box of the first ring in *coords*.

    Vertices are [a, b] pairs; the returned list is
    [min(b), min(a), max(b), max(a)], matching what pointInBoundingBox
    expects as its *bounds* argument.
    """
    ring = coords[0]
    b_vals = [vertex[1] for vertex in ring]
    a_vals = [vertex[0] for vertex in ring]
    return [min(b_vals), min(a_vals), max(b_vals), max(a_vals)]
def pointInBoundingBox(point, bounds):
    """Return True when the GeoJSON *point* falls inside *bounds*.

    *bounds* is the [min1, min0, max1, max0] list produced by
    boundingBoxAroundPolyCoords.
    """
    c1 = point['coordinates'][1]
    c0 = point['coordinates'][0]
    misses = (c1 < bounds[0] or c1 > bounds[2]
              or c0 < bounds[1] or c0 > bounds[3])
    return not misses
def pnpoly(x, y, coords):
    """
    Point-in-polygon via the ray-casting (crossing-number) algorithm.
    reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation

    ``coords`` is a list of rings; each ring is a list of [a, b] vertices
    where ``a`` is compared against ``y`` and ``b`` against ``x``. Each
    ring is closed by re-appending its first vertex, and [0, 0] sentinels
    are inserted between rings exactly as in the original layout.
    """
    verts = [[0, 0]]
    for ring in coords:
        verts.extend(ring)
        verts.append(ring[0])
        verts.append([0, 0])
    inside = False
    prev = len(verts) - 1
    for cur in range(len(verts)):
        # Edge (prev -> cur) crosses the horizontal line at ``y``?
        crosses = (verts[cur][0] > y) != (verts[prev][0] > y)
        if crosses and x < (verts[prev][1] - verts[cur][1]) * (y - verts[cur][0]) / (verts[prev][0] - verts[cur][0]) + verts[cur][1]:
            inside = not inside
        prev = cur
    return inside
def pointInPolygon(p, poly):
    """
    Main point-in-polygon check for GeoJSON Polygon/MultiPolygon.

    A fast bounding-box rejection runs first; the exact pnpoly test is
    applied only when the point falls inside at least one box.
    """
    if poly['type'] == 'Polygon':
        polygons = [poly['coordinates']]
    else:
        polygons = poly['coordinates']
    in_some_box = any(
        pointInBoundingBox(p, boundingBoxAroundPolyCoords(rings))
        for rings in polygons
    )
    if not in_some_box:
        return False
    c1 = p['coordinates'][1]
    c0 = p['coordinates'][0]
    return any(pnpoly(c1, c0, rings) for rings in polygons)
def test():
    """Smoke test: one point inside and one outside a 10x10 square."""
    in_str = '{"type": "Point", "coordinates": [5, 5]}'
    out_str = '{"type": "Point", "coordinates": [15, 15]}'
    box_str = '{"type": "Polygon","coordinates": [[ [0, 0], [10, 0], [10, 10], [0, 10] ]]}'
    in_box = json.loads(in_str)
    out_box = json.loads(out_str)
    box = json.loads(box_str)
    # BUG FIX: the bare `print x` statement is a SyntaxError on Python 3;
    # `print(x)` with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3 (function call).
    print(pointInPolygon(in_box, box))
    print(pointInPolygon(out_box, box))

if __name__ == '__main__':
    test()
|
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
"""
Module implementing the "macro" dictionary class
"""
import log
class Macros(dict):
    """Dictionary of macros whose values may reference other macros.

    Values are stored as %-format templates: references to *other* macros
    are kept unexpanded and resolved lazily at lookup time, while a
    reference to the macro currently being (re)defined is expanded
    immediately against its old value. Attribute access is aliased to
    item access in both directions.
    """

    def update(self, other):
        """Bulk-assign macros one at a time through __setitem__.

        IMPROVEMENT: the inherited dict.update would bypass __setitem__,
        skipping the escaping/self-expansion treatment, so new definitions
        added in bulk would not be handled as macros.
        """
        for key, item in other.items():
            self[key] = item

    def __setitem__(self, name, value):
        # only expand references to ourself
        d = {name: self.get(name)}
        # escape any macros in the new value
        value = value.replace('%', '%%')
        # unescape references to ourself
        value = value.replace('%%%%(%s)s' %name, '%%(%s)s'%name)
        # expand our old value when defining the new value
        # NOTE(review): on first definition self.get(name) is None, so a
        # self-reference in a brand-new macro expands to 'None' — confirm
        # whether callers ever rely on that.
        dict.__setitem__(self, name, value % d)

    def __setattr__(self, name, value):
        self.__setitem__(name, value)

    # we want keys that don't exist to default to empty strings
    # but warn so that we can catch bugs
    def __getitem__(self, name):
        if name in self:
            # Expanding against self recursively resolves nested macro
            # references via this same __getitem__.
            return dict.__getitem__(self, name) %self
        log.warning('name %s does not exist in macros', name)
        return ''

    def __getattr__(self, name):
        return self.__getitem__(name)

    def copy(self):
        """Return a new Macros with the same (still-lazy) definitions."""
        new = Macros()
        new.update(self)
        return new
implement update so we can properly handle macros in the new definitions
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
"""
Module implementing the "macro" dictionary class
"""
import log
class Macros(dict):
    """Dictionary of macros whose values may reference other macros.

    Values are stored as %-format templates: references to *other* macros
    are kept unexpanded and resolved lazily at lookup time, while a
    reference to the macro currently being (re)defined is expanded
    immediately against its old value. Attribute access is aliased to
    item access in both directions.
    """

    def update(self, other):
        """Bulk-assign macros one at a time through __setitem__ so each
        new definition gets the same escaping/self-expansion handling as
        a direct assignment."""
        # BUG FIX: iteritems() is Python-2-only; items() iterates the same
        # pairs on Python 2 and also works on Python 3.
        for key, item in other.items():
            self[key] = item

    def __setitem__(self, name, value):
        # only expand references to ourself
        d = {name: self.get(name)}
        # escape any macros in the new value
        value = value.replace('%', '%%')
        # unescape references to ourself
        value = value.replace('%%%%(%s)s' %name, '%%(%s)s'%name)
        # expand our old value when defining the new value
        dict.__setitem__(self, name, value % d)

    def __setattr__(self, name, value):
        self.__setitem__(name, value)

    # we want keys that don't exist to default to empty strings
    # but warn so that we can catch bugs
    def __getitem__(self, name):
        if name in self:
            # Expanding against self recursively resolves nested macro
            # references via this same __getitem__.
            return dict.__getitem__(self, name) %self
        log.warning('name %s does not exist in macros', name)
        return ''

    def __getattr__(self, name):
        return self.__getitem__(name)

    def copy(self):
        """Return a new Macros with the same (still-lazy) definitions."""
        new = Macros()
        new.update(self)
        return new
|
"""
Tukio Workflow Engine
"""
import asyncio
import logging
import weakref
from tukio.workflow import OverrunPolicy, new_workflow
from tukio.broker import get_broker
from tukio.task import tukio_factory
from tukio.utils import Listen
log = logging.getLogger(__name__)
class _WorkflowSelector:
    """
    Registry associating loaded workflow templates with the topics that
    can trigger them.

    Two indexes are kept: template ID -> template, and topic -> set of
    templates listening on it, with a dedicated bucket for templates that
    listen to everything. This object is an implementation detail of the
    workflow engine; other modules are not meant to use it.
    """

    def __init__(self):
        # The "listen to everything" bucket is always present, even empty.
        self._selector = {Listen.everything: set()}
        self._templates = dict()

    def load(self, template):
        """
        Registers a workflow template. A previously loaded template with
        the same UID is unregistered first so stale topic associations do
        not linger.
        """
        previous = self._templates.get(template.uid)
        if previous is not None:
            self.unload(previous.uid)
        self._templates[template.uid] = template
        listen = template.listen
        if listen is Listen.everything:
            self._selector[Listen.everything].add(template)
        elif listen is Listen.topics:
            for topic in template.topics:
                self._selector.setdefault(topic, set()).add(template)

    def unload(self, tmpl_id):
        """
        Deregisters a template and drops its topic associations. Raises
        `KeyError` when the template ID is not loaded. Returns the
        removed template.
        """
        template = self._templates.pop(tmpl_id)
        listen = template.listen
        if listen is Listen.everything:
            self._selector[Listen.everything].remove(template)
        elif listen is Listen.topics:
            for topic in template.topics:
                bucket = self._selector[topic]
                bucket.remove(template)
                # Drop empty per-topic buckets entirely.
                if not bucket:
                    del self._selector[topic]
        return template

    def clear(self):
        """
        Forgets every loaded template. A subsequent `select()` call will
        always return an empty list.
        """
        self._templates.clear()
        self._selector.clear()
        self._selector[Listen.everything] = set()

    def select(self, topic=None):
        """
        Lists the workflow templates trigger-able by new data on `topic`.
        `topic=None` yields only the templates trigger-able whatever the
        topic (including no topic).
        """
        # Global listeners are candidates in every case.
        candidates = self._selector[Listen.everything]
        if topic is not None and topic in self._selector:
            candidates = candidates | self._selector[topic]
        return list(candidates)

    def get(self, tmpl_id):
        """
        Returns the template with the given ID; raises `KeyError` when no
        such template is loaded.
        """
        return self._templates[tmpl_id]
class Engine(asyncio.Future):
    """
    The Tukio workflow engine. Basically, it can load or unload workflow
    templates and trigger new executions of workflows upon receiving new data.
    The `run()` method allows to select and trigger a particular workflow.
    Workflow executions can be cancelled as per their execution ID (`cancel()`)
    or all at once (`cancel_all()`).
    It is an awaitable object (inherits from `asyncio.Future`) which will be
    marked as done after its `stop()` method has been called and all the
    running workflows are done. Afterwards no new workflow can be triggered.
    """
    def __init__(self, *, loop=None):
        super().__init__(loop=loop)
        # use the custom asyncio task factory
        self._loop.set_task_factory(tukio_factory)
        self._selector = _WorkflowSelector()
        # Maps template UID -> list of running workflow instances.
        self._running = dict()
        self._broker = get_broker(self._loop)
        # Serializes template (un)loading against workflow triggering.
        self._lock = asyncio.Lock()
        # Secondary index: execution ID -> workflow instance. Weak values,
        # so entries disappear once a workflow is no longer referenced.
        self._running_by_id = weakref.WeakValueDictionary()
        self._must_stop = False
    @property
    def selector(self):
        """
        Returns the `_WorkflowSelector` instance for use outside of the engine
        """
        return self._selector
    @property
    def instances(self):
        """
        Returns the dict or running workflows
        (execution ID -> workflow instance, weak-valued).
        """
        return self._running_by_id
    def _add_wflow(self, wflow):
        """
        Adds a new entry into the dict of running workflows and updates the
        weak value dict to index it by its execution ID.
        """
        # EAFP: append to the template's list, creating it on first use.
        try:
            self._running[wflow.template.uid].append(wflow)
        except KeyError:
            self._running[wflow.template.uid] = [wflow]
        self._running_by_id[wflow.uid] = wflow
        log.debug('new workflow started %s', wflow)
    def _remove_wflow(self, wflow):
        """
        Removes a worflow instance from the dict of running workflows.
        Registered as a 'done callback' on every started workflow.
        """
        self._running[wflow.template.uid].remove(wflow)
        # Cleanup the running dict if no more running instance of that template
        if len(self._running[wflow.template.uid]) == 0:
            del self._running[wflow.template.uid]
        del self._running_by_id[wflow.uid]
        log.debug('workflow removed from the running list: %s', wflow)
        # `result()` re-raises the exception (if any) the workflow ended on;
        # log it so failures are not silently dropped.
        try:
            wflow.result()
        except Exception as exc:
            log.warning('workflow %s ended on exception', wflow)
            log.exception(exc)
        # Resolve the engine future once `stop()` was requested and the last
        # running workflow has completed.
        if self._must_stop and not self._running and not self.done():
            self.set_result(None)
            log.debug('no more workflow running, engine stopped')
    def stop(self, force=False):
        """
        Cancels all workflows and prevent new instances from being run.
        Returns the engine itself (awaitable until fully stopped).
        """
        self._must_stop = True
        if not self._running and not self.done():
            # Nothing is running: the engine is stopped right away.
            self.set_result(None)
        elif force:
            self.cancel_all()
        return self
    def _run_in_task(self, callback, *args, **kwargs):
        """
        Wrap a regular function into a coroutine and run it in a task.
        This is intended to wrap time consuming functions into a task so as to
        prevent slow operations from blocking the whole loop.
        """
        async def coro():
            return callback(*args, **kwargs)
        return asyncio.ensure_future(coro(), loop=self._loop)
    def _load(self, template):
        """
        Loads a workflow template into the engine. Each workflow may be
        triggered as soon as it is loaded.
        Duplicates or invalid descriptions raise an exception.
        This operation does not affect workflow executions in progress.
        """
        template.validate()
        self._selector.load(template)
        log.debug("new workflow template loaded: %s", template)
    async def load(self, template):
        """
        A coroutine that loads a new workflow template while preventing other
        coroutines from updating the dict of loaded templates in the mean time.
        """
        with await self._lock:
            await self._run_in_task(self._load, template)
    async def reload(self, templates):
        """
        Replaces the current list of loaded workflow templates by a new one.
        This operation does not affect workflow executions in progress.
        """
        with await self._lock:
            self._selector.clear()
            for tmpl in templates:
                await self._run_in_task(self._load, tmpl)
    async def unload(self, template_id):
        """
        Unloads a workflow template from the engine. Returns the template
        object if the template was found and actually unloaded, else raises
        a `KeyError` exception.
        """
        with await self._lock:
            template = self._selector.unload(template_id)
        return template
    async def data_received(self, data, topic=None):
        """
        This method should be called to pass an event to the workflow engine
        which in turn will disptach this event to the right running workflows
        and may trigger new workflow executions.
        Returns the list of workflow instances started by this event.
        """
        if topic:
            log.debug("data received '%s' in topic '%s'", data, topic)
        else:
            log.debug("data received '%s' (no topic)", data)
        # Dispatch to already-running workflows first...
        self._broker.dispatch(data, topic)
        # ...then try to start new ones under the lock, so the set of loaded
        # templates cannot change while we select from it.
        with await self._lock:
            templates = self._selector.select(topic)
            # Try to trigger new workflows from the current dict of workflow
            # templates at all times!
            wflows = []
            for tmpl in templates:
                wflow = await self._run_in_task(self.run, tmpl, data)
                if wflow:
                    wflows.append(wflow)
            return wflows
    def _do_run(self, wflow, data):
        """
        Adds a workflow to the running list and actually starts it.
        """
        self._add_wflow(wflow)
        wflow.run(data)
    def run(self, template, data):
        """
        Try to run a new instance of workflow defined by `tmpl_id` according to
        the instances already running and the overrun policy.
        Returns the new workflow instance, or None when nothing was started.
        """
        # Don't start new workflow instances if `stop()` was called.
        if self._must_stop:
            log.debug("The engine is stopping, cannot run a new workflow from"
                      "template id %s", template.uid)
            return
        # Do nothing if the template is not loaded
        try:
            self._selector.get(template.uid)
        except KeyError:
            log.error('Template %s is not loaded', template.uid)
            return
        running = self._running.get(template.uid)
        # Always apply the policy of the current workflow template (workflow
        # instances may run with another version of the template)
        wflow = new_workflow(template, running=running, loop=self._loop)
        if wflow:
            wflow.add_done_callback(self._remove_wflow)
            if template.policy == OverrunPolicy.abort_running and running:
                # Defer the actual start until the aborted instances are done.
                def cb():
                    self._do_run(wflow, data)
                asyncio.ensure_future(self._wait_abort(running, cb))
            else:
                self._do_run(wflow, data)
        else:
            log.debug("skip new workflow from %s (overrun policy)", template)
        return wflow
    async def _wait_abort(self, running, callback):
        """
        Wait for the end of a list of aborted (cancelled) workflows before
        starting a new one when the policy is 'abort-running'.
        """
        # Always act on a snapshot of the original running list. Don't forget
        # it is a living list!
        others = list(running)
        await asyncio.wait(others)
        callback()
    def side_run(self, template, data):
        """
        Starts a new execution of the workflow template regardless of the
        overrun policy and already running workflows.
        Note: it does NOT load the workflow template in the engine.
        """
        if self._must_stop:
            log.debug("The engine is stopping, cannot run a new workflow from"
                      "template id %s", template.uid)
            return None
        wflow = new_workflow(template, loop=self._loop)
        self._add_wflow(wflow)
        wflow.add_done_callback(self._remove_wflow)
        wflow.run(data)
        return wflow
    def cancel(self, exec_id):
        """
        Cancels an execution of workflow identified by its execution ID.
        The cancelled workflow instance (a future object) is returned.
        If the workflow could not be found, returns None.
        """
        wflow = self._running_by_id.get(exec_id)
        if wflow:
            wflow.cancel()
            log.debug('cancelled workflow %s', wflow)
        return wflow
    def cancel_all(self):
        """
        Cancels all the running workflows.
        Returns the number of workflows actually cancelled.
        """
        cancelled = 0
        for wf_list in self._running.values():
            for wflow in wf_list:
                is_cancelled = wflow.cancel()
                if is_cancelled:
                    cancelled += 1
        log.debug('cancelled %s workflows', cancelled)
        return cancelled
new 'trigger' and 'run_once' methods + code cleanup
"""
Tukio Workflow Engine
"""
import asyncio
import logging
import weakref
from tukio.workflow import OverrunPolicy, new_workflow, Workflow
from tukio.broker import get_broker
from tukio.task import tukio_factory
from tukio.utils import Listen
log = logging.getLogger(__name__)
class _WorkflowSelector:
    """
    Registry of the workflow templates loaded in the workflow engine and of
    the template ID/topic associations. It can list the 'trigger-able'
    workflow templates for a given topic and return a template by its ID.
    This object is used from within the workflow engine and is not meant to
    be used by other modules.
    """
    def __init__(self):
        # `Listen.everything` always maps to the set of global templates.
        self._selector = {Listen.everything: set()}
        self._templates = dict()
    def load(self, template):
        """
        Registers a workflow template in the selector. A template already
        loaded under the same ID is unloaded first.
        """
        # Drop stale template ID/topic associations before registering the
        # new version of the template.
        if template.uid in self._templates:
            self.unload(self._templates[template.uid].uid)
        self._templates[template.uid] = template
        # Record the new template ID/topic associations.
        mode = template.listen
        if mode is Listen.everything:
            self._selector[Listen.everything].add(template)
        elif mode is Listen.topics:
            for topic in template.topics:
                self._selector.setdefault(topic, set()).add(template)
    def unload(self, tmpl_id):
        """
        Removes a workflow template from the selector and returns it.
        Raises a `KeyError` exception if that template ID is not loaded.
        """
        template = self._templates.pop(tmpl_id)
        mode = template.listen
        if mode is Listen.everything:
            self._selector[Listen.everything].remove(template)
        elif mode is Listen.topics:
            for topic in template.topics:
                subscribers = self._selector[topic]
                subscribers.remove(template)
                # Drop the topic entry once its last template is gone.
                if not subscribers:
                    del self._selector[topic]
        return template
    def clear(self):
        """
        Forgets every loaded workflow template. Afterwards `select()` always
        returns an empty list.
        """
        self._templates.clear()
        self._selector.clear()
        self._selector[Listen.everything] = set()
    def select(self, topic=None):
        """
        Lists the workflow templates that can be triggered by new data
        received in the given topic.
        `topic=None` selects only the templates trigger-able whatever the
        topic (including no topic at all).
        """
        # Global (listen-to-everything) templates are always candidates.
        matched = set(self._selector[Listen.everything])
        if topic is not None:
            matched |= self._selector.get(topic, set())
        return list(matched)
    def get(self, tmpl_id):
        """
        Returns the workflow template with the given template ID. Raises a
        `KeyError` exception if no template is found.
        """
        return self._templates[tmpl_id]
class Engine(asyncio.Future):
    """
    The Tukio workflow engine. Basically, it can load or unload workflow
    templates and trigger new executions of workflows upon receiving new data.
    The `trigger()` and `run_once()` coroutines allow to start a particular
    workflow directly.
    It is an awaitable object (inherits from `asyncio.Future`) which will be
    marked as done after its `stop()` method has been called and all the
    running workflows are done. Afterwards no new workflow can be triggered.
    """
    def __init__(self, *, loop=None):
        super().__init__(loop=loop)
        # use the custom asyncio task factory
        self._loop.set_task_factory(tukio_factory)
        self._selector = _WorkflowSelector()
        # Flat list of all running workflow instances (all templates mixed).
        self._instances = []
        self._broker = get_broker(self._loop)
        # Serializes template (un)loading against workflow triggering.
        self._lock = asyncio.Lock()
        self._must_stop = False
    @property
    def selector(self):
        """
        Returns the `_WorkflowSelector` instance for use outside of the engine
        """
        return self._selector
    @property
    def instances(self):
        """
        Returns the list of running workflow instances.
        """
        return self._instances
    def _add_wflow(self, wflow):
        """
        Adds a new entry into the list of running instances.
        """
        self._instances.append(wflow)
        log.debug('new workflow started %s', wflow)
    def _remove_wflow(self, wflow):
        """
        Removes a worflow instance from the list of running instances.
        Registered as a 'done callback' on every workflow started through
        `_do_run()`.
        """
        self._instances.remove(wflow)
        log.debug('workflow removed from the running list: %s', wflow)
        # `result()` re-raises the exception (if any) the workflow ended on;
        # log it so failures are not silently dropped.
        try:
            wflow.result()
        except Exception as exc:
            log.warning('workflow %s ended on exception', wflow)
            log.exception(exc)
        # Resolve the engine future once `stop()` was requested and the last
        # running workflow has completed.
        if self._must_stop and not self._instances and not self.done():
            self.set_result(None)
            log.debug('no more workflow running, engine stopped')
    def stop(self, force=False):
        """
        Prevents new workflow instances from being run and optionally cancels
        all running workflows (when force=True). Returns the engine itself,
        which can be awaited until fully stopped.
        """
        self._must_stop = True
        if not self._instances and not self.done():
            self.set_result(None)
        elif force:
            # Iterate over a snapshot: done callbacks remove cancelled
            # workflows from `self._instances`.
            for wflow in list(self._instances):
                wflow.cancel()
                log.debug('cancelled workflow %s', wflow)
        return self
    def _run_in_task(self, callback, *args, **kwargs):
        """
        Wrap a regular function into a coroutine and run it in a task.
        This is intended to wrap time consuming functions into a task so as to
        prevent slow operations from blocking the whole loop.
        """
        async def coro():
            return callback(*args, **kwargs)
        return asyncio.ensure_future(coro(), loop=self._loop)
    def _load(self, template):
        """
        Loads a workflow template into the engine. Each workflow may be
        triggered as soon as it is loaded.
        Duplicates or invalid descriptions raise an exception.
        This operation does not affect workflow executions in progress.
        """
        template.validate()
        self._selector.load(template)
        log.debug("new workflow template loaded: %s", template)
    async def load(self, template):
        """
        A coroutine that loads a new workflow template while preventing other
        coroutines from updating the dict of loaded templates in the mean time.
        """
        with await self._lock:
            await self._run_in_task(self._load, template)
    async def reload(self, templates):
        """
        Replaces the current list of loaded workflow templates by a new one.
        This operation does not affect workflow executions in progress.
        """
        with await self._lock:
            self._selector.clear()
            for tmpl in templates:
                await self._run_in_task(self._load, tmpl)
    async def unload(self, template_id):
        """
        Unloads a workflow template from the engine. Returns the template
        object if the template was found and actually unloaded, else raises
        a `KeyError` exception.
        """
        with await self._lock:
            template = self._selector.unload(template_id)
        return template
    async def data_received(self, data, topic=None):
        """
        This method should be called to pass an event to the workflow engine
        which in turn will dispatch this event to the right running workflows
        and may trigger new workflow executions. Returns the list of workflow
        instances started by this event (None when the engine is stopping).
        """
        log.debug("data received: %s (topic=%s)", data, topic)
        # Dispatch data to 'listening' tasks in all cases
        self._broker.dispatch(data, topic)
        # Don't start new workflow instances if `stop()` was called.
        if self._must_stop:
            log.debug("The engine is stopping, cannot trigger new workflows")
            return
        with await self._lock:
            templates = self._selector.select(topic)
            # Try to trigger new workflows from the current dict of workflow
            # templates at all times!
            wflows = []
            for tmpl in templates:
                wflow = self._try_run(tmpl, data)
                if wflow:
                    wflows.append(wflow)
            return wflows
    async def trigger(self, template_id, data):
        """
        Trigger a new execution of the workflow template identified by
        `template_id`. Use this method instead of a reserved topic +
        `data_received` to trigger a specific workflow.
        Returns the new workflow instance, or None when the template is not
        loaded or the overrun policy prevented the run.
        """
        with await self._lock:
            # Ignore unknown (aka not loaded) workflow templates
            try:
                template = self._selector.get(template_id)
            except KeyError:
                log.error('workflow template %s not loaded', template_id)
                return None
            return self._try_run(template, data)
    def _do_run(self, wflow, data):
        """
        Registers a workflow in the list of running instances (where it must
        stay until it completes) and actually starts it.
        """
        self._add_wflow(wflow)
        wflow.add_done_callback(self._remove_wflow)
        wflow.run(data)
    def _try_run(self, template, data):
        """
        Try to run a new instance of workflow according to the instances
        already running and the overrun policy.
        Returns the new workflow instance, or None when nothing was started.
        """
        # Don't start new workflow instances if `stop()` was called.
        if self._must_stop:
            log.debug("The engine is stopping, cannot trigger new workflows")
            return
        # The overrun policy only cares about instances spawned from this
        # very template. `self._instances` is a plain list (it has no
        # dict-style `.get()`), so filter it by template UID and fall back
        # to None when no instance of this template is running.
        running = [
            inst for inst in self._instances
            if inst.template.uid == template.uid
        ] or None
        # Always apply the policy of the current workflow template (workflow
        # instances may run with another version of the template)
        wflow = new_workflow(template, running=running, loop=self._loop)
        if wflow:
            if template.policy == OverrunPolicy.abort_running and running:
                # Defer the actual start until the aborted instances are done.
                def cb():
                    self._do_run(wflow, data)
                asyncio.ensure_future(self._wait_abort(running, cb))
            else:
                self._do_run(wflow, data)
        else:
            log.debug("skip new workflow from %s (overrun policy)", template)
        return wflow
    async def _wait_abort(self, running, callback):
        """
        Wait for the end of a list of aborted (cancelled) workflows before
        starting a new one when the policy is 'abort-running'.
        XXX: we shouldn't find policy-specific code in the engine! Find a
        better way to do it.
        """
        # Always act on a snapshot of the original running list. Don't forget
        # it is a living list!
        others = list(running)
        await asyncio.wait(others)
        callback()
    async def run_once(self, template, data):
        """
        Starts a new execution of the workflow template regardless of the
        overrun policy and already running workflows.
        Note: it does NOT load the workflow template in the engine.
        """
        if self._must_stop:
            log.debug("The engine is stopping, cannot run a new workflow "
                      "from template id %s", template.uid)
            return None
        with await self._lock:
            wflow = Workflow(template, loop=self._loop)
            self._do_run(wflow, data)
        return wflow
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import os
#DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): DEBUG/TEMPLATE_DEBUG should be False in production — confirm
# they are overridden by local_settings on deploy.
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
    ('Admin User', 'admin@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'geoq',  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'geoq',
        'PASSWORD': 'geoq',
        'HOST': 'localhost',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432',  # Set to empty string for default.
    }
}
# Use this to change the base bootstrap library
# NOTE(review): this value is overwritten near the bottom of this file
# (BOOTSTRAP_BASE_URL = STATIC_URL), so this assignment is effectively dead.
BOOTSTRAP_BASE_URL = '/static/bootstrap/'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/opt/src/pyenv/geoq/nga-geoq'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/images/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_URL_FOLDER = ''  # Can be set to something like 'geoq-test/' if the app is not run at root level
STATIC_ROOT = '{0}{1}'.format('/var/www/static/', STATIC_URL_FOLDER)
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '{0}{1}'.format('/static/', STATIC_URL_FOLDER)
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'static'),
    # TODO: Should we add this static location back in?
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
LEAFLET_CSS = [
    STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw.css',
    # NOTE(review): os.path.join discards STATIC_ROOT here because the second
    # argument starts with '/' — verify this absolute path is intentional.
    os.path.join(STATIC_ROOT, '/static/leaflet/leaflet-draw/leaflet.draw.css')
]
LEAFLET_CONFIG = {
    'RESET_VIEW' : False,
    'PLUGINS': {
        'draw': {
            'css': LEAFLET_CSS,
            'js': STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw-src.js',
            'repo': 'https://github.com/Leaflet/Leaflet.draw'
        },
        'esri': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/esri-leaflet-src.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'esriCluster': {
            'css': [STATIC_URL + 'leaflet/MarkerCluster.css'],
            'js': [STATIC_URL + 'leaflet/ClusteredFeatureLayer.js', STATIC_URL + 'leaflet/leaflet.markercluster.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'MakiMarkers': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/Leaflet.MakiMarkers.js'],
            'repo': 'https://github.com/jseppi/Leaflet.MakiMarkers'
        }
    }
}
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; rotate it
# and supply the real value via local_settings in any deployment.
SECRET_KEY = '2t=^l2e$e5!du$0^c@3&qk4h_*stwwgp#1o$*n7#eisc)^2(wk'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    #'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'geoq.core.contextprocessors.app_settings',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    #'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'geoq.core.middleware.UserPermsMiddleware',  # works w/ guardian
    'geoq.core.middleware.Http403Middleware',
)
# auth setup
AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'guardian.backends.ObjectPermissionBackend',
    'django.contrib.auth.backends.ModelBackend',  # default
)
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
LOGIN_REDIRECT_URL = '/accounts/%(username)s/' #'/geoq/' #
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/geoq'
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
USERENA_ACTIVATION_DAYS = 3
USERENA_MUGSHOT_DEFAULT = 'identicon'
USERENA_HIDE_EMAIL = True
USERENA_HTML_EMAIL = False
ROOT_URLCONF = 'geoq.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geoq.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(SITE_ROOT, 'templates'),
    SITE_ROOT,
)
# works with crispy forms.
CRISPY_TEMPLATE_PACK = 'bootstrap'
# Applications enabled for this Django project. Order matters for template
# and static-file resolution: project apps ('geoq.*') are listed after the
# contrib/third-party apps they build on.
# Fix: 'django.contrib.humanize' was listed twice; each app must appear once.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'south',
    'django_select2',
    'reversion',
    'easy_thumbnails',
    'userena',
    'geoq.accounts',
    'geoq.core',
    'geoq.maps',
    'httpproxy',
    'guardian',
    'geoq.feedback',
    'django.contrib.messages',
    'userena.contrib.umessages',
    'geoq.locations',
    'geoq.proxy',
    'geoq.training',
    'django.contrib.gis',
    'django.contrib.humanize',
    'django.contrib.staticfiles',
    'compressor',
    'geoexplorer',
    'bootstrap_toolkit',
    'leaflet',
    'jsonfield',
    'crispy_forms',
    'django_extensions',
    'debug_toolbar',
    'geoq.mgrs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only lets records through when DEBUG is False (i.e. production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Set default login location
#LOGIN_REDIRECT_URL = '/'
# Gamification variables
GAMIFICATION_SERVER = ''
GAMIFICATION_PROJECT = 'geoq'
#GeoServer
GEOSERVER_WFS_JOB_LAYER = None
# Bootstrap variables to work with django-bootstrap-toolkit
# Comment these out to use cdnjs.cloudflare.com versions of Bootstrap
BOOTSTRAP_BASE_URL = STATIC_URL
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'js/'
BOOTSTRAP_JS_URL = BOOTSTRAP_JS_BASE_URL + 'bootstrap.min.js'
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
# Override production settings with local settings if they exist.
# The wildcard import deliberately mutates this module's globals.
try:
    from local_settings import *
except ImportError:
    # No `, e` binding: the exception object was never used, and the old
    # `except ImportError, e:` form is Python-2-only syntax.
    # local_settings does not exist; keep the defaults defined above.
    pass
Django Toolbar setting added to resolve errors seen on geo-q.com
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
import os
#DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): DEBUG/TEMPLATE_DEBUG should be False in production — confirm
# they are overridden by local_settings on deploy.
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
    ('Admin User', 'admin@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'geoq',  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'geoq',
        'PASSWORD': 'geoq',
        'HOST': 'localhost',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432',  # Set to empty string for default.
    }
}
# Use this to change the base bootstrap library
# NOTE(review): this value is overwritten near the bottom of this file
# (BOOTSTRAP_BASE_URL = STATIC_URL), so this assignment is effectively dead.
BOOTSTRAP_BASE_URL = '/static/bootstrap/'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/opt/src/pyenv/geoq/nga-geoq'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/images/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_URL_FOLDER = ''  # Can be set to something like 'geoq-test/' if the app is not run at root level
STATIC_ROOT = '{0}{1}'.format('/var/www/static/', STATIC_URL_FOLDER)
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '{0}{1}'.format('/static/', STATIC_URL_FOLDER)
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'static'),
    # TODO: Should we add this static location back in?
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
LEAFLET_CSS = [
    STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw.css',
    # NOTE(review): os.path.join discards STATIC_ROOT here because the second
    # argument starts with '/' — verify this absolute path is intentional.
    os.path.join(STATIC_ROOT, '/static/leaflet/leaflet-draw/leaflet.draw.css')
]
LEAFLET_CONFIG = {
    'RESET_VIEW' : False,
    'PLUGINS': {
        'draw': {
            'css': LEAFLET_CSS,
            'js': STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw-src.js',
            'repo': 'https://github.com/Leaflet/Leaflet.draw'
        },
        'esri': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/esri-leaflet-src.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'esriCluster': {
            'css': [STATIC_URL + 'leaflet/MarkerCluster.css'],
            'js': [STATIC_URL + 'leaflet/ClusteredFeatureLayer.js', STATIC_URL + 'leaflet/leaflet.markercluster.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'MakiMarkers': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/Leaflet.MakiMarkers.js'],
            'repo': 'https://github.com/jseppi/Leaflet.MakiMarkers'
        }
    }
}
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; rotate it
# and supply the real value via local_settings in any deployment.
SECRET_KEY = '2t=^l2e$e5!du$0^c@3&qk4h_*stwwgp#1o$*n7#eisc)^2(wk'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    #'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'geoq.core.contextprocessors.app_settings',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    #'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'geoq.core.middleware.UserPermsMiddleware',  # works w/ guardian
    'geoq.core.middleware.Http403Middleware',
)
# auth setup
AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'guardian.backends.ObjectPermissionBackend',
    'django.contrib.auth.backends.ModelBackend',  # default
)
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
LOGIN_REDIRECT_URL = '/accounts/%(username)s/' #'/geoq/' #
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/geoq'
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
USERENA_ACTIVATION_DAYS = 3
USERENA_MUGSHOT_DEFAULT = 'identicon'
USERENA_HIDE_EMAIL = True
USERENA_HTML_EMAIL = False
ROOT_URLCONF = 'geoq.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geoq.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(SITE_ROOT, 'templates'),
    SITE_ROOT,
)
# works with crispy forms.
CRISPY_TEMPLATE_PACK = 'bootstrap'
# Applications enabled for this Django project. Order matters for template
# and static-file resolution: project apps ('geoq.*') are listed after the
# contrib/third-party apps they build on.
# Fix: 'django.contrib.humanize' was listed twice; each app must appear once.
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'south',
    'django_select2',
    'reversion',
    'easy_thumbnails',
    'userena',
    'geoq.accounts',
    'geoq.core',
    'geoq.maps',
    'httpproxy',
    'guardian',
    'geoq.feedback',
    'django.contrib.messages',
    'userena.contrib.umessages',
    'geoq.locations',
    'geoq.proxy',
    'geoq.training',
    'django.contrib.gis',
    'django.contrib.humanize',
    'django.contrib.staticfiles',
    'compressor',
    'geoexplorer',
    'bootstrap_toolkit',
    'leaflet',
    'jsonfield',
    'crispy_forms',
    'django_extensions',
    'debug_toolbar',
    'geoq.mgrs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only lets records through when DEBUG is False (i.e. production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Set default login location
#LOGIN_REDIRECT_URL = '/'
# Gamification variables
GAMIFICATION_SERVER = ''
GAMIFICATION_PROJECT = 'geoq'
#GeoServer
GEOSERVER_WFS_JOB_LAYER = None
# For Django Debug Toolbar - need to set this to resolve some errors
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Bootstrap variables to work with django-bootstrap-toolkit
# Comment these out to use cdnjs.cloudflare.com versions of Bootstrap
BOOTSTRAP_BASE_URL = STATIC_URL
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'js/'
BOOTSTRAP_JS_URL = BOOTSTRAP_JS_BASE_URL + 'bootstrap.min.js'
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
# Override production settings with local settings if they exist
try:
from local_settings import *
except ImportError, e:
# local_settings does not exist
pass
# ---- file boundary (concatenation artifact) ----
# Copyright (c) 2011, 2012, 2013 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""
Classes:
- LtiSysDyn
- PwaSysDyn
- HybridSysDyn
NO, 2 Jul 2013.
"""
import logging
logger = logging.getLogger(__name__)
import itertools
import numpy as np
import polytope as pc
try:
    from tulip.graphics import newax, quiver
# fix: `except Exception, e` is Python-2-only syntax; the `as` form
# works on Python 2.6+ and Python 3
except Exception as e:
    # graphics are optional; plotting is disabled when unavailable
    logger.error(e)
    quiver = None
class LtiSysDyn(object):
    r"""Represent discrete-time continuous dynamics:

        s[t+1] = A*s[t] + B*u[t] + E*d[t] + K

    subject to the constraints:

        u[t] \in Uset
        d[t] \in Wset
        s[t] \in domain

    where:
        - u[t] the control input
        - d[t] the disturbance input
        - s[t] the system state

    A LtiSysDyn object contains the fields:
        - A, B, E, K, (matrices)
        - Uset, Wset and domain (each a polytope.Polytope)
    as defined above.

    Note: For state-dependent bounds on the input,
        [u[t]; s[t]] \in Uset
    can be used.

    see also
    --------
    PwaSysDyn, HybridSysDyn, polytope.Polytope
    """
    def __init__(self, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        # identity comparison (`is None`) instead of `== None`:
        # Polytope need not define equality against None
        if Uset is None:
            print("Warning: Uset not given in LtiSysDyn()")
        elif not isinstance(Uset, pc.Polytope):
            raise Exception("LtiSysDyn: `Uset` has to be a Polytope")
        if domain is None:
            print("Warning: domain is not given in LtiSysDyn()")
        elif not isinstance(domain, pc.Polytope):
            raise Exception("LtiSysDyn: `domain` has to be a Polytope")
        self.A = A
        self.B = B
        # missing K defaults to a zero column vector of state dimension
        if len(K) == 0:
            if len(A) != 0:
                self.K = np.zeros([A.shape[1], 1])
            else:
                self.K = K
        else:
            # normalize K into a column vector
            self.K = K.reshape(K.size, 1)
        # missing E means no disturbance: zero E, empty Wset
        # (fix: use boolean `and` instead of bitwise `&`)
        if (len(E) == 0) and (len(A) != 0):
            self.E = np.zeros([A.shape[1], 1])
            self.Wset = pc.Polytope()
        else:
            self.E = E
            self.Wset = Wset
        self.Uset = Uset
        self.domain = domain

    def __repr__(self):
        """Readable dump of all matrices and constraint sets."""
        output = "A =\n" + str(self.A)
        output += "\nB =\n" + str(self.B)
        output += "\nE =\n" + str(self.E)
        output += "\nK =\n" + str(self.K)
        output += "\nUset =\n" + str(self.Uset)
        output += "\nWset =\n" + str(self.Wset)
        return output

    def plot(self, ax=None, color=None, show_domain=True):
        """Quiver plot of the vector field (A - I) x over the domain.

        @param color: RGB triple; when omitted a fresh random color is
            drawn per call (fix: the old default `np.random.rand(3)` was
            evaluated once at class-definition time, so all plots shared
            a single color and the RNG stream was consumed at import).
        """
        if color is None:
            color = np.random.rand(3)
        if quiver is None:
            logger.warning('pyvectorized not found. No plotting.')
            return
        (x, res) = pc.grid_region(self.domain)
        n = self.A.shape[0]
        DA = self.A - np.eye(n)
        v = DA.dot(x)
        if ax is None:
            ax, fig = newax()
        if show_domain:
            self.domain.plot(ax, color)
        quiver(x, v, ax)
        return ax
class PwaSysDyn(object):
    """PwaSysDyn class for specifying a polytopic piecewise affine system.

    A PwaSysDyn object contains the fields:
        - C{list_subsys}: list of LtiSysDyn
        - C{domain}: domain over which piecewise affine system is defined,
            type: polytope.Polytope

    For the system to be well-defined the domains of its subsystems should be
    mutually exclusive (modulo intersections with empty interior) and cover the
    domain.

    see also
    --------
    LtiSysDyn, HybridSysDyn, polytope.Polytope
    """
    def __init__(self, list_subsys=None, domain=None):
        # fix: avoid the shared mutable default argument `[]`
        if list_subsys is None:
            list_subsys = []
        if domain is None:
            print("Warning: domain not given in PwaSysDyn()")
        elif not isinstance(domain, (pc.Polytope, pc.Region)):
            raise Exception("PwaSysDyn: `domain` has to be a Polytope or Region")
        if len(list_subsys) > 0:
            uncovered_dom = domain.copy()
            n = list_subsys[0].A.shape[1]  # State space dimension
            m = list_subsys[0].B.shape[1]  # Input space dimension
            p = list_subsys[0].E.shape[1]  # Disturbance space dimension
            for subsys in list_subsys:
                uncovered_dom = uncovered_dom.diff(subsys.domain)
                if (n != subsys.A.shape[1] or m != subsys.B.shape[1] or
                        p != subsys.E.shape[1]):
                    raise Exception("PwaSysDyn: state, input, disturbance " +
                                    "dimensions have to be the same for all " +
                                    "subsystems")
            if not pc.is_empty(uncovered_dom):
                raise Exception("PwaSysDyn: subdomains must cover the domain")
            # pairwise interiors must not overlap
            for x in itertools.combinations(list_subsys, 2):
                if pc.is_fulldim(x[0].domain.intersect(x[1].domain)):
                    raise Exception("PwaSysDyn: subdomains have to be mutually" +
                                    " exclusive")
        self.list_subsys = list_subsys
        self.domain = domain

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Construct a PwaSysDyn with a single LTI subsystem over `domain`."""
        lti_sys = LtiSysDyn(A, B, E, K, Uset, Wset, domain)
        return cls([lti_sys], domain)

    def plot(self, ax=None, show_domain=True):
        """Plot every subsystem's vector field on one axes; returns the axes
        (fix: the original dropped `ax`, unlike LtiSysDyn.plot)."""
        if ax is None:
            ax, fig = newax()
        for subsystem in self.list_subsys:
            subsystem.plot(ax, color=np.random.rand(3),
                           show_domain=show_domain)
        return ax
class HybridSysDyn(object):
    """Represent hybrid systems switching between dynamic modes.

    A HybridSysDyn represents a system with switching modes
    that depend on both discrete:
        - n_env environment variables (uncontrolled)
        - n_sys system variables (controlled)

    A HybridSysDyn object contains the fields:
        - C{disc_domain_size}: 2-tuple of numbers of modes
            type: (n_env, n_sys)
        - C{env_labels}: (optional) labels for discrete environment variables
            type: list of len(n_env), default: range(n_env)
        - C{disc_sys_labels}: (optional) labels for discrete system variables
            type: list of len(n_sys), default: range(n_sys)
        - C{dynamics}: mapping mode 2-tuples to active dynamics:
            (env_label, sys_label) -> PwaSysDyn
            type: dict
            default: If no env_label or sys_label passed,
            then default to int indices (i,j) PwaSysDyn.
        - C{cts_ss}: continuous state space over which hybrid system is defined.
            type: polytope.Region
        - C{time_semantics}: TBD. Current default semantics are discrete-time.

    State s[t] and discrete environment env[t] are observed, and
    continuous input u[t] and discrete system variable m[t] are
    determined based on env[t] and s[t] (synchronously at time t).

    Note: We assume that system and environment switching modes
    are independent of one another.
    (Use LTL statement to make it not so.)

    see also
    --------
    LtiSysDyn, PwaSysDyn, polytope.Region
    """
    def __init__(self, disc_domain_size=(1, 1),
                 dynamics=None, cts_ss=None,
                 env_labels=None, disc_sys_labels=None):
        # check that the continuous domain is specified
        if cts_ss is None:
            logger.warning('continuous state space not given to HybridSysDyn')
        else:
            if not isinstance(cts_ss, (pc.Polytope, pc.Region)):
                raise Exception('HybridSysDyn: ' +
                                '`cts_ss` must be a Polytope or Region')
        self.disc_domain_size = disc_domain_size
        # If label numbers agree with disc_domain_size, then use them.
        # Otherwise, ignore the labels.
        n_env, n_sys = disc_domain_size
        self._env_labels = self._check_labels(n_env, env_labels)
        self._disc_sys_labels = self._check_labels(n_sys, disc_sys_labels)
        # Check each dynamics key is a valid mode,
        # i.e., a valid combination of env and sys labels.
        if dynamics is not None:
            modes = [(a, b) for a in self.env_labels
                     for b in self.disc_sys_labels]
            undefined_modes = set(dynamics.keys()).difference(modes)
            if undefined_modes:
                msg = 'HybridSysDyn: `dynamics` keys inconsistent'
                msg += ' with discrete mode labels.\n'
                msg += 'Undefined modes:\n' + str(undefined_modes)
                raise ValueError(msg)
            missing_modes = set(modes).difference(dynamics.keys())
            if missing_modes:
                msg = 'Missing the modes:\n' + str(missing_modes)
                msg += '\n Make sure you did not forget any modes,\n'
                msg += 'otherwise this is fine.'
                logger.warning(msg)
        self.dynamics = dynamics
        self.cts_ss = cts_ss

    def _check_labels(self, n, labels):
        """Return `labels` if it is a sized collection of length `n`,
        otherwise warn and return None (integer labels will be used)."""
        # don't complain for default
        if labels is None:
            return None
        try:
            # is len correct ?
            if len(labels) != n:
                msg = 'number of environment labels is inconsistent'
                msg += ' with discrete domain size.\n'
                msg += 'Ignoring given environment labels.\n'
                msg += 'Defaulting to integer labels.'
                logger.warning(msg)
                return None
        except TypeError:
            # fix: the original concatenated type(labels) (a type object)
            # with a str, which itself raised TypeError inside the handler;
            # also narrow the bare `except` to the len() failure
            logger.warning('Environment labels of type %s have no len()',
                           type(labels))
            return None
        return labels

    @property
    def env_labels(self):
        """Environment mode labels; defaults to range(n_env).

        Fix: the original returned range(self._env_labels) when the
        field was None, i.e. range(None), which raises TypeError.
        """
        if self._env_labels is None:
            return range(self.disc_domain_size[0])
        else:
            return self._env_labels

    @property
    def disc_sys_labels(self):
        """System mode labels; defaults to range(n_sys) (same fix as
        env_labels: range(None) raised TypeError)."""
        if self._disc_sys_labels is None:
            return range(self.disc_domain_size[1])
        else:
            return self._disc_sys_labels

    @classmethod
    def from_pwa(cls, list_subsys=[], domain=None):
        """Wrap a single PwaSysDyn as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn(list_subsys, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Wrap a single LTI system as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn.from_lti(A, B, E, K,
                                     Uset, Wset, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)
# TODO: check each HybridSysDyn mode has PwaSysDyn dynamics
# Copyright (c) 2011, 2012, 2013 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""
Classes:
- LtiSysDyn
- PwaSysDyn
- HybridSysDyn
NO, 2 Jul 2013.
"""
import logging
logger = logging.getLogger(__name__)
import itertools
import numpy as np
import polytope as pc
try:
    from tulip.graphics import newax, quiver
# fix: `except Exception, e` is Python-2-only syntax; the `as` form
# works on Python 2.6+ and Python 3
except Exception as e:
    # graphics are optional; plotting is disabled when unavailable
    logger.error(e)
    quiver = None
class LtiSysDyn(object):
    r"""Represent discrete-time continuous dynamics:

        s[t+1] = A*s[t] + B*u[t] + E*d[t] + K

    subject to the constraints:

        u[t] \in Uset
        d[t] \in Wset
        s[t] \in domain

    where:
        - u[t] the control input
        - d[t] the disturbance input
        - s[t] the system state

    A LtiSysDyn object contains the fields:
        - A, B, E, K, (matrices)
        - Uset, Wset and domain (each a polytope.Polytope)
    as defined above.

    Note: For state-dependent bounds on the input,
        [u[t]; s[t]] \in Uset
    can be used.

    see also
    --------
    PwaSysDyn, HybridSysDyn, polytope.Polytope
    """
    def __init__(self, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        # identity comparison (`is None`) instead of `== None`:
        # Polytope need not define equality against None
        if Uset is None:
            print("Warning: Uset not given in LtiSysDyn()")
        elif not isinstance(Uset, pc.Polytope):
            raise Exception("LtiSysDyn: `Uset` has to be a Polytope")
        if domain is None:
            print("Warning: domain is not given in LtiSysDyn()")
        elif not isinstance(domain, pc.Polytope):
            raise Exception("LtiSysDyn: `domain` has to be a Polytope")
        self.A = A
        self.B = B
        # missing K defaults to a zero column vector of state dimension
        if len(K) == 0:
            if len(A) != 0:
                self.K = np.zeros([A.shape[1], 1])
            else:
                self.K = K
        else:
            # normalize K into a column vector
            self.K = K.reshape(K.size, 1)
        # missing E means no disturbance: zero E, empty Wset
        # (fix: use boolean `and` instead of bitwise `&`)
        if (len(E) == 0) and (len(A) != 0):
            self.E = np.zeros([A.shape[1], 1])
            self.Wset = pc.Polytope()
        else:
            self.E = E
            self.Wset = Wset
        self.Uset = Uset
        self.domain = domain

    def __repr__(self):
        """Readable dump of all matrices and constraint sets."""
        output = "A =\n" + str(self.A)
        output += "\nB =\n" + str(self.B)
        output += "\nE =\n" + str(self.E)
        output += "\nK =\n" + str(self.K)
        output += "\nUset =\n" + str(self.Uset)
        output += "\nWset =\n" + str(self.Wset)
        return output

    def plot(self, ax=None, color=None, show_domain=True):
        """Quiver plot of the vector field (A - I) x over the domain.

        @param color: RGB triple; when omitted a fresh random color is
            drawn per call (fix: the old default `np.random.rand(3)` was
            evaluated once at class-definition time, so all plots shared
            a single color and the RNG stream was consumed at import).
        """
        if color is None:
            color = np.random.rand(3)
        if quiver is None:
            logger.warning('pyvectorized not found. No plotting.')
            return
        (x, res) = pc.grid_region(self.domain)
        n = self.A.shape[0]
        DA = self.A - np.eye(n)
        v = DA.dot(x)
        if ax is None:
            ax, fig = newax()
        if show_domain:
            self.domain.plot(ax, color)
        quiver(x, v, ax)
        return ax
class PwaSysDyn(object):
    """PwaSysDyn class for specifying a polytopic piecewise affine system.

    A PwaSysDyn object contains the fields:
        - C{list_subsys}: list of LtiSysDyn
        - C{domain}: domain over which piecewise affine system is defined,
            type: polytope.Polytope

    For the system to be well-defined the domains of its subsystems should be
    mutually exclusive (modulo intersections with empty interior) and cover the
    domain.

    see also
    --------
    LtiSysDyn, HybridSysDyn, polytope.Polytope
    """
    def __init__(self, list_subsys=None, domain=None):
        # fix: avoid the shared mutable default argument `[]`
        if list_subsys is None:
            list_subsys = []
        if domain is None:
            print("Warning: domain not given in PwaSysDyn()")
        elif not isinstance(domain, (pc.Polytope, pc.Region)):
            raise Exception("PwaSysDyn: `domain` has to be a Polytope or Region")
        if len(list_subsys) > 0:
            uncovered_dom = domain.copy()
            n = list_subsys[0].A.shape[1]  # State space dimension
            m = list_subsys[0].B.shape[1]  # Input space dimension
            p = list_subsys[0].E.shape[1]  # Disturbance space dimension
            for subsys in list_subsys:
                uncovered_dom = uncovered_dom.diff(subsys.domain)
                if (n != subsys.A.shape[1] or m != subsys.B.shape[1] or
                        p != subsys.E.shape[1]):
                    raise Exception("PwaSysDyn: state, input, disturbance " +
                                    "dimensions have to be the same for all " +
                                    "subsystems")
            if not pc.is_empty(uncovered_dom):
                raise Exception("PwaSysDyn: subdomains must cover the domain")
            # pairwise interiors must not overlap
            for x in itertools.combinations(list_subsys, 2):
                if pc.is_fulldim(x[0].domain.intersect(x[1].domain)):
                    raise Exception("PwaSysDyn: subdomains have to be mutually" +
                                    " exclusive")
        self.list_subsys = list_subsys
        self.domain = domain

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Construct a PwaSysDyn with a single LTI subsystem over `domain`."""
        lti_sys = LtiSysDyn(A, B, E, K, Uset, Wset, domain)
        return cls([lti_sys], domain)

    def plot(self, ax=None, show_domain=True):
        """Plot every subsystem's vector field on one axes; returns the axes
        (fix: the original dropped `ax`, unlike LtiSysDyn.plot)."""
        if ax is None:
            ax, fig = newax()
        for subsystem in self.list_subsys:
            subsystem.plot(ax, color=np.random.rand(3),
                           show_domain=show_domain)
        return ax
class HybridSysDyn(object):
    """Represent hybrid systems switching between dynamic modes.

    A HybridSysDyn represents a system with switching modes
    that depend on both discrete:
        - n_env environment variables (uncontrolled)
        - n_sys system variables (controlled)

    A HybridSysDyn object contains the fields:
        - C{disc_domain_size}: 2-tuple of numbers of modes
            type: (n_env, n_sys)
        - C{env_labels}: (optional) labels for discrete environment variables
            type: list of len(n_env), default: range(n_env)
        - C{disc_sys_labels}: (optional) labels for discrete system variables
            type: list of len(n_sys), default: range(n_sys)
        - C{dynamics}: mapping mode 2-tuples to active dynamics:
            (env_label, sys_label) -> PwaSysDyn
            type: dict
            default: If no env_label or sys_label passed,
            then default to int indices (i,j) PwaSysDyn.
        - C{cts_ss}: continuous state space over which hybrid system is defined.
            type: polytope.Region
        - C{time_semantics}: TBD. Current default semantics are discrete-time.

    State s[t] and discrete environment env[t] are observed, and
    continuous input u[t] and discrete system variable m[t] are
    determined based on env[t] and s[t] (synchronously at time t).

    Note: We assume that system and environment switching modes
    are independent of one another.
    (Use LTL statement to make it not so.)

    see also
    --------
    LtiSysDyn, PwaSysDyn, polytope.Region
    """
    def __init__(self, disc_domain_size=(1, 1),
                 dynamics=None, cts_ss=None,
                 env_labels=None, disc_sys_labels=None):
        # check that the continuous domain is specified
        if cts_ss is None:
            logger.warning('continuous state space not given to HybridSysDyn')
        else:
            if not isinstance(cts_ss, (pc.Polytope, pc.Region)):
                raise Exception('HybridSysDyn: ' +
                                '`cts_ss` must be a Polytope or Region')
        self.disc_domain_size = disc_domain_size
        # If label numbers agree with disc_domain_size, then use them.
        # Otherwise, ignore the labels.
        n_env, n_sys = disc_domain_size
        self._env_labels = self._check_labels(n_env, env_labels)
        self._disc_sys_labels = self._check_labels(n_sys, disc_sys_labels)
        # Check each dynamics key is a valid mode,
        # i.e., a valid combination of env and sys labels.
        if dynamics is not None:
            modes = [(a, b) for a in self.env_labels
                     for b in self.disc_sys_labels]
            undefined_modes = set(dynamics.keys()).difference(modes)
            if undefined_modes:
                msg = 'HybridSysDyn: `dynamics` keys inconsistent'
                msg += ' with discrete mode labels.\n'
                msg += 'Undefined modes:\n' + str(undefined_modes)
                raise ValueError(msg)
            missing_modes = set(modes).difference(dynamics.keys())
            if missing_modes:
                msg = 'Missing the modes:\n' + str(missing_modes)
                msg += '\n Make sure you did not forget any modes,\n'
                msg += 'otherwise this is fine.'
                logger.warning(msg)
            # each mode's dynamics must be a PwaSysDyn
            # (fix: the original error message referenced the list-comp
            # variable `sys` after the comprehension, a NameError on
            # Python 3 and the wrong value on Python 2)
            bad_types = set(type(v) for v in dynamics.values()
                            if not isinstance(v, PwaSysDyn))
            if bad_types:
                msg = 'For each mode dynamics must be PwaSysDyn.\n'
                msg += 'Got instead: ' + str(bad_types)
                raise Exception(msg)
        self.dynamics = dynamics
        self.cts_ss = cts_ss

    def _check_labels(self, n, labels):
        """Return `labels` if it is a sized collection of length `n`,
        otherwise warn and return None (integer labels will be used)."""
        # don't complain for default
        if labels is None:
            return None
        try:
            # is len correct ?
            if len(labels) != n:
                msg = 'number of environment labels is inconsistent'
                msg += ' with discrete domain size.\n'
                msg += 'Ignoring given environment labels.\n'
                msg += 'Defaulting to integer labels.'
                logger.warning(msg)
                return None
        except TypeError:
            # fix: the original concatenated type(labels) (a type object)
            # with a str, which itself raised TypeError inside the handler;
            # also narrow the bare `except` to the len() failure
            logger.warning('Environment labels of type %s have no len()',
                           type(labels))
            return None
        return labels

    @property
    def env_labels(self):
        """Environment mode labels; defaults to range(n_env).

        Fix: the original returned range(self._env_labels) when the
        field was None, i.e. range(None), which raises TypeError.
        """
        if self._env_labels is None:
            return range(self.disc_domain_size[0])
        else:
            return self._env_labels

    @property
    def disc_sys_labels(self):
        """System mode labels; defaults to range(n_sys) (same fix as
        env_labels: range(None) raised TypeError)."""
        if self._disc_sys_labels is None:
            return range(self.disc_domain_size[1])
        else:
            return self._disc_sys_labels

    @classmethod
    def from_pwa(cls, list_subsys=[], domain=None):
        """Wrap a single PwaSysDyn as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn(list_subsys, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Wrap a single LTI system as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn.from_lti(A, B, E, K,
                                     Uset, Wset, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)
# ---- file boundary (concatenation artifact) ----
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import imp, sys
import os
import util
import build
import package
import shutil
import types
import inspect
import lookaside
import rpmhelper
import gzip
# Ordered (name, value) macro pairs; interpolated in order by
# Macros.addMacros, so later entries may reference earlier ones.
baseMacros = (
    # Note that these macros cannot be represented as a dictionary,
    # because the items need to be added in order so that they will
    # be properly interpolated.
    #
    # paths
    ('prefix' , '/usr'),
    ('sysconfdir' , '/etc'),
    ('lib' , 'lib'), # may be overridden with 'lib64'
    ('exec_prefix' , '%(prefix)s'),
    ('bindir' , '%(exec_prefix)s/bin'),
    ('sbindir' , '%(exec_prefix)s/sbin'),
    ('libdir' , '%(exec_prefix)s/%(lib)s'),
    ('libexecdir' , '%(exec_prefix)s/libexec'),
    ('localstatedir' , '%(prefix)s/var'),
    ('sharedstatedir' , '%(prefix)s/com'),
    ('includedir' , '%(prefix)s/include'),
    ('datadir' , '%(prefix)s/share'),
    ('mandir' , '%(datadir)s/man'),
    ('infodir' , '%(datadir)s/info'),
    ('docdir' , '%(datadir)s/doc'),
    ('develdocdir' , '%(datadir)s/develdoc'),
    # arguments/flags (empty ones are for documentation; non-existant = empty)
    ('cflags' , '-O2'),
    ('mflags' , ''),
    ('parallelmflags' , ''),
    ('sysroot' , ''),
)
# Overriding macros layered on top of baseMacros for cross-compiled builds.
crossMacros = (
    # set crossdir from cook, directly or indirectly, before adding the rest
    #('crossdir' , 'cross-target'),
    ('prefix' , '/opt/%(crossdir)s'),
    ('sysroot' , '%(prefix)s/sys-root'),
    ('headerpath' , '%(sysroot)s/usr/include')
)
# XXX TEMPORARY - remove directories such as /usr/include from this
# list when filesystem package is in place.
# Each entry is (subpackage name, tuple of path regexps); consumed by
# Recipe.packages() via package.PackageSpec.
baseAutoSpec = (
    # automatic subpackage names and sets of regexps that define them
    # cannot be a dictionary because it is ordered; first match wins
    ('devel',
     ('\.a',
      '\.so',
      '.*/include/.*\.h',
      '/usr/include/',
      '/usr/include',
      '/usr/share/man/man(2|3)/',
      '/usr/share/man/man(2|3)',
      '/usr/share/develdoc/',
      '/usr/share/develdoc',
      '/usr/share/aclocal/',
      '/usr/share/aclocal')),
    # bug fix: the pattern set must be a 1-tuple -- a bare parenthesized
    # string is just a string and would be iterated character by
    # character (compare the 'runtime' entry below)
    ('lib', ('.*/lib/.*\.so\..*',)),
    ('doc', ('/usr/share/(doc|man|info)/',
             '/usr/share/(doc|man|info)')),
    ('locale', ('/usr/share/locale/',
                '/usr/share/locale')),
    ('runtime', ('.*',)),
)
class Macros(dict):
    """Ordered macro table with %-interpolation at assignment time.

    Values are %(name)s-interpolated against the current contents when
    they are set, so macros must be added in dependency order (see
    baseMacros).  Missing keys read back as the empty string, which also
    makes undefined macros interpolate to ''.
    NOTE(review): a value containing a literal '%' not part of a
    %(name)s reference would raise at assignment -- verify callers.
    """
    def __setitem__(self, name, value):
        # interpolate eagerly: later macros can reference earlier ones
        dict.__setitem__(self, name, value % self)
    # we want keys that don't exist to default to empty strings
    def __getitem__(self, name):
        # fix: dict.has_key was removed in Python 3; `in` is equivalent
        # on Python 2 as well
        if name in self:
            return dict.__getitem__(self, name)
        return ''
    def addMacros(self, *macroSet):
        """Add macros in order; accepts a tuple of (name, value) pairs
        (like baseMacros) or a single (name, value) pair."""
        # must be in order; later macros in the set can depend on
        # earlier ones
        # for ease of use, we allow passing in a tuple of tuples, or
        # a simple set of tuples
        if len(macroSet) == 1 and type(macroSet[0]) is tuple:
            # we were passed a tuple of tuples (like baseMacros)
            macroSet = macroSet[0]
        if len(macroSet) > 0 and type(macroSet[0]) is not tuple:
            # we were passed something like ('foo', 'bar')
            macroSet = (macroSet,)
        for key, value in macroSet:
            self[key] = value
    def copy(self):
        """Return an independent Macros with the same (already
        interpolated) contents."""
        new = Macros()
        new.update(self)
        return new
def flatten(lst):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list values (including tuples and strings) are returned wrapped
    as a one-element list.
    (Fixes: types.ListType is Python-2-only; the parameter shadowed the
    builtin `list`.)
    """
    if not isinstance(lst, list):
        return [lst]
    if not lst:
        return lst
    return flatten(lst[0]) + flatten(lst[1:])
def extractSourceFromRPM(rpm, targetfile):
    """Extract the archive member named like `targetfile` from the cpio
    payload of `rpm` into targetfile's directory.

    Streams the gzip-decompressed RPM payload through a pipe into a
    forked /bin/cpio child; raises IOError if cpio dies, exits non-zero,
    or the target file does not appear.
    """
    filename = os.path.basename(targetfile)
    directory = os.path.dirname(targetfile)
    # Python 2 file(); seek past the RPM lead and headers to the payload
    r = file(rpm, 'r')
    rpmhelper.seekToData(r)
    gz = gzip.GzipFile(fileobj=r)
    (rpipe, wpipe) = os.pipe()
    pid = os.fork()
    if not pid:
        # child: payload arrives on stdin; unpack only `filename`
        os.dup2(rpipe, 0)
        os.chdir(directory)
        os.execl('/bin/cpio', 'cpio', '-ium', filename)
        # only reached if exec itself fails
        sys.exit(1)
    # parent: pump the decompressed payload into the child
    # NOTE(review): neither side closes its unused pipe end (parent keeps
    # rpipe, child inherited wpipe); presumably cpio stops at the archive
    # trailer so EOF is never needed -- verify.
    while 1:
        buf = gz.read(4096)
        if not buf:
            break
        os.write(wpipe, buf)
    os.close(wpipe)
    (pid, status) = os.waitpid(pid, 0)
    if not os.WIFEXITED(status):
        raise IOError, 'cpio died extracting %s from RPM %s' \
            %(filename, os.path.basename(rpm))
    if os.WEXITSTATUS(status):
        raise IOError, 'cpio returned failure %d extracting %s from RPM %s' \
            %(os.WEXITSTATUS(status), filename, os.path.basename(rpm))
    if not os.path.exists(targetfile):
        raise IOError, 'failed to extract source %s from RPM %s' \
            %(filename, os.path.basename(rpm))
class RecipeLoader(types.DictionaryType):
    """Load a recipe file as a fresh module and collect its Recipe
    subclasses.

    Maps class name -> class for every Recipe subclass defined in the
    file that has a `name` attribute and no `ignore` marker.  The file
    is executed inside a synthetic module registered in sys.modules.
    """
    def __init__(self, file):
        if file[0] != "/":
            raise IOError, "recipe file names must be absolute paths"
        # module name derived from the basename; dots would split the
        # name into packages, so replace them
        self.file = os.path.basename(file).replace('.', '-')
        self.module = imp.new_module(self.file)
        sys.modules[self.file] = self.module
        f = open(file)
        # seed the recipe module's namespace before executing it
        exec 'from recipe import Recipe' in self.module.__dict__
        exec 'from recipe import loadRecipe' in self.module.__dict__
        exec 'import build, os, package, sys, util' in self.module.__dict__
        # NOTE(review): this re-assigns the hook only when it is already
        # util.excepthook -- looks inverted or redundant; verify intent
        if sys.excepthook == util.excepthook:
            exec 'sys.excepthook = util.excepthook' in self.module.__dict__
        exec 'filename = "%s"' %(file) in self.module.__dict__
        code = compile(f.read(), file, 'exec')
        exec code in self.module.__dict__
        for (name, obj) in self.module.__dict__.items():
            if type(obj) == types.ClassType:
                # make sure the class is derived from Recipe
                # and has a name
                if obj.__dict__.has_key('ignore'):
                    continue
                if issubclass(obj, Recipe) and obj.__dict__.has_key('name'):
                    obj.__dict__['filename'] = file
                    self[name] = obj
    def __del__(self):
        # drop the synthetic module when the loader goes away
        try:
            del sys.modules[self.file]
        except:
            pass
# XXX this should be extended to load a recipe from srs
def loadRecipe(file):
    """Load recipe classes from `file` into the CALLER's global namespace.

    Relative paths are resolved against the directory of the calling
    recipe file (the caller's global `filename`).  Loaded classes are
    marked `ignore` so RecipeLoader skips them when they appear as
    parents in other recipe files.
    """
    # reach into the caller's frame to get (and later populate) its globals
    callerGlobals = inspect.stack()[1][0].f_globals
    if file[0] != '/':
        recipepath = os.path.dirname(callerGlobals['filename'])
        file = recipepath + '/' + file
    recipes = RecipeLoader(file)
    for name, recipe in recipes.items():
        # XXX hack to hide parent recipes
        recipe.ignore = 1
        callerGlobals[name] = recipe
    # stash a reference to the module in the namespace
    # of the recipe that loaded it, or else it will be destroyed
    callerGlobals[os.path.basename(file).replace('.', '-')] = recipes
#def bootstrapRecipe(file, class, buildRequires):
# loadRecipe(file) # XXX not necessary if we put boostraps in main files
# exec """class Bootstrap%s(%s):
# buildRequires = %s
# name = "bootstrap-%s"
# def setup(self):
# FIXMEcrossmacros(self.recipeCfg)
# FIXMEcrossenv
# FIXMEself.mainDir(class, self.version)
# %s.setup(self)
# """ %(class, class, buildRequires.repr(), class, class)
class Recipe:
    """Base class for package build recipes.

    Tracks sources (tarballs, patches, plain files) and their detached
    GPG signatures, and drives the unpack/build/install/package steps.
    Subclasses must provide `name` and `version` class attributes.
    """
    # class-level defaults; only read here, never mutated in place
    buildRequires = []
    runRequires = []

    def addSignature(self, file, keyid):
        """Record a detached .sig/.sign signature for `file`, if one is
        found in the lookaside cache and a keyid was given."""
        # do not search unless a gpg keyid is specified
        if not keyid:
            return
        gpg = '%s.sig' %(file)
        c = lookaside.searchAll(self.cfg, self.laReposCache, gpg,
                                self.name, self.srcdirs)
        if not c:
            gpg = '%s.sign' %(file)
            c = lookaside.searchAll(self.cfg, self.laReposCache,
                                    gpg, self.name, self.srcdirs)
        if c:
            # fix: dict.has_key was removed in Python 3; `in` works in both
            if file not in self.signatures:
                self.signatures[file] = []
            self.signatures[file].append((gpg, c, keyid))

    def addTarball(self, file, extractDir='', keyid=None):
        """Register a tarball to unpack into `extractDir` (relative to
        the build directory)."""
        self.tarballs.append((file, extractDir))
        self.addSignature(file, keyid)

    def addTarballFromRPM(self, rpm, file, extractDir='', keyid=None):
        """Register a tarball that must first be extracted from `rpm`
        into the lookaside cache."""
        f = lookaside.searchAll(self.cfg, self.laReposCache,
                                os.path.basename(file), self.name, self.srcdirs)
        if not f:
            r = lookaside.findAll(self.cfg, self.laReposCache, rpm,
                                  self.name, self.srcdirs)
            c = lookaside.createCacheName(self.cfg, file, self.name)
            extractSourceFromRPM(r, c)
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
        self.tarballs.append((file, extractDir))
        self.addSignature(f, keyid)

    def addPatch(self, file, level='1', backup='', keyid=None):
        """Register a patch applied with -p`level`; `backup` is a suffix
        for patch's backup files."""
        self.patches.append((file, level, backup))
        self.addSignature(file, keyid)

    def addSource(self, file, keyid=None):
        """Register a plain file copied into the main build directory."""
        self.sources.append(file)
        self.addSignature(file, keyid)

    def allSources(self):
        """Return every file this recipe needs: tarballs, patches,
        signature files, then plain sources."""
        sources = []
        for (tarball, extractdir) in self.tarballs:
            sources.append(tarball)
        for (patch, level, backup) in self.patches:
            sources.append(patch)
        for signaturelist in self.signatures.values():
            for (gpg, cached, keyid) in signaturelist:
                sources.append(gpg)
        return sources + self.sources

    def mainDir(self, new = None):
        """Get, or when `new` is truthy set, the main build directory name."""
        if new:
            self.theMainDir = new
        return self.theMainDir

    def nameVer(self):
        """Return the canonical 'name-version' string."""
        return self.name + "-" + self.version

    def cleanup(self, builddir, destdir):
        """Remove the build and destination trees."""
        shutil.rmtree(builddir)
        shutil.rmtree(destdir)

    def checkSignatures(self, filepath, file):
        """Verify every recorded signature for `file`; raise RuntimeError
        if verification still fails after fetching the key."""
        if file not in self.signatures:
            return
        for (gpg, signature, keyid) in self.signatures[file]:
            # FIXME: our own keyring
            if os.system("gpg --no-secmem-warning --verify %s %s"
                         %(signature, filepath)):
                # FIXME: only do this if key missing, this is cheap for now
                # NOTE(review): '0x %s' puts a space between 0x and the key
                # id -- looks like a typo; verify against gpg usage
                os.system("gpg --keyserver pgp.mit.edu --recv-keys 0x %s"
                          %(keyid))
                if os.system("gpg --no-secmem-warning --verify %s %s"
                             %(signature, filepath)):
                    # fix: `raise E, msg` is Python-2-only; the call form
                    # is equivalent there and valid on Python 3
                    raise RuntimeError("GPG signature %s failed" %(signature))

    def unpackSources(self, builddir):
        """Recreate `builddir`, unpack tarballs, copy sources, and apply
        patches into the main directory."""
        if os.path.exists(builddir):
            shutil.rmtree(builddir)
        util.mkdirChain(builddir)
        for (file, extractdir) in self.tarballs:
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
            self.checkSignatures(f, file)
            # choose tar decompression flags from the file extension
            if f.endswith(".bz2"):
                tarflags = "-jxf"
            elif f.endswith(".gz") or f.endswith(".tgz"):
                tarflags = "-zxf"
            else:
                raise RuntimeError("unknown archive compression")
            if extractdir:
                destdir = '%s/%s' % (builddir, extractdir)
                util.execute("mkdir -p %s" % destdir)
            else:
                destdir = builddir
            util.execute("tar -C %s %s %s" % (destdir, tarflags, f))
        for file in self.sources:
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
            destDir = builddir + "/" + self.theMainDir
            util.mkdirChain(destDir)
            shutil.copyfile(f, destDir + "/" + file)
        for (file, level, backup) in self.patches:
            # XXX handle .gz/.bz2 patch files
            f = util.findFile(file, self.srcdirs)
            destDir = builddir + "/" + self.theMainDir
            if backup:
                backup = '-b -z %s' % backup
            util.execute('patch -d %s -p%s %s < %s' %(destDir, level, backup, f))

    def doBuild(self, buildpath):
        """Run self.build (command string, tuple of strings/build objects,
        or a single build object) with %(builddir)s in the macro set."""
        builddir = buildpath + "/" + self.mainDir()
        self.macros['builddir'] = builddir
        if self.build is None:
            pass
        elif type(self.build) is str:
            util.execute(self.build %self.macros)
        elif type(self.build) is tuple:
            for bld in self.build:
                if type(bld) is str:
                    util.execute(bld %self.macros)
                else:
                    bld.doBuild(self.macros)
        else:
            self.build.doBuild(self.macros)

    def doInstall(self, buildpath, root):
        """Run self.install with %(builddir)s and %(destdir)s macros set."""
        builddir = buildpath + "/" + self.mainDir()
        self.addMacros(('builddir', builddir),
                       ('destdir', root))
        if self.install is None:
            pass
        elif type(self.install) is str:
            util.execute(self.install %self.macros)
        elif type(self.install) is tuple:
            for inst in self.install:
                if type(inst) is str:
                    util.execute(inst %self.macros)
                else:
                    inst.doInstall(self.macros)
        else:
            self.install.doInstall(self.macros)

    def packages(self, root):
        """Split the installed tree under `root` into automatic
        subpackages according to baseAutoSpec."""
        self.autoSpecList = []
        for spec in baseAutoSpec:
            self.autoSpecList.append(package.PackageSpec(spec[0], spec[1]))
        # "None" will be replaced by explicit subpackage list
        self.packageSpecSet = package.PackageSpecSet(self.autoSpecList, None)
        self.packageSet = package.Auto(self.name, root, self.packageSpecSet)

    def getPackageSet(self):
        """Return the package set produced by packages()."""
        return self.packageSet

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros=()):
        self.tarballs = []
        self.patches = []
        self.sources = []
        self.signatures = {}
        self.cfg = cfg
        self.laReposCache = laReposCache
        self.srcdirs = srcdirs
        self.theMainDir = self.name + "-" + self.version
        # default to plain make / make install; recipes override
        self.build = build.Make()
        self.install = build.MakeInstall()
        self.macros = Macros()
        self.addMacros = self.macros.addMacros
        self.addMacros(baseMacros)
        self.macros['name'] = self.name
        self.macros['version'] = self.version
        if extraMacros:
            self.addMacros(extraMacros)
# NOTE: "remove unused code" -- stray commit-message line from concatenation
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import imp, sys
import os
import util
import build
import package
import shutil
import types
import inspect
import lookaside
import rpmhelper
import gzip
baseMacros = (
    # Note that these macros cannot be represented as a dictionary,
    # because the items need to be added in order so that they will
    # be properly interpolated.
    #
    # paths
    ('prefix'           , '/usr'),
    ('sysconfdir'       , '/etc'),
    ('lib'              , 'lib'),  # may be overridden with 'lib64'
    ('exec_prefix'      , '%(prefix)s'),
    ('bindir'           , '%(exec_prefix)s/bin'),
    ('sbindir'          , '%(exec_prefix)s/sbin'),
    ('libdir'           , '%(exec_prefix)s/%(lib)s'),
    ('libexecdir'       , '%(exec_prefix)s/libexec'),
    ('localstatedir'    , '%(prefix)s/var'),
    ('sharedstatedir'   , '%(prefix)s/com'),
    ('includedir'       , '%(prefix)s/include'),
    ('datadir'          , '%(prefix)s/share'),
    ('mandir'           , '%(datadir)s/man'),
    ('infodir'          , '%(datadir)s/info'),
    ('docdir'           , '%(datadir)s/doc'),
    ('develdocdir'      , '%(datadir)s/develdoc'),
    # arguments/flags (empty ones are for documentation; non-existant = empty)
    ('cflags'           , '-O2'),
    ('mflags'           , ''),
    ('parallelmflags'   , ''),
    ('sysroot'          , ''),
)

crossMacros = (
    # set crossdir from cook, directly or indirectly, before adding the rest
    #('crossdir'        , 'cross-target'),
    ('prefix'           , '/opt/%(crossdir)s'),
    ('sysroot'          , '%(prefix)s/sys-root'),
    ('headerpath'       , '%(sysroot)s/usr/include')
)

# XXX TEMPORARY - remove directories such as /usr/include from this
# list when filesystem package is in place.
baseAutoSpec = (
    # automatic subpackage names and sets of regexps that define them
    # cannot be a dictionary because it is ordered; first match wins
    ('devel', (r'\.a',
               r'\.so',
               r'.*/include/.*\.h',
               '/usr/include/',
               '/usr/include',
               '/usr/share/man/man(2|3)/',
               '/usr/share/man/man(2|3)',
               '/usr/share/develdoc/',
               '/usr/share/develdoc',
               '/usr/share/aclocal/',
               '/usr/share/aclocal')),
    # BUG FIX: this entry was a bare parenthesized string (missing
    # trailing comma), i.e. a str rather than a 1-tuple like every
    # other spec here; iterating it yields single characters.
    ('lib', (r'.*/lib/.*\.so\..*',)),
    ('doc', ('/usr/share/(doc|man|info)/',
             '/usr/share/(doc|man|info)')),
    ('locale', ('/usr/share/locale/',
                '/usr/share/locale')),
    ('runtime', ('.*',)),
)
class Macros(dict):
    """Ordered macro dictionary with %-interpolation at assignment time.

    Values are interpolated against the macros already present the
    moment they are stored, so insertion order matters (later macros
    may reference earlier ones).  Missing keys read as ''.
    """
    def __setitem__(self, name, value):
        # interpolate immediately; missing references expand to ''
        dict.__setitem__(self, name, value % self)

    # we want keys that don't exist to default to empty strings
    def __getitem__(self, name):
        # `in` instead of the Python-2-only has_key(); works in both
        if name in self:
            return dict.__getitem__(self, name)
        return ''

    def addMacros(self, *macroSet):
        """Add (name, value) pairs in order.

        Accepts either a single tuple of pairs (like baseMacros) or the
        pairs as individual arguments; a lone ('name', 'value') pair is
        also accepted for convenience.
        """
        # must be in order; later macros in the set can depend on
        # earlier ones
        if len(macroSet) == 1 and type(macroSet[0]) is tuple:
            # we were passed a tuple of tuples (like baseMacros)
            macroSet = macroSet[0]
        if len(macroSet) > 0 and type(macroSet[0]) is not tuple:
            # we were passed something like ('foo', 'bar')
            macroSet = (macroSet,)
        for key, value in macroSet:
            self[key] = value

    def copy(self):
        """Return a shallow copy that is itself a Macros instance."""
        new = Macros()
        # dict.update() copies the already-interpolated values verbatim
        new.update(self)
        return new
def extractSourceFromRPM(rpm, targetfile):
    """Extract the file named by *targetfile* from *rpm*'s payload.

    Seeks past the RPM lead/headers, gunzips the cpio payload
    in-process, and streams it over a pipe to a forked /bin/cpio child
    that writes the file into targetfile's directory.  Raises IOError
    if cpio fails or the expected file does not appear.
    """
    filename = os.path.basename(targetfile)
    directory = os.path.dirname(targetfile)
    r = file(rpm, 'r')
    # position the stream at the start of the compressed payload
    rpmhelper.seekToData(r)
    gz = gzip.GzipFile(fileobj=r)
    (rpipe, wpipe) = os.pipe()
    pid = os.fork()
    if not pid:
        # child: cpio reads the decompressed stream from stdin
        os.dup2(rpipe, 0)
        os.chdir(directory)
        os.execl('/bin/cpio', 'cpio', '-ium', filename)
        # only reached if execl itself fails
        sys.exit(1)
    # parent: pump decompressed payload into the child
    while 1:
        buf = gz.read(4096)
        if not buf:
            break
        os.write(wpipe, buf)
    os.close(wpipe)
    (pid, status) = os.waitpid(pid, 0)
    if not os.WIFEXITED(status):
        raise IOError, 'cpio died extracting %s from RPM %s' \
                    %(filename, os.path.basename(rpm))
    if os.WEXITSTATUS(status):
        raise IOError, 'cpio returned failure %d extracting %s from RPM %s' \
                    %(os.WEXITSTATUS(status), filename, os.path.basename(rpm))
    if not os.path.exists(targetfile):
        raise IOError, 'failed to extract source %s from RPM %s' \
                    %(filename, os.path.basename(rpm))
class RecipeLoader(types.DictionaryType):
    """Load a recipe file as a synthetic module and collect the Recipe
    subclasses it defines, mapping class name -> class.

    The recipe source is exec'd inside a fresh module registered in
    sys.modules; classes carrying an 'ignore' attribute (parent recipes
    pulled in via loadRecipe) are skipped.
    """
    def __init__(self, file):
        if file[0] != "/":
            raise IOError, "recipe file names must be absolute paths"
        # module key: basename with '.' mangled into '-'
        self.file = os.path.basename(file).replace('.', '-')
        self.module = imp.new_module(self.file)
        sys.modules[self.file] = self.module
        f = open(file)
        # seed the recipe's namespace with the standard helpers
        exec 'from recipe import Recipe' in self.module.__dict__
        exec 'from recipe import loadRecipe' in self.module.__dict__
        exec 'import build, os, package, sys, util' in self.module.__dict__
        # NOTE(review): installs util.excepthook in the module only when
        # it is *already* the global hook -- verify that '==' (rather
        # than '!=') is the intended condition here.
        if sys.excepthook == util.excepthook:
            exec 'sys.excepthook = util.excepthook' in self.module.__dict__
        # make the recipe's own path available as `filename`
        exec 'filename = "%s"' %(file) in self.module.__dict__
        code = compile(f.read(), file, 'exec')
        exec code in self.module.__dict__
        for (name, obj) in self.module.__dict__.items():
            if type(obj) == types.ClassType:
                # make sure the class is derived from Recipe
                # and has a name
                if obj.__dict__.has_key('ignore'):
                    continue
                if issubclass(obj, Recipe) and obj.__dict__.has_key('name'):
                    obj.__dict__['filename'] = file
                    self[name] = obj

    def __del__(self):
        # drop the synthetic module when the loader goes away
        try:
            del sys.modules[self.file]
        except:
            pass
# XXX this should be extended to load a recipe from srs
def loadRecipe(file):
    """Load the recipe classes defined in *file* into the caller's
    module globals.

    Relative paths are resolved against the directory of the recipe
    performing the load.  Loaded classes get ``ignore = 1`` so that
    RecipeLoader skips them when they show up again as parents.
    """
    caller = inspect.stack()[1][0].f_globals
    if file[0] != '/':
        file = os.path.dirname(caller['filename']) + '/' + file
    loaded = RecipeLoader(file)
    for recipeName, recipeClass in loaded.items():
        # XXX hack to hide parent recipes
        recipeClass.ignore = 1
        caller[recipeName] = recipeClass
    # stash a reference to the module in the namespace of the recipe
    # that loaded it, or else it will be destroyed
    caller[os.path.basename(file).replace('.', '-')] = loaded
#def bootstrapRecipe(file, class, buildRequires):
# loadRecipe(file) # XXX not necessary if we put boostraps in main files
# exec """class Bootstrap%s(%s):
# buildRequires = %s
# name = "bootstrap-%s"
# def setup(self):
# FIXMEcrossmacros(self.recipeCfg)
# FIXMEcrossenv
# FIXMEself.mainDir(class, self.version)
# %s.setup(self)
# """ %(class, class, buildRequires.repr(), class, class)
class Recipe:
    """Base class for source build recipes.

    A recipe records where a package's sources, patches and signatures
    come from (addTarball/addPatch/addSource/addSignature), how to
    unpack and verify them (unpackSources/checkSignatures), how to
    build and install them (doBuild/doInstall), and how the installed
    tree is split into automatic subpackages (packages).

    Concrete subclasses are expected to provide ``name`` and
    ``version`` attributes before __init__ runs.
    """

    buildRequires = []
    runRequires = []

    def addSignature(self, file, keyid):
        """Record a detached GPG signature (.sig or .sign) for *file*.

        Does nothing unless *keyid* is given; the signature file is
        located through the lookaside cache / source directories.
        """
        # do not search unless a gpg keyid is specified
        if not keyid:
            return
        gpg = '%s.sig' %(file)
        c = lookaside.searchAll(self.cfg, self.laReposCache, gpg,
                                self.name, self.srcdirs)
        if not c:
            gpg = '%s.sign' %(file)
            c = lookaside.searchAll(self.cfg, self.laReposCache,
                                    gpg, self.name, self.srcdirs)
        if c:
            if file not in self.signatures:
                self.signatures[file] = []
            self.signatures[file].append((gpg, c, keyid))

    def addTarball(self, file, extractDir='', keyid=None):
        """Add a tarball to unpack (optionally into *extractDir*)."""
        self.tarballs.append((file, extractDir))
        self.addSignature(file, keyid)

    def addTarballFromRPM(self, rpm, file, extractDir='', keyid=None):
        """Add a tarball that lives inside a source RPM, extracting it
        into the lookaside cache first when it is not cached yet."""
        f = lookaside.searchAll(self.cfg, self.laReposCache,
                                os.path.basename(file), self.name, self.srcdirs)
        if not f:
            r = lookaside.findAll(self.cfg, self.laReposCache, rpm,
                                  self.name, self.srcdirs)
            c = lookaside.createCacheName(self.cfg, file, self.name)
            extractSourceFromRPM(r, c)
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
        self.tarballs.append((file, extractDir))
        self.addSignature(f, keyid)

    def addPatch(self, file, level='1', backup='', keyid=None):
        """Add a patch applied with -p<level>; *backup* is a backup
        suffix passed to patch when non-empty."""
        self.patches.append((file, level, backup))
        self.addSignature(file, keyid)

    def addSource(self, file, keyid=None):
        """Add a plain source file copied into the unpacked tree."""
        self.sources.append(file)
        self.addSignature(file, keyid)

    def allSources(self):
        """Return every file this recipe needs: tarballs, patches,
        signature files, then plain sources."""
        sources = []
        for (tarball, extractdir) in self.tarballs:
            sources.append(tarball)
        for (patch, level, backup) in self.patches:
            sources.append(patch)
        for signaturelist in self.signatures.values():
            for (gpg, cached, keyid) in signaturelist:
                sources.append(gpg)
        return sources + self.sources

    def mainDir(self, new = None):
        """Return (and optionally set, when *new* is given) the
        top-level directory name inside the build tree."""
        if new:
            self.theMainDir = new
        return self.theMainDir

    def nameVer(self):
        """Return the canonical "<name>-<version>" string."""
        return self.name + "-" + self.version

    def cleanup(self, builddir, destdir):
        """Remove the build and install trees."""
        shutil.rmtree(builddir)
        shutil.rmtree(destdir)

    def checkSignatures(self, filepath, file):
        """Verify all recorded GPG signatures for *file* against the
        copy at *filepath*; raises RuntimeError when verification
        still fails after trying to fetch the key."""
        if file not in self.signatures:
            return
        for (gpg, signature, keyid) in self.signatures[file]:
            # FIXME: our own keyring
            if os.system("gpg --no-secmem-warning --verify %s %s"
                         %(signature, filepath)):
                # FIXME: only do this if key missing, this is cheap for now
                # BUG FIX: the key id must follow "0x" directly; the old
                # format string had a space ("0x %s"), producing an
                # invalid key argument for gpg.
                os.system("gpg --keyserver pgp.mit.edu --recv-keys 0x%s"
                          %(keyid))
                if os.system("gpg --no-secmem-warning --verify %s %s"
                             %(signature, filepath)):
                    raise RuntimeError("GPG signature %s failed" %(signature))

    def unpackSources(self, builddir):
        """Recreate *builddir*, unpack all tarballs into it, copy plain
        sources into the main directory, and apply all patches."""
        if os.path.exists(builddir):
            shutil.rmtree(builddir)
        util.mkdirChain(builddir)
        for (file, extractdir) in self.tarballs:
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
            self.checkSignatures(f, file)
            # pick tar's decompression flag from the file extension
            if f.endswith(".bz2"):
                tarflags = "-jxf"
            elif f.endswith(".gz") or f.endswith(".tgz"):
                tarflags = "-zxf"
            else:
                raise RuntimeError("unknown archive compression")
            if extractdir:
                destdir = '%s/%s' % (builddir, extractdir)
                util.execute("mkdir -p %s" % destdir)
            else:
                destdir = builddir
            util.execute("tar -C %s %s %s" % (destdir, tarflags, f))
        for file in self.sources:
            f = lookaside.findAll(self.cfg, self.laReposCache, file,
                                  self.name, self.srcdirs)
            destDir = builddir + "/" + self.theMainDir
            util.mkdirChain(destDir)
            shutil.copyfile(f, destDir + "/" + file)
        for (file, level, backup) in self.patches:
            # XXX handle .gz/.bz2 patch files
            f = util.findFile(file, self.srcdirs)
            destDir = builddir + "/" + self.theMainDir
            if backup:
                backup = '-b -z %s' % backup
            util.execute('patch -d %s -p%s %s < %s' %(destDir, level, backup, f))

    def doBuild(self, buildpath):
        """Run the build step inside <buildpath>/<mainDir>.

        self.build may be None (skip), a %-macro shell string, an
        object with doBuild(macros), or a tuple mixing both forms.
        """
        builddir = buildpath + "/" + self.mainDir()
        self.macros['builddir'] = builddir
        if self.build is None:
            pass
        elif type(self.build) is str:
            util.execute(self.build %self.macros)
        elif type(self.build) is tuple:
            for bld in self.build:
                if type(bld) is str:
                    util.execute(bld %self.macros)
                else:
                    bld.doBuild(self.macros)
        else:
            self.build.doBuild(self.macros)

    def doInstall(self, buildpath, root):
        """Run the install step into *root*; accepts the same forms for
        self.install that doBuild accepts for self.build."""
        builddir = buildpath + "/" + self.mainDir()
        self.addMacros(('builddir', builddir),
                       ('destdir', root))
        if self.install is None:
            pass
        elif type(self.install) is str:
            util.execute(self.install %self.macros)
        elif type(self.install) is tuple:
            for inst in self.install:
                if type(inst) is str:
                    util.execute(inst %self.macros)
                else:
                    inst.doInstall(self.macros)
        else:
            self.install.doInstall(self.macros)

    def packages(self, root):
        """Build the automatic subpackage set for the tree under *root*."""
        self.autoSpecList = []
        for spec in baseAutoSpec:
            self.autoSpecList.append(package.PackageSpec(spec[0], spec[1]))
        # "None" will be replaced by explicit subpackage list
        self.packageSpecSet = package.PackageSpecSet(self.autoSpecList, None)
        self.packageSet = package.Auto(self.name, root, self.packageSpecSet)

    def getPackageSet(self):
        """Return the package set computed by packages()."""
        return self.packageSet

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros=()):
        self.tarballs = []      # (filename, extractDir) pairs
        self.patches = []       # (filename, patch level, backup suffix)
        self.sources = []       # plain files copied into the source tree
        self.signatures = {}    # filename -> [(sigfile, cached path, keyid)]
        self.cfg = cfg
        self.laReposCache = laReposCache
        self.srcdirs = srcdirs
        self.theMainDir = self.name + "-" + self.version
        # default behaviour: plain "make" / "make install"
        self.build = build.Make()
        self.install = build.MakeInstall()
        self.macros = Macros()
        self.addMacros = self.macros.addMacros
        self.addMacros(baseMacros)
        self.macros['name'] = self.name
        self.macros['version'] = self.version
        if extraMacros:
            self.addMacros(extraMacros)
|
"""便利関数とか"""
def generate_token():
# まだスタブ
return 'N0IHdzb2MiLCJzdWIiOiJBQkNERUYiLCJhdWQiOiJJSktMTU4iLCJ'
# Commit message: "トークンを生成する関数" (function that generates a token)
"""便利関数とか"""
import base64
import uuid
def generate_token():
    """Generate an access token.

    A random UUID4 supplies 16 bytes of entropy, which are encoded as
    standard base64 (a 24-character string ending in '==').
    """
    raw = uuid.uuid4().bytes
    return base64.b64encode(raw).decode()
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.states.pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import glob
import shutil
import sys
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import (
destructiveTest,
requires_system_grains,
with_system_user,
skip_if_not_root,
with_tempdir
)
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.versions
import salt.utils.win_dacl
import salt.utils.win_functions
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
class VirtualEnv(object):
    '''
    Context manager that creates a virtualenv on entry (via the test
    case's ``virtualenv.create`` salt function, asserting a zero
    retcode) and removes the directory again on exit.
    '''
    def __init__(self, test, venv_dir):
        self.test = test
        self.venv_dir = venv_dir

    def __enter__(self):
        result = self.test.run_function('virtualenv.create', [self.venv_dir])
        self.test.assertEqual(result['retcode'], 0)

    def __exit__(self, exc_type, exc_value, traceback):
        # best-effort cleanup; a missing directory is simply skipped
        if os.path.isdir(self.venv_dir):
            shutil.rmtree(self.venv_dir, ignore_errors=True)
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
    @skip_if_not_root
    def test_pip_installed_removed(self):
        '''
        Tests installed and removed states
        '''
        # installs into the system python (no bin_env) -- hence the
        # skip_if_not_root guard above
        name = 'pudb'
        if name in self.run_function('pip.list'):
            self.skipTest('{0} is already installed, uninstall to run this test'.format(name))
        ret = self.run_state('pip.installed', name=name)
        self.assertSaltTrueReturn(ret)
        ret = self.run_state('pip.removed', name=name)
        self.assertSaltTrueReturn(ret)
    def test_pip_installed_removed_venv(self):
        # same as test_pip_installed_removed, but scoped to a throwaway
        # virtualenv via bin_env, so no root privileges are required
        venv_dir = os.path.join(
            RUNTIME_VARS.TMP, 'pip_installed_removed'
        )
        with VirtualEnv(self, venv_dir):
            name = 'pudb'
            ret = self.run_state('pip.installed', name=name, bin_env=venv_dir)
            self.assertSaltTrueReturn(ret)
            ret = self.run_state('pip.removed', name=name, bin_env=venv_dir)
            self.assertSaltTrueReturn(ret)
def test_pip_installed_errors(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip-installed-errors'
)
orig_shell = os.environ.get('SHELL')
try:
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
# Example error strings:
# * "Error installing 'pep8': /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/sh: 1: /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/bash: /tmp/pip-installed-errors: No such file or directory"
os.environ['SHELL'] = '/bin/sh'
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltFalseReturn(ret)
self.assertSaltCommentRegexpMatches(
ret,
'Error installing \'pep8\':'
)
# We now create the missing virtualenv
ret = self.run_function('virtualenv.create', [venv_dir])
self.assertEqual(ret['retcode'], 0)
# The state should not have any issues running now
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltTrueReturn(ret)
finally:
if orig_shell is None:
# Didn't exist before, don't leave it there. This should never
# happen, but if it does, we don't want this test to affect
# others elsewhere in the suite.
os.environ.pop('SHELL')
else:
os.environ['SHELL'] = orig_shell
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(six.PY3, 'Issue is specific to carbon module, which is PY2-only')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
@requires_system_grains
def test_pip_installed_weird_install(self, grains=None):
# First, check to see if this is running on CentOS 5 or MacOS.
# If so, skip this test.
if grains['os'] in ('CentOS',) and grains['osrelease_info'][0] in (5,):
self.skipTest('This test does not run reliably on CentOS 5')
if grains['os'] in ('MacOS',):
self.skipTest('This test does not run reliably on MacOS')
ographite = '/opt/graphite'
if os.path.isdir(ographite):
self.skipTest(
'You already have \'{0}\'. This test would overwrite this '
'directory'.format(ographite)
)
try:
os.makedirs(ographite)
except OSError as err:
if err.errno == errno.EACCES:
# Permission denied
self.skipTest(
'You don\'t have the required permissions to run this test'
)
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-weird-install')
try:
# We may be able to remove this, I had to add it because the custom
# modules from the test suite weren't available in the jinja
# context when running the call to state.sls that comes after.
self.run_function('saltutil.sync_modules')
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
ret = self.run_function(
'state.sls', mods='pip-installed-weird-install'
)
self.assertSaltTrueReturn(ret)
# We cannot use assertInSaltComment here because we need to skip
# some of the state return parts
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
if ret[key]['name'] != 'carbon < 1.1':
continue
self.assertEqual(
ret[key]['comment'],
'There was no error installing package \'carbon < 1.1\' '
'although it does not show when calling \'pip.freeze\'.'
)
break
else:
raise Exception('Expected state did not run')
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
def test_issue_2028_pip_installed_state(self):
ret = self.run_function('state.sls', mods='issue-2028-pip-installed')
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'issue-2028-pip-installed'
)
pep8_bin = os.path.join(venv_dir, 'bin', 'pep8')
if salt.utils.platform.is_windows():
pep8_bin = os.path.join(venv_dir, 'Scripts', 'pep8.exe')
try:
self.assertSaltTrueReturn(ret)
self.assertTrue(
os.path.isfile(pep8_bin)
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_2087_missing_pip(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'issue-2087-missing-pip'
)
try:
# Let's create the testing virtualenv
ret = self.run_function('virtualenv.create', [venv_dir])
self.assertEqual(ret['retcode'], 0)
# Let's remove the pip binary
pip_bin = os.path.join(venv_dir, 'bin', 'pip')
py_dir = 'python{0}.{1}'.format(*sys.version_info[:2])
site_dir = os.path.join(venv_dir, 'lib', py_dir, 'site-packages')
if salt.utils.platform.is_windows():
pip_bin = os.path.join(venv_dir, 'Scripts', 'pip.exe')
site_dir = os.path.join(venv_dir, 'lib', 'site-packages')
if not os.path.isfile(pip_bin):
self.skipTest(
'Failed to find the pip binary to the test virtualenv'
)
os.remove(pip_bin)
# Also remove the pip dir from site-packages
# This is needed now that we're using python -m pip instead of the
# pip binary directly. python -m pip will still work even if the
# pip binary is missing
shutil.rmtree(os.path.join(site_dir, 'pip'))
# Let's run the state which should fail because pip is missing
ret = self.run_function('state.sls', mods='issue-2087-missing-pip')
self.assertSaltFalseReturn(ret)
self.assertInSaltComment(
'Error installing \'pep8\': Could not find a `pip` binary',
ret
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_5940_multiple_pip_mirrors(self):
'''
Test multiple pip mirrors. This test only works with pip < 7.0.0
'''
ret = self.run_function(
'state.sls', mods='issue-5940-multiple-pip-mirrors'
)
venv_dir = os.path.join(
RUNTIME_VARS.TMP, '5940-multiple-pip-mirrors'
)
try:
self.assertSaltTrueReturn(ret)
self.assertTrue(
os.path.isfile(os.path.join(venv_dir, 'bin', 'pep8'))
)
except (AssertionError, CommandExecutionError):
pip_version = self.run_function('pip.version', [venv_dir])
if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'):
self.skipTest('the --mirrors arg has been deprecated and removed in pip==7.0.0')
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@destructiveTest
@skip_if_not_root
@with_system_user('issue-6912', on_existing='delete', delete=True,
password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner(self, temp_dir, username):
# Setup virtual environment directory to be used throughout the test
venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
# The virtual environment needs to be in a location that is accessible
# by both the user running the test and the runas user
if salt.utils.platform.is_windows():
salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
else:
uid = self.run_function('file.user_to_uid', [username])
os.chown(temp_dir, uid, -1)
# Create the virtual environment
venv_create = self.run_function(
'virtualenv.create', [venv_dir], user=username,
password='PassWord1!')
if venv_create['retcode'] > 0:
self.skipTest('Failed to create testcase virtual environment: {0}'
''.format(venv_create))
# pip install passing the package name in `name`
ret = self.run_state(
'pip.installed', name='pep8', user=username, bin_env=venv_dir,
no_cache_dir=True, password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:
uid = pwd.getpwnam(username).pw_uid
for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
os.path.join(venv_dir, '*', '**', 'pep8*'),
os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
for path in glob.glob(globmatch):
if HAS_PWD:
self.assertEqual(uid, os.stat(path).st_uid)
elif salt.utils.platform.is_windows():
self.assertEqual(
salt.utils.win_dacl.get_owner(path), username)
@destructiveTest
@skip_if_not_root
@with_system_user('issue-6912', on_existing='delete', delete=True,
password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner_requirements_file(self, temp_dir, username):
# Setup virtual environment directory to be used throughout the test
venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
# The virtual environment needs to be in a location that is accessible
# by both the user running the test and the runas user
if salt.utils.platform.is_windows():
salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
else:
uid = self.run_function('file.user_to_uid', [username])
os.chown(temp_dir, uid, -1)
# Create the virtual environment again as it should have been removed
venv_create = self.run_function(
'virtualenv.create', [venv_dir], user=username,
password='PassWord1!')
if venv_create['retcode'] > 0:
self.skipTest('failed to create testcase virtual environment: {0}'
''.format(venv_create))
# pip install using a requirements file
req_filename = os.path.join(
RUNTIME_VARS.TMP_STATE_TREE, 'issue-6912-requirements.txt'
)
with salt.utils.files.fopen(req_filename, 'wb') as reqf:
reqf.write(b'pep8\n')
ret = self.run_state(
'pip.installed', name='', user=username, bin_env=venv_dir,
requirements='salt://issue-6912-requirements.txt',
no_cache_dir=True, password='PassWord1!')
self.assertSaltTrueReturn(ret)
if HAS_PWD:
uid = pwd.getpwnam(username).pw_uid
for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
os.path.join(venv_dir, '*', '**', 'pep8*'),
os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
for path in glob.glob(globmatch):
if HAS_PWD:
self.assertEqual(uid, os.stat(path).st_uid)
elif salt.utils.platform.is_windows():
self.assertEqual(
salt.utils.win_dacl.get_owner(path), username)
def test_issue_6833_pip_upgrade_pip(self):
# Create the testing virtualenv
venv_dir = os.path.join(
RUNTIME_VARS.TMP, '6833-pip-upgrade-pip'
)
ret = self.run_function('virtualenv.create', [venv_dir])
try:
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn(
'New python executable',
ret['stdout']
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
# Let's install a fixed version pip over whatever pip was
# previously installed
ret = self.run_function(
'pip.install', ['pip==8.0'], upgrade=True,
bin_env=venv_dir
)
try:
self.assertEqual(ret['retcode'], 0)
self.assertIn(
'Successfully installed pip',
ret['stdout']
)
except AssertionError:
import pprint
pprint.pprint(ret)
raise
# Let's make sure we have pip 8.0 installed
self.assertEqual(
self.run_function('pip.list', ['pip'], bin_env=venv_dir),
{'pip': '8.0.0'}
)
# Now the actual pip upgrade pip test
ret = self.run_state(
'pip.installed', name='pip==8.0.1', upgrade=True,
bin_env=venv_dir
)
try:
self.assertSaltTrueReturn(ret)
self.assertSaltStateChangesEqual(
ret, {'pip==8.0.1': 'Installed'})
except AssertionError:
import pprint
pprint.pprint(ret)
raise
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
def test_pip_installed_specific_env(self):
# Create the testing virtualenv
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip-installed-specific-env'
)
# Let's write a requirements file
requirements_file = os.path.join(
RUNTIME_VARS.TMP_PRODENV_STATE_TREE, 'prod-env-requirements.txt'
)
with salt.utils.files.fopen(requirements_file, 'wb') as reqf:
reqf.write(b'pep8\n')
try:
self.run_function('virtualenv.create', [venv_dir])
# The requirements file should not be found the base environment
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir,
requirements='salt://prod-env-requirements.txt'
)
self.assertSaltFalseReturn(ret)
self.assertInSaltComment(
"'salt://prod-env-requirements.txt' not found", ret
)
# The requirements file must be found in the prod environment
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir, saltenv='prod',
requirements='salt://prod-env-requirements.txt'
)
self.assertSaltTrueReturn(ret)
self.assertInSaltComment(
'Successfully processed requirements file '
'salt://prod-env-requirements.txt', ret
)
# We're using the base environment but we're passing the prod
# environment as an url arg to salt://
ret = self.run_state(
'pip.installed', name='', bin_env=venv_dir,
requirements='salt://prod-env-requirements.txt?saltenv=prod'
)
self.assertSaltTrueReturn(ret)
self.assertInSaltComment(
'Requirements were already installed.',
ret
)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
if os.path.isfile(requirements_file):
os.unlink(requirements_file)
def test_22359_pip_installed_unless_does_not_trigger_warnings(self):
# This test case should be moved to a format_call unit test specific to
# the state internal keywords
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-unless')
venv_create = self.run_function('virtualenv.create', [venv_dir])
if venv_create['retcode'] > 0:
self.skipTest(
'Failed to create testcase virtual environment: {0}'.format(
venv_create
)
)
false_cmd = '/bin/false'
if salt.utils.platform.is_windows():
false_cmd = 'exit 1 >nul'
try:
ret = self.run_state(
'pip.installed', name='pep8', bin_env=venv_dir, unless=false_cmd
)
self.assertSaltTrueReturn(ret)
self.assertNotIn('warnings', next(six.itervalues(ret)))
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(sys.version_info[:2] >= (3, 6), 'Old version of virtualenv too old for python3.6')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
def test_46127_pip_env_vars(self):
'''
Test that checks if env_vars passed to pip.installed are also passed
to pip.freeze while checking for existing installations
'''
# This issue is most easily checked while installing carbon
# Much of the code here comes from the test_weird_install function above
ographite = '/opt/graphite'
if os.path.isdir(ographite):
self.skipTest(
'You already have \'{0}\'. This test would overwrite this '
'directory'.format(ographite)
)
try:
os.makedirs(ographite)
except OSError as err:
if err.errno == errno.EACCES:
# Permission denied
self.skipTest(
'You don\'t have the required permissions to run this test'
)
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
venv_dir = os.path.join(RUNTIME_VARS.TMP, 'issue-46127-pip-env-vars')
try:
# We may be able to remove this, I had to add it because the custom
# modules from the test suite weren't available in the jinja
# context when running the call to state.sls that comes after.
self.run_function('saltutil.sync_modules')
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
ret = self.run_function(
'state.sls', mods='issue-46127-pip-env-vars'
)
self.assertSaltTrueReturn(ret)
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
if ret[key]['name'] != 'carbon < 1.3':
continue
self.assertEqual(
ret[key]['comment'],
'All packages were successfully installed'
)
break
else:
raise Exception('Expected state did not run')
# Run the state again. Now the already installed message should
# appear
ret = self.run_function(
'state.sls', mods='issue-46127-pip-env-vars'
)
self.assertSaltTrueReturn(ret)
# We cannot use assertInSaltComment here because we need to skip
# some of the state return parts
for key in six.iterkeys(ret):
self.assertTrue(ret[key]['result'])
# As we are re-running the formula, some states will not be run
# and "name" may or may not be present, so we use .get() pattern
if ret[key].get('name', '') != 'carbon < 1.3':
continue
self.assertEqual(
ret[key]['comment'],
('Python package carbon < 1.3 was already installed\n'
'All specified packages are already installed'))
break
else:
raise Exception('Expected state did not run')
finally:
if os.path.isdir(ographite):
shutil.rmtree(ographite, ignore_errors=True)
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Commit message: "Skip tests when we can not use runas"
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.states.pip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
import glob
import shutil
import sys
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import (
destructiveTest,
requires_system_grains,
with_system_user,
skip_if_not_root,
with_tempdir
)
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.versions
import salt.utils.win_dacl
import salt.utils.win_functions
import salt.utils.win_runas
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
def can_runas():
'''
Detect if we are running in a limited shell (winrm) and are un-able to use
the runas
'''
if salt.utils.is_windows():
try:
salt.utils.win_runas.run_as(
'cmd.exe /c echo 1', 'noexistuser', 'n0existp4ss',
)
except WindowsError as exc:
if exc.winerror == 5:
# Access Denied
return False
return True
CAN_RUNAS = can_runas()
class VirtualEnv(object):
def __init__(self, test, venv_dir):
self.venv_dir = venv_dir
self.test = test
def __enter__(self):
ret = self.test.run_function('virtualenv.create', [self.venv_dir])
self.test.assertEqual(ret['retcode'], 0)
def __exit__(self, exc_type, exc_value, traceback):
if os.path.isdir(self.venv_dir):
shutil.rmtree(self.venv_dir, ignore_errors=True)
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class PipStateTest(ModuleCase, SaltReturnAssertsMixin):
@skip_if_not_root
def test_pip_installed_removed(self):
'''
Tests installed and removed states
'''
name = 'pudb'
if name in self.run_function('pip.list'):
self.skipTest('{0} is already installed, uninstall to run this test'.format(name))
ret = self.run_state('pip.installed', name=name)
self.assertSaltTrueReturn(ret)
ret = self.run_state('pip.removed', name=name)
self.assertSaltTrueReturn(ret)
def test_pip_installed_removed_venv(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip_installed_removed'
)
with VirtualEnv(self, venv_dir):
name = 'pudb'
ret = self.run_state('pip.installed', name=name, bin_env=venv_dir)
self.assertSaltTrueReturn(ret)
ret = self.run_state('pip.removed', name=name, bin_env=venv_dir)
self.assertSaltTrueReturn(ret)
def test_pip_installed_errors(self):
venv_dir = os.path.join(
RUNTIME_VARS.TMP, 'pip-installed-errors'
)
orig_shell = os.environ.get('SHELL')
try:
# Since we don't have the virtualenv created, pip.installed will
# throw an error.
# Example error strings:
# * "Error installing 'pep8': /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/sh: 1: /tmp/pip-installed-errors: not found"
# * "Error installing 'pep8': /bin/bash: /tmp/pip-installed-errors: No such file or directory"
os.environ['SHELL'] = '/bin/sh'
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltFalseReturn(ret)
self.assertSaltCommentRegexpMatches(
ret,
'Error installing \'pep8\':'
)
# We now create the missing virtualenv
ret = self.run_function('virtualenv.create', [venv_dir])
self.assertEqual(ret['retcode'], 0)
# The state should not have any issues running now
ret = self.run_function('state.sls', mods='pip-installed-errors')
self.assertSaltTrueReturn(ret)
finally:
if orig_shell is None:
# Didn't exist before, don't leave it there. This should never
# happen, but if it does, we don't want this test to affect
# others elsewhere in the suite.
os.environ.pop('SHELL')
else:
os.environ['SHELL'] = orig_shell
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(six.PY3, 'Issue is specific to carbon module, which is PY2-only')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
@requires_system_grains
def test_pip_installed_weird_install(self, grains=None):
    """Install carbon (whose setup installs outside site-packages, into
    /opt/graphite) via state.sls and assert the state still reports
    success with the expected 'does not show in pip.freeze' comment."""
    # First, check to see if this is running on CentOS 5 or MacOS.
    # If so, skip this test.
    if grains['os'] in ('CentOS',) and grains['osrelease_info'][0] in (5,):
        self.skipTest('This test does not run reliably on CentOS 5')
    if grains['os'] in ('MacOS',):
        self.skipTest('This test does not run reliably on MacOS')
    ographite = '/opt/graphite'
    if os.path.isdir(ographite):
        self.skipTest(
            'You already have \'{0}\'. This test would overwrite this '
            'directory'.format(ographite)
        )
    # Probe write access by creating the target directory, then remove it
    # immediately so the state under test starts from a clean slate.
    try:
        os.makedirs(ographite)
    except OSError as err:
        if err.errno == errno.EACCES:
            # Permission denied
            self.skipTest(
                'You don\'t have the required permissions to run this test'
            )
    finally:
        if os.path.isdir(ographite):
            shutil.rmtree(ographite, ignore_errors=True)
    venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-weird-install')
    try:
        # We may be able to remove this, I had to add it because the custom
        # modules from the test suite weren't available in the jinja
        # context when running the call to state.sls that comes after.
        self.run_function('saltutil.sync_modules')
        # Since we don't have the virtualenv created, pip.installed will
        # throw an error.
        ret = self.run_function(
            'state.sls', mods='pip-installed-weird-install'
        )
        self.assertSaltTrueReturn(ret)
        # We cannot use assertInSaltComment here because we need to skip
        # some of the state return parts
        for key in six.iterkeys(ret):
            self.assertTrue(ret[key]['result'])
            if ret[key]['name'] != 'carbon < 1.1':
                continue
            self.assertEqual(
                ret[key]['comment'],
                'There was no error installing package \'carbon < 1.1\' '
                'although it does not show when calling \'pip.freeze\'.'
            )
            break
        else:
            # for/else: the loop never found the carbon state in the return
            raise Exception('Expected state did not run')
    finally:
        if os.path.isdir(ographite):
            shutil.rmtree(ographite, ignore_errors=True)
def test_issue_2028_pip_installed_state(self):
    # Regression test for issue #2028: pip.installed driven from state.sls
    # must leave the package's console script inside the virtualenv.
    ret = self.run_function('state.sls', mods='issue-2028-pip-installed')
    venv_dir = os.path.join(RUNTIME_VARS.TMP, 'issue-2028-pip-installed')
    if salt.utils.platform.is_windows():
        pep8_bin = os.path.join(venv_dir, 'Scripts', 'pep8.exe')
    else:
        pep8_bin = os.path.join(venv_dir, 'bin', 'pep8')
    try:
        self.assertSaltTrueReturn(ret)
        self.assertTrue(os.path.isfile(pep8_bin))
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_2087_missing_pip(self):
    """pip.installed must fail with a clear 'Could not find a `pip`
    binary' comment when the virtualenv has pip stripped out of it
    (issue #2087)."""
    venv_dir = os.path.join(
        RUNTIME_VARS.TMP, 'issue-2087-missing-pip'
    )
    try:
        # Let's create the testing virtualenv
        ret = self.run_function('virtualenv.create', [venv_dir])
        self.assertEqual(ret['retcode'], 0)
        # Let's remove the pip binary
        pip_bin = os.path.join(venv_dir, 'bin', 'pip')
        py_dir = 'python{0}.{1}'.format(*sys.version_info[:2])
        site_dir = os.path.join(venv_dir, 'lib', py_dir, 'site-packages')
        if salt.utils.platform.is_windows():
            # Windows virtualenvs use a different layout
            pip_bin = os.path.join(venv_dir, 'Scripts', 'pip.exe')
            site_dir = os.path.join(venv_dir, 'lib', 'site-packages')
        if not os.path.isfile(pip_bin):
            self.skipTest(
                'Failed to find the pip binary to the test virtualenv'
            )
        os.remove(pip_bin)
        # Also remove the pip dir from site-packages
        # This is needed now that we're using python -m pip instead of the
        # pip binary directly. python -m pip will still work even if the
        # pip binary is missing
        shutil.rmtree(os.path.join(site_dir, 'pip'))
        # Let's run the state which should fail because pip is missing
        ret = self.run_function('state.sls', mods='issue-2087-missing-pip')
        self.assertSaltFalseReturn(ret)
        self.assertInSaltComment(
            'Error installing \'pep8\': Could not find a `pip` binary',
            ret
        )
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
def test_issue_5940_multiple_pip_mirrors(self):
    '''
    Test multiple pip mirrors. This test only works with pip < 7.0.0
    '''
    ret = self.run_function(
        'state.sls', mods='issue-5940-multiple-pip-mirrors'
    )
    venv_dir = os.path.join(
        RUNTIME_VARS.TMP, '5940-multiple-pip-mirrors'
    )
    try:
        self.assertSaltTrueReturn(ret)
        self.assertTrue(
            os.path.isfile(os.path.join(venv_dir, 'bin', 'pep8'))
        )
    except (AssertionError, CommandExecutionError):
        # The --mirrors option was removed in pip 7.0.0: on newer pip the
        # failure is expected, so downgrade it to a skip.
        pip_version = self.run_function('pip.version', [venv_dir])
        if salt.utils.versions.compare(ver1=pip_version, oper='>=', ver2='7.0.0'):
            self.skipTest('the --mirrors arg has been deprecated and removed in pip==7.0.0')
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
@destructiveTest
@skip_if_not_root
@skipIf(not CAN_RUNAS, 'Runas support required')
@with_system_user('issue-6912', on_existing='delete', delete=True,
                  password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner(self, temp_dir, username):
    """Files installed by pip.installed with ``user=`` must end up owned
    by that user, not by root (issue #6912)."""
    # Setup virtual environment directory to be used throughout the test
    venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
    # The virtual environment needs to be in a location that is accessible
    # by both the user running the test and the runas user
    if salt.utils.platform.is_windows():
        salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
    else:
        uid = self.run_function('file.user_to_uid', [username])
        os.chown(temp_dir, uid, -1)
    # Create the virtual environment
    venv_create = self.run_function(
        'virtualenv.create', [venv_dir], user=username,
        password='PassWord1!')
    if venv_create['retcode'] > 0:
        self.skipTest('Failed to create testcase virtual environment: {0}'
                      ''.format(venv_create))
    # pip install passing the package name in `name`
    ret = self.run_state(
        'pip.installed', name='pep8', user=username, bin_env=venv_dir,
        no_cache_dir=True, password='PassWord1!')
    self.assertSaltTrueReturn(ret)
    if HAS_PWD:
        uid = pwd.getpwnam(username).pw_uid
    # Walk every file pip laid down for the package (at several possible
    # nesting depths) and verify its ownership.
    for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
                      os.path.join(venv_dir, '*', '**', 'pep8*'),
                      os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
        for path in glob.glob(globmatch):
            if HAS_PWD:
                self.assertEqual(uid, os.stat(path).st_uid)
            elif salt.utils.platform.is_windows():
                self.assertEqual(
                    salt.utils.win_dacl.get_owner(path), username)
@destructiveTest
@skip_if_not_root
@skipIf(not CAN_RUNAS, 'Runas support required')
@with_system_user('issue-6912', on_existing='delete', delete=True,
                  password='PassWord1!')
@with_tempdir()
def test_issue_6912_wrong_owner_requirements_file(self, temp_dir, username):
    """Same ownership check as test_issue_6912_wrong_owner, but driving
    the install through a salt:// requirements file (issue #6912)."""
    # Setup virtual environment directory to be used throughout the test
    venv_dir = os.path.join(temp_dir, '6912-wrong-owner')
    # The virtual environment needs to be in a location that is accessible
    # by both the user running the test and the runas user
    if salt.utils.platform.is_windows():
        salt.utils.win_dacl.set_permissions(temp_dir, username, 'full_control')
    else:
        uid = self.run_function('file.user_to_uid', [username])
        os.chown(temp_dir, uid, -1)
    # Create the virtual environment again as it should have been removed
    venv_create = self.run_function(
        'virtualenv.create', [venv_dir], user=username,
        password='PassWord1!')
    if venv_create['retcode'] > 0:
        self.skipTest('failed to create testcase virtual environment: {0}'
                      ''.format(venv_create))
    # pip install using a requirements file
    req_filename = os.path.join(
        RUNTIME_VARS.TMP_STATE_TREE, 'issue-6912-requirements.txt'
    )
    with salt.utils.files.fopen(req_filename, 'wb') as reqf:
        reqf.write(b'pep8\n')
    ret = self.run_state(
        'pip.installed', name='', user=username, bin_env=venv_dir,
        requirements='salt://issue-6912-requirements.txt',
        no_cache_dir=True, password='PassWord1!')
    self.assertSaltTrueReturn(ret)
    if HAS_PWD:
        uid = pwd.getpwnam(username).pw_uid
    # Walk every file pip laid down for the package (at several possible
    # nesting depths) and verify its ownership.
    for globmatch in (os.path.join(venv_dir, '**', 'pep8*'),
                      os.path.join(venv_dir, '*', '**', 'pep8*'),
                      os.path.join(venv_dir, '*', '*', '**', 'pep8*')):
        for path in glob.glob(globmatch):
            if HAS_PWD:
                self.assertEqual(uid, os.stat(path).st_uid)
            elif salt.utils.platform.is_windows():
                self.assertEqual(
                    salt.utils.win_dacl.get_owner(path), username)
def test_issue_6833_pip_upgrade_pip(self):
    """pip.installed with upgrade=True must be able to upgrade pip itself
    inside a virtualenv, pinning 8.0 first then upgrading to 8.0.1
    (issue #6833)."""
    # Create the testing virtualenv
    venv_dir = os.path.join(
        RUNTIME_VARS.TMP, '6833-pip-upgrade-pip'
    )
    ret = self.run_function('virtualenv.create', [venv_dir])
    try:
        try:
            self.assertEqual(ret['retcode'], 0)
            self.assertIn(
                'New python executable',
                ret['stdout']
            )
        except AssertionError:
            # Dump the full return for debugging before re-raising
            import pprint
            pprint.pprint(ret)
            raise
        # Let's install a fixed version pip over whatever pip was
        # previously installed
        ret = self.run_function(
            'pip.install', ['pip==8.0'], upgrade=True,
            bin_env=venv_dir
        )
        try:
            self.assertEqual(ret['retcode'], 0)
            self.assertIn(
                'Successfully installed pip',
                ret['stdout']
            )
        except AssertionError:
            import pprint
            pprint.pprint(ret)
            raise
        # Let's make sure we have pip 8.0 installed
        self.assertEqual(
            self.run_function('pip.list', ['pip'], bin_env=venv_dir),
            {'pip': '8.0.0'}
        )
        # Now the actual pip upgrade pip test
        ret = self.run_state(
            'pip.installed', name='pip==8.0.1', upgrade=True,
            bin_env=venv_dir
        )
        try:
            self.assertSaltTrueReturn(ret)
            self.assertSaltStateChangesEqual(
                ret, {'pip==8.0.1': 'Installed'})
        except AssertionError:
            import pprint
            pprint.pprint(ret)
            raise
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
def test_pip_installed_specific_env(self):
    """Requirements files served via salt:// must honor the saltenv, both
    as the state's ``saltenv=`` argument and as a ``?saltenv=`` URL
    query parameter."""
    # Create the testing virtualenv
    venv_dir = os.path.join(
        RUNTIME_VARS.TMP, 'pip-installed-specific-env'
    )
    # Let's write a requirements file
    requirements_file = os.path.join(
        RUNTIME_VARS.TMP_PRODENV_STATE_TREE, 'prod-env-requirements.txt'
    )
    with salt.utils.files.fopen(requirements_file, 'wb') as reqf:
        reqf.write(b'pep8\n')
    try:
        self.run_function('virtualenv.create', [venv_dir])
        # The requirements file should not be found the base environment
        ret = self.run_state(
            'pip.installed', name='', bin_env=venv_dir,
            requirements='salt://prod-env-requirements.txt'
        )
        self.assertSaltFalseReturn(ret)
        self.assertInSaltComment(
            "'salt://prod-env-requirements.txt' not found", ret
        )
        # The requirements file must be found in the prod environment
        ret = self.run_state(
            'pip.installed', name='', bin_env=venv_dir, saltenv='prod',
            requirements='salt://prod-env-requirements.txt'
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Successfully processed requirements file '
            'salt://prod-env-requirements.txt', ret
        )
        # We're using the base environment but we're passing the prod
        # environment as an url arg to salt://
        ret = self.run_state(
            'pip.installed', name='', bin_env=venv_dir,
            requirements='salt://prod-env-requirements.txt?saltenv=prod'
        )
        self.assertSaltTrueReturn(ret)
        self.assertInSaltComment(
            'Requirements were already installed.',
            ret
        )
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
        if os.path.isfile(requirements_file):
            os.unlink(requirements_file)
def test_22359_pip_installed_unless_does_not_trigger_warnings(self):
    """An ``unless`` requisite on pip.installed must not leave a
    'warnings' key in the state return (issue #22359)."""
    # This test case should be moved to a format_call unit test specific to
    # the state internal keywords
    venv_dir = os.path.join(RUNTIME_VARS.TMP, 'pip-installed-unless')
    venv_create = self.run_function('virtualenv.create', [venv_dir])
    if venv_create['retcode'] > 0:
        self.skipTest(
            'Failed to create testcase virtual environment: {0}'.format(
                venv_create
            )
        )
    # A command that always fails, so `unless` lets the state run.
    false_cmd = '/bin/false'
    if salt.utils.platform.is_windows():
        false_cmd = 'exit 1 >nul'
    try:
        ret = self.run_state(
            'pip.installed', name='pep8', bin_env=venv_dir, unless=false_cmd
        )
        self.assertSaltTrueReturn(ret)
        self.assertNotIn('warnings', next(six.itervalues(ret)))
    finally:
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir, ignore_errors=True)
@skipIf(sys.version_info[:2] >= (3, 6), 'Old version of virtualenv too old for python3.6')
@skipIf(salt.utils.platform.is_windows(), "Carbon does not install in Windows")
def test_46127_pip_env_vars(self):
    '''
    Test that checks if env_vars passed to pip.installed are also passed
    to pip.freeze while checking for existing installations
    '''
    # This issue is most easily checked while installing carbon
    # Much of the code here comes from the test_weird_install function above
    ographite = '/opt/graphite'
    if os.path.isdir(ographite):
        self.skipTest(
            'You already have \'{0}\'. This test would overwrite this '
            'directory'.format(ographite)
        )
    # Probe write access by creating the target directory, then remove it
    # immediately so the state under test starts from a clean slate.
    try:
        os.makedirs(ographite)
    except OSError as err:
        if err.errno == errno.EACCES:
            # Permission denied
            self.skipTest(
                'You don\'t have the required permissions to run this test'
            )
    finally:
        if os.path.isdir(ographite):
            shutil.rmtree(ographite, ignore_errors=True)
    venv_dir = os.path.join(RUNTIME_VARS.TMP, 'issue-46127-pip-env-vars')
    try:
        # We may be able to remove this, I had to add it because the custom
        # modules from the test suite weren't available in the jinja
        # context when running the call to state.sls that comes after.
        self.run_function('saltutil.sync_modules')
        # Since we don't have the virtualenv created, pip.installed will
        # throw an error.
        ret = self.run_function(
            'state.sls', mods='issue-46127-pip-env-vars'
        )
        self.assertSaltTrueReturn(ret)
        for key in six.iterkeys(ret):
            self.assertTrue(ret[key]['result'])
            if ret[key]['name'] != 'carbon < 1.3':
                continue
            self.assertEqual(
                ret[key]['comment'],
                'All packages were successfully installed'
            )
            break
        else:
            # for/else: the carbon state never appeared in the return
            raise Exception('Expected state did not run')
        # Run the state again. Now the already installed message should
        # appear
        ret = self.run_function(
            'state.sls', mods='issue-46127-pip-env-vars'
        )
        self.assertSaltTrueReturn(ret)
        # We cannot use assertInSaltComment here because we need to skip
        # some of the state return parts
        for key in six.iterkeys(ret):
            self.assertTrue(ret[key]['result'])
            # As we are re-running the formula, some states will not be run
            # and "name" may or may not be present, so we use .get() pattern
            if ret[key].get('name', '') != 'carbon < 1.3':
                continue
            self.assertEqual(
                ret[key]['comment'],
                ('Python package carbon < 1.3 was already installed\n'
                 'All specified packages are already installed'))
            break
        else:
            raise Exception('Expected state did not run')
    finally:
        if os.path.isdir(ographite):
            shutil.rmtree(ographite, ignore_errors=True)
        if os.path.isdir(venv_dir):
            shutil.rmtree(venv_dir)
|
# Copyright (c) 2011, 2012, 2013 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""
Classes representing hybrid dynamical systems.
"""
import logging
logger = logging.getLogger(__name__)
from warnings import warn
import itertools
from pprint import pformat
import numpy as np
import polytope as pc
try:
from tulip.graphics import newax, quiver
except Exception, e:
logger.error(e)
quiver = None
class LtiSysDyn(object):
    """Represent discrete-time continuous dynamics::

        s[t+1] = A*s[t] + B*u[t] + E*d[t] + K

    subject to the constraints::

        u[t] \in Uset
        d[t] \in Wset
        s[t] \in domain

    where:
        - u[t] the control input
        - d[t] the disturbance input
        - s[t] the system state

    A LtiSysDyn object contains the fields:

        - A, B, E, K, (matrices)
        - Uset, Wset and domain (each a L{polytope.Polytope})
        - time_semantics: 'discrete' (if system is originally a discrete-time
          system) or 'continuous' (if system is sampled from a continuous-time
          system)
        - timestep: A positive real number containing the timestep.

    as defined above.

    Note
    ====
    For state-dependent bounds on the input,::

        [u[t]; s[t]] \in Uset

    can be used.

    See Also
    ========
    L{PwaSysDyn}, L{HybridSysDyn}, L{polytope.Polytope}
    """
    def __init__(self, A=None, B=None, E=None, K=None,
                 Uset=None, Wset=None, domain=None, time_semantics=None,
                 timestep=None):
        """Validate matrix dimensions and constraint sets, then store them.

        @raise TypeError: if a matrix argument is not a 2d array
        @raise ValueError: if matrix dimensions disagree
        """
        if Uset is None:
            warn("Uset not given to LtiSysDyn()")
        if (Uset is not None) and (not isinstance(Uset, pc.Polytope)):
            raise Exception("LtiSysDyn: `Uset` has to be a Polytope")
        if domain is None:
            warn("Domain is not given in LtiSysDyn()")
        if (domain is not None) and (not isinstance(domain, pc.Polytope)):
            raise Exception("LtiSysDyn: `domain` has to be a Polytope")
        # check dimensions agree
        # (catch only the failures a bad argument actually produces:
        # AttributeError when there is no `.shape`, ValueError when the
        # shape does not unpack into exactly two dimensions)
        try:
            nA, mA = A.shape
        except (AttributeError, ValueError):
            raise TypeError('A matrix must be 2d array')
        if nA != mA:
            raise ValueError('A must be square')
        if B is not None:
            try:
                nB, mB = B.shape
            except (AttributeError, ValueError):
                raise TypeError('B matrix must be 2d array')
            if nA != nB:
                raise ValueError('A and B must have same number of rows')
        if E is not None:
            try:
                nE, mE = E.shape
            except (AttributeError, ValueError):
                raise TypeError('E matrix must be 2d array')
            if nA != nE:
                raise ValueError('A and E must have same number of rows')
        if K is not None:
            try:
                nK, mK = K.shape
            except (AttributeError, ValueError):
                raise TypeError('K column vector must be 2d array')
            if nA != nK:
                raise ValueError('A and K must have same number of rows')
            if mK != 1:
                raise ValueError('K must be a column vector')
        self.A = A
        self.B = B
        if K is None:
            if len(A) != 0:
                # default affine term: zero column vector
                self.K = np.zeros([mA, 1])
            else:
                self.K = K
        else:
            # normalize K into an explicit column vector
            self.K = K.reshape(K.size, 1)
        if E is None and (len(A) != 0):
            # no disturbance given: zero E matrix and empty disturbance set
            self.E = np.zeros([mA, 1])
            self.Wset = pc.Polytope()
        else:
            self.E = E
            self.Wset = Wset
        self.Uset = Uset
        self.domain = domain
        # Check that timestep and semantics are valid.
        _check_time_data(time_semantics, timestep)
        self.time_semantics = time_semantics
        self.timestep = timestep

    def __str__(self):
        """Return a readable dump of all matrices and constraint sets."""
        output = "A =\n" + str(self.A)
        output += "\nB =\n" + str(self.B)
        output += "\nE =\n" + str(self.E)
        output += "\nK =\n" + str(self.K)
        output += "\nUset =\n" + str(self.Uset)
        output += "\nWset =\n" + str(self.Wset)
        return output

    def plot(self, ax=None, color=None, show_domain=True):
        """Quiver-plot the vector field (A - I) x over `domain`.

        @param color: RGB triple; when omitted a fresh random color is
            drawn per call.  (Bugfix: the previous default
            ``color=np.random.rand(3)`` was evaluated once at class
            creation, so every plot silently shared the same color.)
        """
        if quiver is None:
            warn('pyvectorized not found. No plotting.')
            return
        if color is None:
            color = np.random.rand(3)
        (x, res) = pc.grid_region(self.domain)
        n = self.A.shape[0]
        DA = self.A - np.eye(n)
        v = DA.dot(x)
        if ax is None:
            ax, fig = newax()
        if show_domain:
            self.domain.plot(ax, color)
        quiver(x, v, ax)
        return ax
class PwaSysDyn(object):
    """PwaSysDyn class for specifying a polytopic piecewise affine system.

    A PwaSysDyn object contains the fields:

        - C{list_subsys}: list of L{LtiSysDyn}
        - C{domain}: domain over which piecewise affine system is defined,
          type: polytope.Polytope
        - C{time_semantics}: 'discrete' (if system is originally a discrete-time
          system) or 'continuous' (if system is sampled from a continuous-time
          system)
        - C{timestep}: A positive real number containing the timestep.

    For the system to be well-defined the domains of its subsystems should be
    mutually exclusive (modulo intersections with empty interior) and cover the
    domain.

    See Also
    ========
    L{LtiSysDyn}, L{HybridSysDyn}, L{polytope.Polytope}
    """
    def __init__(self, list_subsys=None, domain=None, time_semantics=None,
                 timestep=None, overwrite_time=True):
        """
        @type overwrite_time: bool
        @param overwrite_time: If true, then overwrites any time data in the
                               objects in C{list_subsys} with the data in
                               C{time_semantics} and C{timestep} variables.
                               Otherwise checks that the time data of the
                               objects in C{list_subsys} are consistent with
                               C{time_semantics} and C{timestep}.
        """
        # Bugfix: the previous default `list_subsys=[]` was a shared mutable
        # default -- every instance created without the argument referenced
        # the same list object.
        if list_subsys is None:
            list_subsys = []
        if domain is None:
            warn("Domain not given to PwaSysDyn()")
        if ((domain is not None) and
                (not (isinstance(domain, pc.Polytope) or
                      isinstance(domain, pc.Region)))):
            raise Exception("PwaSysDyn: `domain` has to be a Polytope or Region")
        if len(list_subsys) > 0:
            uncovered_dom = domain.copy()
            n = list_subsys[0].A.shape[1]  # State space dimension
            m = list_subsys[0].B.shape[1]  # Input space dimension
            p = list_subsys[0].E.shape[1]  # Disturbance space dimension
            for subsys in list_subsys:
                uncovered_dom = uncovered_dom.diff(subsys.domain)
                if (n != subsys.A.shape[1] or m != subsys.B.shape[1] or
                        p != subsys.E.shape[1]):
                    raise Exception("PwaSysDyn: state, input, disturbance " +
                                    "dimensions have to be the same for all " +
                                    "subsystems")
            # subdomains must cover the domain ...
            if not pc.is_empty(uncovered_dom):
                raise Exception("PwaSysDyn: subdomains must cover the domain")
            # ... and must not overlap with full dimension
            for x in itertools.combinations(list_subsys, 2):
                if pc.is_fulldim(x[0].domain.intersect(x[1].domain)):
                    raise Exception("PwaSysDyn: subdomains have to be mutually" +
                                    " exclusive")
        self.list_subsys = list_subsys
        self.domain = domain
        # Input time semantics
        _check_time_data(time_semantics, timestep)
        if overwrite_time:
            _push_time_data(self.list_subsys, time_semantics, timestep)
        else:
            _check_time_consistency(list_subsys, time_semantics, timestep)
        self.timestep = timestep
        self.time_semantics = time_semantics

    def __str__(self):
        """Return a readable dump of the domain and each subsystem."""
        s = 'Piecewise-Affine System Dynamics\n'
        s += 30 * '-' + 2 * '\n'
        s += 'Domain:\n\n'
        s += pformat(self.domain) + '\n'
        for i, sys in enumerate(self.list_subsys):
            s += 'Subsystem: ' + str(i) + '\n'
            s += str(sys) + 2 * '\n'
        return s

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Construct a single-mode PwaSysDyn from one LTI system.

        (The list defaults here are never mutated, and changing them
        would alter how L{LtiSysDyn} rejects missing arguments, so they
        are kept as-is.)
        """
        lti_sys = LtiSysDyn(A, B, E, K, Uset, Wset, domain)
        return cls([lti_sys], domain)

    def plot(self, ax=None, show_domain=True):
        """Plot every subsystem's vector field, each in a random color."""
        if ax is None:
            ax, fig = newax()
        for subsystem in self.list_subsys:
            subsystem.plot(ax, color=np.random.rand(3),
                           show_domain=show_domain)
class HybridSysDyn(object):
    """Represent hybrid systems switching between dynamic modes.

    A HybridSysDyn represents a system with switching modes
    that depend on both discrete:

        - n_env environment variables (uncontrolled)
        - n_sys system variables (controlled)

    A HybridSysDyn object contains the fields:

        - C{disc_domain_size}: 2-tuple of numbers of modes
          type: (n_env, n_sys)

        - C{env_labels}: (optional) labels for discrete environment variables
          type: list of len(n_env), default: range(n_env)

        - C{disc_sys_labels}: (optional) labels for discrete system variables
          type: list of len(n_sys), default: range(n_sys)

        - C{dynamics}: mapping mode 2-tuples to active dynamics::

            (env_label, sys_label) -> PwaSysDyn

          type: dict
          default: If no env_label or sys_label passed,
          then default to int indices (i,j) L{PwaSysDyn}.

        - C{cts_ss}: continuous state space over which hybrid system is defined.
          type: L{polytope.Region}

        - C{time_semantics}: 'discrete' (if system is originally a discrete-time
          system) or 'continuous' (if system is sampled from a continuous-time
          system)

        - C{timestep}: A positive real number containing the timestep.

    Note
    ====
    We assume that system and environment switching modes are
    independent of one another. (Use LTL statement to make it not so.)

    See Also
    ========
    L{LtiSysDyn}, L{PwaSysDyn}, L{polytope.Region}
    """
    def __init__(self, disc_domain_size=(1, 1),
                 dynamics=None, cts_ss=None,
                 env_labels=None, disc_sys_labels=None, time_semantics=None,
                 timestep=None, overwrite_time=True):
        """
        @type overwrite_time: bool
        @param overwrite_time: If true, then overwrites any time data in the
                               objects in C{dynamics} with the data in
                               C{time_semantics} and C{timestep} variables.
                               Otherwise checks that the time data of the
                               objects in C{dynamics} are consistent with
                               C{time_semantics} and C{timestep}.
        """
        # check that the continuous domain is specified
        if cts_ss is None:
            warn('continuous state space not given to HybridSysDyn')
        else:
            if not isinstance(cts_ss, (pc.Polytope, pc.Region)):
                raise Exception('HybridSysDyn: ' +
                                '`cts_ss` must be a Polytope or Region')
        self.disc_domain_size = disc_domain_size
        # If label numbers agree with disc_domain_size, then use them.
        # Otherwise, ignore the labels.
        n_env, n_sys = disc_domain_size
        self._env_labels = self._check_labels(n_env, env_labels)
        self._disc_sys_labels = self._check_labels(n_sys, disc_sys_labels)
        # Check each dynamics key is a valid mode,
        # i.e., a valid combination of env and sys labels.
        if dynamics is not None:
            modes = self.all_mode_combs
            undefined_modes = set(dynamics.keys()).difference(modes)
            if undefined_modes:
                msg = 'HybridSysDyn: `dynamics` keys inconsistent'
                msg += ' with discrete mode labels.\n'
                msg += 'Undefined modes:\n' + str(undefined_modes)
                raise ValueError(msg)
            missing_modes = set(modes).difference(dynamics.keys())
            if missing_modes:
                msg = 'Missing the modes:\n' + str(missing_modes)
                msg += '\n Make sure you did not forget any modes,\n'
                msg += 'otherwise this is fine.'
                warn(msg)
            # Every mode's dynamics must be a PwaSysDyn.
            # (Bugfix: the original referenced the comprehension variable
            # after `all([...])`, which is a NameError on Python 3 and an
            # arbitrary element's type on Python 2.)
            bad_types = [type(pwa) for pwa in dynamics.values()
                         if not isinstance(pwa, PwaSysDyn)]
            if bad_types:
                msg = 'For each mode dynamics must be PwaSysDyn.\n'
                msg += 'Got instead: ' + str(bad_types)
                raise Exception(msg)
        self.dynamics = dynamics
        self.cts_ss = cts_ss
        _check_time_data(time_semantics, timestep)
        # Bugfix: `dynamics` may be None (the default); the original called
        # `self.dynamics.values()` unconditionally and raised AttributeError.
        if dynamics is not None:
            if overwrite_time:
                _push_time_data(self.dynamics.values(), time_semantics,
                                timestep)
            else:
                _check_time_consistency(dynamics.values(), time_semantics,
                                        timestep)
        self.timestep = timestep
        self.time_semantics = time_semantics

    def __str__(self):
        """Return a readable summary of modes, state space and dynamics."""
        n_env, n_sys = self.disc_domain_size
        s = 'Hybrid System Dynamics\n'
        s += 30 * '-' + '\n'
        s += 'Modes:\n'
        s += 4 * ' ' + 'Environment (' + str(n_env) + ' modes):\n'
        s += 6 * ' ' + pformat(self.env_labels, indent=3) + 2 * '\n'
        s += 4 * ' ' + 'System: (' + str(n_sys) + ' modes)\n'
        s += 6 * ' ' + pformat(self.disc_sys_labels, indent=3) + 2 * '\n'
        s += 'Continuous State Space:\n\n'
        s += pformat(self.cts_ss) + '\n'
        s += 'Dynamics:\n'
        for mode, pwa in self.dynamics.iteritems():
            s += ' mode: ' + str(mode) + '\n'
            s += ' dynamics: ' + pformat(pwa, indent=3) + '\n\n'
        return s

    def _check_labels(self, n, labels):
        """Return C{labels} if it is a sized container of length C{n},
        otherwise None (which means: fall back to integer labels)."""
        # don't complain for default
        if labels is None:
            return None
        # len exists ?
        try:
            # is len correct ?
            if len(labels) != n:
                msg = 'number of environment labels is inconsistent'
                msg += ' with discrete domain size.\n'
                msg += 'Ignoring given environment labels.\n'
                msg += 'Defaulting to integer labels.'
                warn(msg)
                return None
        except TypeError:
            # Bugfix: the original concatenated the type object itself to a
            # str, which raised TypeError inside this handler instead of
            # warning; also the missing space mangled the message.
            warn('Environment labels of type: ' +
                 str(type(labels)) + ' have no len()')
            return None
        return labels

    @property
    def all_mode_combs(self):
        """Return all possible combinations of modes.
        """
        modes = [(a, b) for a in self.env_labels
                 for b in self.disc_sys_labels]
        logger.debug('Available modes: ' + str(modes))
        return modes

    @property
    def modes(self):
        """Return the defined mode keys, or None if no dynamics given."""
        if self.dynamics is None:
            warn('No dynamics defined (None).')
            return None
        return self.dynamics.keys()

    @property
    def env_labels(self):
        """Environment mode labels; integer range when none were given."""
        if self._env_labels is None:
            return range(self.disc_domain_size[0])
        else:
            return self._env_labels

    @property
    def disc_sys_labels(self):
        """System mode labels; integer range when none were given."""
        if self._disc_sys_labels is None:
            return range(self.disc_domain_size[1])
        else:
            return self._disc_sys_labels

    @classmethod
    def from_pwa(cls, list_subsys=[], domain=None):
        """Wrap a single L{PwaSysDyn} as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn(list_subsys, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Wrap a single L{LtiSysDyn} as a 1x1-mode hybrid system."""
        pwa_sys = PwaSysDyn.from_lti(A, B, E, K,
                                     Uset, Wset, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)
def _push_time_data(system_list, time_semantics, timestep):
    """Overwrite the time data in system list. Throws warnings if overwriting
    existing data."""
    for sys_obj in system_list:
        semantics_changing = (
            sys_obj.time_semantics is not None
            and sys_obj.time_semantics != time_semantics)
        if semantics_changing:
            warn('Overwriting existing time semantics data.')
        timestep_changing = (
            sys_obj.timestep is not None
            and sys_obj.timestep != timestep)
        if timestep_changing:
            warn('Overwriting existing timestep data.')
        sys_obj.time_semantics = time_semantics
        sys_obj.timestep = timestep
        # A piecewise-affine system carries LTI children; push into them too.
        if isinstance(sys_obj, PwaSysDyn):
            _push_time_data(sys_obj.list_subsys, time_semantics, timestep)
def _check_time_data(semantics, timestep):
"""Checks that time semantics and timestep are correctly specified. Raises
ValueErrors if that's not the case.
@type semantics: string
@param timestep: any positive number
@type timestep: int or float or long
@rtype: None
"""
if semantics not in ['continuous', 'discrete', None]:
raise ValueError('Time semantics must be discrete or ' +
'continuous (sampled from continuous time system).')
if timestep is not None:
error_string = 'Timestep must be a positive real number or unspecified.'
if timestep <= 0:
raise ValueError(error_string)
if not isinstance(timestep, (int, float, long)):
raise TypeError(error_string)
def _check_time_consistency(system_list, time_semantics, timestep):
"""Checks that all the dynamical systems in system_list have the same time
semantics and timestep. Raises ValueError if not the case.
@type system_list: list of L{LtiSysDyn} or L{PwaSysDyn}
@rtype: None
"""
# Check that time semantics for all subsystems match
for ind in range(len(system_list)-1):
if system_list[ind].timestep != system_list[ind+1].timestep:
raise ValueError('Not all timesteps in child systems are the same.')
if system_list[ind].time_semantics != system_list[ind+1].time_semantics:
raise ValueError('Not all time semantics are the same.')
# Check that time semantics for all subsystems match specified system and
# timestep
if system_list[0].timestep != timestep:
raise ValueError('Timestep of subsystems do not match specified ' +
'timestep.')
if system_list[0].time_semantics != time_semantics:
raise ValueError('Time semantics of subsystems do not match ' +
'specified time semantics.')
# Changelog note: the following revision of this module replaces the
# time-semantics label 'continuous' with 'sampled'.
# Copyright (c) 2011, 2012, 2013 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""
Classes representing hybrid dynamical systems.
"""
import logging
logger = logging.getLogger(__name__)
from warnings import warn
import itertools
from pprint import pformat
import numpy as np
import polytope as pc
try:
from tulip.graphics import newax, quiver
except Exception, e:
logger.error(e)
quiver = None
class LtiSysDyn(object):
    """Represent discrete-time continuous-state dynamics::

        s[t+1] = A*s[t] + B*u[t] + E*d[t] + K

    subject to the constraints::

        u[t] \in Uset
        d[t] \in Wset
        s[t] \in domain

    where:
        - u[t] the control input
        - d[t] the disturbance input
        - s[t] the system state

    A LtiSysDyn object contains the fields:

        - A, B, E, K, (matrices)
        - Uset, Wset and domain (each a L{polytope.Polytope})
        - time_semantics: 'discrete' (if system is originally a discrete-time
          system) or 'sampled' (if system is sampled from a continuous-time
          system)
        - timestep: A positive real number containing the timestep (for sampled
          system)

    as defined above.

    Note
    ====
    For state-dependent bounds on the input,::

        [u[t]; s[t]] \in Uset

    can be used.

    See Also
    ========
    L{PwaSysDyn}, L{HybridSysDyn}, L{polytope.Polytope}
    """
    def __init__(self, A=None, B=None, E=None, K=None,
                 Uset=None, Wset=None, domain=None, time_semantics=None,
                 timestep=None):
        # input constraint set and state domain must be Polytopes, when given
        if Uset is None:
            warn("Uset not given to LtiSysDyn()")
        if (Uset is not None) and (not isinstance(Uset, pc.Polytope)):
            raise Exception("LtiSysDyn: `Uset` has to be a Polytope")
        if domain is None:
            warn("Domain is not given in LtiSysDyn()")
        if (domain is not None) and (not isinstance(domain, pc.Polytope)):
            raise Exception("LtiSysDyn: `domain` has to be a Polytope")
        # check dimensions agree
        try:
            nA, mA = A.shape
        except:
            raise TypeError('A matrix must be 2d array')
        if nA != mA:
            raise ValueError('A must be square')
        if B is not None:
            try:
                nB, mB = B.shape
            except:
                raise TypeError('B matrix must be 2d array')
            if nA != nB:
                raise ValueError('A and B must have same number of rows')
        if E is not None:
            try:
                nE, mE = E.shape
            except:
                raise TypeError('E matrix must be 2d array')
            if nA != nE:
                raise ValueError('A and E must have same number of rows')
        if K is not None:
            try:
                nK, mK = K.shape
            except:
                raise TypeError('K column vector must be 2d array')
            if nA != nK:
                raise ValueError('A and K must have same number of rows')
            if mK != 1:
                raise ValueError('K must be a column vector')
        self.A = A
        self.B = B
        # default K to a zero column vector matching A's dimension
        if K is None:
            if len(A) != 0:
                self.K = np.zeros([mA, 1])
            else:
                self.K = K
        else:
            # normalize K into an explicit column vector
            self.K = K.reshape(K.size, 1)
        # default disturbance: zero gain column with an empty disturbance set
        if E is None and (len(A) != 0):
            self.E = np.zeros([mA, 1])
            self.Wset = pc.Polytope()
        else:
            self.E = E
            self.Wset = Wset
        self.Uset = Uset
        self.domain = domain
        # Check that timestep and semantics are valid.
        _check_time_data(time_semantics, timestep)
        self.time_semantics = time_semantics
        self.timestep = timestep

    def __str__(self):
        output = "A =\n" + str(self.A)
        output += "\nB =\n" + str(self.B)
        output += "\nE =\n" + str(self.E)
        output += "\nK =\n" + str(self.K)
        output += "\nUset =\n" + str(self.Uset)
        output += "\nWset =\n" + str(self.Wset)
        return output

    def plot(self, ax=None, color=np.random.rand(3), show_domain=True):
        """Quiver plot of the drift vector field (A - I) x over the domain.

        NOTE(review): the default `color` is evaluated once at class
        definition time, so repeated calls reuse the same "random" color.
        """
        if quiver is None:
            warn('pyvectorized not found. No plotting.')
            return
        (x, res) = pc.grid_region(self.domain)
        n = self.A.shape[0]
        # vector field of the autonomous update: s[t+1] - s[t] = (A - I) s[t]
        DA = self.A - np.eye(n)
        v = DA.dot(x)
        if ax is None:
            ax, fig = newax()
        if show_domain:
            self.domain.plot(ax, color)
        quiver(x, v, ax)
        return ax
class PwaSysDyn(object):
    """PwaSysDyn class for specifying a polytopic piecewise affine system.

    A PwaSysDyn object contains the fields:

        - C{list_subsys}: list of L{LtiSysDyn}
        - C{domain}: domain over which piecewise affine system is defined,
          type: polytope.Polytope
        - C{time_semantics}: 'discrete' (if system is originally a discrete-time
          system) or 'sampled' (if system is sampled from a continuous-time
          system)
        - C{timestep}: A positive real number containing the timestep (for
          sampled systems)

    For the system to be well-defined the domains of its subsystems should be
    mutually exclusive (modulo intersections with empty interior) and cover the
    domain.

    See Also
    ========
    L{LtiSysDyn}, L{HybridSysDyn}, L{polytope.Polytope}
    """
    def __init__(self, list_subsys=[], domain=None, time_semantics=None,
                 timestep=None, overwrite_time=True):
        """
        @type overwrite_time: bool
        @param overwrite_time: If true, then overwrites any time data in the
                               objects in C{list_subsys} with the data in
                               C{time_semantics} and C{timestep} variables.
                               Otherwise checks that the time data of the
                               objects in C{list_subsys} are consistent with
                               C{time_semantics} and C{timestep}.
        """
        # NOTE(review): mutable default `list_subsys=[]` is shared across
        # calls; harmless here only while the default is never mutated.
        if domain is None:
            warn("Domain not given to PwaSysDyn()")
        if ((domain is not None) and
            (not (isinstance(domain, pc.Polytope) or
                  isinstance(domain, pc.Region))
             )
            ):
            raise Exception("PwaSysDyn: `domain` has to be a Polytope or Region")
        if len(list_subsys) > 0:
            uncovered_dom = domain.copy()
            n = list_subsys[0].A.shape[1]  # State space dimension
            m = list_subsys[0].B.shape[1]  # Input space dimension
            p = list_subsys[0].E.shape[1]  # Disturbance space dimension
            for subsys in list_subsys:
                # subtract each subdomain; what remains must be empty
                uncovered_dom = uncovered_dom.diff(subsys.domain)
                if (n != subsys.A.shape[1] or m != subsys.B.shape[1] or
                        p != subsys.E.shape[1]):
                    raise Exception("PwaSysDyn: state, input, disturbance " +
                                    "dimensions have to be the same for all " +
                                    "subsystems")
            if not pc.is_empty(uncovered_dom):
                raise Exception("PwaSysDyn: subdomains must cover the domain")
            # pairwise interiors must not overlap
            for x in itertools.combinations(list_subsys, 2):
                if pc.is_fulldim(x[0].domain.intersect(x[1].domain)):
                    raise Exception("PwaSysDyn: subdomains have to be mutually" +
                                    " exclusive")
        self.list_subsys = list_subsys
        self.domain = domain
        # Input time semantics
        _check_time_data(time_semantics, timestep)
        if overwrite_time:
            _push_time_data(self.list_subsys, time_semantics, timestep)
        else:
            _check_time_consistency(list_subsys, time_semantics, timestep)
        self.timestep = timestep
        self.time_semantics = time_semantics

    def __str__(self):
        s = 'Piecewise-Affine System Dynamics\n'
        s += 30 * '-' + 2 * '\n'
        s += 'Domain:\n\n'
        s += pformat(self.domain) + '\n'
        for i, sys in enumerate(self.list_subsys):
            s += 'Subsystem: ' + str(i) + '\n'
            s += str(sys) + 2 * '\n'
        return s

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        # convenience constructor: a PWA system with a single LTI mode
        lti_sys = LtiSysDyn(A, B, E, K, Uset, Wset, domain)
        return cls([lti_sys], domain)

    def plot(self, ax=None, show_domain=True):
        # plot each subsystem's vector field, each in a fresh random color
        if ax is None:
            ax, fig = newax()
        for subsystem in self.list_subsys:
            subsystem.plot(ax, color=np.random.rand(3),
                           show_domain=show_domain)
class HybridSysDyn(object):
    """Represent hybrid systems switching between dynamic modes.

    A HybridSysDyn represents a system with switching modes
    that depend on both discrete:

        - n_env environment variables (uncontrolled)
        - n_sys system variables (controlled)

    A HybridSysDyn object contains the fields:

        - C{disc_domain_size}: 2-tuple of numbers of modes
          type: (n_env, n_sys)

        - C{env_labels}: (optional) labels for discrete environment variables
          type: list of len(n_env), default: range(n_env)

        - C{disc_sys_labels}: (optional) labels for discrete system variables
          type: list of len(n_sys), default: range(n_sys)

        - C{dynamics}: mapping mode 2-tuples to active dynamics::

            (env_label, sys_label) -> PwaSysDyn

          type: dict
          default: If no env_label or sys_label passed,
          then default to int indices (i,j) L{PwaSysDyn}.

        - C{cts_ss}: continuous state space over which hybrid system is defined.
          type: L{polytope.Region}

        - C{time_semantics}: 'discrete' (if system is originally a discrete-time
          system) or 'sampled' (if system is sampled from a continuous-time
          system)

        - C{timestep}: A positive real number containing the timestep (for
          sampled systems)

    Note
    ====
    We assume that system and environment switching modes are
    independent of one another. (Use LTL statement to make it not so.)

    See Also
    ========
    L{LtiSysDyn}, L{PwaSysDyn}, L{polytope.Region}
    """
    def __init__(self, disc_domain_size=(1, 1),
                 dynamics=None, cts_ss=None,
                 env_labels=None, disc_sys_labels=None, time_semantics=None,
                 timestep=None, overwrite_time=True):
        """
        @type overwrite_time: bool
        @param overwrite_time: If true, then overwrites any time data in the
                               objects in C{dynamics} with the data in
                               C{time_semantics} and C{timestep} variables.
                               Otherwise checks that the time data of the
                               objects in C{dynamics} are consistent with
                               C{time_semantics} and C{timestep}.
        """
        # check that the continuous domain is specified
        if cts_ss is None:
            warn('continuous state space not given to HybridSysDyn')
        else:
            if not isinstance(cts_ss, (pc.Polytope, pc.Region)):
                raise Exception('HybridSysDyn: ' +
                                '`cts_ss` must be a Polytope or Region')
        self.disc_domain_size = disc_domain_size
        # If label numbers agree with disc_domain_size, then use them.
        # Otherwise, ignore the labels.
        n_env, n_sys = disc_domain_size
        self._env_labels = self._check_labels(n_env, env_labels)
        self._disc_sys_labels = self._check_labels(n_sys, disc_sys_labels)
        # Check each dynamics key is a valid mode,
        # i.e., a valid combination of env and sys labels.
        if dynamics is not None:
            modes = self.all_mode_combs
            undefined_modes = set(dynamics.keys()).difference(modes)
            if undefined_modes:
                msg = 'HybridSysDyn: `dynamics` keys inconsistent'
                msg += ' with discrete mode labels.\n'
                msg += 'Undefined modes:\n' + str(undefined_modes)
                raise ValueError(msg)
            missing_modes = set(modes).difference(dynamics.keys())
            if missing_modes:
                msg = 'Missing the modes:\n' + str(missing_modes)
                msg += '\n Make sure you did not forget any modes,\n'
                msg += 'otherwise this is fine.'
                warn(msg)
            # BUGFIX: the original reported `type(sys)` using the variable
            # leaked from a list comprehension, not the offending entry;
            # collect the actual offending types instead.
            bad_types = [type(pwa) for pwa in dynamics.values()
                         if not isinstance(pwa, PwaSysDyn)]
            if bad_types:
                msg = 'For each mode dynamics must be PwaSysDyn.\n'
                msg += 'Got instead: ' + str(bad_types)
                raise Exception(msg)
        self.dynamics = dynamics
        self.cts_ss = cts_ss
        _check_time_data(time_semantics, timestep)
        # BUGFIX: guard against `dynamics is None` (the default); the
        # original unconditionally called self.dynamics.values(), raising
        # AttributeError whenever no dynamics were given.
        if self.dynamics is not None:
            if overwrite_time:
                _push_time_data(self.dynamics.values(),
                                time_semantics, timestep)
            else:
                _check_time_consistency(dynamics.values(),
                                        time_semantics, timestep)
        self.timestep = timestep
        self.time_semantics = time_semantics

    def __str__(self):
        n_env, n_sys = self.disc_domain_size
        s = 'Hybrid System Dynamics\n'
        s += 30 * '-' + '\n'
        s += 'Modes:\n'
        s += 4 * ' ' + 'Environment (' + str(n_env) + ' modes):\n'
        s += 6 * ' ' + pformat(self.env_labels, indent=3) + 2 * '\n'
        s += 4 * ' ' + 'System: (' + str(n_sys) + ' modes)\n'
        s += 6 * ' ' + pformat(self.disc_sys_labels, indent=3) + 2 * '\n'
        s += 'Continuous State Space:\n\n'
        s += pformat(self.cts_ss) + '\n'
        s += 'Dynamics:\n'
        for mode, pwa in self.dynamics.iteritems():
            s += ' mode: ' + str(mode) + '\n'
            s += ' dynamics: ' + pformat(pwa, indent=3) + '\n\n'
        return s

    def _check_labels(self, n, labels):
        """Return C{labels} if it is a sized sequence of length C{n};
        otherwise warn and return None (caller falls back to int labels)."""
        # don't complain for default
        if labels is None:
            return None
        # len exists ?
        try:
            # is len correct ?
            if len(labels) != n:
                msg = 'number of environment labels is inconsistent'
                msg += ' with discrete domain size.\n'
                msg += 'Ignoring given environment labels.\n'
                msg += 'Defaulting to integer labels.'
                warn(msg)
                return None
        except TypeError:
            # BUGFIX: the original concatenated the type object itself with
            # a str, raising TypeError while composing this warning.
            warn('Environment labels of type: ' +
                 str(type(labels)) + ' have no len()')
            return None
        return labels

    @property
    def all_mode_combs(self):
        """Return all possible combinations of modes."""
        modes = [(a, b) for a in self.env_labels
                 for b in self.disc_sys_labels]
        logger.debug('Available modes: ' + str(modes))
        return modes

    @property
    def modes(self):
        if self.dynamics is None:
            warn('No dynamics defined (None).')
            return None
        return self.dynamics.keys()

    @property
    def env_labels(self):
        # fall back to integer labels when none were accepted
        if self._env_labels is None:
            return range(self.disc_domain_size[0])
        else:
            return self._env_labels

    @property
    def disc_sys_labels(self):
        if self._disc_sys_labels is None:
            return range(self.disc_domain_size[1])
        else:
            return self._disc_sys_labels

    @classmethod
    def from_pwa(cls, list_subsys=[], domain=None):
        """Convenience constructor: hybrid system with a single PWA mode."""
        pwa_sys = PwaSysDyn(list_subsys, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)

    @classmethod
    def from_lti(cls, A=[], B=[], E=[], K=[],
                 Uset=None, Wset=None, domain=None):
        """Convenience constructor: hybrid system with a single LTI mode."""
        pwa_sys = PwaSysDyn.from_lti(A, B, E, K,
                                     Uset, Wset, domain)
        return cls((1, 1), {(0, 0): pwa_sys}, domain)
def _push_time_data(system_list, time_semantics, timestep):
"""Overwrite the time data in system list. Throws warnings if overwriting
existing data."""
for system in system_list:
if (system.time_semantics != time_semantics) and (system.time_semantics
is not None):
warn('Overwriting existing time semantics data.')
if (system.timestep != timestep) and (system.timestep is not None):
warn('Overwriting existing timestep data.')
system.time_semantics = time_semantics
system.timestep = timestep
# Overwrite LTI in system if system is a PWA
if isinstance(system, PwaSysDyn):
_push_time_data(system.list_subsys, time_semantics, timestep)
def _check_time_data(semantics, timestep):
"""Checks that time semantics and timestep are correctly specified. Raises
ValueErrors if that's not the case.
@type semantics: string
@param timestep: any positive number
@type timestep: int or float or long
@rtype: None
"""
if semantics not in ['sampled', 'discrete', None]:
raise ValueError('Time semantics must be discrete or ' +
'sampled (sampled from continuous time system).')
elif ((timestep == 'discrete') && (timestep is not None)):
raise ValueError('Discrete semantics must not have a timestep')
elif timestep is not None:
error_string = 'Timestep must be a positive real number or unspecified.'
if timestep <= 0:
raise ValueError(error_string)
if not isinstance(timestep, (int, float, long)):
raise TypeError(error_string)
def _check_time_consistency(system_list, time_semantics, timestep):
"""Checks that all the dynamical systems in system_list have the same time
semantics and timestep. Raises ValueError if not the case.
@type system_list: list of L{LtiSysDyn} or L{PwaSysDyn}
@rtype: None
"""
# Check that time semantics for all subsystems match
for ind in range(len(system_list)-1):
if system_list[ind].timestep != system_list[ind+1].timestep:
raise ValueError('Not all timesteps in child systems are the same.')
if system_list[ind].time_semantics != system_list[ind+1].time_semantics:
raise ValueError('Not all time semantics are the same.')
# Check that time semantics for all subsystems match specified system and
# timestep
if system_list[0].timestep != timestep:
raise ValueError('Timestep of subsystems do not match specified ' +
'timestep.')
if system_list[0].time_semantics != time_semantics:
raise ValueError('Time semantics of subsystems do not match ' +
'specified time semantics.')
|
#
# Copyright (c) 2004-2005 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Contains the base Recipe class, default macros, and miscellaneous
components used by conary .recipe files
"""
#stdlib
import errno
from fnmatch import fnmatchcase
import imp
import inspect
from itertools import izip
import os
import sys
import tempfile
import types
#conary
import build
import buildpackage
from deps import deps
import destdirpolicy
import files
from lib import log
from lib import magic
from lib import util
from local import database
import macros
import packagepolicy
from repository import repository
import source
import use
import updatecmd
import versions
# Default macro set available to every recipe; values may reference other
# macros with %(name)s and are interpolated later by the macros module.
baseMacros = {
    # paths
    'prefix'            : '/usr',
    'sysconfdir'        : '/etc',
    'initdir'           : '%(sysconfdir)s/init.d',
    'lib'               : 'lib',  # may be overridden with 'lib64'
    'exec_prefix'       : '%(prefix)s',
    'bindir'            : '%(exec_prefix)s/bin',
    'essentialbindir'   : '/bin',
    'sbindir'           : '%(exec_prefix)s/sbin',
    'essentialsbindir'  : '/sbin',
    'libdir'            : '%(exec_prefix)s/%(lib)s',
    'essentiallibdir'   : '/%(lib)s',
    'libexecdir'        : '%(exec_prefix)s/libexec',
    'localstatedir'     : '/var',
    'servicedir'        : '/srv',
    'cachedir'          : '%(localstatedir)s/cache',
    'sharedstatedir'    : '%(prefix)s/com',
    'includedir'        : '%(prefix)s/include',
    'datadir'           : '%(prefix)s/share',
    'mandir'            : '%(datadir)s/man',
    'infodir'           : '%(datadir)s/info',
    'docdir'            : '%(datadir)s/doc',
    'thisdocdir'        : '%(docdir)s/%(name)s-%(version)s',
    'tagdescriptiondir' : '%(sysconfdir)s/conary/tags',
    'taghandlerdir'     : '%(libexecdir)s/conary/tags',
    'tagdatadir'        : '%(datadir)s/conary/tags',
    'testdir'           : '%(localstatedir)s/conary/tests',
    'thistestdir'       : '%(testdir)s/%(name)s-%(version)s',
    'debuglibdir'       : '/usr/lib/debug',  # no %(prefix)s or %(lib)s!
    'debugsrcdir'       : '/usr/src/debug',  # no %(prefix)s!
    # special component prefixes that the whole system needs to share
    'krbprefix'         : '%(exec_prefix)s/kerberos',
    'x11prefix'         : '%(exec_prefix)s/X11R6',
    # arguments/flags (empty ones are for documentation; non-existant = empty)
    'cc'                : 'gcc',
    'cxx'               : 'g++',
    'cxxflags'          : '',     # cxx specific flags
    'optflags'          : '-O2',
    'dbgflags'          : '-g',   # for debuginfo
    'cflags'            : '%(optflags)s %(dbgflags)s',
    'cppflags'          : '',     # just for providing in recipes
    'ldflags'           : '%(dbgflags)s',
    'mflags'            : '',     # make flags
    'parallelmflags'    : '',
    'sysroot'           : '',
    'os'                : 'linux',
    'target'            : '%(targetarch)s-unknown-linux',
    'debugedit'         : 'debugedit',
    'strip'             : 'eu-strip',  # eu-strip for debuginfo
    'strip-archive'     : 'strip',     # eu-strip segfaults on ar
    'buildbranch'       : '',
    'buildlabel'        : '',
}
# Macro overrides applied when cross-compiling; everything is rooted
# under the cross-target prefix.
crossMacros = {
    # set crossdir from cook, directly or indirectly, before adding the rest
    #'crossdir'         : 'cross-target',
    'prefix'            : '/opt/%(crossdir)s',
    'sysroot'           : '%(prefix)s/sys-root',
    'headerpath'        : '%(sysroot)s/usr/include',
}
def localImport(d, package, modules=()):
    """
    import a package into a non-global context.

    @param d: the context to import the module
    @type d: dict
    @param package: the name of the module to import
    @type package: str
    @param modules: a sequence of modules to import from the package.
    If a 2-tuple is in the sequence, rename the imported module to
    the second value in the tuple.
    @type modules: sequence of strings or tuples, or empty tuple

    Examples of translated import statements::
      from foo import bar as baz:
          localImport(d, "foo", (("bar", "baz"),))
      from bar import fred, george:
          localImport(d, "bar", ("fred", "george"))
      import os:
          localImport(d, "os")
    """
    # NOTE: the docstring example for renamed imports previously read
    # (("bar", "baz")), which is a single tuple of strings, not a
    # sequence containing one (realName, localName) pair.
    m = __import__(package, d, {}, modules)
    if modules:
        for name in modules:
            if type(name) is tuple:
                # (realName, localName) pair: bind under the new name
                mod = name[0]
                name = name[1]
            else:
                mod = name
            d[name] = getattr(m, mod)
    else:
        d[package] = m
    # save a reference to the module inside this context, so it won't
    # be garbage collected until the context is deleted.
    l = d.setdefault('__localImportModules', [])
    l.append(m)
def setupRecipeDict(d, filename):
    """Seed a recipe namespace dict with the modules, helper classes and
    flags that recipe files expect to have in scope."""
    recipeClasses = ('PackageRecipe', 'GroupRecipe',
                     'RedirectRecipe', 'FilesetRecipe',
                     'loadRecipe')
    localImport(d, 'build', ('build', 'action'))
    localImport(d, 'build.recipe', recipeClasses)
    localImport(d, 'lib', ('util',))
    # commonly-used stdlib modules, imported wholesale
    for modname in ('os', 're', 'sys', 'stat'):
        localImport(d, modname)
    localImport(d, 'build.use', ('Arch', 'Use', ('LocalFlags', 'Flags')))
    d['filename'] = filename
class RecipeLoader:
    """Load a conary recipe file: compile and exec it inside a synthetic
    module, then collect and validate the recipe classes it defines."""
    def __init__(self, filename, cfg=None, repos=None, component=None,
                 branch=None, ignoreInstalled=False):
        self.recipes = {}
        if filename[0] != "/":
            raise IOError, "recipe file names must be absolute paths"
        # derive the package name from the component, or from the file name
        if component:
            pkgname = component.split(':')[0]
        else:
            pkgname = filename.split('/')[-1]
            pkgname = pkgname[:-len('.recipe')]
        basename = os.path.basename(filename)
        self.file = basename.replace('.', '-')
        # each recipe runs inside its own synthetic module registered
        # in sys.modules (removed again in __del__)
        self.module = imp.new_module(self.file)
        sys.modules[self.file] = self.module
        f = open(filename)
        setupRecipeDict(self.module.__dict__, filename)
        # store cfg and repos, so that the recipe can load
        # recipes out of the repository
        self.module.__dict__['cfg'] = cfg
        self.module.__dict__['repos'] = repos
        self.module.__dict__['component'] = component
        self.module.__dict__['branch'] = branch
        self.module.__dict__['name'] = pkgname
        self.module.__dict__['ignoreInstalled'] = ignoreInstalled
        # create the recipe class by executing the code in the recipe
        try:
            code = compile(f.read(), filename, 'exec')
        except SyntaxError, err:
            msg = ('Error in recipe file "%s": %s\n' %(basename, err))
            # point at the offending column when the parser reported one
            if err.offset is not None:
                msg += '%s%s^\n' %(err.text, ' ' * (err.offset-1))
            else:
                msg += err.text
            raise RecipeFileError(msg)
        use.resetUsed()
        exec code in self.module.__dict__
        # all recipes that could be loaded by loadRecipe are loaded;
        # get rid of our references to cfg and repos
        del self.module.__dict__['cfg']
        del self.module.__dict__['repos']
        del self.module.__dict__['component']
        del self.module.__dict__['branch']
        del self.module.__dict__['name']
        del self.module.__dict__['ignoreInstalled']
        found = False
        for (name, obj) in self.module.__dict__.items():
            if type(obj) is not types.ClassType:
                continue
            # if a recipe has been marked to be ignored (for example, if
            # it was loaded from another recipe by loadRecipe()
            # (don't use hasattr here, we want to check only the recipe
            # class itself, not any parent class
            if 'ignore' in obj.__dict__:
                continue
            recipename = getattr(obj, 'name', '')
            # make sure the class is derived from Recipe
            if (issubclass(obj, PackageRecipe)
                and obj is not PackageRecipe) or \
               (issubclass(obj, RedirectRecipe)
                and obj is not RedirectRecipe):
                # package/redirect names must not collide with the
                # reserved group-/fileset- prefixes
                if recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "group-"' %basename)
                if recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "fileset-"' %basename)
            elif issubclass(obj, GroupRecipe) and obj is not GroupRecipe:
                if recipename and not recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": group name must '
                        'begin with "group-"' %basename)
            elif issubclass(obj, FilesetRecipe) and obj is not FilesetRecipe:
                if recipename and not recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": fileset name must '
                        'begin with "fileset-"' %basename)
            else:
                continue
            self.recipes[name] = obj
            obj.filename = filename
            # exactly one class may carry both name and version: it
            # becomes "the" recipe of this file
            if hasattr(obj, 'name') and hasattr(obj, 'version'):
                if found:
                    raise RecipeFileError(
                        'Error in recipe file "%s": multiple recipe classes '
                        'with both name and version exist' %basename)
                self.recipe = obj
                if '-' in obj.version:
                    raise RecipeFileError(
                        "Version string %s has illegal '-' character"
                        %obj.version)
                if obj.name != pkgname:
                    raise RecipeFileError(
                        "Recipe object name '%s' does not match "
                        "file/component name '%s'"
                        % (obj.name, pkgname))
                found = True
            else:
                raise RecipeFileError(
                    "Recipe in file/component '%s' did not contain both a name"
                    " and a version attribute." % pkgname)
        # inherit any tracked flags that we found while loading parent
        # classes
        if found:
            if self.recipe._trackedFlags is not None:
                use.setUsed(self.recipe._trackedFlags)
            # add in the tracked flags that we found while loading this
            # class
            self.recipe._trackedFlags = use.getUsed()
        else:
            # we'll get this if the recipe file is empty
            raise RecipeFileError(
                "file/component '%s' did not contain a valid recipe" % pkgname)

    def allRecipes(self):
        # mapping of class name -> recipe class found in the file
        return self.recipes

    def getRecipe(self):
        # the single recipe class carrying both name and version
        return self.recipe

    def __del__(self):
        # drop the synthetic module from sys.modules when the loader dies
        try:
            del sys.modules[self.file]
        except:
            pass
def recipeLoaderFromSourceComponent(name, cfg, repos,
                                    versionStr=None, labelPath=None,
                                    ignoreInstalled=False):
    """Fetch a :source component's recipe file from the repository, write
    it to a temporary file, and load it with RecipeLoader.

    Returns a (loader, sourceVersion) tuple.
    """
    name = name.split(':')[0]
    component = name + ":source"
    filename = name + '.recipe'
    if not labelPath:
        labelPath = cfg.buildLabel
    try:
        pkgs = repos.findTrove(labelPath,
                               (component, versionStr, deps.DependencySet()))
        if len(pkgs) > 1:
            raise RecipeFileError("source component %s has multiple versions "
                                  "with label %s" %(component,
                                                    cfg.buildLabel.asString()))
        sourceComponent = repos.getTrove(*pkgs[0])
    except repository.TroveMissing:
        raise RecipeFileError, 'cannot find source component %s' % component
    # write the recipe contents to a temporary file for RecipeLoader
    (fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name)
    outF = os.fdopen(fd, "w")
    inF = None
    # locate the recipe file among the source component's files
    for (pathId, filePath, fileId, fileVersion) in sourceComponent.iterFileList():
        if filePath == filename:
            inF = repos.getFileContents([ (fileId, fileVersion) ])[0].get()
            break
    if not inF:
        # NOTE(review): the format arguments look swapped — getName()
        # fills the "version %s" slot and asString() the "of %s" slot;
        # confirm the intended wording
        raise RecipeFileError("version %s of %s does not contain %s" %
                              (sourceComponent.getName(),
                               sourceComponent.getVersion().asString(),
                               filename))
    util.copyfileobj(inF, outF)
    del inF
    del outF
    try:
        loader = RecipeLoader(recipeFile, cfg, repos, component,
                              sourceComponent.getVersion().branch(),
                              ignoreInstalled=ignoreInstalled)
    finally:
        # the temporary copy is no longer needed once loaded
        os.unlink(recipeFile)
    recipe = loader.getRecipe()
    recipe._trove = sourceComponent.copy()
    return (loader, sourceComponent.getVersion())
def loadRecipe(troveSpec, label=None):
    """
    Load a recipe so that its class/data can be used in another recipe.

    If a complete version is not specified in the trovespec, the version of
    the recipe to load will be based on what is installed on the system.
    For example, if C{loadRecipe('foo')} is called, and package C{foo} with
    version C{/bar.org@bar:devel/4.1-1-1} is installed on the system, then
    C{foo:source} with version C{/bar.org@bar:devel/4.1-1} will be loaded.
    The recipe will also be loaded with the installed package's flavor.

    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.

    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.specifix.com@spx:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.specifix.com@spx:shadow, conary.specifix.com@spx:devel]}
    """
    def _findInstalledVersion(db, labelPath, name, versionStr, flavor):
        """ Specialized search of the installed system along a labelPath,
            defaulting to searching the whole system if the trove is not
            found along the label path.

            The version and flavor of the first found installed trove is
            returned, or C{None} if no trove is found.
        """
        # first search on the labelPath.
        try:
            troves = db.findTrove(labelPath, name, flavor, versionStr)
            if len(troves) > 1:
                raise RuntimeError, (
                    'Multiple troves could match loadRecipe'
                    ' request %s' % troveSpec)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        if labelPath is None:
            return None
        # fall back: search the whole installed system, no label restriction
        try:
            troves = db.findTrove(None, name, flavor, versionStr)
            if len(troves) > 1:
                raise RuntimeError, (
                    'Multiple troves could match loadRecipe'
                    ' request for %s' % name)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        return None

    # the caller's module globals hold cfg/repos/branch/etc that
    # RecipeLoader stashed there before exec'ing the recipe
    callerGlobals = inspect.stack()[1][0].f_globals
    cfg = callerGlobals['cfg']
    repos = callerGlobals['repos']
    branch = callerGlobals['branch']
    ignoreInstalled = callerGlobals['ignoreInstalled']
    parentPackageName = callerGlobals['name']
    oldUsed = use.getUsed()
    name, versionStr, flavor = updatecmd.parseTroveSpec(troveSpec, None)
    if name.endswith('.recipe'):
        file = name
        name = name[:-len('.recipe')]
    else:
        file = name + '.recipe'
    #first check to see if a filename was specified, and if that
    #recipe actually exists.
    loader = None
    if not (label or versionStr or flavor):
        if name[0] != '/':
            recipepath = os.path.dirname(callerGlobals['filename'])
            localfile = recipepath + '/' + file
        else:
            localfile = name + '.recipe'
        if os.path.exists(localfile):
            if flavor:
                oldBuildFlavor = cfg.buildFlavor
                cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
                use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
            loader = RecipeLoader(localfile, cfg,
                                  ignoreInstalled=ignoreInstalled)
    if not loader:
        # no local file: resolve through the repository
        if label:
            labelPath = [versions.Label(label)]
        elif branch:
            # if no labelPath was specified, search backwards through the
            # labels on the current branch.
            labelPath = [branch.label()]
            while branch.hasParentBranch():
                branch = branch.parentBranch()
                labelPath.append(branch.label())
        else:
            labelPath = None
        if not ignoreInstalled:
            # look on the local system to find a trove that is installed that
            # matches this loadrecipe request. Use that trove's version
            # and flavor information to grab the source out of the repository
            db = database.Database(cfg.root, cfg.dbPath)
            parts = _findInstalledVersion(db, labelPath, name,
                                          versionStr, flavor)
            if parts:
                version, flavor = parts
                # locally-cooked/emerged versions point at their parent
                if (version.isLocalCook() or version.isEmerge()
                        or version.isLocal()):
                    version = version.getSourceVersion().parentVersion()
                versionStr = version.getSourceVersion().asString()
        if flavor:
            # override the current flavor with the flavor found in the
            # installed trove (or the troveSpec flavor, if no installed
            # trove was found.
            oldBuildFlavor = cfg.buildFlavor
            cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
            use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
        loader = recipeLoaderFromSourceComponent(name, cfg, repos,
                                                 labelPath=labelPath,
                                                 versionStr=versionStr,
                                                 ignoreInstalled=ignoreInstalled)[0]
    if flavor:
        # restore the build flavor of the including recipe
        cfg.buildFlavor = oldBuildFlavor
        use.setBuildFlagsFromFlavor(parentPackageName, cfg.buildFlavor)
    for name, recipe in loader.allRecipes().items():
        # hide all recipes from RecipeLoader - we don't want to return
        # a recipe that has been loaded by loadRecipe
        recipe.ignore = 1
        callerGlobals[name] = recipe
    # stash a reference to the module in the namespace
    # of the recipe that loaded it, or else it will be destroyed
    callerGlobals[os.path.basename(file).replace('.', '-')] = loader
    # return the tracked flags to their state before loading this recipe
    use.resetUsed()
    use.setUsed(oldUsed)
class _sourceHelper:
def __init__(self, theclass, recipe):
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.recipe._sources.append(self.theclass(self.recipe, *args, **keywords))
class _recipeHelper:
def __init__(self, list, recipe, theclass):
self.list = list
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.list.append(self.theclass(self.recipe, *args, **keywords))
class _policyUpdater:
def __init__(self, theobject):
self.theobject = theobject
def __call__(self, *args, **keywords):
self.theobject.updateArgs(*args, **keywords)
class Recipe:
    """Virtual base class for all Recipes"""
    # filled in by recipeLoaderFromSourceComponent / RecipeLoader
    _trove = None
    _trackedFlags = None

    def __init__(self):
        # Recipe itself is abstract and must never be instantiated directly
        assert(self.__class__ is not Recipe)

    def __repr__(self):
        return "<%s Object>" % (self.__class__,)
class PackageRecipe(Recipe):
buildRequires = []
Flags = use.LocalFlags
def mainDir(self, new = None):
if new:
self.theMainDir = new % self.macros
self.macros.maindir = self.theMainDir
return self.theMainDir
def nameVer(self):
return '-'.join((self.name, self.version))
def cleanup(self, builddir, destdir):
if 'noClean' in self.cfg.__dict__ and self.cfg.noClean:
pass
else:
util.rmtree(builddir)
def sourceMap(self, path):
basepath = os.path.basename(path)
if basepath in self.sourcePathMap:
if basepath == path:
# we only care about truly different source locations with the
# same basename
return
if basepath in self.pathConflicts:
self.pathConflicts[basepath].append(path)
else:
self.pathConflicts[basepath] = [
# previous (first) instance
self.sourcePathMap[basepath],
# this instance
path
]
else:
self.sourcePathMap[basepath] = path
def fetchAllSources(self):
"""
returns a list of file locations for all the sources in
the package recipe
"""
# first make sure we had no path conflicts:
if self.pathConflicts:
errlist = []
for basepath in self.pathConflicts.keys():
errlist.extend([x for x in self.pathConflicts[basepath]])
raise RecipeFileError, '\n'.join(errlist)
self.prepSources()
files = []
for src in self._sources:
f = src.fetch()
if f:
if type(f) in (tuple, list):
files.extend(f)
else:
files.append(f)
return files
def checkBuildRequirements(self, cfg, sourceVersion, ignoreDeps=False):
""" Checks to see if the build requirements for the recipe
are installed
"""
db = database.Database(cfg.root, cfg.dbPath)
time = sourceVersion.timeStamps()[-1]
reqMap = {}
missingReqs = []
for buildReq in self.buildRequires:
(name, versionStr, flavor) = updatecmd.parseTroveSpec(buildReq,
None)
try:
troves = db.findTrove(None, name)
troves = db.getTroves(troves)
except repository.TroveNotFound:
missingReqs.append(buildReq)
continue
break
versionMatches = []
for trove in troves:
if versionStr is None:
versionMatches.append(trove)
continue
if versionStr.find('@') == -1:
label = trove.getVersion().branch().label()
if versionStr.find(':') == -1:
if label.getLabel() == versionStr:
versionMatches.append(trove)
continue
if ("%s:%s" % (label.getNamespace(), label.getLabel())\
== versionStr):
versionMatches.append(trove)
break
continue
else:
raise RecipeFileError("Unsupported buildReq format")
if not versionMatches:
missingReqs.append(buildReq)
continue
versionMatches.sort(lambda a, b: a.getVersion().__cmp__(b.getVersion()))
if not flavor:
reqMap[buildReq] = versionMatches[-1]
continue
for trove in reversed(versionMatches):
troveFlavor = trove.getFlavor()
if troveFlavor.stronglySatisfies(flavor):
reqMap[buildReq] = trove
break
if buildReq not in reqMap:
missingReqs.append(buildReq)
if missingReqs:
if not ignoreDeps:
raise RuntimeError, ("Could not find the following troves "
"needed to cook this recipe:\n"
"%s" % '\n'.join(missingReqs))
self.buildReqMap = reqMap
def extraSource(self, action):
    """
    Queue a hand-built source action that does not come from source.py.

    Such actions poke at conary internals directly: any action that must
    publish a source file to the repository has to implement fetch(), and
    all of its source files must be located through the lookaside cache.
    """
    self._sources.append(action)
def prepSources(self):
    """Run the pre-unpack hook (doPrep) on every queued source action."""
    for item in self._sources:
        item.doPrep()
def processResumeList(self, resume):
    """
    Translate a resume specification string into self.resumeList.

    *resume* is a comma-separated list of entries.  'a:b' becomes
    [a, b] (either side may be empty, left as '' meaning open-ended);
    a lone number 'n' becomes [n, False] when it is the only entry,
    otherwise [n, n].
    """
    parsed = []
    if resume:
        entries = resume.split(',')
        single = len(entries) == 1
        for entry in entries:
            if ':' in entry:
                begin, end = entry.split(':')
                parsed.append([int(begin) if begin else begin,
                               int(end) if end else end])
            elif single:
                parsed.append([int(entry), False])
            else:
                parsed.append([int(entry), int(entry)])
    self.resumeList = parsed
def iterResumeList(self, actions):
    """
    Yield the actions whose line numbers fall inside the ranges held in
    self.resumeList (built by processResumeList).

    A falsy range endpoint ('' or False) is open-ended on that side.
    When an action falls past the end of the current range, the next
    range becomes active; an action sitting exactly on the new range's
    begin line is still yielded.
    """
    resume = self.resumeList
    resumeBegin = resume[0][0]
    resumeEnd = resume[0][1]
    for action in actions:
        if not resumeBegin or action.linenum >= resumeBegin:
            if not resumeEnd or action.linenum <= resumeEnd:
                yield action
            elif resumeEnd:
                # passed the end of the active range; advance to the next
                resume = resume[1:]
                if not resume:
                    return
                resumeBegin = resume[0][0]
                resumeEnd = resume[0][1]
                if action.linenum == resumeBegin:
                    yield action
def unpackSources(self, builddir, destdir, resume=None):
    """
    Prepare and run every source action, populating builddir/destdir
    macros first.

    @param resume: None to run everything; 'policy' to skip unpacking
        entirely; otherwise a resume specification string (see
        processResumeList) limiting which actions run.
    """
    self.macros.builddir = builddir
    self.macros.destdir = destdir
    if resume == 'policy':
        return
    elif resume:
        log.debug("Resuming on line(s) %s" % resume)
        # note resume lines must be in order
        self.processResumeList(resume)
        for source in self.iterResumeList(self._sources):
            source.doPrep()
            source.doAction()
    else:
        for source in self._sources:
            source.doPrep()
            source.doAction()
def extraBuild(self, action):
    """
    Queue a hand-built build action that does not come from build.py.

    Such actions manipulate conary internals directly -- use with care.
    """
    self._build.append(action)
def doBuild(self, buildPath, resume=None):
    """
    Run the build actions inside buildPath/<mainDir>.

    @param resume: None to run everything; 'policy' to skip the build
        phase; otherwise a resume specification limiting which build
        actions run (self.processResumeList must already have run).
    """
    builddir = os.sep.join((buildPath, self.mainDir()))
    self.macros.builddir = builddir
    # cache file-magic results for the destdir contents
    self.magic = magic.magicCache(self.macros.destdir)
    if resume == 'policy':
        return
    if resume:
        for bld in self.iterResumeList(self._build):
            bld.doAction()
    else:
        for bld in self._build:
            bld.doAction()
def doDestdirProcess(self):
    """Apply each destdir policy to this recipe, in list order."""
    for step in self.destdirPolicy:
        step.doProcess(self)
def getPackages(self):
    """
    Apply all package policy to this recipe, then return the built
    packages.  Assumes self.autopkg was set up earlier in the cook
    process (not visible in this class) -- TODO confirm.
    """
    # policies look at the recipe instance for all information
    for policy in self.packagePolicy:
        policy.doProcess(self)
    return self.autopkg.getPackages()
def getUnpackagedComponentNames(self):
    """Return the component names that are never packaged into binaries."""
    # someday, this will probably be per-branch policy
    return ('test', 'debuginfo')
def disableParallelMake(self):
    """Force serial make by clearing the parallel make-flags macro."""
    self.macros.parallelmflags = ''
def populateLcache(self):
    """
    Populate a repository lookaside cache
    """
    recipeClass = self.__class__
    repos = self.laReposCache.repos
    # build a list containing this recipe class and any ancestor class
    # from which it descends
    classes = [ recipeClass ]
    bases = list(recipeClass.__bases__)
    while bases:
        parent = bases.pop()
        bases.extend(list(parent.__bases__))
        if issubclass(parent, PackageRecipe):
            classes.append(parent)
    # reverse the class list, this way the files will be found in the
    # youngest descendant first
    classes.reverse()
    # populate the repository source lookaside cache from the :source
    # components
    for rclass in classes:
        if not rclass._trove:
            continue
        srcName = rclass._trove.getName()
        srcVersion = rclass._trove.getVersion()
        for f in repos.iterFilesInTrove(srcName, srcVersion,
                                        deps.DependencySet(),
                                        withFiles=True):
            pathId, path, fileId, version, fileObj = f
            # source components never store absolute paths
            assert(path[0] != "/")
            # we might need to retrieve this source file
            # to enable a build, so we need to find the
            # sha1 hash of it since that's how it's indexed
            # in the file store
            if isinstance(fileObj, files.RegularFile):
                # it only makes sense to fetch regular files, skip
                # anything that isn't
                self.laReposCache.addFileHash(srcName, srcVersion, pathId,
                    path, fileId, version, fileObj.contents.sha1())
def __getattr__(self, name):
    """
    Allows us to dynamically suck in namespace of other modules
    with modifications.
     - The public namespace of the build module is accessible,
       and build objects are created and put on the build list
       automatically when they are referenced.
     - The public namespaces of the policy modules are accessible;
       policy objects already on their respective lists are returned,
       policy objects not on their respective lists are added to
       the end of their respective lists like build objects are
       added to the build list.
    """
    if not name.startswith('_'):
        # addFoo -> wrap source.Foo so the created action lands in
        # self._sources when called
        if name.startswith('add'):
            return _sourceHelper(source.__dict__[name[3:]], self)
        # build actions are appended to the build list on call
        if name in build.__dict__:
            return _recipeHelper(self._build, self, build.__dict__[name])
        # renamed loop variable from 'list' to avoid shadowing the builtin
        for (policy, policyList) in (
            (destdirpolicy, self.destdirPolicy),
            (packagepolicy, self.packagePolicy)):
            if name in policy.__dict__:
                policyClass = policy.__dict__[name]
                # an existing policy object is updated in place ...
                for policyObj in policyList:
                    if isinstance(policyObj, policyClass):
                        return _policyUpdater(policyObj)
                # ... a new one is appended to the list when called
                return _recipeHelper(policyList, self, policyClass)
    # NOTE(review): a missing name raises KeyError here rather than the
    # conventional AttributeError for __getattr__ -- confirm nothing relies
    # on this before changing it.
    return self.__dict__[name]
def __delattr__(self, name):
    """
    Allows us to delete policy items from their respective lists
    by deleting a name in the recipe self namespace. For example,
    to remove the EtcConfig package policy from the package policy
    list, one could do::
        del self.EtcConfig
    This would prevent the EtcConfig package policy from being
    executed. The policy objects are carefully ordered in the
    default policy lists; deleting a policy object and then
    referencing it again will cause it to show up at the end of
    the list. Don't do that.
    In general, delete policy only as a last resort; you can
    usually disable policy entirely with the keyword argument::
        exceptions='.*'
    """
    # renamed loop variable from 'list' to avoid shadowing the builtin;
    # index loop replaced with enumerate (we return right after deleting,
    # so mutating during iteration is safe)
    for (policy, policyList) in (
        (destdirpolicy, self.destdirPolicy),
        (packagepolicy, self.packagePolicy)):
        if name in policy.__dict__:
            policyClass = policy.__dict__[name]
            for index, policyObj in enumerate(policyList):
                if isinstance(policyObj, policyClass):
                    del policyList[index]
                    return
    # not a policy name: fall back to ordinary attribute deletion
    del self.__dict__[name]
def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
    """
    Set up an (abstract) package recipe: empty source/build lists,
    default policy sets, and the macro table seeded from baseMacros,
    the architecture flags, cfg overrides, and extraMacros.

    NOTE(review): extraMacros has a mutable default; it is only read
    (via update), so the sharing is currently harmless.
    """
    assert(self.__class__ is not Recipe)
    self._sources = []
    self._build = []
    self.destdirPolicy = destdirpolicy.DefaultPolicy(self)
    self.packagePolicy = packagepolicy.DefaultPolicy(self)
    self.cfg = cfg
    self.laReposCache = laReposCache
    self.srcdirs = srcdirs
    self.macros = macros.Macros()
    self.macros.update(baseMacros)
    self.macros.update(use.Arch._getMacros())
    # allow for architecture not to be set -- this could happen
    # when storing the recipe e.g.
    for key in cfg.macroKeys():
        self.macros._override(key, cfg['macros.' + key])
    # self.name / self.version come from the concrete recipe class
    self.macros.name = self.name
    self.macros.version = self.version
    self.packages = { self.name : True }
    if extraMacros:
        self.macros.update(extraMacros)
    self.mainDir(self.nameVer())
    self.sourcePathMap = {}
    self.pathConflicts = {}
class SingleGroup:
def addTrove(self, name, versionStr = None, flavor = None, source = None,
byDefault = True):
assert(flavor is None or isinstance(flavor, str))
if flavor is not None:
flavor = deps.parseFlavor(flavor)
if flavor is None:
raise ValueError, 'invalid flavor %s' % flavorStr
self.addTroveList.append((name, versionStr, flavor, source, byDefault))
def findTroves(self, cfg, repos, label):
self.size = 0
validSize = True
troveList = []
flavorMap = {}
findTroveList = []
for (name, versionStr, flavor, source, byDefault) in self.addTroveList:
desFlavor = cfg.buildFlavor.copy()
if flavor is not None:
desFlavor = deps.overrideFlavor(desFlavor, flavor)
findTroveList.append((name, versionStr, desFlavor))
flavorMap[flavor] = desFlavor
try:
results = repos.findTroves(label, findTroveList)
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
for (name, versionStr, flavor, source, byDefault) in self.addTroveList:
desFlavor = flavorMap[flavor]
pkgList = results[name, versionStr, desFlavor]
assert(len(pkgList) == 1)
troveList.append((pkgList[0], byDefault))
assert(desFlavor.score(pkgList[0][2]) is not False)
troves = repos.getTroves([ x[0] for x in troveList ],
withFiles = False)
for (((name, v, f), byDefault), trove) in izip(troveList, troves):
l = self.troveVersionFlavors.get(name, [])
if (v, f) not in l:
l.append((v,f, byDefault))
self.troveVersionFlavors[name] = l
# XXX this code is to deal with troves that existed
# before troveInfo was added
if validSize:
size = trove.getSize()
# allow older changesets that are missing size
# info to be added ( this will make the size
# invalid, so don't store it)
if size is not None:
self.size += trove.getSize()
else:
validSize = False
if not validSize:
self.size = None
def getRequires(self):
return self.requires
def getTroveList(self):
return self.troveVersionFlavors
def __init__(self):
self.addTroveList = []
self.requires = deps.DependencySet()
self.troveVersionFlavors = {}
def Requires(self, requirement):
self.requires.addDep(deps.TroveDependencies,
deps.Dependency(requirement))
class GroupRecipe(Recipe):
    """
    Recipe type that assembles named groups of troves.  Every operation
    defaults to the main group (self.name); pass groupName to target a
    subgroup created with createGroup().
    """
    Flags = use.LocalFlags

    def Requires(self, requirement, groupName = None):
        # groups may depend on troves, never on files
        if requirement[0] == '/':
            raise RecipeFileError, 'file requirements not allowed in groups'
        if groupName is None: groupName = self.name
        self.groups[groupName].Requires(requirement)

    def addTrove(self, name, versionStr = None, flavor = None, source = None,
                 byDefault = True, groupName = None):
        if groupName is None: groupName = self.name
        self.groups[groupName].addTrove(name, versionStr = versionStr,
                                        flavor = flavor, source = source,
                                        byDefault = byDefault)

    def findTroves(self, groupName = None):
        if groupName is None: groupName = self.name
        self.groups[groupName].findTroves(self.cfg, self.repos, self.label)

    def getRequires(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].getRequires()

    def getTroveList(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].getTroveList()

    def getSize(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].size

    def createGroup(self, groupName):
        """Create a new, empty subgroup; its name must start with 'group-'."""
        if self.groups.has_key(groupName):
            raise RecipeFileError, 'group %s was already created' % groupName
        if not groupName.startswith('group-'):
            raise RecipeFileError, 'group names must start with "group-"'
        self.groups[groupName] = SingleGroup()

    def getGroupNames(self):
        return self.groups.keys()

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        self.repos = repos
        self.cfg = cfg
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
        # the main group shares the recipe's name
        self.groups = {}
        self.groups[self.name] = SingleGroup()
class RedirectRecipe(Recipe):
Flags = use.LocalFlags
def addRedirect(self, name, versionStr = None, flavorStr = None,
fromTrove = None):
if flavorStr is not None:
flavor = deps.parseFlavor(flavorStr)
if flavor is None:
raise ValueError, 'invalid flavor %s' % flavorStr
else:
flavor = None
if fromTrove is None:
fromTrove = self.name
elif fromTrove.find(":") != -1:
raise ValueError, 'components cannot be individually redirected'
self.addTroveList.append((name, versionStr, flavor, fromTrove))
def findTroves(self):
self.size = 0
validSize = True
troveList = []
packageSet = {}
for (name, versionStr, flavor, fromName) in self.addTroveList:
try:
desFlavor = self.cfg.buildFlavor.copy()
if flavor is not None:
desFlavor.union(flavor, deps.DEP_MERGE_TYPE_OVERRIDE)
pkgList = self.repos.findTrove(self.label,
(name, versionStr, desFlavor))
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
assert(len(pkgList) == 1)
packageSet[pkgList[0]] = fromName
troveList.append(pkgList[0])
troves = self.repos.getTroves(troveList, withFiles = False)
redirections = {}
for topLevelTrove in troves:
topName = topLevelTrove.getName()
topVersion = topLevelTrove.getVersion()
topFlavor = topLevelTrove.getFlavor()
fromName = packageSet[(topName, topVersion, topFlavor)]
d = self.redirections.setdefault(fromName, {})
# this redirects from oldPackage -> newPackage
d[(topName, topVersion, topFlavor)] = True
for (name, version, flavor) in topLevelTrove.iterTroveList():
# redirect from oldPackage -> referencedPackage
d[(name, version, flavor)] = True
if name.find(":") != -1:
compName = fromName + ":" + name.split(":")[1]
# redirect from oldPackage -> oldPackage:component. we
# leave version/flavor alone; they get filled in later
d[(compName, None, None)] = True
# redirect from oldPackage:component -> newPackage:component
d2 = self.redirections.setdefault(compName, {})
d2[(name, version, flavor)] = True
for name,d in redirections.iteritems():
self.redirections[name] = [ (x[0], x[1], x[2]) for x in d ]
def getRedirections(self):
return self.redirections
def __init__(self, repos, cfg, label, flavor, extraMacros={}):
self.repos = repos
self.cfg = cfg
self.redirections = {}
self.label = label
self.flavor = flavor
self.addTroveList = []
self.macros = macros.Macros()
self.macros.update(extraMacros)
class FilesetRecipe(Recipe):
    # XXX need to work on adding files from different flavors of troves
    """
    Recipe type that builds a fileset: a trove assembled from individual
    files picked out of existing troves by glob-style pattern.
    """

    def addFileFromPackage(self, pattern, pkg, recurse, remapList):
        """
        Add every file in *pkg* matching *pattern* (after brace expansion)
        to self.files/self.paths, applying remapList path rewrites.
        Returns True if anything matched, False otherwise.
        """
        pathMap = {}
        for (pathId, pkgPath, fileId, version) in pkg.iterFileList():
            pathMap[pkgPath] = (pathId, fileId, version)
        patternList = util.braceExpand(pattern)
        matches = {}
        for pattern in patternList:
            if not recurse:
                matchList = [ n for n in pathMap.keys() if
                                    fnmatchcase(n, pattern)]
            else:
                matchList = []
                dirCount = pattern.count("/")
                for n in pathMap.iterkeys():
                    i = n.count("/")
                    if i > dirCount:
                        # deeper paths match when their ancestor directory
                        # at the pattern's depth matches the pattern
                        dirName = os.sep.join(n.split(os.sep)[:dirCount + 1])
                        match = fnmatchcase(dirName, pattern)
                    elif i == dirCount:
                        match = fnmatchcase(n, pattern)
                    else:
                        match = False
                    if match: matchList.append(n)
            for path in matchList:
                matches[path] = pathMap[path]
        if not matches:
            return False
        for path in matches.keys():
            (pathId, fileId, version) = matches[path]
            # apply the first remap rule whose old path equals the path
            # or is one of its ancestor directories
            for (old, new) in remapList:
                if path == old:
                    path = new
                    break
                elif len(path) > len(old) and path.startswith(old) and \
                     path[len(old)] == "/":
                    path = new + path[len(old):]
                    break
            if self.paths.has_key(path):
                raise RecipeFileError, "%s has been included multiple times" \
                      % path
            self.files[pathId] = (path, fileId, version)
            self.paths[path] = 1
        return True

    def addFile(self, pattern, component, versionStr = None, recurse = True,
                remap = []):
        """
        Adds files which match pattern from version versionStr of component.
        Pattern is glob-style, with brace expansion. If recurse is set,
        anything below a directory which matches pattern is also included,
        and the directory itself does not have to be part of the trove.
        Remap is a list of (oldPath, newPath) tuples. The first oldPath
        which matches the start of a matched pattern is rewritten as
        newPath.
        """
        if type(remap) == tuple:
            remap = [ remap ]
        try:
            pkgList = self.repos.findTrove(self.label,
                                        (component, versionStr, self.flavor))
        except repository.TroveNotFound, e:
            raise RecipeFileError, str(e)
        if len(pkgList) == 0:
            raise RecipeFileError, "no packages match %s" % component
        elif len(pkgList) > 1:
            raise RecipeFileError, "too many packages match %s" % component
        foundIt = False
        pkg = self.repos.getTrove(*pkgList[0])
        # walk the trove and every trove it references
        for sub in self.repos.walkTroveSet(pkg):
            foundIt = foundIt or self.addFileFromPackage(pattern, sub, recurse,
                                                         remap)
        if not foundIt:
            raise RecipeFileError, "%s does not exist in version %s of %s" % \
                (pattern, pkg.getVersion().asString(), pkg.getName())

    def iterFileList(self):
        """Yield (pathId, path, fileId, version) for every collected file."""
        for (pathId, (path, fileId, version)) in self.files.iteritems():
            yield (pathId, path, fileId, version)

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        self.repos = repos
        self.cfg = cfg
        self.files = {}
        self.paths = {}
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
class RecipeFileError(Exception):
    """Raised for any error found while loading or validating a recipe file."""

    def __init__(self, msg):
        # stash the message; __repr__/__str__ render it verbatim
        self.msg = msg

    def __repr__(self):
        return self.msg

    def __str__(self):
        return repr(self)
Switch to preferring trove=:branchname over trove=branchname for
buildRequires; support for the latter format will eventually be removed.
#
# Copyright (c) 2004-2005 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Contains the base Recipe class, default macros, and miscellaneous
components used by conary .recipe files
"""
#stdlib
import errno
from fnmatch import fnmatchcase
import imp
import inspect
from itertools import izip
import os
import sys
import tempfile
import types
#conary
import build
import buildpackage
from deps import deps
import destdirpolicy
import files
from lib import log
from lib import magic
from lib import util
from local import database
import macros
import packagepolicy
from repository import repository
import source
import use
import updatecmd
import versions
# Default macro table merged into every recipe's macro set; values may use
# %(name)s interpolation against earlier entries.
baseMacros = {
    # paths
    'prefix'		: '/usr',
    'sysconfdir'	: '/etc',
    'initdir'		: '%(sysconfdir)s/init.d',
    'lib'               : 'lib',  # may be overridden with 'lib64'
    'exec_prefix'	: '%(prefix)s',
    'bindir'		: '%(exec_prefix)s/bin',
    'essentialbindir'	: '/bin',
    'sbindir'		: '%(exec_prefix)s/sbin',
    'essentialsbindir'	: '/sbin',
    'libdir'		: '%(exec_prefix)s/%(lib)s',
    'essentiallibdir'	: '/%(lib)s',
    'libexecdir'	: '%(exec_prefix)s/libexec',
    'localstatedir'	: '/var',
    'servicedir'        : '/srv',
    'cachedir'		: '%(localstatedir)s/cache',
    'sharedstatedir'	: '%(prefix)s/com',
    'includedir'	: '%(prefix)s/include',
    'datadir'		: '%(prefix)s/share',
    'mandir'		: '%(datadir)s/man',
    'infodir'		: '%(datadir)s/info',
    'docdir'		: '%(datadir)s/doc',
    'thisdocdir'        : '%(docdir)s/%(name)s-%(version)s',
    'tagdescriptiondir' : '%(sysconfdir)s/conary/tags',
    'taghandlerdir'     : '%(libexecdir)s/conary/tags',
    'tagdatadir'        : '%(datadir)s/conary/tags',
    'testdir'	        : '%(localstatedir)s/conary/tests',
    'thistestdir'	: '%(testdir)s/%(name)s-%(version)s',
    'debuglibdir'       : '/usr/lib/debug', # no %(prefix)s or %(lib)s!
    'debugsrcdir'       : '/usr/src/debug', # no %(prefix)s!
    # special component prefixes that the whole system needs to share
    'krbprefix'		: '%(exec_prefix)s/kerberos',
    'x11prefix'		: '%(exec_prefix)s/X11R6',
    # arguments/flags (empty ones are for documentation; non-existent = empty)
    'cc'		: 'gcc',
    'cxx'		: 'g++',
    'cxxflags'          : '',    # cxx specific flags
    'optflags'          : '-O2',
    'dbgflags'          : '-g',  # for debuginfo
    'cflags'            : '%(optflags)s %(dbgflags)s',
    'cppflags'		: '',  # just for providing in recipes
    'ldflags'		: '%(dbgflags)s',
    'mflags'		: '',  # make flags
    'parallelmflags'    : '',
    'sysroot'		: '',
    'os'		: 'linux',
    'target'		: '%(targetarch)s-unknown-linux',
    'debugedit'         : 'debugedit',
    'strip'             : 'eu-strip', # eu-strip for debuginfo
    'strip-archive'     : 'strip',    # eu-strip segfaults on ar
    'buildbranch'       : '',
    'buildlabel'        : '',
}

# Extra macros layered on top of baseMacros for cross-compilation builds.
crossMacros = {
    # set crossdir from cook, directly or indirectly, before adding the rest
    #'crossdir'		: 'cross-target',
    'prefix'		: '/opt/%(crossdir)s',
    'sysroot'		: '%(prefix)s/sys-root',
    'headerpath'	: '%(sysroot)s/usr/include',
}
def localImport(d, package, modules=()):
    """
    import a package into a non-global context.

    @param d: the context to import the module
    @type d: dict
    @param package: the name of the module to import
    @type package: str
    @param modules: a sequence of modules to import from the package.
        If a 2-tuple is in the sequence, rename the imported module to
        the second value in the tuple.
    @type modules: sequence of strings or tuples, or empty tuple

    Examples of translated import statements::
        from foo import bar as baz:
            localImport(d, "foo", (("bar", "baz"),))
        from bar import fred, george:
            localImport(d, "bar", ("fred", "george"))
        import os:
            localImport(d, "os")
    """
    m = __import__(package, d, {}, modules)
    if modules:
        for name in modules:
            if type(name) is tuple:
                mod = name[0]
                name = name[1]
            else:
                mod = name
            d[name] = getattr(m, mod)
    else:
        d[package] = m
    # save a reference to the module inside this context, so it won't
    # be garbage collected until the context is deleted.
    l = d.setdefault('__localImportModules', [])
    l.append(m)
def setupRecipeDict(d, filename):
    """
    Seed a recipe module namespace *d* with the standard imports and
    names every recipe expects (build actions, recipe base classes,
    util, common stdlib modules, and the use flags).
    """
    localImport(d, 'build', ('build', 'action'))
    localImport(d, 'build.recipe', ('PackageRecipe', 'GroupRecipe',
                                    'RedirectRecipe', 'FilesetRecipe',
                                    'loadRecipe'))
    localImport(d, 'lib', ('util',))
    for x in ('os', 're', 'sys', 'stat'):
        localImport(d, x)
    localImport(d, 'build.use', ('Arch', 'Use', ('LocalFlags', 'Flags')))
    d['filename'] = filename
class RecipeLoader:
    """
    Compile a .recipe file into a private module, validate the recipe
    classes defined in it, and expose the single class that carries both
    a name and a version attribute.
    """

    def __init__(self, filename, cfg=None, repos=None, component=None,
                 branch=None, ignoreInstalled=False):
        self.recipes = {}
        if filename[0] != "/":
            raise IOError, "recipe file names must be absolute paths"
        # the expected recipe name comes from the component when given,
        # otherwise from the file name
        if component:
            pkgname = component.split(':')[0]
        else:
            pkgname = filename.split('/')[-1]
            pkgname = pkgname[:-len('.recipe')]
        basename = os.path.basename(filename)
        self.file = basename.replace('.', '-')
        self.module = imp.new_module(self.file)
        sys.modules[self.file] = self.module
        f = open(filename)
        setupRecipeDict(self.module.__dict__, filename)
        # store cfg and repos, so that the recipe can load
        # recipes out of the repository
        self.module.__dict__['cfg'] = cfg
        self.module.__dict__['repos'] = repos
        self.module.__dict__['component'] = component
        self.module.__dict__['branch'] = branch
        self.module.__dict__['name'] = pkgname
        self.module.__dict__['ignoreInstalled'] = ignoreInstalled
        # create the recipe class by executing the code in the recipe
        try:
            code = compile(f.read(), filename, 'exec')
        except SyntaxError, err:
            msg = ('Error in recipe file "%s": %s\n' %(basename, err))
            if err.offset is not None:
                msg += '%s%s^\n' %(err.text, ' ' * (err.offset-1))
            else:
                msg += err.text
            raise RecipeFileError(msg)
        use.resetUsed()
        exec code in self.module.__dict__
        # all recipes that could be loaded by loadRecipe are loaded;
        # get rid of our references to cfg and repos
        del self.module.__dict__['cfg']
        del self.module.__dict__['repos']
        del self.module.__dict__['component']
        del self.module.__dict__['branch']
        del self.module.__dict__['name']
        del self.module.__dict__['ignoreInstalled']
        found = False
        for (name, obj) in self.module.__dict__.items():
            if type(obj) is not types.ClassType:
                continue
            # if a recipe has been marked to be ignored (for example, if
            # it was loaded from another recipe by loadRecipe()
            # (don't use hasattr here, we want to check only the recipe
            # class itself, not any parent class
            if 'ignore' in obj.__dict__:
                continue
            recipename = getattr(obj, 'name', '')
            # make sure the class is derived from Recipe, and enforce the
            # name-prefix conventions for each recipe kind
            if (issubclass(obj, PackageRecipe)
                        and obj is not PackageRecipe) or \
               (issubclass(obj, RedirectRecipe)
                        and obj is not RedirectRecipe):
                if recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "group-"' %basename)
                if recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "fileset-"' %basename)
            elif issubclass(obj, GroupRecipe) and obj is not GroupRecipe:
                if recipename and not recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": group name must '
                        'begin with "group-"' %basename)
            elif issubclass(obj, FilesetRecipe) and obj is not FilesetRecipe:
                if recipename and not recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": fileset name must '
                        'begin with "fileset-"' %basename)
            else:
                continue
            self.recipes[name] = obj
            obj.filename = filename
            if hasattr(obj, 'name') and hasattr(obj, 'version'):
                if found:
                    raise RecipeFileError(
                        'Error in recipe file "%s": multiple recipe classes '
                        'with both name and version exist' %basename)
                self.recipe = obj
                if '-' in obj.version:
                    raise RecipeFileError(
                        "Version string %s has illegal '-' character"
                        %obj.version)
                if obj.name != pkgname:
                    raise RecipeFileError(
                        "Recipe object name '%s' does not match "
                        "file/component name '%s'"
                        % (obj.name, pkgname))
                found = True
            else:
                raise RecipeFileError(
                    "Recipe in file/component '%s' did not contain both a name"
                    " and a version attribute." % pkgname)
        # inherit any tracked flags that we found while loading parent
        # classes
        if found:
            if self.recipe._trackedFlags is not None:
                use.setUsed(self.recipe._trackedFlags)
            # add in the tracked flags that we found while loading this
            # class
            self.recipe._trackedFlags = use.getUsed()
        else:
            # we'll get this if the recipe file is empty
            raise RecipeFileError(
                "file/component '%s' did not contain a valid recipe" % pkgname)

    def allRecipes(self):
        # every recipe class found in the file, keyed by class name
        return self.recipes

    def getRecipe(self):
        # the one class with both name and version
        return self.recipe

    def __del__(self):
        # drop our synthetic module from sys.modules; tolerate repeat deletes
        try:
            del sys.modules[self.file]
        except:
            pass
def recipeLoaderFromSourceComponent(name, cfg, repos,
versionStr=None, labelPath=None,
ignoreInstalled=False):
name = name.split(':')[0]
component = name + ":source"
filename = name + '.recipe'
if not labelPath:
labelPath = cfg.buildLabel
try:
pkgs = repos.findTrove(labelPath,
(component, versionStr, deps.DependencySet()))
if len(pkgs) > 1:
raise RecipeFileError("source component %s has multiple versions "
"with label %s" %(component,
cfg.buildLabel.asString()))
sourceComponent = repos.getTrove(*pkgs[0])
except repository.TroveMissing:
raise RecipeFileError, 'cannot find source component %s' % component
(fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name)
outF = os.fdopen(fd, "w")
inF = None
for (pathId, filePath, fileId, fileVersion) in sourceComponent.iterFileList():
if filePath == filename:
inF = repos.getFileContents([ (fileId, fileVersion) ])[0].get()
break
if not inF:
raise RecipeFileError("version %s of %s does not contain %s" %
(sourceComponent.getName(),
sourceComponent.getVersion().asString(),
filename))
util.copyfileobj(inF, outF)
del inF
del outF
try:
loader = RecipeLoader(recipeFile, cfg, repos, component,
sourceComponent.getVersion().branch(),
ignoreInstalled=ignoreInstalled)
finally:
os.unlink(recipeFile)
recipe = loader.getRecipe()
recipe._trove = sourceComponent.copy()
return (loader, sourceComponent.getVersion())
def loadRecipe(troveSpec, label=None):
    """
    Load a recipe so that its class/data can be used in another recipe.
    If a complete version is not specified in the trovespec, the version of
    the recipe to load will be based on what is installed on the system.
    For example, if C{loadRecipe('foo')} is called, and package C{foo} with
    version C{/bar.org@bar:devel/4.1-1-1} is installed on the system, then
    C{foo:source} with version C{/bar.org@bar:devel/4.1-1} will be loaded.
    The recipe will also be loaded with the installed package's flavor.
    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.
    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.specifix.com@spx:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.specifix.com@spx:shadow, conary.specifix.com@spx:devel]}
    """
    def _findInstalledVersion(db, labelPath, name, versionStr, flavor):
        """ Specialized search of the installed system along a labelPath,
            defaulting to searching the whole system if the trove is not
            found along the label path.
            The version and flavor of the first found installed trove is
            returned, or C{None} if no trove is found.
        """
        # first search on the labelPath.
        try:
            troves = db.findTrove(labelPath, name, flavor, versionStr)
            if len(troves) > 1:
                raise RuntimeError, (
                                'Multiple troves could match loadRecipe'
                                ' request %s' % troveSpec)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        if labelPath is None:
            return None
        # fall back to a system-wide search
        try:
            troves = db.findTrove(None, name, flavor, versionStr)
            if len(troves) > 1:
                raise RuntimeError, (
                                'Multiple troves could match loadRecipe'
                                ' request for %s' % name)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        return None

    # pick up cfg/repos/branch/... that RecipeLoader stashed in the
    # calling recipe module's globals
    callerGlobals = inspect.stack()[1][0].f_globals
    cfg = callerGlobals['cfg']
    repos = callerGlobals['repos']
    branch = callerGlobals['branch']
    ignoreInstalled = callerGlobals['ignoreInstalled']
    parentPackageName = callerGlobals['name']
    oldUsed = use.getUsed()
    name, versionStr, flavor = updatecmd.parseTroveSpec(troveSpec, None)
    if name.endswith('.recipe'):
        file = name
        name = name[:-len('.recipe')]
    else:
        file = name + '.recipe'
    #first check to see if a filename was specified, and if that
    #recipe actually exists.
    loader = None
    if not (label or versionStr or flavor):
        if name[0] != '/':
            recipepath = os.path.dirname(callerGlobals['filename'])
            localfile = recipepath + '/' + file
        else:
            localfile = name + '.recipe'
        if os.path.exists(localfile):
            # NOTE(review): flavor is always false inside this branch (see
            # the guard above), so this block appears to be dead code
            if flavor:
                oldBuildFlavor = cfg.buildFlavor
                cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
                use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
            loader = RecipeLoader(localfile, cfg,
                                  ignoreInstalled=ignoreInstalled)
    if not loader:
        if label:
            labelPath = [versions.Label(label)]
        elif branch:
            # if no labelPath was specified, search backwards through the
            # labels on the current branch.
            labelPath = [branch.label()]
            while branch.hasParentBranch():
                branch = branch.parentBranch()
                labelPath.append(branch.label())
        else:
            labelPath = None
        if not ignoreInstalled:
            # look on the local system to find a trove that is installed that
            # matches this loadrecipe request. Use that trove's version
            # and flavor information to grab the source out of the repository
            db = database.Database(cfg.root, cfg.dbPath)
            parts = _findInstalledVersion(db, labelPath, name,
                                          versionStr, flavor)
            if parts:
                version, flavor = parts
                # normalize locally-cooked/emerged versions back to their
                # repository source version
                if (version.isLocalCook() or version.isEmerge()
                    or version.isLocal()):
                    version = version.getSourceVersion().parentVersion()
                versionStr = version.getSourceVersion().asString()
        if flavor:
            # override the current flavor with the flavor found in the
            # installed trove (or the troveSpec flavor, if no installed
            # trove was found.
            oldBuildFlavor = cfg.buildFlavor
            cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
            use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
        loader = recipeLoaderFromSourceComponent(name, cfg, repos,
                                                 labelPath=labelPath,
                                                 versionStr=versionStr,
                                                 ignoreInstalled=ignoreInstalled)[0]
    if flavor:
        # restore the parent recipe's build flavor
        cfg.buildFlavor = oldBuildFlavor
        use.setBuildFlagsFromFlavor(parentPackageName, cfg.buildFlavor)
    for name, recipe in loader.allRecipes().items():
        # hide all recipes from RecipeLoader - we don't want to return
        # a recipe that has been loaded by loadRecipe
        recipe.ignore = 1
        callerGlobals[name] = recipe
    # stash a reference to the module in the namespace
    # of the recipe that loaded it, or else it will be destroyed
    callerGlobals[os.path.basename(file).replace('.', '-')] = loader
    # return the tracked flags to their state before loading this recipe
    use.resetUsed()
    use.setUsed(oldUsed)
class _sourceHelper:
def __init__(self, theclass, recipe):
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.recipe._sources.append(self.theclass(self.recipe, *args, **keywords))
class _recipeHelper:
def __init__(self, list, recipe, theclass):
self.list = list
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.list.append(self.theclass(self.recipe, *args, **keywords))
class _policyUpdater:
def __init__(self, theobject):
self.theobject = theobject
def __call__(self, *args, **keywords):
self.theobject.updateArgs(*args, **keywords)
class Recipe:
    """Abstract base shared by every recipe kind; never instantiated directly."""
    # source trove backing this recipe class, filled in by the loader
    _trove = None
    # use-flags recorded while the recipe class was being loaded
    _trackedFlags = None

    def __init__(self):
        # instantiating the base class directly is a programming error
        assert(self.__class__ is not Recipe)

    def __repr__(self):
        return "<{0} Object>".format(self.__class__)
class PackageRecipe(Recipe):
    # Recipe type for cooking binary packages from source.
    buildRequires = []       # trove specs that must be installed before cooking
    Flags = use.LocalFlags   # per-recipe local use flags

    def mainDir(self, new = None):
        """Return the main build directory name, optionally setting it.

        *new* is interpolated against the recipe macros before being
        stored; the result is also published as the ``maindir`` macro.
        """
        if new:
            self.theMainDir = new % self.macros
            self.macros.maindir = self.theMainDir
        return self.theMainDir

    def nameVer(self):
        """Return the canonical ``name-version`` string for this recipe."""
        return '-'.join((self.name, self.version))

    def cleanup(self, builddir, destdir):
        """Remove *builddir* unless the config sets ``noClean``."""
        if 'noClean' in self.cfg.__dict__ and self.cfg.noClean:
            pass
        else:
            util.rmtree(builddir)
def sourceMap(self, path):
    """Record *path* in the basename -> path map, tracking conflicts.

    Two truly different source locations that share a basename are
    collected in self.pathConflicts so fetchAllSources can refuse to run.
    """
    base = os.path.basename(path)
    if base not in self.sourcePathMap:
        # First time we see this basename: just remember where it lives.
        self.sourcePathMap[base] = path
        return
    if base == path:
        # we only care about truly different source locations with the
        # same basename
        return
    if base in self.pathConflicts:
        self.pathConflicts[base].append(path)
    else:
        self.pathConflicts[base] = [
            self.sourcePathMap[base],  # previous (first) instance
            path,                      # this instance
        ]
def fetchAllSources(self):
    """
    returns a list of file locations for all the sources in
    the package recipe
    """
    # first make sure we had no path conflicts:
    if self.pathConflicts:
        errlist = []
        for basepath in self.pathConflicts.keys():
            errlist.extend([x for x in self.pathConflicts[basepath]])
        raise RecipeFileError, '\n'.join(errlist)
    self.prepSources()
    files = []
    for src in self._sources:
        f = src.fetch()
        if f:
            # fetch() may return one path or a sequence of paths
            if type(f) in (tuple, list):
                files.extend(f)
            else:
                files.append(f)
    return files

def checkBuildRequirements(self, cfg, sourceVersion, ignoreDeps=False):
    """ Checks to see if the build requirements for the recipe
    are installed
    """
    db = database.Database(cfg.root, cfg.dbPath)
    time = sourceVersion.timeStamps()[-1]
    reqMap = {}        # buildReq spec -> best matching installed trove
    missingReqs = []   # specs that could not be satisfied
    for buildReq in self.buildRequires:
        (name, versionStr, flavor) = updatecmd.parseTroveSpec(buildReq,
                                                              None)
        # XXX move this to use more of db.findTrove's features, instead
        # of hand parsing
        try:
            troves = db.findTrove(None, name)
            troves = db.getTroves(troves)
        except repository.TroveNotFound:
            missingReqs.append(buildReq)
            continue
            # NOTE(review): unreachable 'break' preserved from the
            # original source; confirm against upstream history.
            break
        versionMatches = []
        for trove in troves:
            if versionStr is None:
                # no version restriction: every installed trove matches
                versionMatches.append(trove)
                continue
            if versionStr.find('@') == -1:
                label = trove.getVersion().branch().label()
                if versionStr[0] == ':' or versionStr.find(':') == -1:
                    if versionStr[0] == ':':
                        versionStr = versionStr[1:]
                    else:
                        log.warning('Deprecated buildreq format. Use '
                                    ' foo=:label, not foo=label')
                    if label.getLabel() == versionStr:
                        versionMatches.append(trove)
                    continue
                if ("%s:%s" % (label.getNamespace(), label.getLabel())\
                        == versionStr):
                    versionMatches.append(trove)
                    break
                continue
            else:
                raise RecipeFileError("Unsupported buildReq format")
        if not versionMatches:
            missingReqs.append(buildReq)
            continue
        # sort oldest -> newest so [-1] is the most recent version
        versionMatches.sort(lambda a, b: a.getVersion().__cmp__(b.getVersion()))
        if not flavor:
            reqMap[buildReq] = versionMatches[-1]
            continue
        # flavored spec: newest trove whose flavor strongly satisfies it
        for trove in reversed(versionMatches):
            troveFlavor = trove.getFlavor()
            if troveFlavor.stronglySatisfies(flavor):
                reqMap[buildReq] = trove
                break
        if buildReq not in reqMap:
            missingReqs.append(buildReq)
    if missingReqs:
        if not ignoreDeps:
            raise RuntimeError, ("Could not find the following troves "
                                 "needed to cook this recipe:\n"
                                 "%s" % '\n'.join(missingReqs))
    self.buildReqMap = reqMap

def extraSource(self, action):
    """
    extraSource allows you to append a source list item that is
    not a part of source.py. Be aware when writing these source
    list items that you are writing conary internals! In particular,
    anything that needs to add a source file to the repository will
    need to implement fetch(), and all source files will have to be
    sought using the lookaside cache.
    """
    self._sources.append(action)

def prepSources(self):
    # Run the prep stage of every queued source action.
    for source in self._sources:
        source.doPrep()
def processResumeList(self, resume):
    """Parse a resume spec ("N", "A:B,C:D", ...) into self.resumeList.

    Each entry becomes a [begin, end] pair; a lone line number with no
    siblings becomes [N, False] (resume from N to the end).
    """
    ranges = []
    if resume:
        pieces = resume.split(',')
        for piece in pieces:
            if ':' in piece:
                start, stop = piece.split(':')
                if start:
                    start = int(start)
                if stop:
                    stop = int(stop)
                ranges.append([start, stop])
            elif len(pieces) == 1:
                # single bare line number: open-ended resume
                ranges.append([int(piece), False])
            else:
                # bare number inside a list: resume exactly that line
                ranges.append([int(piece), int(piece)])
    self.resumeList = ranges
def iterResumeList(self, actions):
    """Yield only the actions selected by self.resumeList ranges.

    Ranges are consumed in order; a False end means 'to the end'.
    """
    resume = self.resumeList
    resumeBegin = resume[0][0]
    resumeEnd = resume[0][1]
    for action in actions:
        if not resumeBegin or action.linenum >= resumeBegin:
            if not resumeEnd or action.linenum <= resumeEnd:
                yield action
            elif resumeEnd:
                # past the current range: advance to the next one
                resume = resume[1:]
                if not resume:
                    return
                resumeBegin = resume[0][0]
                resumeEnd = resume[0][1]
                if action.linenum == resumeBegin:
                    yield action

def unpackSources(self, builddir, destdir, resume=None):
    """Run all source actions (prep + action), honoring a resume spec."""
    self.macros.builddir = builddir
    self.macros.destdir = destdir
    if resume == 'policy':
        # policy-only resume: skip the source stage entirely
        return
    elif resume:
        log.debug("Resuming on line(s) %s" % resume)
        # note resume lines must be in order
        self.processResumeList(resume)
        for source in self.iterResumeList(self._sources):
            source.doPrep()
            source.doAction()
    else:
        for source in self._sources:
            source.doPrep()
            source.doAction()

def extraBuild(self, action):
    """
    extraBuild allows you to append a build list item that is
    not a part of build.py. Be aware when writing these build
    list items that you are writing conary internals!
    """
    self._build.append(action)

def doBuild(self, buildPath, resume=None):
    """Run the queued build actions inside the main build directory."""
    builddir = os.sep.join((buildPath, self.mainDir()))
    self.macros.builddir = builddir
    self.magic = magic.magicCache(self.macros.destdir)
    if resume == 'policy':
        return
    if resume:
        for bld in self.iterResumeList(self._build):
            bld.doAction()
    else:
        for bld in self._build:
            bld.doAction()

def doDestdirProcess(self):
    """Run every destdir policy against this recipe."""
    for post in self.destdirPolicy:
        post.doProcess(self)

def getPackages(self):
    # policies look at the recipe instance for all information
    for policy in self.packagePolicy:
        policy.doProcess(self)
    # NOTE(review): self.autopkg is presumably attached by one of the
    # package policies above -- confirm before relying on it elsewhere.
    return self.autopkg.getPackages()

def getUnpackagedComponentNames(self):
    # someday, this will probably be per-branch policy
    return ('test', 'debuginfo')

def disableParallelMake(self):
    # Clearing the macro removes -j style flags from make invocations.
    self.macros.parallelmflags = ''

def populateLcache(self):
    """
    Populate a repository lookaside cache
    """
    recipeClass = self.__class__
    repos = self.laReposCache.repos
    # build a list containing this recipe class and any ancestor class
    # from which it descends
    classes = [ recipeClass ]
    bases = list(recipeClass.__bases__)
    while bases:
        parent = bases.pop()
        bases.extend(list(parent.__bases__))
        if issubclass(parent, PackageRecipe):
            classes.append(parent)
    # reverse the class list, this way the files will be found in the
    # youngest descendant first
    classes.reverse()
    # populate the repository source lookaside cache from the :source
    # components
    for rclass in classes:
        if not rclass._trove:
            continue
        srcName = rclass._trove.getName()
        srcVersion = rclass._trove.getVersion()
        for f in repos.iterFilesInTrove(srcName, srcVersion,
                                        deps.DependencySet(),
                                        withFiles=True):
            pathId, path, fileId, version, fileObj = f
            assert(path[0] != "/")
            # we might need to retrieve this source file
            # to enable a build, so we need to find the
            # sha1 hash of it since that's how it's indexed
            # in the file store
            if isinstance(fileObj, files.RegularFile):
                # it only makes sense to fetch regular files, skip
                # anything that isn't
                self.laReposCache.addFileHash(srcName, srcVersion, pathId,
                    path, fileId, version, fileObj.contents.sha1())
def __getattr__(self, name):
    """
    Allows us to dynamically suck in namespace of other modules
    with modifications.
    - The public namespace of the build module is accessible,
      and build objects are created and put on the build list
      automatically when they are referenced.
    - The public namespaces of the policy modules are accessible;
      policy objects already on their respective lists are returned,
      policy objects not on their respective lists are added to
      the end of their respective lists like build objects are
      added to the build list.
    """
    if not name.startswith('_'):
        # addFoo -> a helper that queues source.Foo on self._sources
        if name.startswith('add'):
            return _sourceHelper(source.__dict__[name[3:]], self)
        # build actions are queued on self._build when called
        if name in build.__dict__:
            return _recipeHelper(self._build, self, build.__dict__[name])
        for (policy, list) in (
                (destdirpolicy, self.destdirPolicy),
                (packagepolicy, self.packagePolicy)):
            if name in policy.__dict__:
                policyClass = policy.__dict__[name]
                # an existing policy object is updated in place...
                for policyObj in list:
                    if isinstance(policyObj, policyClass):
                        return _policyUpdater(policyObj)
                # ...a new one is appended to its policy list
                return _recipeHelper(list, self, policyClass)
    # plain attribute lookup for everything else
    return self.__dict__[name]

def __delattr__(self, name):
    """
    Allows us to delete policy items from their respective lists
    by deleting a name in the recipe self namespace. For example,
    to remove the EtcConfig package policy from the package policy
    list, one could do::
    del self.EtcConfig
    This would prevent the EtcConfig package policy from being
    executed. The policy objects are carefully ordered in the
    default policy lists; deleting a policy object and then
    referencing it again will cause it to show up at the end of
    the list. Don't do that.
    In general, delete policy only as a last resort; you can
    usually disable policy entirely with the keyword argument::
    exceptions='.*'
    """
    for (policy, list) in (
            (destdirpolicy, self.destdirPolicy),
            (packagepolicy, self.packagePolicy)):
        if name in policy.__dict__:
            policyClass = policy.__dict__[name]
            for index in range(len(list)):
                policyObj = list[index]
                if isinstance(policyObj, policyClass):
                    del list[index]
                    return
    del self.__dict__[name]

def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
    # NOTE: the mutable default 'extraMacros' is only read, never mutated.
    assert(self.__class__ is not Recipe)
    self._sources = []
    self._build = []
    self.destdirPolicy = destdirpolicy.DefaultPolicy(self)
    self.packagePolicy = packagepolicy.DefaultPolicy(self)
    self.cfg = cfg
    self.laReposCache = laReposCache
    self.srcdirs = srcdirs
    self.macros = macros.Macros()
    self.macros.update(baseMacros)
    self.macros.update(use.Arch._getMacros())
    # allow for architecture not to be set -- this could happen
    # when storing the recipe e.g.
    for key in cfg.macroKeys():
        self.macros._override(key, cfg['macros.' + key])
    self.macros.name = self.name
    self.macros.version = self.version
    self.packages = { self.name : True }
    if extraMacros:
        self.macros.update(extraMacros)
    self.mainDir(self.nameVer())
    self.sourcePathMap = {}
    self.pathConflicts = {}
class SingleGroup:
def addTrove(self, name, versionStr = None, flavor = None, source = None,
byDefault = True):
assert(flavor is None or isinstance(flavor, str))
if flavor is not None:
flavor = deps.parseFlavor(flavor)
if flavor is None:
raise ValueError, 'invalid flavor %s' % flavorStr
self.addTroveList.append((name, versionStr, flavor, source, byDefault))
def findTroves(self, cfg, repos, label):
    """Resolve all queued trove specs and accumulate the group size."""
    self.size = 0
    validSize = True
    troveList = []
    flavorMap = {}      # requested flavor -> effective (merged) flavor
    findTroveList = []
    for (name, versionStr, flavor, source, byDefault) in self.addTroveList:
        desFlavor = cfg.buildFlavor.copy()
        if flavor is not None:
            desFlavor = deps.overrideFlavor(desFlavor, flavor)
        findTroveList.append((name, versionStr, desFlavor))
        flavorMap[flavor] = desFlavor
    try:
        results = repos.findTroves(label, findTroveList)
    except repository.TroveNotFound, e:
        raise RecipeFileError, str(e)
    for (name, versionStr, flavor, source, byDefault) in self.addTroveList:
        desFlavor = flavorMap[flavor]
        pkgList = results[name, versionStr, desFlavor]
        assert(len(pkgList) == 1)
        troveList.append((pkgList[0], byDefault))
        assert(desFlavor.score(pkgList[0][2]) is not False)
    troves = repos.getTroves([ x[0] for x in troveList ],
                             withFiles = False)
    for (((name, v, f), byDefault), trove) in izip(troveList, troves):
        l = self.troveVersionFlavors.get(name, [])
        if (v, f) not in l:
            l.append((v,f, byDefault))
        self.troveVersionFlavors[name] = l
        # XXX this code is to deal with troves that existed
        # before troveInfo was added
        if validSize:
            size = trove.getSize()
            # allow older changesets that are missing size
            # info to be added ( this will make the size
            # invalid, so don't store it)
            if size is not None:
                self.size += trove.getSize()
            else:
                validSize = False
    if not validSize:
        self.size = None

def getRequires(self):
    """Return the group's dependency set."""
    return self.requires

def getTroveList(self):
    """Return the name -> [(version, flavor, byDefault)] mapping."""
    return self.troveVersionFlavors

def __init__(self):
    self.addTroveList = []
    self.requires = deps.DependencySet()
    self.troveVersionFlavors = {}

def Requires(self, requirement):
    """Add a trove dependency to this group."""
    self.requires.addDep(deps.TroveDependencies,
                         deps.Dependency(requirement))
class GroupRecipe(Recipe):
    # Recipe type for building trove groups; most methods delegate to the
    # SingleGroup named by groupName (defaulting to the recipe's own name).
    Flags = use.LocalFlags

    def Requires(self, requirement, groupName = None):
        """Add a trove requirement to a group; file paths are rejected."""
        if requirement[0] == '/':
            raise RecipeFileError, 'file requirements not allowed in groups'
        if groupName is None: groupName = self.name
        self.groups[groupName].Requires(requirement)

    def addTrove(self, name, versionStr = None, flavor = None, source = None,
                 byDefault = True, groupName = None):
        """Queue a trove for inclusion in the named group."""
        if groupName is None: groupName = self.name
        self.groups[groupName].addTrove(name, versionStr = versionStr,
                                        flavor = flavor, source = source,
                                        byDefault = byDefault)

    def findTroves(self, groupName = None):
        """Resolve the named group's queued troves in the repository."""
        if groupName is None: groupName = self.name
        self.groups[groupName].findTroves(self.cfg, self.repos, self.label)

    def getRequires(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].getRequires()

    def getTroveList(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].getTroveList()

    def getSize(self, groupName = None):
        if groupName is None: groupName = self.name
        return self.groups[groupName].size

    def createGroup(self, groupName):
        """Create an additional (empty) named group in this recipe."""
        if self.groups.has_key(groupName):
            raise RecipeFileError, 'group %s was already created' % groupName
        if not groupName.startswith('group-'):
            raise RecipeFileError, 'group names must start with "group-"'
        self.groups[groupName] = SingleGroup()

    def getGroupNames(self):
        return self.groups.keys()

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        # NOTE: mutable default 'extraMacros' is only read, never mutated.
        self.repos = repos
        self.cfg = cfg
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
        self.groups = {}
        # the recipe always owns a group named after itself
        self.groups[self.name] = SingleGroup()
class RedirectRecipe(Recipe):
    # Recipe type that builds redirect troves pointing old names at new ones.
    Flags = use.LocalFlags

    def addRedirect(self, name, versionStr = None, flavorStr = None,
                    fromTrove = None):
        """Queue a redirect from *fromTrove* (default: this recipe) to *name*."""
        if flavorStr is not None:
            flavor = deps.parseFlavor(flavorStr)
            if flavor is None:
                raise ValueError, 'invalid flavor %s' % flavorStr
        else:
            flavor = None
        if fromTrove is None:
            fromTrove = self.name
        elif fromTrove.find(":") != -1:
            raise ValueError, 'components cannot be individually redirected'
        self.addTroveList.append((name, versionStr, flavor, fromTrove))

    def findTroves(self):
        """Resolve queued redirects and build the redirection mapping."""
        self.size = 0
        validSize = True
        troveList = []
        packageSet = {}    # (name, version, flavor) -> redirected-from name
        for (name, versionStr, flavor, fromName) in self.addTroveList:
            try:
                desFlavor = self.cfg.buildFlavor.copy()
                if flavor is not None:
                    desFlavor.union(flavor, deps.DEP_MERGE_TYPE_OVERRIDE)
                pkgList = self.repos.findTrove(self.label,
                                               (name, versionStr, desFlavor))
            except repository.TroveNotFound, e:
                raise RecipeFileError, str(e)
            assert(len(pkgList) == 1)
            packageSet[pkgList[0]] = fromName
            troveList.append(pkgList[0])
        troves = self.repos.getTroves(troveList, withFiles = False)
        redirections = {}
        for topLevelTrove in troves:
            topName = topLevelTrove.getName()
            topVersion = topLevelTrove.getVersion()
            topFlavor = topLevelTrove.getFlavor()
            fromName = packageSet[(topName, topVersion, topFlavor)]
            d = self.redirections.setdefault(fromName, {})
            # this redirects from oldPackage -> newPackage
            d[(topName, topVersion, topFlavor)] = True
            for (name, version, flavor) in topLevelTrove.iterTroveList():
                # redirect from oldPackage -> referencedPackage
                d[(name, version, flavor)] = True
                if name.find(":") != -1:
                    compName = fromName + ":" + name.split(":")[1]
                    # redirect from oldPackage -> oldPackage:component. we
                    # leave version/flavor alone; they get filled in later
                    d[(compName, None, None)] = True
                    # redirect from oldPackage:component -> newPackage:component
                    d2 = self.redirections.setdefault(compName, {})
                    d2[(name, version, flavor)] = True
        # NOTE(review): entries were inserted into self.redirections above,
        # but this loop iterates the *local* 'redirections' dict, which is
        # still empty -- the dict -> list normalization therefore never
        # runs and self.redirections values remain dicts.  It looks like
        # this should iterate self.redirections; confirm against upstream
        # history before changing.
        for name,d in redirections.iteritems():
            self.redirections[name] = [ (x[0], x[1], x[2]) for x in d ]

    def getRedirections(self):
        return self.redirections

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        # NOTE: mutable default 'extraMacros' is only read, never mutated.
        self.repos = repos
        self.cfg = cfg
        self.redirections = {}
        self.label = label
        self.flavor = flavor
        self.addTroveList = []
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
class FilesetRecipe(Recipe):
    # XXX need to work on adding files from different flavors of troves

    def addFileFromPackage(self, pattern, pkg, recurse, remapList):
        """Add files matching *pattern* from a single trove.

        Returns True if at least one file matched, False otherwise.
        """
        pathMap = {}
        for (pathId, pkgPath, fileId, version) in pkg.iterFileList():
            pathMap[pkgPath] = (pathId, fileId, version)
        patternList = util.braceExpand(pattern)
        matches = {}
        for pattern in patternList:
            if not recurse:
                matchList = [ n for n in pathMap.keys() if
                              fnmatchcase(n, pattern)]
            else:
                # recursive: a path also matches if one of its leading
                # directories matches the pattern
                matchList = []
                dirCount = pattern.count("/")
                for n in pathMap.iterkeys():
                    i = n.count("/")
                    if i > dirCount:
                        dirName = os.sep.join(n.split(os.sep)[:dirCount + 1])
                        match = fnmatchcase(dirName, pattern)
                    elif i == dirCount:
                        match = fnmatchcase(n, pattern)
                    else:
                        match = False
                    if match: matchList.append(n)
            for path in matchList:
                matches[path] = pathMap[path]
        if not matches:
            return False
        for path in matches.keys():
            (pathId, fileId, version) = matches[path]
            # apply the first remap rule whose oldPath prefixes this path
            for (old, new) in remapList:
                if path == old:
                    path = new
                    break
                elif len(path) > len(old) and path.startswith(old) and \
                        path[len(old)] == "/":
                    path = new + path[len(old):]
                    break
            if self.paths.has_key(path):
                raise RecipeFileError, "%s has been included multiple times" \
                    % path
            self.files[pathId] = (path, fileId, version)
            self.paths[path] = 1
        return True

    def addFile(self, pattern, component, versionStr = None, recurse = True,
                remap = []):
        """
        Adds files which match pattern from version versionStr of component.
        Pattern is glob-style, with brace expansion. If recurse is set,
        anything below a directory which matches pattern is also included,
        and the directory itself does not have to be part of the trove.
        Remap is a list of (oldPath, newPath) tuples. The first oldPath
        which matches the start of a matched pattern is rewritten as
        newPath.
        """
        if type(remap) == tuple:
            remap = [ remap ]
        try:
            pkgList = self.repos.findTrove(self.label,
                                           (component, versionStr, self.flavor))
        except repository.TroveNotFound, e:
            raise RecipeFileError, str(e)
        if len(pkgList) == 0:
            raise RecipeFileError, "no packages match %s" % component
        elif len(pkgList) > 1:
            raise RecipeFileError, "too many packages match %s" % component
        foundIt = False
        pkg = self.repos.getTrove(*pkgList[0])
        for sub in self.repos.walkTroveSet(pkg):
            foundIt = foundIt or self.addFileFromPackage(pattern, sub, recurse,
                                                         remap)
        if not foundIt:
            raise RecipeFileError, "%s does not exist in version %s of %s" % \
                (pattern, pkg.getVersion().asString(), pkg.getName())

    def iterFileList(self):
        """Yield (pathId, path, fileId, version) for every collected file."""
        for (pathId, (path, fileId, version)) in self.files.iteritems():
            yield (pathId, path, fileId, version)

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        # NOTE: mutable default 'extraMacros' is only read, never mutated.
        self.repos = repos
        self.cfg = cfg
        self.files = {}
        self.paths = {}
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
class RecipeFileError(Exception):
    """Error raised for problems found while processing a recipe file.

    The message is stored on ``msg``; both repr() and str() return it.
    """
    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return self.msg

    def __str__(self):
        return self.__repr__()
|
'''
@author: Remi Cattiau
'''
from nxdrive.logging_config import get_logger
from nxdrive.engine.workers import Worker, ThreadInterrupt
from nxdrive.engine.blacklist_queue import BlacklistQueue
from nxdrive.engine.watcher.local_watcher import DriveFSEventHandler, normalize_event_filename
from nxdrive.engine.activity import Action
from nxdrive.client.local_client import LocalClient
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_SUFFIX
from nxdrive.client.common import safe_filename, NotFound
from nxdrive.utils import force_decode
import os
import sys
import urllib2
from time import sleep
import shutil
from PyQt4.QtCore import pyqtSignal
from Queue import Queue, Empty
log = get_logger(__name__)
class DriveEdit(Worker):
    # Qt signals emitted toward the UI layer
    localScanFinished = pyqtSignal()
    driveEditUploadCompleted = pyqtSignal()
    '''
    Worker that downloads a server document into a dedicated local
    folder, opens it for editing and uploads local modifications back.
    '''

    def __init__(self, manager, folder):
        '''
        Constructor

        :param manager: application manager giving access to the engines
        :param folder: local working folder for edited documents
        '''
        super(DriveEdit, self).__init__()
        self._manager = manager
        self._thread.started.connect(self.run)
        self._event_handler = None
        self._metrics = dict()
        self._metrics['edit_files'] = 0
        self._observer = None
        # normalize to unicode so path joins stay consistent (Python 2)
        if type(folder) == str:
            folder = unicode(folder)
        self._folder = folder
        self._local_client = LocalClient(self._folder)
        self._upload_queue = Queue()
        self._error_queue = BlacklistQueue()
        self._stop = False

    def stop(self):
        """Request the worker to stop; also unblocks suspended clients."""
        super(DriveEdit, self).stop()
        self._stop = True

    def stop_client(self, reason):
        # Installed as check_suspended on remote clients: aborts any
        # in-flight transfer once stop() has been requested.
        if self._stop:
            raise ThreadInterrupt

    def _cleanup(self):
        """Wipe and recreate the DriveEdit working folder."""
        log.debug("Cleanup DriveEdit folder")
        shutil.rmtree(self._folder, ignore_errors=True)
        if not os.path.exists(self._folder):
            os.mkdir(self._folder)
def _get_engine(self, url, user=None):
    """Return the engine bound to *url* (and *user* when given), else None.

    A first pass compares usernames case-sensitively; if that fails and a
    user was supplied, a second pass retries case-insensitively.
    """
    def strip_slash(u):
        return u[:-1] if u.endswith('/') else u

    url = strip_slash(url)
    # First pass: exact username match.
    for candidate in self._manager.get_engines().values():
        binder = candidate.get_binder()
        if strip_slash(binder.server_url) != url:
            continue
        if user is None or user == binder.username:
            return candidate
    # Some backends are case insensitive on usernames.
    if user is None:
        return None
    lowered = user.lower()
    for candidate in self._manager.get_engines().values():
        binder = candidate.get_binder()
        if strip_slash(binder.server_url) == url \
                and lowered == binder.username.lower():
            return candidate
    return None
def _download_content(self, engine, remote_client, info, file_path, url=None):
    """Fetch the document blob into a temp file next to *file_path*.

    Reuses a valid local duplicate with the same digest when one exists
    instead of re-downloading.  Returns the temp file path.
    """
    file_dir = os.path.dirname(file_path)
    file_name = os.path.basename(file_path)
    file_out = os.path.join(file_dir, DOWNLOAD_TMP_FILE_PREFIX + file_name
                            + DOWNLOAD_TMP_FILE_SUFFIX)
    # Close to processor method - should try to refactor ?
    pair = engine.get_dao().get_valid_duplicate_file(info.digest)
    if pair:
        # same content already synced locally: plain copy is enough
        local_client = engine.get_local_client()
        shutil.copy(local_client._abspath(pair.local_path), file_out)
    else:
        if url is not None:
            remote_client.do_get(url, file_out=file_out, digest=info.digest, digest_algorithm=info.digest_algorithm)
        else:
            remote_client.get_blob(info.uid, file_out=file_out)
    return file_out
def _prepare_edit(self, server_url, doc_id, filename, user=None, download_url=None):
    """Download the document locally and tag its folder with edit metadata.

    Creates <folder>/<doc_id>/<filename>, records the server URL, user
    and digest information as remote-id xattrs, then returns the final
    file path (or None when no engine matches or the download fails).
    """
    engine = self._get_engine(server_url, user=user)
    if engine is None:
        # TO_REVIEW Display an error message
        log.debug("No engine found for %s(%s)", server_url, doc_id)
        return
    # Get document info
    remote_client = engine.get_remote_doc_client()
    # Avoid any link with the engine, remote_doc are not cached so we can do that
    remote_client.check_suspended = self.stop_client
    info = remote_client.get_info(doc_id)
    # Create local structure
    dir_path = os.path.join(self._folder, doc_id)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    log.trace('Raw filename: %r', filename)
    filename = safe_filename(urllib2.unquote(filename))
    log.trace('Unquoted filename = %r', filename)
    decoded_filename = force_decode(filename)
    if decoded_filename is None:
        decoded_filename = filename
    else:
        # Always use utf-8 encoding for xattr
        filename = decoded_filename.encode('utf-8')
    log.debug("Editing %r ('nxdriveeditname' xattr: %r)", decoded_filename, filename)
    file_path = os.path.join(dir_path, decoded_filename)
    # Download the file
    url = None
    if download_url is not None:
        url = server_url
        if not url.endswith('/'):
            url += '/'
        url += download_url
    tmp_file = self._download_content(engine, remote_client, info, file_path, url=url)
    if tmp_file is None:
        log.debug("Download failed")
        return
    # Set the remote_id
    dir_path = self._local_client.get_path(os.path.dirname(file_path))
    self._local_client.set_remote_id(dir_path, doc_id)
    self._local_client.set_remote_id(dir_path, server_url, "nxdriveedit")
    if user is not None:
        self._local_client.set_remote_id(dir_path, user, "nxdriveedituser")
    # Bug fix (NXDRIVE-343): Note documents carry no blob digest, so
    # these fields can be None -- skip the xattrs instead of failing.
    if info.digest_algorithm is not None:
        self._local_client.set_remote_id(dir_path, info.digest_algorithm, "nxdriveeditdigestalgorithm")
    if info.digest is not None:
        self._local_client.set_remote_id(dir_path, info.digest, "nxdriveeditdigest")
    self._local_client.set_remote_id(dir_path, filename, "nxdriveeditname")
    # Rename to final filename
    # Under Windows first need to delete target file if exists, otherwise will get a 183 WindowsError
    if sys.platform == 'win32' and os.path.exists(file_path):
        os.unlink(file_path)
    os.rename(tmp_file, file_path)
    return file_path
def edit(self, server_url, doc_id, filename, user=None, download_url=None):
    """Download the document and open it with the local default editor."""
    # Download file
    file_path = self._prepare_edit(server_url, doc_id, filename, user=user, download_url=download_url)
    # Launch it
    if file_path is not None:
        self._manager.open_local_file(file_path)

def _handle_queue(self):
    """Upload every queued locally-modified file back to its server.

    Blacklisted errors are re-queued first; files whose digest is
    unchanged are skipped; failures are pushed back for retry.
    """
    uploaded = False
    # Unqueue any errors
    item = self._error_queue.get()
    while (item is not None):
        self._upload_queue.put(item.get())
        item = self._error_queue.get()
    # Handle the upload queue
    while (not self._upload_queue.empty()):
        try:
            ref = self._upload_queue.get_nowait()
            log.trace('Handling DriveEdit queue ref: %r', ref)
        except Empty:
            break
        dir_path = os.path.dirname(ref)
        uid = self._local_client.get_remote_id(dir_path)
        server_url = self._local_client.get_remote_id(dir_path, "nxdriveedit")
        user = self._local_client.get_remote_id(dir_path, "nxdriveedituser")
        engine = self._get_engine(server_url, user=user)
        # NOTE(review): _get_engine may return None (server no longer
        # bound); the next line would then raise AttributeError outside
        # the try block below -- confirm intended behavior.
        remote_client = engine.get_remote_doc_client()
        remote_client.check_suspended = self.stop_client
        digest_algorithm = self._local_client.get_remote_id(dir_path, "nxdriveeditdigestalgorithm")
        digest = self._local_client.get_remote_id(dir_path, "nxdriveeditdigest")
        # Don't update if digest are the same
        info = self._local_client.get_info(ref)
        try:
            if info.get_digest(digest_func=digest_algorithm) == digest:
                continue
            # TO_REVIEW Should check if server-side blob has changed ?
            # Update the document - should verify the remote hash - NXDRIVE-187
            log.debug('Uploading file %s for user %s', self._local_client._abspath(ref),
                      user)
            remote_client.stream_update(uid, self._local_client._abspath(ref), apply_versioning_policy=True)
        except ThreadInterrupt:
            raise
        except Exception as e:
            # Try again in 30s
            log.trace("Exception on drive edit: %r", e)
            self._error_queue.push(ref, ref)
            continue
        uploaded = True
    if uploaded:
        log.debug('Emitting driveEditUploadCompleted')
        self.driveEditUploadCompleted.emit()
def _execute(self):
    """Worker main loop: clean the folder, watch it, and process queues."""
    try:
        self._watchdog_queue = Queue()
        self._action = Action("Clean up folder")
        self._cleanup()
        self._action = Action("Setup watchdog")
        self._setup_watchdog()
        self._end_action()
        while (1):
            self._interact()
            try:
                self._handle_queue()
            except NotFound:
                pass
            while (not self._watchdog_queue.empty()):
                evt = self._watchdog_queue.get()
                self.handle_watchdog_event(evt)
            sleep(0.01)
    except ThreadInterrupt:
        raise
    finally:
        self._stop_watchdog()

def get_metrics(self):
    """Return worker metrics merged with DriveEdit-specific counters."""
    metrics = super(DriveEdit, self).get_metrics()
    if self._event_handler is not None:
        metrics['fs_events'] = self._event_handler.counter
    # Python 2 dict merge; entries from self._metrics win on key clash.
    return dict(metrics.items() + self._metrics.items())

def _setup_watchdog(self):
    """Start a recursive watchdog observer on the DriveEdit folder."""
    from watchdog.observers import Observer
    log.debug("Watching FS modification on : %s", self._folder)
    self._event_handler = DriveFSEventHandler(self)
    self._observer = Observer()
    self._observer.schedule(self._event_handler, self._folder, recursive=True)
    self._observer.start()

def _stop_watchdog(self, raise_on_error=True):
    """Best-effort stop and join of the watchdog observer thread."""
    if self._observer is None:
        return
    log.info("Stopping FS Observer thread")
    try:
        self._observer.stop()
    except Exception as e:
        log.warn("Can't stop FS observer : %r", e)
    # Wait for all observers to stop
    try:
        self._observer.join()
    except Exception as e:
        log.warn("Can't join FS observer : %r", e)
    # Delete all observers
    self._observer = None
def handle_watchdog_event(self, evt):
    """React to a filesystem event inside the DriveEdit folder.

    Queues the edited file for upload when its content was created,
    modified or moved into place; ignores directories, temp files and
    files whose name does not match the recorded edit name.
    """
    self._action = Action("Handle watchdog event")
    log.debug("Handling watchdog event [%s] on %r", evt.event_type, evt.src_path)
    try:
        src_path = normalize_event_filename(evt.src_path)
        # Event on the folder by itself
        if os.path.isdir(src_path):
            return
        ref = self._local_client.get_path(src_path)
        file_name = os.path.basename(src_path)
        if self._local_client.is_temp_file(file_name):
            return
        queue = False
        if evt.event_type in ('modified', 'created'):
            queue = True
        if evt.event_type == 'moved':
            # track the destination of the move, not the source
            ref = self._local_client.get_path(evt.dest_path)
            file_name = os.path.basename(evt.dest_path)
            queue = True
        dir_path = self._local_client.get_path(os.path.dirname(src_path))
        name = self._local_client.get_remote_id(dir_path, "nxdriveeditname")
        if name is None:
            return
        decoded_name = force_decode(name)
        if decoded_name is not None:
            name = decoded_name
        if name != file_name:
            return
        if queue:
            # ADD TO UPLOAD QUEUE
            self._upload_queue.put(ref)
            return
    except Exception as e:
        # Fix: use the logger's lazy %-style arguments instead of eager
        # string interpolation, consistent with every other log call here.
        log.warn("Watchdog exception : %r", e)
        log.exception(e)
    finally:
        self._end_action()
# NXDRIVE-343: Fix Drive Edit in case of a Note document
'''
@author: Remi Cattiau
'''
from nxdrive.logging_config import get_logger
from nxdrive.engine.workers import Worker, ThreadInterrupt
from nxdrive.engine.blacklist_queue import BlacklistQueue
from nxdrive.engine.watcher.local_watcher import DriveFSEventHandler, normalize_event_filename
from nxdrive.engine.activity import Action
from nxdrive.client.local_client import LocalClient
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_PREFIX
from nxdrive.client.base_automation_client import DOWNLOAD_TMP_FILE_SUFFIX
from nxdrive.client.common import safe_filename, NotFound
from nxdrive.utils import force_decode
import os
import sys
import urllib2
from time import sleep
import shutil
from PyQt4.QtCore import pyqtSignal
from Queue import Queue, Empty
log = get_logger(__name__)
class DriveEdit(Worker):
localScanFinished = pyqtSignal()
driveEditUploadCompleted = pyqtSignal()
'''
classdocs
'''
def __init__(self, manager, folder):
'''
Constructor
'''
super(DriveEdit, self).__init__()
self._manager = manager
self._thread.started.connect(self.run)
self._event_handler = None
self._metrics = dict()
self._metrics['edit_files'] = 0
self._observer = None
if type(folder) == str:
folder = unicode(folder)
self._folder = folder
self._local_client = LocalClient(self._folder)
self._upload_queue = Queue()
self._error_queue = BlacklistQueue()
self._stop = False
def stop(self):
super(DriveEdit, self).stop()
self._stop = True
def stop_client(self, reason):
if self._stop:
raise ThreadInterrupt
def _cleanup(self):
log.debug("Cleanup DriveEdit folder")
shutil.rmtree(self._folder, ignore_errors=True)
if not os.path.exists(self._folder):
os.mkdir(self._folder)
def _get_engine(self, url, user=None):
if url.endswith('/'):
url = url[:-1]
for engine in self._manager.get_engines().values():
bind = engine.get_binder()
server_url = bind.server_url
if server_url.endswith('/'):
server_url = server_url[:-1]
if server_url == url and (user is None or user == bind.username):
return engine
# Some backend are case insensitive
if user is None:
return None
user = user.lower()
for engine in self._manager.get_engines().values():
bind = engine.get_binder()
server_url = bind.server_url
if server_url.endswith('/'):
server_url = server_url[:-1]
if server_url == url and user == bind.username.lower():
return engine
return None
def _download_content(self, engine, remote_client, info, file_path, url=None):
file_dir = os.path.dirname(file_path)
file_name = os.path.basename(file_path)
file_out = os.path.join(file_dir, DOWNLOAD_TMP_FILE_PREFIX + file_name
+ DOWNLOAD_TMP_FILE_SUFFIX)
# Close to processor method - should try to refactor ?
pair = engine.get_dao().get_valid_duplicate_file(info.digest)
if pair:
local_client = engine.get_local_client()
shutil.copy(local_client._abspath(pair.local_path), file_out)
else:
if url is not None:
remote_client.do_get(url, file_out=file_out, digest=info.digest, digest_algorithm=info.digest_algorithm)
else:
remote_client.get_blob(info.uid, file_out=file_out)
return file_out
    def _prepare_edit(self, server_url, doc_id, filename, user=None, download_url=None):
        """Download *doc_id* into its own sub-folder and tag it for later upload.

        Creates <folder>/<doc_id>/<filename>, records the document metadata
        (uid, server URL, user, digest + algorithm, original byte name) as
        remote-id xattrs on the parent directory so _handle_queue can locate
        the remote counterpart, then returns the local file path — or None
        when no engine matches *server_url*.
        """
        engine = self._get_engine(server_url, user=user)
        if engine is None:
            # TO_REVIEW Display an error message
            log.debug("No engine found for %s(%s)", server_url, doc_id)
            return
        # Get document info
        remote_client = engine.get_remote_doc_client()
        # Avoid any link with the engine, remote_doc are not cached so we can do that
        remote_client.check_suspended = self.stop_client
        info = remote_client.get_info(doc_id)
        # Create local structure: one directory per document id
        dir_path = os.path.join(self._folder, doc_id)
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        log.trace('Raw filename: %r', filename)
        filename = safe_filename(urllib2.unquote(filename))
        log.trace('Unquoted filename = %r', filename)
        decoded_filename = force_decode(filename)
        if decoded_filename is None:
            decoded_filename = filename
        else:
            # Always use utf-8 encoding for xattr
            filename = decoded_filename.encode('utf-8')
        log.debug("Editing %r ('nxdriveeditname' xattr: %r)", decoded_filename, filename)
        file_path = os.path.join(dir_path, decoded_filename)
        # Download the file
        url = None
        if download_url is not None:
            # Build an absolute download URL from the server root
            url = server_url
            if not url.endswith('/'):
                url += '/'
            url += download_url
        tmp_file = self._download_content(engine, remote_client, info, file_path, url=url)
        if tmp_file is None:
            log.debug("Download failed")
            return
        # Set the remote_id xattrs used by _handle_queue to find the document back
        dir_path = self._local_client.get_path(os.path.dirname(file_path))
        self._local_client.set_remote_id(dir_path, doc_id)
        self._local_client.set_remote_id(dir_path, server_url, "nxdriveedit")
        if user is not None:
            self._local_client.set_remote_id(dir_path, user, "nxdriveedituser")
        if info.digest_algorithm is not None:
            self._local_client.set_remote_id(dir_path, info.digest_algorithm, "nxdriveeditdigestalgorithm")
        if info.digest is not None:
            self._local_client.set_remote_id(dir_path, info.digest, "nxdriveeditdigest")
        self._local_client.set_remote_id(dir_path, filename, "nxdriveeditname")
        # Rename to final filename
        # Under Windows first need to delete target file if exists, otherwise will get a 183 WindowsError
        if sys.platform == 'win32' and os.path.exists(file_path):
            os.unlink(file_path)
        os.rename(tmp_file, file_path)
        return file_path
def edit(self, server_url, doc_id, filename, user=None, download_url=None):
# Download file
file_path = self._prepare_edit(server_url, doc_id, filename, user=user, download_url=download_url)
# Launch it
if file_path is not None:
self._manager.open_local_file(file_path)
    def _handle_queue(self):
        """Drain the upload queue, pushing locally modified files back to the
        server; failing items are parked in the error queue for retry."""
        uploaded = False
        # Unqueue any errors: give previously failed refs another chance now
        item = self._error_queue.get()
        while (item is not None):
            self._upload_queue.put(item.get())
            item = self._error_queue.get()
        # Handle the upload queue
        while (not self._upload_queue.empty()):
            try:
                ref = self._upload_queue.get_nowait()
                log.trace('Handling DriveEdit queue ref: %r', ref)
            except Empty:
                break
            # The parent directory carries the xattrs written by _prepare_edit
            dir_path = os.path.dirname(ref)
            uid = self._local_client.get_remote_id(dir_path)
            server_url = self._local_client.get_remote_id(dir_path, "nxdriveedit")
            user = self._local_client.get_remote_id(dir_path, "nxdriveedituser")
            engine = self._get_engine(server_url, user=user)
            remote_client = engine.get_remote_doc_client()
            remote_client.check_suspended = self.stop_client
            digest_algorithm = self._local_client.get_remote_id(dir_path, "nxdriveeditdigestalgorithm")
            digest = self._local_client.get_remote_id(dir_path, "nxdriveeditdigest")
            # Don't update if digest are the same (file unchanged since download)
            info = self._local_client.get_info(ref)
            try:
                if info.get_digest(digest_func=digest_algorithm) == digest:
                    continue
                # TO_REVIEW Should check if server-side blob has changed ?
                # Update the document - should verify the remote hash - NXDRIVE-187
                log.debug('Uploading file %s for user %s', self._local_client._abspath(ref),
                          user)
                remote_client.stream_update(uid, self._local_client._abspath(ref), apply_versioning_policy=True)
            except ThreadInterrupt:
                raise
            except Exception as e:
                # Try again in 30s: requeue via the blacklist/error queue
                log.trace("Exception on drive edit: %r", e)
                self._error_queue.push(ref, ref)
                continue
            uploaded = True
        if uploaded:
            log.debug('Emitting driveEditUploadCompleted')
            self.driveEditUploadCompleted.emit()
    def _execute(self):
        """Worker main loop: clean the folder, start the FS watchdog, then
        alternate between flushing the upload queue and watchdog events."""
        try:
            self._watchdog_queue = Queue()
            self._action = Action("Clean up folder")
            self._cleanup()
            self._action = Action("Setup watchdog")
            self._setup_watchdog()
            self._end_action()
            while (1):
                # Cooperative interruption point for the thread framework
                self._interact()
                try:
                    self._handle_queue()
                except NotFound:
                    # Remote document vanished: drop it and keep looping
                    pass
                while (not self._watchdog_queue.empty()):
                    evt = self._watchdog_queue.get()
                    self.handle_watchdog_event(evt)
                sleep(0.01)
        except ThreadInterrupt:
            raise
        finally:
            # Always stop the observer thread, even on interrupt
            self._stop_watchdog()
def get_metrics(self):
metrics = super(DriveEdit, self).get_metrics()
if self._event_handler is not None:
metrics['fs_events'] = self._event_handler.counter
return dict(metrics.items() + self._metrics.items())
    def _setup_watchdog(self):
        """Start a watchdog Observer that feeds FS changes under the
        DriveEdit folder to this worker's event handler."""
        # Imported lazily so the module loads even without watchdog installed
        from watchdog.observers import Observer
        log.debug("Watching FS modification on : %s", self._folder)
        self._event_handler = DriveFSEventHandler(self)
        self._observer = Observer()
        self._observer.schedule(self._event_handler, self._folder, recursive=True)
        self._observer.start()
def _stop_watchdog(self, raise_on_error=True):
if self._observer is None:
return
log.info("Stopping FS Observer thread")
try:
self._observer.stop()
except Exception as e:
log.warn("Can't stop FS observer : %r", e)
# Wait for all observers to stop
try:
self._observer.join()
except Exception as e:
log.warn("Can't join FS observer : %r", e)
# Delete all observers
self._observer = None
    def handle_watchdog_event(self, evt):
        """React to one FS event: queue the edited file for upload when the
        touched file matches the name stored in the 'nxdriveeditname' xattr."""
        self._action = Action("Handle watchdog event")
        log.debug("Handling watchdog event [%s] on %r", evt.event_type, evt.src_path)
        try:
            src_path = normalize_event_filename(evt.src_path)
            # Event on the folder by itself
            if os.path.isdir(src_path):
                return
            ref = self._local_client.get_path(src_path)
            file_name = os.path.basename(src_path)
            # Ignore editor temporary/backup files
            if self._local_client.is_temp_file(file_name):
                return
            queue = False
            if evt.event_type == 'modified' or evt.event_type == 'created':
                queue = True
            if evt.event_type == 'moved':
                # A move (e.g. an editor's atomic save) targets dest_path
                ref = self._local_client.get_path(evt.dest_path)
                file_name = os.path.basename(evt.dest_path)
                queue = True
            dir_path = self._local_client.get_path(os.path.dirname(src_path))
            name = self._local_client.get_remote_id(dir_path, "nxdriveeditname")
            if name is None:
                # Not a DriveEdit-managed directory
                return
            decoded_name = force_decode(name)
            if decoded_name is not None:
                name = decoded_name
            if name != file_name:
                # Some other file in the directory, not the edited document
                return
            if queue:
                # ADD TO UPLOAD QUEUE
                self._upload_queue.put(ref)
                return
        except Exception as e:
            log.warn("Watchdog exception : %r" % e)
            log.exception(e)
        finally:
            self._end_action()
|
import os
import tempfile
import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api
from .common import auth_check, random_str, string_to_encoding
from .conftest import wait_for
import time
def test_node_fields(admin_mc):
    """Verify the CRUD permission string for every field of the node schema."""
    cclient = admin_mc.client
    fields = {
        'annotations': 'cru',
        'appliedNodeVersion': 'r',
        'labels': 'cru',
        'nodeTaints': 'r',
        'namespaceId': 'cr',
        'conditions': 'r',
        'allocatable': 'r',
        'capacity': 'r',
        'hostname': 'r',
        'info': 'r',
        'ipAddress': 'r',
        'externalIpAddress': 'r',
        'limits': 'r',
        'publicEndpoints': 'r',
        'nodePoolId': 'r',
        'nodePlan': 'r',
        'nodeName': 'r',
        'requested': 'r',
        'clusterId': 'cr',
        'etcd': 'cr',
        'controlPlane': 'cr',
        'worker': 'cr',
        'requestedHostname': 'cr',
        'volumesAttached': 'r',
        'nodeTemplateId': 'cr',
        'volumesInUse': 'r',
        'podCidr': 'r',
        'podCidrs': 'r',
        'name': 'cru',
        'taints': 'ru',
        'unschedulable': 'r',
        'providerId': 'r',
        'sshUser': 'r',
        'imported': 'cru',
        'dockerInfo': 'r',
    }
    # Every per-driver *Config field is create+read
    fields.update({name: 'cr'
                   for name in cclient.schema.types['node'].resourceFields.keys()
                   if name.endswith("Config")})
    fields['customConfig'] = 'cru'
    auth_check(cclient.schema, 'node', 'crud', fields)
def test_node_template_delete(admin_mc, remove_resource):
    """Test deleting a nodeTemplate that is in use by a nodePool.

    The nodeTemplate should not be deleted while in use, after the nodePool is
    removed, the nodes referencing the nodeTemplate will be deleted
    and the nodeTemplate should delete
    """
    client = admin_mc.client
    node_template, cloud_credential = create_node_template(client)
    node_pool = client.create_node_pool(
        nodeTemplateId=node_template.id,
        hostnamePrefix="test1",
        clusterId="local")
    # node_pool needs to come first or the API will stop the delete if the
    # template still exists
    remove_resource(node_pool)
    remove_resource(node_template)
    assert node_pool.nodeTemplateId == node_template.id
    # Attempting to delete the template should raise an ApiError
    with pytest.raises(ApiError) as e:
        client.delete(node_template)
    assert e.value.error.status == 405
    # remove link should not be available while the pool still references it
    node_template = client.reload(node_template)
    assert 'remove' not in node_template.links
    client.delete(node_pool)

    def _node_pool_reload():
        # Pool is fully gone once reload() returns None
        np = client.reload(node_pool)
        return np is None
    wait_for(_node_pool_reload)

    def _wait_for_remove_link():
        # NOTE(review): checks hasattr() on nt.links here but uses the 'in'
        # operator above — presumably links supports both; confirm in client.
        nt = client.reload(node_template)
        if hasattr(nt.links, "remove"):
            return True
        return False
    wait_for(_wait_for_remove_link)
    # NodePool and Nodes are gone, template should delete
    client.delete(node_template)
    node_template = client.reload(node_template)
    assert node_template is None
def test_cloud_credential_delete(admin_mc, remove_resource):
    """A cloud credential referenced by a node template that is in use by a
    node pool must not be deletable."""
    client = admin_mc.client
    node_template, cloud_credential = create_node_template(client)
    node_pool = client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=node_template.id)
    assert node_pool.nodeTemplateId == node_template.id
    wait_for_node_template(client, node_template.id)
    # The credential is still referenced, so the API must refuse the delete
    with pytest.raises(ApiError) as e:
        client.delete(cloud_credential)
    assert e.value.error.status == 405
def test_writing_config_to_disk(admin_mc, wait_remove_resource):
    """Test that userdata and other fields from node driver configs are being
    written to disk as expected.
    """
    client = admin_mc.client
    tempdir = tempfile.gettempdir()
    cloud_credential = client.create_cloud_credential(
        digitaloceancredentialConfig={"accessToken": "test"})
    wait_remove_resource(cloud_credential)
    data = {'userdata': 'do cool stuff\n',
            # This validates ssh keys don't drop the ending \n
            'id_rsa': 'some\nfake\nstuff\n'
            }

    def _node_template():
        try:
            return client.create_node_template(
                digitaloceanConfig={
                    'userdata': data['userdata'],
                    'sshKeyContents': data['id_rsa']
                },
                name=random_str(),
                cloudCredentialId=cloud_credential.id)
        except ApiError:
            return False
    node_template = wait_for(_node_template,
                             fail_handler=lambda:
                             'failed to create node template')
    wait_remove_resource(node_template)
    node_pool = client.create_node_pool(
        nodeTemplateId=node_template.id,
        hostnamePrefix="test1",
        clusterId="local")
    wait_remove_resource(node_pool)
    for key, value in data.items():
        dir_name = string_to_encoding(value)
        full_path = os.path.join(tempdir, dir_name, key)

        def file_exists():
            try:
                os.stat(full_path)
                return True
            except FileNotFoundError:
                return False
        # Files can take well over 10s to appear on a loaded system, which
        # made this test flaky; wait up to 120s instead.
        wait_for(file_exists, timeout=120,
                 fail_handler=lambda: 'file is missing from disk')
        with open(full_path, 'r') as f:
            contents = f.read()
        assert contents == value
def test_node_driver_schema(admin_mc):
    """Test node driver schemas have path fields removed."""
    drivers = ['amazonec2config', 'digitaloceanconfig', 'azureconfig']
    bad_fields = ['sshKeypath', 'sshKeyPath', 'existingKeyPath']
    client = admin_mc.client
    for driver in drivers:
        resource_fields = client.schema.types[driver].resourceFields
        for field in bad_fields:
            assert field not in resource_fields, \
                'Driver {} has field {}'.format(driver, field)
def test_amazon_node_driver_schema(admin_mc):
    """Test amazon node driver schema supports AWS-specific resource fields"""
    required_fields = ['encryptEbsVolume']
    client = admin_mc.client
    resource_fields = client.schema.types['amazonec2config'].resourceFields
    for field in required_fields:
        assert field in resource_fields, \
            'amazonec2config missing support for field {}'.format(field)
def create_node_template(client, clientId="test"):
    """Create an Azure cloud credential plus a node template referencing it;
    return (node_template, cloud_credential)."""
    credential_config = {"clientId": clientId,
                         "subscriptionId": "test",
                         "clientSecret": "test"}
    cloud_credential = client.create_cloud_credential(
        azurecredentialConfig=credential_config)
    wait_for_cloud_credential(client, cloud_credential.id)
    node_template = client.create_node_template(
        azureConfig={},
        cloudCredentialId=cloud_credential.id)
    assert node_template.cloudCredentialId == cloud_credential.id
    return node_template, cloud_credential
def wait_for_cloud_credential(client, cloud_credential_id, timeout=60):
    """Poll until the cloud credential *cloud_credential_id* is listed and
    return it; raise Exception after *timeout* seconds.

    The list+match logic was duplicated before and after the wait loop and a
    debug print always printed None; both folded into one helper.
    Polling backs off exponentially starting at 0.5s.
    """
    def _find():
        for val in client.list_cloud_credential():
            if val["id"] == cloud_credential_id:
                return val
        return None

    start = time.time()
    interval = 0.5
    cred = _find()
    while cred is None:
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for cloud credential')
        time.sleep(interval)
        interval *= 2
        cred = _find()
    return cred
def wait_for_node_template(client, node_template_id, timeout=60):
    """Poll until the node template *node_template_id* is listed and return
    it; raise Exception after *timeout* seconds.

    Fixes two defects: the original found the template but never returned it
    (inconsistent with wait_for_cloud_credential), and it always slept 0.5s
    before the first check. Now the list is checked immediately, then polling
    backs off exponentially from 0.5s.
    """
    start = time.time()
    interval = 0.5
    while True:
        for each_template in client.list_node_template():
            if each_template["id"] == node_template_id:
                return each_template
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for node template lister')
        time.sleep(interval)
        interval *= 2
def test_user_access_to_other_template(user_factory, remove_resource):
    """A normal user's node pool must not be able to reference a node
    template owned by another user."""
    user1_client = user_factory().client
    user2_client = user_factory().client
    template_name = "nt-" + random_str()
    user2_node_template = user2_client.create_node_template(
        name=template_name, azureConfig={})
    remove_resource(user2_node_template)
    wait_for_node_template(user2_client, user2_node_template.id)
    # user1 cannot see user2's template, so the create is rejected with 404
    with pytest.raises(ApiError) as e:
        user1_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=user2_node_template.id)
    assert e.value.error.status == 404
    assert e.value.error.message == \
        "unable to find node template [%s]" % user2_node_template.id
@pytest.mark.skip(reason="flaky, todo in 27885")
def test_user_cluster_owner_access_to_pool(admin_mc,
                                           user_factory,
                                           remove_resource,
                                           wait_remove_resource):
    """Test that a cluster created by the admin is accessible by another user
    added as a cluster-owner, validate nodepool changing and switching
    nodetemplate"""
    # make an admin and user client
    admin_client = admin_mc.client
    k8sclient = CoreV1Api(admin_mc.k8s_client)
    user = user_factory()
    # make a cluster
    cluster = admin_client.create_cluster(
        name=random_str(),
        rancherKubernetesEngineConfig={
            "accessKey": "junk"
        }
    )
    remove_resource(cluster)

    # wait for the namespace created by the cluster
    def _check_namespace(cluster):
        for n in k8sclient.list_namespace().items:
            if n.metadata.name == cluster.id:
                return True
        return False
    wait_for(lambda: _check_namespace(cluster))
    # add user as cluster-owner to the cluster
    crtb = admin_client.create_cluster_role_template_binding(
        userId=user.user.id,
        roleTemplateId="cluster-owner",
        clusterId=cluster.id,
    )
    remove_resource(crtb)
    # admin creates a node template and assigns to a pool
    admin_node_template, admin_cloud_credential = create_node_template(
        admin_client, "admincloudcred-" + random_str())
    admin_pool = admin_client.create_node_pool(
        nodeTemplateId=admin_node_template.id,
        hostnamePrefix="test",
        clusterId=cluster.id)
    wait_remove_resource(admin_pool)
    remove_resource(admin_cloud_credential)
    remove_resource(admin_node_template)
    # create a template for the user to try and assign
    user_node_template, user_cloud_credential = create_node_template(
        user.client, "usercloudcred-" + random_str())
    remove_resource(user_cloud_credential)
    remove_resource(user_node_template)
    # will pass, cluster owner user can change pool quantity
    user.client.update(admin_pool, quantity=2)
    # will pass, can set to a template owned by the user
    user.client.update(admin_pool, nodeTemplateId=user_node_template.id)
    # will fail, can not update nodepool template,
    # if no access to the original template
    with pytest.raises(ApiError) as e:
        user.client.update(admin_pool, nodeTemplateId=admin_node_template.id)
    assert e.value.error.status == 404
    assert e.value.error.message == "unable to find node template [%s]" % \
        admin_node_template.id
    # delete this by hand and the rest will cleanup
    admin_client.delete(admin_pool)
def test_admin_access_to_node_template(admin_mc, list_remove_resource):
    """An admin's node pool can reference a node template the admin created."""
    admin_client = admin_mc.client
    template_name = "nt-" + random_str()
    admin_node_template = admin_client.create_node_template(
        name=template_name, azureConfig={})
    remove_list = [admin_node_template]
    list_remove_resource(remove_list)
    # Pool creation only happens after validation passes, so success here
    # proves the admin had access to the template.
    node_pool = admin_client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=admin_node_template.id)
    remove_list.insert(0, node_pool)
def test_user_access_to_node_template(user_mc, remove_resource):
    """A normal user's node pool request may reference the user's own node
    template; the request then fails on pool permissions, not the template."""
    user_client = user_mc.client
    template_name = "nt-" + random_str()
    user_node_template = user_client.create_node_template(
        name=template_name, azureConfig={})
    remove_resource(user_node_template)
    wait_for_node_template(user_client, user_node_template.id)
    # Template validation passes (owner has access), but plain users may not
    # create node pools, so the request is rejected with 403 afterwards.
    with pytest.raises(ApiError) as e:
        user_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=user_node_template.id)
    assert e.value.error.status == 403
    assert 'cannot create resource "nodepools"' in e.value.error.message
def test_admin_access_user_template(admin_mc, user_mc, list_remove_resource):
    """An admin's node pool can reference a node template owned by another
    user."""
    admin_client = admin_mc.client
    user_client = user_mc.client
    template_name = "nt-" + random_str()
    user_node_template = user_client.create_node_template(
        name=template_name, azureConfig={})
    remove_list = [user_node_template]
    list_remove_resource(remove_list)
    # Admins pass node pool validation even for templates owned by others.
    node_pool = admin_client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=user_node_template.id)
    remove_list.insert(0, node_pool)
def test_no_node_template(user_mc):
    """Creating a node pool with a non-existent node template id must fail."""
    user_client = user_mc.client
    invalid_template_id = "thisinsnotatemplateid"
    # A bogus template id is rejected with a 404 before any pool is created
    with pytest.raises(ApiError) as e:
        user_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=invalid_template_id)
    assert e.value.error.status == 404
    assert e.value.error.message == \
        "unable to find node template [%s]" % invalid_template_id
Increase the wait timeout (10s -> 120s) for the flaky test_writing_config_to_disk test.
import os
import tempfile
import pytest
from rancher import ApiError
from kubernetes.client import CoreV1Api
from .common import auth_check, random_str, string_to_encoding
from .conftest import wait_for
import time
def test_node_fields(admin_mc):
    """Verify the CRUD permission string for every field of the node schema."""
    cclient = admin_mc.client
    fields = {
        'annotations': 'cru',
        'appliedNodeVersion': 'r',
        'labels': 'cru',
        'nodeTaints': 'r',
        'namespaceId': 'cr',
        'conditions': 'r',
        'allocatable': 'r',
        'capacity': 'r',
        'hostname': 'r',
        'info': 'r',
        'ipAddress': 'r',
        'externalIpAddress': 'r',
        'limits': 'r',
        'publicEndpoints': 'r',
        'nodePoolId': 'r',
        'nodePlan': 'r',
        'nodeName': 'r',
        'requested': 'r',
        'clusterId': 'cr',
        'etcd': 'cr',
        'controlPlane': 'cr',
        'worker': 'cr',
        'requestedHostname': 'cr',
        'volumesAttached': 'r',
        'nodeTemplateId': 'cr',
        'volumesInUse': 'r',
        'podCidr': 'r',
        'podCidrs': 'r',
        'name': 'cru',
        'taints': 'ru',
        'unschedulable': 'r',
        'providerId': 'r',
        'sshUser': 'r',
        'imported': 'cru',
        'dockerInfo': 'r',
    }
    # Every per-driver *Config field is create+read
    fields.update({name: 'cr'
                   for name in cclient.schema.types['node'].resourceFields.keys()
                   if name.endswith("Config")})
    fields['customConfig'] = 'cru'
    auth_check(cclient.schema, 'node', 'crud', fields)
def test_node_template_delete(admin_mc, remove_resource):
    """Test deleting a nodeTemplate that is in use by a nodePool.

    The nodeTemplate should not be deleted while in use, after the nodePool is
    removed, the nodes referencing the nodeTemplate will be deleted
    and the nodeTemplate should delete
    """
    client = admin_mc.client
    node_template, cloud_credential = create_node_template(client)
    node_pool = client.create_node_pool(
        nodeTemplateId=node_template.id,
        hostnamePrefix="test1",
        clusterId="local")
    # node_pool needs to come first or the API will stop the delete if the
    # template still exists
    remove_resource(node_pool)
    remove_resource(node_template)
    assert node_pool.nodeTemplateId == node_template.id
    # Attempting to delete the template should raise an ApiError
    with pytest.raises(ApiError) as e:
        client.delete(node_template)
    assert e.value.error.status == 405
    # remove link should not be available while the pool still references it
    node_template = client.reload(node_template)
    assert 'remove' not in node_template.links
    client.delete(node_pool)

    def _node_pool_reload():
        # Pool is fully gone once reload() returns None
        np = client.reload(node_pool)
        return np is None
    wait_for(_node_pool_reload)

    def _wait_for_remove_link():
        # NOTE(review): checks hasattr() on nt.links here but uses the 'in'
        # operator above — presumably links supports both; confirm in client.
        nt = client.reload(node_template)
        if hasattr(nt.links, "remove"):
            return True
        return False
    wait_for(_wait_for_remove_link)
    # NodePool and Nodes are gone, template should delete
    client.delete(node_template)
    node_template = client.reload(node_template)
    assert node_template is None
def test_cloud_credential_delete(admin_mc, remove_resource):
    """A cloud credential referenced by a node template that is in use by a
    node pool must not be deletable."""
    client = admin_mc.client
    node_template, cloud_credential = create_node_template(client)
    node_pool = client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=node_template.id)
    assert node_pool.nodeTemplateId == node_template.id
    wait_for_node_template(client, node_template.id)
    # The credential is still referenced, so the API must refuse the delete
    with pytest.raises(ApiError) as e:
        client.delete(cloud_credential)
    assert e.value.error.status == 405
def test_writing_config_to_disk(admin_mc, wait_remove_resource):
    """Test that userdata and other fields from node driver configs are being
    written to disk as expected.
    """
    client = admin_mc.client
    tempdir = tempfile.gettempdir()
    cloud_credential = client.create_cloud_credential(
        digitaloceancredentialConfig={"accessToken": "test"})
    wait_remove_resource(cloud_credential)
    data = {'userdata': 'do cool stuff\n',
            # This validates ssh keys don't drop the ending \n
            'id_rsa': 'some\nfake\nstuff\n'
            }

    def _node_template():
        # Creation can transiently fail while the credential settles
        try:
            return client.create_node_template(
                digitaloceanConfig={
                    'userdata': data['userdata'],
                    'sshKeyContents': data['id_rsa']
                },
                name=random_str(),
                cloudCredentialId=cloud_credential.id)
        except ApiError:
            return False
    node_template = wait_for(_node_template,
                             fail_handler=lambda:
                             'failed to create node template')
    wait_remove_resource(node_template)
    node_pool = client.create_node_pool(
        nodeTemplateId=node_template.id,
        hostnamePrefix="test1",
        clusterId="local")
    wait_remove_resource(node_pool)
    for key, value in data.items():
        # Each config value is written under a directory derived from it
        dir_name = string_to_encoding(value)
        full_path = os.path.join(tempdir, dir_name, key)

        def file_exists():
            try:
                os.stat(full_path)
                return True
            except FileNotFoundError:
                return False
        # Generous 120s timeout: shorter waits made this test flaky
        wait_for(file_exists, timeout=120,
                 fail_handler=lambda: 'file is missing from disk')
        with open(full_path, 'r') as f:
            contents = f.read()
        assert contents == value
def test_node_driver_schema(admin_mc):
    """Test node driver schemas have path fields removed."""
    drivers = ['amazonec2config', 'digitaloceanconfig', 'azureconfig']
    bad_fields = ['sshKeypath', 'sshKeyPath', 'existingKeyPath']
    client = admin_mc.client
    for driver in drivers:
        resource_fields = client.schema.types[driver].resourceFields
        for field in bad_fields:
            assert field not in resource_fields, \
                'Driver {} has field {}'.format(driver, field)
def test_amazon_node_driver_schema(admin_mc):
    """Test amazon node driver schema supports AWS-specific resource fields"""
    required_fields = ['encryptEbsVolume']
    client = admin_mc.client
    resource_fields = client.schema.types['amazonec2config'].resourceFields
    for field in required_fields:
        assert field in resource_fields, \
            'amazonec2config missing support for field {}'.format(field)
def create_node_template(client, clientId="test"):
    """Create an Azure cloud credential plus a node template referencing it;
    return (node_template, cloud_credential)."""
    credential_config = {"clientId": clientId,
                         "subscriptionId": "test",
                         "clientSecret": "test"}
    cloud_credential = client.create_cloud_credential(
        azurecredentialConfig=credential_config)
    wait_for_cloud_credential(client, cloud_credential.id)
    node_template = client.create_node_template(
        azureConfig={},
        cloudCredentialId=cloud_credential.id)
    assert node_template.cloudCredentialId == cloud_credential.id
    return node_template, cloud_credential
def wait_for_cloud_credential(client, cloud_credential_id, timeout=60):
    """Poll until the cloud credential *cloud_credential_id* is listed and
    return it; raise Exception after *timeout* seconds.

    The list+match logic was duplicated before and after the wait loop and a
    debug print always printed None; both folded into one helper.
    Polling backs off exponentially starting at 0.5s.
    """
    def _find():
        for val in client.list_cloud_credential():
            if val["id"] == cloud_credential_id:
                return val
        return None

    start = time.time()
    interval = 0.5
    cred = _find()
    while cred is None:
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for cloud credential')
        time.sleep(interval)
        interval *= 2
        cred = _find()
    return cred
def wait_for_node_template(client, node_template_id, timeout=60):
    """Poll until the node template *node_template_id* is listed and return
    it; raise Exception after *timeout* seconds.

    Fixes two defects: the original found the template but never returned it
    (inconsistent with wait_for_cloud_credential), and it always slept 0.5s
    before the first check. Now the list is checked immediately, then polling
    backs off exponentially from 0.5s.
    """
    start = time.time()
    interval = 0.5
    while True:
        for each_template in client.list_node_template():
            if each_template["id"] == node_template_id:
                return each_template
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for node template lister')
        time.sleep(interval)
        interval *= 2
def test_user_access_to_other_template(user_factory, remove_resource):
    """A normal user's node pool must not be able to reference a node
    template owned by another user."""
    user1_client = user_factory().client
    user2_client = user_factory().client
    template_name = "nt-" + random_str()
    user2_node_template = user2_client.create_node_template(
        name=template_name, azureConfig={})
    remove_resource(user2_node_template)
    wait_for_node_template(user2_client, user2_node_template.id)
    # user1 cannot see user2's template, so the create is rejected with 404
    with pytest.raises(ApiError) as e:
        user1_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=user2_node_template.id)
    assert e.value.error.status == 404
    assert e.value.error.message == \
        "unable to find node template [%s]" % user2_node_template.id
@pytest.mark.skip(reason="flaky, todo in 27885")
def test_user_cluster_owner_access_to_pool(admin_mc,
                                           user_factory,
                                           remove_resource,
                                           wait_remove_resource):
    """Test that a cluster created by the admin is accessible by another user
    added as a cluster-owner, validate nodepool changing and switching
    nodetemplate"""
    # make an admin and user client
    admin_client = admin_mc.client
    k8sclient = CoreV1Api(admin_mc.k8s_client)
    user = user_factory()
    # make a cluster
    cluster = admin_client.create_cluster(
        name=random_str(),
        rancherKubernetesEngineConfig={
            "accessKey": "junk"
        }
    )
    remove_resource(cluster)

    # wait for the namespace created by the cluster
    def _check_namespace(cluster):
        for n in k8sclient.list_namespace().items:
            if n.metadata.name == cluster.id:
                return True
        return False
    wait_for(lambda: _check_namespace(cluster))
    # add user as cluster-owner to the cluster
    crtb = admin_client.create_cluster_role_template_binding(
        userId=user.user.id,
        roleTemplateId="cluster-owner",
        clusterId=cluster.id,
    )
    remove_resource(crtb)
    # admin creates a node template and assigns to a pool
    admin_node_template, admin_cloud_credential = create_node_template(
        admin_client, "admincloudcred-" + random_str())
    admin_pool = admin_client.create_node_pool(
        nodeTemplateId=admin_node_template.id,
        hostnamePrefix="test",
        clusterId=cluster.id)
    wait_remove_resource(admin_pool)
    remove_resource(admin_cloud_credential)
    remove_resource(admin_node_template)
    # create a template for the user to try and assign
    user_node_template, user_cloud_credential = create_node_template(
        user.client, "usercloudcred-" + random_str())
    remove_resource(user_cloud_credential)
    remove_resource(user_node_template)
    # will pass, cluster owner user can change pool quantity
    user.client.update(admin_pool, quantity=2)
    # will pass, can set to a template owned by the user
    user.client.update(admin_pool, nodeTemplateId=user_node_template.id)
    # will fail, can not update nodepool template,
    # if no access to the original template
    with pytest.raises(ApiError) as e:
        user.client.update(admin_pool, nodeTemplateId=admin_node_template.id)
    assert e.value.error.status == 404
    assert e.value.error.message == "unable to find node template [%s]" % \
        admin_node_template.id
    # delete this by hand and the rest will cleanup
    admin_client.delete(admin_pool)
def test_admin_access_to_node_template(admin_mc, list_remove_resource):
    """An admin's node pool can reference a node template the admin created."""
    admin_client = admin_mc.client
    template_name = "nt-" + random_str()
    admin_node_template = admin_client.create_node_template(
        name=template_name, azureConfig={})
    remove_list = [admin_node_template]
    list_remove_resource(remove_list)
    # Pool creation only happens after validation passes, so success here
    # proves the admin had access to the template.
    node_pool = admin_client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=admin_node_template.id)
    remove_list.insert(0, node_pool)
def test_user_access_to_node_template(user_mc, remove_resource):
    """A normal user's node pool request may reference the user's own node
    template; the request then fails on pool permissions, not the template."""
    user_client = user_mc.client
    template_name = "nt-" + random_str()
    user_node_template = user_client.create_node_template(
        name=template_name, azureConfig={})
    remove_resource(user_node_template)
    wait_for_node_template(user_client, user_node_template.id)
    # Template validation passes (owner has access), but plain users may not
    # create node pools, so the request is rejected with 403 afterwards.
    with pytest.raises(ApiError) as e:
        user_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=user_node_template.id)
    assert e.value.error.status == 403
    assert 'cannot create resource "nodepools"' in e.value.error.message
def test_admin_access_user_template(admin_mc, user_mc, list_remove_resource):
    """An admin's node pool can reference a node template owned by another
    user."""
    admin_client = admin_mc.client
    user_client = user_mc.client
    template_name = "nt-" + random_str()
    user_node_template = user_client.create_node_template(
        name=template_name, azureConfig={})
    remove_list = [user_node_template]
    list_remove_resource(remove_list)
    # Admins pass node pool validation even for templates owned by others.
    node_pool = admin_client.create_node_pool(
        clusterId="local",
        hostnamePrefix="test1",
        nodeTemplateId=user_node_template.id)
    remove_list.insert(0, node_pool)
def test_no_node_template(user_mc):
    """Creating a node pool with a non-existent node template id must fail."""
    user_client = user_mc.client
    invalid_template_id = "thisinsnotatemplateid"
    # A bogus template id is rejected with a 404 before any pool is created
    with pytest.raises(ApiError) as e:
        user_client.create_node_pool(
            clusterId="local",
            hostnamePrefix="test1",
            nodeTemplateId=invalid_template_id)
    assert e.value.error.status == 404
    assert e.value.error.message == \
        "unable to find node template [%s]" % invalid_template_id
|
# -*- coding: utf-8 -*-
import FreeCAD,Draft,Spreadsheet
# create spreadsheet and prepere it for data
# Replace any previous report sheet so the macro can be re-run safely.
if FreeCAD.ActiveDocument.getObject("toCut"):
    FreeCAD.ActiveDocument.removeObject("toCut")
result = FreeCAD.ActiveDocument.addObject("Spreadsheet::Sheet","toCut")
# Header row; B1:D1 is merged to hold the "size x size" dimension pair.
# (Column labels are intentionally in Polish.)
result.mergeCells('B1:D1')
result.set( 'A1', 'Typ' )
result.set( 'B1', 'Wymiary' )
result.set( 'E1', 'Grubość' )
result.set( 'F1', 'Sztuki' )
result.set( 'G1', 'Metry kwadratowe' )
result.setForeground( 'A1:G1', (0,0,0) )
result.setBackground( 'A1:G1', (1,1,1) )
result.setStyle( 'A1:G1', 'bold', 'add')
result.setAlignment( 'A1:G1', 'top', 'keep' )
result.setAlignment( 'A1:G1', 'center', 'keep' )
# Scan all objects and count chipboards (cubes). `quantity` maps a sorted
# "LxWxH" dimension key to the number of boards of that size; sorting the
# dimensions lets differently-oriented boards of equal size share one key.
objs = FreeCAD.ActiveDocument.Objects
quantity = dict()
sqmSum = dict()
for obj in objs:
    # support for cube objects
    if obj.isDerivedFrom("Part::Box"):
        keyArr = [ str(obj.Length), str(obj.Width), str(obj.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        if key in quantity:
            quantity[key] = quantity[key] + 1
        else:
            quantity[key] = 1
    # support for array objects with cube as base
    elif obj.isDerivedFrom("Part::FeaturePython") and obj.Base.isDerivedFrom("Part::Box"):
        # the main box cube will be added in next loop
        arrayQuantity = obj.NumberX * obj.NumberY * obj.NumberZ - 1
        keyArr = [ str(obj.Base.Length), str(obj.Base.Width), str(obj.Base.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        if key in quantity:
            quantity[key] = quantity[key] + arrayQuantity
        else:
            quantity[key] = arrayQuantity
# Second pass: emit one spreadsheet row per distinct board size.
sqm = 0
i = 1
for obj in objs:
    if obj.isDerivedFrom("Part::Box"):
        keyArr = [ str(obj.Length), str(obj.Width), str(obj.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        # already-written sizes are deleted from quantity below -- skip them
        if not key in quantity:
            continue
        i = i + 1
        # the dimension under 30 mm is treated as the board thickness
        if obj.Length.Value < 30:
            size1 = obj.Width
            size2 = obj.Height
            thick = obj.Length
        elif obj.Width.Value < 30:
            size1 = obj.Length
            size2 = obj.Height
            thick = obj.Width
        else:
            size1 = obj.Length
            size2 = obj.Width
            thick = obj.Height
        # total face area for this size, mm^2 -> m^2
        sqm = (quantity[key] * size1 * size2 / 1000000).Value
        # ...and add to spreadsheet
        result.set( 'A'+str(i), str(obj.Label) )
        result.set( 'B'+str(i), str(size1) )
        result.set( 'C'+str(i), 'x' )
        result.set( 'D'+str(i), str(size2) )
        result.set( 'E'+str(i), str(thick) )
        result.set( 'F'+str(i), str(quantity[key]) )
        result.set( 'G'+str(i), str(sqm) )
        # recalculate and add partial square meters, grouped by thickness
        del quantity[key]
        key = str(thick)
        if key in sqmSum:
            sqmSum[key] = sqmSum[key] + sqm
        else:
            sqmSum[key] = sqm
# Summary: one 'Suma' row per thickness, after one deliberately blank row.
i = i + 1
for key in sqmSum.keys():
    i = i + 1
    result.set( 'A'+str(i), 'Suma' )
    result.set( 'E'+str(i), str(key) )
    result.set( 'G'+str(i), str(sqmSum[key]) )
# final decoration
result.setForeground( 'A2:G'+str(i), (0,0,0) )
result.setBackground( 'A2:G'+str(i), (1,1,1) )
result.setStyle( 'A2:A'+str(i), 'bold', 'add')
result.setColumnWidth( 'A', 135 )
result.setColumnWidth( 'B', 80 )
result.setColumnWidth( 'C', 40 )
result.setColumnWidth( 'D', 80 )
result.setColumnWidth( 'E', 70 )
result.setColumnWidth( 'F', 65 )
result.setColumnWidth( 'G', 160 )
result.setAlignment( 'B2:B'+str(i), 'right', 'keep' )
result.setAlignment( 'C2:C'+str(i), 'right', 'keep' )
result.setAlignment( 'D2:D'+str(i), 'right', 'keep' )
result.setAlignment( 'F2:F'+str(i), 'center', 'keep' )
result.setAlignment( 'G2:G'+str(i), 'right', 'keep' )
# Refresh document.  Fixed for consistency: the rest of the script uses the
# FreeCAD module directly; `App` is only an alias predefined in the GUI
# console and may be undefined when this runs as a plain script.
FreeCAD.ActiveDocument.recompute()
Support for the polar array type.
# -*- coding: utf-8 -*-
import FreeCAD,Draft,Spreadsheet
# create spreadsheet and prepere it for data
# Replace any previous report sheet so the macro can be re-run safely.
if FreeCAD.ActiveDocument.getObject("toCut"):
    FreeCAD.ActiveDocument.removeObject("toCut")
result = FreeCAD.ActiveDocument.addObject("Spreadsheet::Sheet","toCut")
# Header row; B1:D1 is merged to hold the "size x size" dimension pair.
# (Column labels are intentionally in Polish.)
result.mergeCells('B1:D1')
result.set( 'A1', 'Typ' )
result.set( 'B1', 'Wymiary' )
result.set( 'E1', 'Grubość' )
result.set( 'F1', 'Sztuki' )
result.set( 'G1', 'Metry kwadratowe' )
result.setForeground( 'A1:G1', (0,0,0) )
result.setBackground( 'A1:G1', (1,1,1) )
result.setStyle( 'A1:G1', 'bold', 'add')
result.setAlignment( 'A1:G1', 'top', 'keep' )
result.setAlignment( 'A1:G1', 'center', 'keep' )
# Scan all objects and count chipboards (cubes). `quantity` maps a sorted
# "LxWxH" dimension key to the number of boards of that size; sorting the
# dimensions lets differently-oriented boards of equal size share one key.
objs = FreeCAD.ActiveDocument.Objects
quantity = dict()
sqmSum = dict()
for obj in objs:
    # support for cube objects
    if obj.isDerivedFrom("Part::Box"):
        keyArr = [ str(obj.Length), str(obj.Width), str(obj.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        if key in quantity:
            quantity[key] = quantity[key] + 1
        else:
            quantity[key] = 1
    # support for array objects with cube as base
    elif obj.isDerivedFrom("Part::FeaturePython") and obj.Base.isDerivedFrom("Part::Box"):
        # the main box cube will be added in next loop
        if obj.ArrayType == "polar":
            # polar arrays have a single element count
            arrayQuantity = obj.NumberPolar - 1
        else:
            arrayQuantity = obj.NumberX * obj.NumberY * obj.NumberZ - 1
        keyArr = [ str(obj.Base.Length), str(obj.Base.Width), str(obj.Base.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        if key in quantity:
            quantity[key] = quantity[key] + arrayQuantity
        else:
            quantity[key] = arrayQuantity
# Second pass: emit one spreadsheet row per distinct board size.
sqm = 0
i = 1
for obj in objs:
    if obj.isDerivedFrom("Part::Box"):
        keyArr = [ str(obj.Length), str(obj.Width), str(obj.Height) ]
        keyArr.sort()
        key = "x".join(keyArr)
        # already-written sizes are deleted from quantity below -- skip them
        if not key in quantity:
            continue
        i = i + 1
        # the dimension under 30 mm is treated as the board thickness
        if obj.Length.Value < 30:
            size1 = obj.Width
            size2 = obj.Height
            thick = obj.Length
        elif obj.Width.Value < 30:
            size1 = obj.Length
            size2 = obj.Height
            thick = obj.Width
        else:
            size1 = obj.Length
            size2 = obj.Width
            thick = obj.Height
        # total face area for this size, mm^2 -> m^2
        sqm = (quantity[key] * size1 * size2 / 1000000).Value
        # ...and add to spreadsheet
        result.set( 'A'+str(i), str(obj.Label) )
        result.set( 'B'+str(i), str(size1) )
        result.set( 'C'+str(i), 'x' )
        result.set( 'D'+str(i), str(size2) )
        result.set( 'E'+str(i), str(thick) )
        result.set( 'F'+str(i), str(quantity[key]) )
        result.set( 'G'+str(i), str(sqm) )
        # recalculate and add partial square meters, grouped by thickness
        del quantity[key]
        key = str(thick)
        if key in sqmSum:
            sqmSum[key] = sqmSum[key] + sqm
        else:
            sqmSum[key] = sqm
# Summary: one 'Suma' row per thickness, after one deliberately blank row.
i = i + 1
for key in sqmSum.keys():
    i = i + 1
    result.set( 'A'+str(i), 'Suma' )
    result.set( 'E'+str(i), str(key) )
    result.set( 'G'+str(i), str(sqmSum[key]) )
# final decoration
result.setForeground( 'A2:G'+str(i), (0,0,0) )
result.setBackground( 'A2:G'+str(i), (1,1,1) )
result.setStyle( 'A2:A'+str(i), 'bold', 'add')
result.setColumnWidth( 'A', 135 )
result.setColumnWidth( 'B', 80 )
result.setColumnWidth( 'C', 40 )
result.setColumnWidth( 'D', 80 )
result.setColumnWidth( 'E', 70 )
result.setColumnWidth( 'F', 65 )
result.setColumnWidth( 'G', 160 )
result.setAlignment( 'B2:B'+str(i), 'right', 'keep' )
result.setAlignment( 'C2:C'+str(i), 'right', 'keep' )
result.setAlignment( 'D2:D'+str(i), 'right', 'keep' )
result.setAlignment( 'F2:F'+str(i), 'center', 'keep' )
result.setAlignment( 'G2:G'+str(i), 'right', 'keep' )
# Refresh document.  Fixed for consistency: the rest of the script uses the
# FreeCAD module directly; `App` is only an alias predefined in the GUI
# console and may be undefined when this runs as a plain script.
FreeCAD.ActiveDocument.recompute()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from spiderx.httpclient.ipshardclient import IPShardDelayClient
from spiderx.xml.sax.htmlparser import HTMLParser
from spiderx.core.ilog import get
import re
import socket
logger = get(__file__)
class Meta :
    """Tunnel endpoint data parsed from a tunnelbroker.net config blob.

    sip4/sip6: server-side IPv4/IPv6 endpoints.
    cip4/cip6: client-side IPv4/IPv6 endpoints.
    routepre:  IPv6 route prefix for the tunnel.
    """
    def __init__(self) :
        # BUG FIX: the original `self.sip6 = sip4 = None` bound sip4 (and
        # cip4) to throwaway locals, so instances lacked those attributes
        # until parse_cmd happened to assign them.
        self.sip6 = self.sip4 = None
        self.cip6 = self.cip4 = None
        self.routepre = None
# Raw strings so regex escapes (\., \s) are not interpreted by Python --
# the non-raw originals emit invalid-escape warnings on modern Pythons.
TIDPattern = re.compile(r"tid=([0-9]+)", re.S)
# dotted-quad IPv4; matches inside larger text, no octet-range validation
IPV4ADDRPATTERN = re.compile(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", re.S)
# "netsh interface ipv6 add address IP6Tunnel <client-ipv6>"
WIN7_CLIENTIP6 = re.compile(r"netsh\s+interface\s+ipv6\s+add\s+address\s+IP6Tunnel\s+([:0-9a-zA-Z]+)", re.S)
# "netsh interface ipv6 add route <prefix> IP6Tunnel <server-ipv6>"
WIN7_SERVERIP6 = re.compile(r"netsh\s+interface\s+ipv6\s+add\s+route\s+([^\s]+)\s+IP6Tunnel\s+([:0-9a-zA-Z]+)", re.S)
"""
login tunnelbroker.net
get 2 pairs of IPs to use
"""
class Broker :
    """Automates tunnelbroker.net: login, tunnel creation/deletion, and
    retrieval of the IPv4/IPv6 endpoint pairs (as Meta) for a tunnel."""

    def __init__(self) :
        self._client = IPShardDelayClient(delayMS=30, timeout=30, isEnCK=True)
        # lazily-filled caches: best tunnel-server IP and local public IP
        self._bip = self._lip = None

    def login(self, username, pwd) :
        """Log in to tunnelbroker.net.  Returns True on success."""
        # login index
        logger.info("index page......")
        prelogin = self._client.open("http://tunnelbroker.net/")
        if None == prelogin or 200 != prelogin.status :
            # NOTE(review): prelogin.msg would raise when prelogin is None --
            # confirm the client never returns None here
            logger.error("index: %s", prelogin.msg)
            return False
        logger.info("logining......")
        login = self._client.open("http://tunnelbroker.net/login.php", headers={
            "Referer" : "http://tunnelbroker.net/"
            }, data = {
            "f_user":username,
            "f_pass":pwd,
            "redir":"",
            "Login":"Login"
        })
        # a 302 redirect is how the site signals accepted credentials
        if None == login or 302 != login.status :
            logger.error("login: %s", login.msg)
            return False
        logger.info("login: %s", username)
        return True

    def destroy_exists(self) :
        # placeholder -- not implemented
        pass

    def create(self) :
        """Create a new tunnel between the local IP and the best server IP.

        Returns True on success; the created tunnel's Meta must then be
        re-read via get_matched_tunnel (original floating note: 'return
        Meta ins').
        """
        if not self.get_bestip_localip() :
            return False
        bestip = self._bip
        localIP = self._lip
        # create process
        logger.info("Go to New Tunnel(local ip = %s, best ip = %s)......", localIP, bestip)
        newPage = self._client.open("http://tunnelbroker.net/new_tunnel.php", headers = {
            "Origin":"http://tunnelbroker.net",
            "Referer":"http://tunnelbroker.net/new_tunnel.php"
            },
            data = "ipv4z=%s&tserv=%s&normaltunnel=Create+Tunnel" % (localIP, bestip)
        )
        if None == newPage or 302 != newPage.status :
            if None == newPage :
                logger.error("new page: None")
            else :
                logger.error("new page: %d %s", newPage.status, newPage.msg)
            return False
        return True

    def get_tunnel_meta(self, tid) :
        """Fetch the tunnel's example config blob and parse it into a Meta.
        Returns False on failure."""
        # get win7 command
        logger.info("Fetch win7 cmd json......")
        cmdsJson = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&ajax=true" % (tid), data={"config": "10"})
        if None == cmdsJson or 200 != cmdsJson.status or None == cmdsJson.msg :
            # NOTE(review): cmdsJson.status raises when cmdsJson is None
            logger.error("cmd page: %d", cmdsJson.status)
            return False
        return self.parse_cmd(cmdsJson.msg)

    def parse_cmd(self, html) :
        """Parse the Windows netsh command blob into a Meta.
        Returns False if any expected field is missing."""
        meta = Meta()
        # server ip 6 and route prefix
        m = WIN7_SERVERIP6.search(html)
        if None == m or 2 > len(m.groups()):
            logger.error("parse server ip6: %s", html)
            return False
        # the prefix arrives JSON-escaped (::\/0) -- strip the backslashes
        meta.routepre = m.group(1).strip().replace("\\", "")
        meta.sip6 = m.group(2).strip()
        # server ip 4
        ipv4all = IPV4ADDRPATTERN.findall(html)
        if None == ipv4all or 2 > len(ipv4all):
            logger.error("parse server ip4: %s", html)
            return False
        # first IPv4 in the blob is the client side, second the server side
        meta.cip4 = ipv4all[0]
        meta.sip4 = ipv4all[1]
        # client ip 6
        m = WIN7_CLIENTIP6.search(html)
        if None == m or 1 > len(m.groups()):
            logger.error("parse client ip6: %s", html)
            return False
        meta.cip6 = m.group(1).strip()
        return meta

    def get_localnet_ip(self) :
        """Return the LAN-facing IPv4 address, discovered via a throwaway
        UDP socket (no packets are actually sent)."""
        # client ip 4
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("www.tunnelbroker.net",80))
        cip4 = s.getsockname()[0]
        s.close()
        return cip4

    def delete(self, tid) :
        """Delete tunnel *tid*.  Returns True on success."""
        deletePage = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&delete=true" % (tid))
        if None == deletePage or 200 != deletePage.status :
            logger.error("deleting %s", tid)
            return False
        return True

    def delete_all(self) :
        """Delete every tunnel on the account; True when all are gone."""
        tids = self.get_all_tunnel_ids()
        if None == tids or 1 > len(tids) :
            logger.info("no tunnel to delete")
            return True
        for tid in tids :
            if not self.delete(tid) :
                logger.error("deleting tid: %s", tid)
                return False
        #end
        logger.info("deleted all: %s", str(tids))
        return True

    def get_all_tunnel_ids(self) :
        """Scrape the tunnel ids from the index page; None on failure."""
        logger.info("get all tunnel ids......")
        indexPage = self._client.open("http://tunnelbroker.net/")
        if None == indexPage or None == indexPage.msg :
            # BUG FIX: was logger.erro(...) -- AttributeError on this failure
            # path; also guard against indexPage being None here
            logger.error("delete index %s %s",
                         getattr(indexPage, "status", None),
                         getattr(indexPage, "msg", None))
            return None
        return TIDPattern.findall(indexPage.msg)

    def get_bestip_localip(self) :
        """Resolve and cache the best tunnel-server IP (self._bip) and the
        local public IP (self._lip).  Returns True on success."""
        if None != self._bip :
            return True
        # create tunel get best location ip
        logger.info("BestLocation......")
        blpage = self._client.open("http://anycast.tunnelbroker.net/info.html?r=1")
        if None == blpage or 200 != blpage.status or None == blpage.msg :
            logger.error("get best location: %s", blpage.msg)
            return False
        # re retrieve best IP
        mt = IPV4ADDRPATTERN.search(blpage.msg)
        if None == mt :
            logger.error("url re match: %s", blpage.msg)
            return False
        locTuple = mt.span()
        self._bip = blpage.msg[locTuple[0] : locTuple[1]]
        # best location not avail now then fix to los angel
        # parse local ip
        page = self._client.open("http://ip38.com")
        if None == page or 200 != page.status or None == page.msg :
            logger.error("get localip page : %s", page.msg)
            self._bip = None
            return False
        m = IPV4ADDRPATTERN.search(page.msg)
        if None == m :
            logger.error("local ip 404: %s", page.msg)
            self._bip = None
            return False
        locTuple = m.span()
        self._lip = page.msg[locTuple[0]: locTuple[1]]
        return True

    def modify_local_ip(self, tid, newip) :
        """Update tunnel *tid*'s client IPv4 endpoint to *newip*."""
        logger.info("modify local ip......")
        blpage = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&ajax=true"%(tid), data={"ipv4z":newip})
        if None == blpage or 200 != blpage.status :
            logger.error("modify local ip: %d", blpage.status)
            return False
        return True

    def get_matched_tunnel(self) :
        """Find the existing tunnel whose server IP equals the current best
        server IP.  Returns (meta, tid, localip) or False."""
        tids = self.get_all_tunnel_ids()
        if None == tids or 1 > len(tids) :
            return False
        if not self.get_bestip_localip() :
            return False
        bestip = self._bip
        localip = self._lip
        # 4
        for tid in tids :
            meta = self.get_tunnel_meta(tid)
            if False == meta :
                logger.error("tunnel meta %s", tid)
                continue
            if meta.sip4 == bestip :
                logger.info("matched (%s,%s) found", bestip, localip)
                return meta, tid, localip
        # end
        logger.info("no matched (%s,%s) found", bestip, localip)
        return False

    def nonexist_tunnel_create_or_set(self) :
        """
        1. have any tunnel
        2. get the best ip
        3. get the local ip
        4. if one matches the best ip use it, else create and re-match
        5. set the local ip
        6. get cmd and execute
        """
        # 1
        metaPairs = self.get_matched_tunnel()
        meta = tid = localip = None
        if False == metaPairs :
            try :
                self.create()
            except :
                pass
            metaPairs = self.get_matched_tunnel()
            if False == metaPairs :
                return False
        meta = metaPairs[0]
        tid = metaPairs[1]
        localip = metaPairs[2]
        if localip != meta.cip4 :
            # 5
            logger.info("%s modify local ip from %s to %s", self.__class__, meta.cip4, localip)
            if not self.modify_local_ip(tid, localip) :
                return False
        else :
            # tunnel exist
            logger.info("tunnel exist(%s,%s) found", meta.sip4, meta.cip4)
        # 6
        meta.cip4 = self.get_localnet_ip()
        return meta
if __name__ == "__main__" :
    # Offline smoke test: parse a captured tunnel_detail "commands" blob;
    # only parse_cmd is exercised, no network access happens here.
    html = """
    {"commands":"netsh interface teredo set state disabled\r\nnetsh interface ipv6 add v6v4tunnel IP6Tunnel 115.221.184.71 66.220.18.42\r\nnetsh interface ipv6 add address IP6Tunnel 2001:470:c:ba4::2\r\nnetsh interface ipv6 add route ::\/0 IP6Tunnel 2001:470:c:ba4::1","additionalNotes":"","description":"Copy and paste the following commands into a command window:"}
    """
    b = Broker()
    m = b.parse_cmd(html)
    # Python 2 print statement (this module predates Python 3)
    print m.cip4, m.cip6, m.sip4, m.sip6, m.routepre
Add logging when fetching the tunnel ids fails.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from spiderx.httpclient.ipshardclient import IPShardDelayClient
from spiderx.xml.sax.htmlparser import HTMLParser
from spiderx.core.ilog import get
import re
import socket
logger = get(__file__)
class Meta :
    """Tunnel endpoint data parsed from a tunnelbroker.net config blob.

    sip4/sip6: server-side IPv4/IPv6 endpoints.
    cip4/cip6: client-side IPv4/IPv6 endpoints.
    routepre:  IPv6 route prefix for the tunnel.
    """
    def __init__(self) :
        # BUG FIX: the original `self.sip6 = sip4 = None` bound sip4 (and
        # cip4) to throwaway locals, so instances lacked those attributes
        # until parse_cmd happened to assign them.
        self.sip6 = self.sip4 = None
        self.cip6 = self.cip4 = None
        self.routepre = None
# Raw strings so regex escapes (\., \s) are not interpreted by Python --
# the non-raw originals emit invalid-escape warnings on modern Pythons.
TIDPattern = re.compile(r"tid=([0-9]+)", re.S)
# dotted-quad IPv4; matches inside larger text, no octet-range validation
IPV4ADDRPATTERN = re.compile(r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}", re.S)
# "netsh interface ipv6 add address IP6Tunnel <client-ipv6>"
WIN7_CLIENTIP6 = re.compile(r"netsh\s+interface\s+ipv6\s+add\s+address\s+IP6Tunnel\s+([:0-9a-zA-Z]+)", re.S)
# "netsh interface ipv6 add route <prefix> IP6Tunnel <server-ipv6>"
WIN7_SERVERIP6 = re.compile(r"netsh\s+interface\s+ipv6\s+add\s+route\s+([^\s]+)\s+IP6Tunnel\s+([:0-9a-zA-Z]+)", re.S)
"""
login tunnelbroker.net
get 2 pairs of IPs to use
"""
class Broker :
    """Automates tunnelbroker.net: login, tunnel creation/deletion, and
    retrieval of the IPv4/IPv6 endpoint pairs (as Meta) for a tunnel."""

    def __init__(self) :
        self._client = IPShardDelayClient(delayMS=30, timeout=30, isEnCK=True)
        # lazily-filled caches: best tunnel-server IP and local public IP
        self._bip = self._lip = None

    def login(self, username, pwd) :
        """Log in to tunnelbroker.net.  Returns True on success."""
        # login index
        logger.info("index page......")
        prelogin = self._client.open("http://tunnelbroker.net/")
        if None == prelogin or 200 != prelogin.status :
            # NOTE(review): prelogin.msg would raise when prelogin is None --
            # confirm the client never returns None here
            logger.error("index: %s", prelogin.msg)
            return False
        logger.info("logining......")
        login = self._client.open("http://tunnelbroker.net/login.php", headers={
            "Referer" : "http://tunnelbroker.net/"
            }, data = {
            "f_user":username,
            "f_pass":pwd,
            "redir":"",
            "Login":"Login"
        })
        # a 302 redirect is how the site signals accepted credentials
        if None == login or 302 != login.status :
            logger.error("login: %s", login.msg)
            return False
        logger.info("login: %s", username)
        return True

    def destroy_exists(self) :
        # placeholder -- not implemented
        pass

    def create(self) :
        """Create a new tunnel between the local IP and the best server IP.

        Returns True on success; the created tunnel's Meta must then be
        re-read via get_matched_tunnel (original floating note: 'return
        Meta ins').
        """
        if not self.get_bestip_localip() :
            return False
        bestip = self._bip
        localIP = self._lip
        # create process
        logger.info("Go to New Tunnel(local ip = %s, best ip = %s)......", localIP, bestip)
        newPage = self._client.open("http://tunnelbroker.net/new_tunnel.php", headers = {
            "Origin":"http://tunnelbroker.net",
            "Referer":"http://tunnelbroker.net/new_tunnel.php"
            },
            data = "ipv4z=%s&tserv=%s&normaltunnel=Create+Tunnel" % (localIP, bestip)
        )
        if None == newPage or 302 != newPage.status :
            if None == newPage :
                logger.error("new page: None")
            else :
                logger.error("new page: %d %s", newPage.status, newPage.msg)
            return False
        return True

    def get_tunnel_meta(self, tid) :
        """Fetch the tunnel's example config blob and parse it into a Meta.
        Returns False on failure."""
        # get win7 command
        logger.info("Fetch win7 cmd json......")
        cmdsJson = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&ajax=true" % (tid), data={"config": "10"})
        if None == cmdsJson or 200 != cmdsJson.status or None == cmdsJson.msg :
            # NOTE(review): cmdsJson.status raises when cmdsJson is None
            logger.error("cmd page: %d", cmdsJson.status)
            return False
        return self.parse_cmd(cmdsJson.msg)

    def parse_cmd(self, html) :
        """Parse the Windows netsh command blob into a Meta.
        Returns False if any expected field is missing."""
        meta = Meta()
        # server ip 6 and route prefix
        m = WIN7_SERVERIP6.search(html)
        if None == m or 2 > len(m.groups()):
            logger.error("parse server ip6: %s", html)
            return False
        # the prefix arrives JSON-escaped (::\/0) -- strip the backslashes
        meta.routepre = m.group(1).strip().replace("\\", "")
        meta.sip6 = m.group(2).strip()
        # server ip 4
        ipv4all = IPV4ADDRPATTERN.findall(html)
        if None == ipv4all or 2 > len(ipv4all):
            logger.error("parse server ip4: %s", html)
            return False
        # first IPv4 in the blob is the client side, second the server side
        meta.cip4 = ipv4all[0]
        meta.sip4 = ipv4all[1]
        # client ip 6
        m = WIN7_CLIENTIP6.search(html)
        if None == m or 1 > len(m.groups()):
            logger.error("parse client ip6: %s", html)
            return False
        meta.cip6 = m.group(1).strip()
        return meta

    def get_localnet_ip(self) :
        """Return the LAN-facing IPv4 address, discovered via a throwaway
        UDP socket (no packets are actually sent)."""
        # client ip 4
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("www.tunnelbroker.net",80))
        cip4 = s.getsockname()[0]
        s.close()
        return cip4

    def delete(self, tid) :
        """Delete tunnel *tid*.  Returns True on success."""
        deletePage = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&delete=true" % (tid))
        if None == deletePage or 200 != deletePage.status :
            logger.error("deleting %s", tid)
            return False
        return True

    def delete_all(self) :
        """Delete every tunnel on the account; True when all are gone."""
        tids = self.get_all_tunnel_ids()
        if None == tids or 1 > len(tids) :
            logger.info("no tunnel to delete")
            return True
        for tid in tids :
            if not self.delete(tid) :
                logger.error("deleting tid: %s", tid)
                return False
        #end
        logger.info("deleted all: %s", str(tids))
        return True

    def get_all_tunnel_ids(self) :
        """Scrape the tunnel ids from the index page; None on failure."""
        logger.info("get all tunnel ids......")
        indexPage = self._client.open("http://tunnelbroker.net/")
        if None == indexPage or None == indexPage.msg :
            # BUG FIX: was logger.erro(...) -- AttributeError on this failure
            # path; also guard against indexPage being None here
            logger.error("delete index %s %s",
                         getattr(indexPage, "status", None),
                         getattr(indexPage, "msg", None))
            return None
        return TIDPattern.findall(indexPage.msg)

    def get_bestip_localip(self) :
        """Resolve and cache the best tunnel-server IP (self._bip) and the
        local public IP (self._lip).  Returns True on success."""
        if None != self._bip :
            return True
        # create tunel get best location ip
        logger.info("BestLocation......")
        blpage = self._client.open("http://anycast.tunnelbroker.net/info.html?r=1")
        if None == blpage or 200 != blpage.status or None == blpage.msg :
            logger.error("get best location: %s", blpage.msg)
            return False
        # re retrieve best IP
        mt = IPV4ADDRPATTERN.search(blpage.msg)
        if None == mt :
            logger.error("url re match: %s", blpage.msg)
            return False
        locTuple = mt.span()
        self._bip = blpage.msg[locTuple[0] : locTuple[1]]
        # best location not avail now then fix to los angel
        # parse local ip
        page = self._client.open("http://ip38.com")
        if None == page or 200 != page.status or None == page.msg :
            logger.error("get localip page : %s", page.msg)
            self._bip = None
            return False
        m = IPV4ADDRPATTERN.search(page.msg)
        if None == m :
            logger.error("local ip 404: %s", page.msg)
            self._bip = None
            return False
        locTuple = m.span()
        self._lip = page.msg[locTuple[0]: locTuple[1]]
        return True

    def modify_local_ip(self, tid, newip) :
        """Update tunnel *tid*'s client IPv4 endpoint to *newip*."""
        logger.info("modify local ip......")
        blpage = self._client.open("http://tunnelbroker.net/tunnel_detail.php?tid=%s&ajax=true"%(tid), data={"ipv4z":newip})
        if None == blpage or 200 != blpage.status :
            logger.error("modify local ip: %d", blpage.status)
            return False
        return True

    def get_matched_tunnel(self) :
        """Find the existing tunnel whose server IP equals the current best
        server IP.  Returns (meta, tid, localip) or False."""
        tids = self.get_all_tunnel_ids()
        if None == tids or 1 > len(tids) :
            logger.error("all tunnel id fetch ......")
            return False
        if not self.get_bestip_localip() :
            return False
        bestip = self._bip
        localip = self._lip
        # 4
        for tid in tids :
            meta = self.get_tunnel_meta(tid)
            if False == meta :
                logger.error("tunnel meta %s", tid)
                continue
            if meta.sip4 == bestip :
                logger.info("matched (%s,%s) found", bestip, localip)
                return meta, tid, localip
        # end
        logger.info("no matched (%s,%s) found", bestip, localip)
        return False

    def nonexist_tunnel_create_or_set(self) :
        """
        1. have any tunnel
        2. get the best ip
        3. get the local ip
        4. if one matches the best ip use it, else create and re-match
        5. set the local ip
        6. get cmd and execute
        """
        # 1
        metaPairs = self.get_matched_tunnel()
        meta = tid = localip = None
        if False == metaPairs :
            try :
                self.create()
            except :
                pass
            metaPairs = self.get_matched_tunnel()
            if False == metaPairs :
                return False
        meta = metaPairs[0]
        tid = metaPairs[1]
        localip = metaPairs[2]
        if localip != meta.cip4 :
            # 5
            logger.info("%s modify local ip from %s to %s", self.__class__, meta.cip4, localip)
            if not self.modify_local_ip(tid, localip) :
                return False
        else :
            # tunnel exist
            logger.info("tunnel exist(%s,%s) found", meta.sip4, meta.cip4)
        # 6
        meta.cip4 = self.get_localnet_ip()
        return meta
if __name__ == "__main__" :
    # Offline smoke test: parse a captured tunnel_detail "commands" blob;
    # only parse_cmd is exercised, no network access happens here.
    html = """
    {"commands":"netsh interface teredo set state disabled\r\nnetsh interface ipv6 add v6v4tunnel IP6Tunnel 115.221.184.71 66.220.18.42\r\nnetsh interface ipv6 add address IP6Tunnel 2001:470:c:ba4::2\r\nnetsh interface ipv6 add route ::\/0 IP6Tunnel 2001:470:c:ba4::1","additionalNotes":"","description":"Copy and paste the following commands into a command window:"}
    """
    b = Broker()
    m = b.parse_cmd(html)
    # Python 2 print statement (this module predates Python 3)
    print m.cip4, m.cip6, m.sip4, m.sip6, m.routepre
|
#
# Copyright (c) 2004-2005 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Contains the base Recipe class, default macros, and miscellaneous
components used by conary .recipe files
"""
#stdlib
import errno
from fnmatch import fnmatchcase
import imp
import inspect
from itertools import chain,izip
import new
import os
import string
import sys
import tempfile
import types
#conary
import build
import buildpackage
import usergroup
import conaryclient
import cook
from deps import deps
import destdirpolicy
import files
from lib import log
from lib import magic
from lib import util
from local import database
import macros
import packagepolicy
from repository import repository,trovesource
import source
import use
import updatecmd
import versions
# Default macro set available to every recipe; values may reference other
# macros with %(name)s and are interpolated later by the macros machinery.
baseMacros = {
    # paths
    'prefix'            : '/usr',
    'sysconfdir'        : '/etc',
    'initdir'           : '%(sysconfdir)s/init.d',
    'lib'               : 'lib',  # may be overridden with 'lib64'
    'exec_prefix'       : '%(prefix)s',
    'bindir'            : '%(exec_prefix)s/bin',
    'essentialbindir'   : '/bin',
    'sbindir'           : '%(exec_prefix)s/sbin',
    'essentialsbindir'  : '/sbin',
    'libdir'            : '%(exec_prefix)s/%(lib)s',
    'essentiallibdir'   : '/%(lib)s',
    'libexecdir'        : '%(exec_prefix)s/libexec',
    'localstatedir'     : '/var',
    'servicedir'        : '/srv',
    'cachedir'          : '%(localstatedir)s/cache',
    'sharedstatedir'    : '%(prefix)s/com',
    'includedir'        : '%(prefix)s/include',
    'datadir'           : '%(prefix)s/share',
    'mandir'            : '%(datadir)s/man',
    'infodir'           : '%(datadir)s/info',
    'docdir'            : '%(datadir)s/doc',
    'thisdocdir'        : '%(docdir)s/%(name)s-%(version)s',
    # conary tag-handler locations
    'tagdescriptiondir' : '%(sysconfdir)s/conary/tags',
    'taghandlerdir'     : '%(libexecdir)s/conary/tags',
    'tagdatadir'        : '%(datadir)s/conary/tags',
    'testdir'           : '%(localstatedir)s/conary/tests',
    'thistestdir'       : '%(testdir)s/%(name)s-%(version)s',
    'debuglibdir'       : '/usr/lib/debug', # no %(prefix)s or %(lib)s!
    'debugsrcdir'       : '/usr/src/debug', # no %(prefix)s!
    'userinfodir'       : '%(sysconfdir)s/conary/userinfo',
    'groupinfodir'      : '%(sysconfdir)s/conary/groupinfo',
    'buildlogpath'      : '%(debugsrcdir)s/buildlogs/%(name)s-%(version)s-log.bz2',
    # special component prefixes that the whole system needs to share
    'krbprefix'         : '%(exec_prefix)s/kerberos',
    'x11prefix'         : '%(exec_prefix)s/X11R6',
    # programs/options (empty ones are for documentation)
    'cc'                : 'gcc',
    'cxx'               : 'g++',
    'cxxflags'          : '',    # cxx specific flags
    'optflags'          : '-O2',
    'dbgflags'          : '-g',  # for debuginfo
    'cflags'            : '%(optflags)s %(dbgflags)s',
    'cppflags'          : '',    # just for providing in recipes
    'ldflags'           : '%(dbgflags)s',
    'mflags'            : '',    # make flags
    'parallelmflags'    : '',
    'sysroot'           : '',
    'os'                : 'linux',
    'target'            : '%(targetarch)s-unknown-linux',
    'debugedit'         : 'debugedit',
    'strip'             : 'eu-strip', # eu-strip for debuginfo, "strip -g" else
    'strip-archive'     : 'strip -g', # eu-strip segfaults on ar
    'monodis'           : '%(bindir)s/monodis',
    # filled in at cook time
    'buildbranch'       : '',
    'buildlabel'        : '',
}
# Macro overrides used for cross-compilation builds; layered on top of
# baseMacros.
crossMacros = {
    # set crossdir from cook, directly or indirectly, before adding the rest
    #'crossdir'		: 'cross-target',
    'prefix'            : '/opt/%(crossdir)s',
    'sysroot'           : '%(prefix)s/sys-root',
    'headerpath'        : '%(sysroot)s/usr/include',
}
def localImport(d, package, modules=()):
    """
    Import *package* into a non-global context.
    @param d: the context to import the module into
    @type d: dict
    @param package: the name of the module to import
    @type package: str
    @param modules: a sequence of names to pull out of the package.
    A plain string binds the attribute under its own name; a 2-tuple
    (attr, alias) binds the package attribute *attr* under *alias*.
    With an empty sequence the package itself is bound.
    @type modules: sequence of strings or tuples, or empty tuple
    Examples of translated import statements::
        from foo import bar as baz:
            localImport(d, "foo", (("bar", "baz"),))
        from bar import fred, george:
            localImport(d, "bar", ("fred", "george"))
        import os
            localImport(d, "os")
    """
    imported = __import__(package, d, {}, modules)
    if not modules:
        d[package] = imported
    else:
        for entry in modules:
            if type(entry) is tuple:
                attr, alias = entry
            else:
                attr = alias = entry
            d[alias] = getattr(imported, attr)
    # keep the module referenced from the target context so it is not
    # garbage collected while the context is still alive
    d.setdefault('__localImportModules', []).append(imported)
def setupRecipeDict(d, filename):
    """Populate *d* with everything a recipe file expects in scope: the
    build modules, the recipe base classes, a few stdlib modules, and the
    flag objects.  *filename* is recorded under d['filename']."""
    localImport(d, 'build', ('build', 'action'))
    recipeNames = ('PackageRecipe', 'GroupRecipe',
                   'RedirectRecipe', 'FilesetRecipe',
                   'DistroPackageRecipe',
                   'BuildPackageRecipe',
                   'CPackageRecipe',
                   'AutoPackageRecipe',
                   'UserInfoRecipe',
                   'GroupInfoRecipe',
                   'loadSuperClass', 'loadInstalled',
                   'clearBuildReqs',
                   # XXX when all recipes have been migrated
                   # we can get rid of loadRecipe
                   ('loadSuperClass', 'loadRecipe'))
    localImport(d, 'build.recipe', recipeNames)
    localImport(d, 'lib', ('util',))
    for stdlibModule in ('os', 're', 'sys', 'stat'):
        localImport(d, stdlibModule)
    localImport(d, 'build.use', ('Arch', 'Use', ('LocalFlags', 'Flags')))
    d['filename'] = filename
class RecipeLoader:
_recipesToCopy = []
@classmethod
def addRecipeToCopy(class_, recipeClass):
class_._recipesToCopy.append(recipeClass)
def _copyReusedRecipes(self, moduleDict):
# XXX HACK - get rid of this when we move the
# recipe classes to the repository.
# makes copies of some of the superclass recipes that are
# created in this trove. (specifically, the ones with buildreqs)
for recipeClass in self._recipesToCopy:
name = recipeClass.__name__
# when we create a new class object, it needs its superclasses.
# get the original superclass list and substitute in any
# copies
mro = list(inspect.getmro(recipeClass)[1:])
newMro = []
for superClass in mro:
superName = superClass.__name__
newMro.append(moduleDict.get(superName, superClass))
recipeCopy = new.classobj(name, tuple(newMro),
recipeClass.__dict__.copy())
recipeCopy.buildRequires = recipeCopy.buildRequires[:]
moduleDict[name] = recipeCopy
def __init__(self, filename, cfg=None, repos=None, component=None,
branch=None, ignoreInstalled=False):
self.recipes = {}
if filename[0] != "/":
raise IOError, "recipe file names must be absolute paths"
if component:
pkgname = component.split(':')[0]
else:
pkgname = filename.split('/')[-1]
pkgname = pkgname[:-len('.recipe')]
basename = os.path.basename(filename)
self.file = basename.replace('.', '-')
self.module = imp.new_module(self.file)
sys.modules[self.file] = self.module
f = open(filename)
setupRecipeDict(self.module.__dict__, filename)
# store cfg and repos, so that the recipe can load
# recipes out of the repository
self.module.__dict__['cfg'] = cfg
self.module.__dict__['repos'] = repos
self.module.__dict__['component'] = component
self.module.__dict__['branch'] = branch
self.module.__dict__['name'] = pkgname
self.module.__dict__['ignoreInstalled'] = ignoreInstalled
self.module.__dict__['loadedTroves'] = []
self.module.__dict__['loadedSpecs'] = {}
self._copyReusedRecipes(self.module.__dict__)
# create the recipe class by executing the code in the recipe
try:
code = compile(f.read(), filename, 'exec')
except SyntaxError, err:
msg = ('Error in recipe file "%s": %s\n' %(basename, err))
if err.offset is not None:
msg += '%s%s^\n' %(err.text, ' ' * (err.offset-1))
else:
msg += err.text
raise RecipeFileError(msg)
use.resetUsed()
exec code in self.module.__dict__
# all recipes that could be loaded by loadRecipe are loaded;
# get rid of our references to cfg and repos
del self.module.__dict__['cfg']
del self.module.__dict__['repos']
del self.module.__dict__['component']
del self.module.__dict__['branch']
del self.module.__dict__['name']
del self.module.__dict__['ignoreInstalled']
found = False
for (name, obj) in self.module.__dict__.items():
if type(obj) is not types.ClassType:
continue
# if a recipe has been marked to be ignored (for example, if
# it was loaded from another recipe by loadRecipe()
# (don't use hasattr here, we want to check only the recipe
# class itself, not any parent class
if 'ignore' in obj.__dict__:
continue
recipename = getattr(obj, 'name', '')
# make sure the class is derived from Recipe
if ((issubclass(obj, PackageRecipe)
and obj is not PackageRecipe
and not issubclass(obj, UserGroupInfoRecipe)) or
(issubclass(obj, RedirectRecipe)
and obj is not RedirectRecipe)):
if recipename[0] not in string.ascii_letters + string.digits:
raise RecipeFileError(
'Error in recipe file "%s": package name must start '
'with an ascii letter or digit.' %basename)
if recipename.startswith('group-'):
raise RecipeFileError(
'Error in recipe file "%s": package name cannot '
'begin with "group-"' %basename)
if recipename.startswith('fileset-'):
raise RecipeFileError(
'Error in recipe file "%s": package name cannot '
'begin with "fileset-"' %basename)
if recipename.startswith('info-'):
raise RecipeFileError(
'Error in recipe file "%s": package name cannot '
'begin with "info-"' %basename)
elif issubclass(obj, GroupRecipe) and obj is not GroupRecipe:
if recipename and not recipename.startswith('group-'):
raise RecipeFileError(
'Error in recipe file "%s": group name must '
'begin with "group-"' %basename)
elif issubclass(obj, FilesetRecipe) and obj is not FilesetRecipe:
if recipename and not recipename.startswith('fileset-'):
raise RecipeFileError(
'Error in recipe file "%s": fileset name must '
'begin with "fileset-"' %basename)
elif issubclass(obj, UserGroupInfoRecipe) and obj is not UserGroupInfoRecipe:
if recipename and not recipename.startswith('info-'):
raise RecipeFileError(
'Error in recipe file "%s": info name must '
'begin with "info-"' %basename)
else:
continue
self.recipes[name] = obj
obj.filename = filename
if hasattr(obj, 'name') and hasattr(obj, 'version'):
if found:
raise RecipeFileError(
'Error in recipe file "%s": multiple recipe classes '
'with both name and version exist' %basename)
self.recipe = obj
if '-' in obj.version:
raise RecipeFileError(
"Version string %s has illegal '-' character"
%obj.version)
if obj.name != pkgname:
raise RecipeFileError(
"Recipe object name '%s' does not match "
"file/component name '%s'"
% (obj.name, pkgname))
found = True
else:
raise RecipeFileError(
"Recipe in file/component '%s' did not contain both a name"
" and a version attribute." % pkgname)
if found:
# inherit any tracked flags that we found while loading parent
# classes. Also inherit the list of recipes classes needed to load
# this recipe.
self.recipe._loadedTroves = self.module.__dict__['loadedTroves']
self.recipe._loadedSpecs = self.module.__dict__['loadedSpecs']
if self.recipe._trackedFlags is not None:
use.setUsed(self.recipe._trackedFlags)
self.recipe._trackedFlags = use.getUsed()
else:
# we'll get this if the recipe file is empty
raise RecipeFileError(
"file/component '%s' did not contain a valid recipe" % pkgname)
def allRecipes(self):
return self.recipes
def getRecipe(self):
return self.recipe
    def __del__(self):
        """Drop the recipe's synthetic module from sys.modules on collection."""
        try:
            del sys.modules[self.file]
        except:
            # Bare except is deliberate: the key may already be gone
            # (KeyError), and during interpreter shutdown sys or
            # sys.modules may be partially torn down -- in every case
            # there is nothing useful left to do.
            pass
def recipeLoaderFromSourceComponent(name, cfg, repos,
                                    versionStr=None, labelPath=None,
                                    ignoreInstalled=False,
                                    filterVersions=False):
    """
    Load a recipe from the repository's :source component for C{name}.

    @param name: package or component name; any component suffix is stripped
    and C{name + ':source'} is looked up on C{labelPath}.
    @param versionStr: optional version string to restrict the findTrove call.
    @param labelPath: labels to search; defaults to C{[cfg.buildLabel]}.
    @param ignoreInstalled: passed through to RecipeLoader.
    @param filterVersions: when True, score matching versions against
    labelPath and keep only the best-scoring matches.
    @return: (RecipeLoader instance, version of the source component)
    @raises RecipeFileError: if the source component cannot be found, is
    ambiguous on the labelPath, or does not contain the expected .recipe file.
    """
    def _scoreVersion(labelPath, version):
        """Score version's branch by how well its labels match labelPath;
        higher is better.  Labels not on labelPath score -1 each, so e.g.
        for labelPath [bar, foo], /foo/bar beats /foo/blah/bar."""
        # FIXME I'm quite sure this heuristic will get replaced with
        # something smarter/more sane as time progresses
        score = 0
        labelPath = [ x for x in reversed(labelPath) ]
        branch = version.branch()
        while True:
            label = branch.label()
            try:
                index = labelPath.index(label)
            except ValueError:
                index = -1
            score += index
            if not branch.hasParentBranch():
                break
            branch = branch.parentBranch()
        return score

    def _getBestTroveTups(labelPath, troveTups):
        """Keep only the trove tuples whose version scored highest."""
        scores = [ (_scoreVersion(labelPath, x[1]), x) for x in troveTups ]
        maxScore = max(scores)[0]
        return [ x[1] for x in scores if x[0] == maxScore ]

    name = name.split(':')[0]
    component = name + ":source"
    filename = name + '.recipe'
    if not labelPath:
        labelPath = [cfg.buildLabel]
    try:
        pkgs = repos.findTrove(labelPath,
                               (component, versionStr, deps.DependencySet()))
    except repository.TroveMissing:
        raise RecipeFileError('cannot find source component %s' % component)
    if filterVersions:
        pkgs = _getBestTroveTups(labelPath, pkgs)
    if len(pkgs) > 1:
        raise RecipeFileError("source component %s has multiple versions "
                              "on labelPath %s: %s" %(component,
                              ', '.join(x.asString() for x in labelPath),
                              pkgs))

    sourceComponent = repos.getTrove(*pkgs[0])

    # copy the recipe file out of the repository into a temporary file so
    # RecipeLoader can import it
    (fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name)
    outF = os.fdopen(fd, "w")
    try:
        try:
            inF = None
            for (pathId, filePath, fileId, fileVersion) in \
                                            sourceComponent.iterFileList():
                if filePath == filename:
                    inF = repos.getFileContents(
                                    [ (fileId, fileVersion) ])[0].get()
                    break
            if not inF:
                # BUGFIX: the version and name arguments were swapped here,
                # producing "version <name> of <version>" messages
                raise RecipeFileError("version %s of %s does not contain %s" %
                                      (sourceComponent.getVersion().asString(),
                                       sourceComponent.getName(),
                                       filename))
            util.copyfileobj(inF, outF)
        finally:
            # close explicitly instead of relying on refcounting ('del outF')
            # to flush the copy before RecipeLoader reads the file
            outF.close()
        loader = RecipeLoader(recipeFile, cfg, repos, component,
                              sourceComponent.getVersion().branch(),
                              ignoreInstalled=ignoreInstalled)
    finally:
        # remove the temporary file on all paths, including the error paths
        # that previously leaked it
        os.unlink(recipeFile)
    recipe = loader.getRecipe()
    recipe._trove = sourceComponent.copy()
    return (loader, sourceComponent.getVersion())
def loadSuperClass(troveSpec, label=None):
    """
    Load a recipe so that its class/data can be used as a super class for
    this recipe.

    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.

    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
    """
    callerGlobals = inspect.stack()[1][0].f_globals
    # findInstalled=False: a superclass load must never bind to whatever
    # happens to be installed on this system.
    # (A dead 'ignoreInstalled = True' local assignment was removed here;
    # _loadRecipe reads 'ignoreInstalled' from the caller's globals instead.)
    _loadRecipe(troveSpec, label, callerGlobals, False)
def loadInstalled(troveSpec, label=None):
    """
    Load a recipe so that its data about the installed system can be used
    in this recipe.

    If a complete version is not specified in the trovespec, the version of
    the recipe to load will be based on what is installed on the system.
    For example, if C{loadRecipe('foo')} is called, and package C{foo} with
    version C{/bar.org@bar:devel/4.1-1-1} is installed on the system, then
    C{foo:source} with version C{/bar.org@bar:devel/4.1-1} will be loaded.
    The recipe will also be loaded with the installed package's flavor.

    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.

    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
    """
    # grab the calling recipe module's globals, then delegate with
    # findInstalled=True so the installed system is consulted first
    callerFrame = inspect.stack()[1][0]
    _loadRecipe(troveSpec, label, callerFrame.f_globals, True)
def _loadRecipe(troveSpec, label, callerGlobals, findInstalled):
    """ See docs for loadInstalledPackage and loadSuperClass.

        Shared implementation: locates the requested recipe (local file,
        installed system, or repository), loads it, and injects the loaded
        recipe classes into the calling recipe module's globals.
    """
    def _findInstalledVersion(db, labelPath, name, versionStr, flavor):
        """ Specialized search of the installed system along a labelPath,
            defaulting to searching the whole system if the trove is not
            found along the label path.

            The version and flavor of the first found installed trove is
            returned, or C{None} if no trove is found.
        """
        # first search on the labelPath.
        try:
            troves = db.findTrove(labelPath, (name, versionStr, flavor))
            if len(troves) > 1:
                raise RuntimeError, (
                            'Multiple troves could match loadInstalled'
                            ' request %s' % troveSpec)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        if labelPath is None:
            return None
        # fall back to searching the whole system (labelPath=None)
        try:
            troves = db.findTrove(None, (name, versionStr, flavor))
            if len(troves) > 1:
                raise RuntimeError, (
                            'Multiple troves could match loadRecipe'
                            ' request for %s' % name)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        return None

    # these globals are planted in every recipe module by the loader
    cfg = callerGlobals['cfg']
    repos = callerGlobals['repos']
    branch = callerGlobals['branch']
    parentPackageName = callerGlobals['name']
    if 'ignoreInstalled' in callerGlobals:
        alwaysIgnoreInstalled = callerGlobals['ignoreInstalled']
    else:
        alwaysIgnoreInstalled = False

    # remember tracked use flags so they can be restored at the end
    oldUsed = use.getUsed()
    name, versionStr, flavor = updatecmd.parseTroveSpec(troveSpec)

    # NOTE: 'file' shadows the builtin; kept for compatibility
    if name.endswith('.recipe'):
        file = name
        name = name[:-len('.recipe')]
    else:
        file = name + '.recipe'

    #first check to see if a filename was specified, and if that
    #recipe actually exists.
    loader = None
    if not (label or versionStr or flavor):
        if name[0] != '/':
            # relative name: look next to the recipe doing the loading
            parentFilePath = callerGlobals['filename']
            localfile = os.path.dirname(parentFilePath) + '/' + file
        else:
            localfile = name + '.recipe'

        if os.path.exists(localfile):
            if flavor:
                # temporarily override the build flavor while loading
                oldBuildFlavor = cfg.buildFlavor
                cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
                use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
            loader = RecipeLoader(localfile, cfg,
                                  ignoreInstalled=alwaysIgnoreInstalled)

    if not loader:
        if label:
            labelPath = [versions.Label(label)]
        elif branch:
            # if no labelPath was specified, search backwards through the
            # labels on the current branch.
            labelPath = [branch.label()]
            while branch.hasParentBranch():
                branch = branch.parentBranch()
                labelPath.append(branch.label())
        else:
            labelPath = None

        if findInstalled and not alwaysIgnoreInstalled:
            # look on the local system to find a trove that is installed that
            # matches this loadrecipe request. Use that trove's version
            # and flavor information to grab the source out of the repository
            db = database.Database(cfg.root, cfg.dbPath)
            parts = _findInstalledVersion(db, labelPath, name,
                                          versionStr, flavor)
            if parts:
                version, flavor = parts
                # NOTE(review): _findInstalledVersion already returns a
                # source version, so the extra getSourceVersion() call here
                # looks redundant -- confirm it is a no-op on source versions
                versionStr = version.getSourceVersion().asString()
        if flavor:
            # override the current flavor with the flavor found in the
            # installed trove (or the troveSpec flavor, if no installed
            # trove was found.
            oldBuildFlavor = cfg.buildFlavor
            cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
            use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
        loader = recipeLoaderFromSourceComponent(name, cfg, repos,
                                                 labelPath=labelPath,
                                                 versionStr=versionStr,
                                             ignoreInstalled=alwaysIgnoreInstalled,
                                                 filterVersions=True)[0]

    # inject every loaded recipe class into the caller's module namespace
    # (note: 'name' is rebound by this loop)
    for name, recipe in loader.allRecipes().items():
        # hide all recipes from RecipeLoader - we don't want to return
        # a recipe that has been loaded by loadRecipe
        recipe.ignore = 1
        callerGlobals[name] = recipe
        if recipe._trove:
            # create a tuple with the version and flavor information needed to
            # load this trove again.   You might be able to rely on the
            # flavor that the trove was built with, but when you load a
            # recipe that is not a superclass of the current recipe,
            # its flavor is not assumed to be relevant to the resulting
            # package (otherwise you might have completely irrelevant flavors
            # showing up for any package that loads the python recipe, e.g.)
            usedFlavor = use.createFlavor(name, recipe._trackedFlags)
            troveTuple = (recipe._trove.getName(), recipe._trove.getVersion(),
                          usedFlavor)
            callerGlobals['loadedTroves'].extend(recipe._loadedTroves)
            callerGlobals['loadedTroves'].append(troveTuple)
            callerGlobals['loadedSpecs'][troveSpec] = (troveTuple, recipe)
    if flavor:
        cfg.buildFlavor = oldBuildFlavor
        # must set this flavor back after the above use.createFlavor()
        use.setBuildFlagsFromFlavor(parentPackageName, cfg.buildFlavor)

    # stash a reference to the module in the namespace
    # of the recipe that loaded it, or else it will be destroyed
    # (the '.' -> '-' rename makes the key an invalid identifier, so it
    # cannot collide with real names in the recipe)
    callerGlobals[os.path.basename(file).replace('.', '-')] = loader

    # return the tracked flags to their state before loading this recipe
    use.resetUsed()
    use.setUsed(oldUsed)
class _sourceHelper:
def __init__(self, theclass, recipe):
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.recipe._sources.append(self.theclass(self.recipe, *args, **keywords))
def clearBuildReqs(*buildReqs):
    """ Clears inherited build requirement lists of a given set of packages,
        or all packages if none listed.

        Walks every PackageRecipe subclass in the calling module's namespace
        (and each one's base classes) and strips the named requirements --
        or empties buildRequires entirely when called with no arguments.
    """
    def _strip(cls, names):
        # no names means "clear everything"
        if not names:
            cls.buildRequires = []
        else:
            for entry in names:
                if entry in cls.buildRequires:
                    cls.buildRequires.remove(entry)

    callerGlobals = inspect.stack()[1][0].f_globals
    candidates = [ value for value in callerGlobals.itervalues()
                   if inspect.isclass(value)
                      and issubclass(value, PackageRecipe) ]

    for cls in candidates:
        _strip(cls, buildReqs)
        # strip the inherited lists as well
        for ancestor in inspect.getmro(cls):
            if issubclass(ancestor, PackageRecipe):
                _strip(ancestor, buildReqs)
class _recipeHelper:
def __init__(self, list, recipe, theclass):
self.list = list
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.list.append(self.theclass(self.recipe, *args, **keywords))
class _policyUpdater:
def __init__(self, theobject):
self.theobject = theobject
def __call__(self, *args, **keywords):
self.theobject.updateArgs(*args, **keywords)
class Recipe:
    """Virtual base class for all Recipes"""
    # source trove this recipe was loaded from (set by the recipe loader)
    _trove = None
    # use flags tracked while the recipe module was being loaded
    _trackedFlags = None
    # NOTE: these two are class-level and shared until the loader assigns
    # fresh objects onto a loaded recipe class; treat them as loader-owned.
    _loadedTroves = []
    _loadedSpecs = {}

    def __init__(self):
        # Recipe itself is abstract; only subclasses may be instantiated
        assert(self.__class__ is not Recipe)

    @classmethod
    def getLoadedTroves(class_):
        # (name, version, flavor) tuples of recipes pulled in via loadRecipe
        return class_._loadedTroves

    @classmethod
    def getLoadedSpecs(class_):
        # mapping of troveSpec string -> (troveTuple, recipe class)
        return class_._loadedSpecs

    def __repr__(self):
        return "<%s Object>" % self.__class__
class PackageRecipe(Recipe):
    """Base class for package-building recipes: holds the source, build and
    policy action lists and the macro set used to cook a package."""
    buildRequires = []
    Flags = use.LocalFlags
    # becomes True once a recipe sets the main directory explicitly
    explicitMainDir = False

    def mainDir(self, new=None, explicit=True):
        """Return (and optionally set) the main build directory; *new* is
        interpolated against the recipe macros."""
        if new:
            self.theMainDir = new % self.macros
            self.macros.maindir = self.theMainDir
            self.explicitMainDir |= explicit
        return self.theMainDir

    def nameVer(self):
        """Return the 'name-version' string for this recipe."""
        return '-'.join((self.name, self.version))

    def cleanup(self, builddir, destdir):
        """Remove the build directory unless cfg.noClean is set."""
        if 'noClean' in self.cfg.__dict__ and self.cfg.noClean:
            pass
        else:
            util.rmtree(builddir)

    def sourceMap(self, path):
        """Record *path* under its basename; remember conflicting paths that
        share a basename so fetchAllSources can refuse them."""
        basepath = os.path.basename(path)
        if basepath in self.sourcePathMap:
            if basepath == path:
                # we only care about truly different source locations with the
                # same basename
                return
            if basepath in self.pathConflicts:
                self.pathConflicts[basepath].append(path)
            else:
                self.pathConflicts[basepath] = [
                    # previous (first) instance
                    self.sourcePathMap[basepath],
                    # this instance
                    path
                ]
        else:
            self.sourcePathMap[basepath] = path

    def fetchAllSources(self):
        """
        returns a list of file locations for all the sources in
        the package recipe
        """
        # first make sure we had no path conflicts:
        if self.pathConflicts:
            errlist = []
            for basepath in self.pathConflicts.keys():
                errlist.extend([x for x in self.pathConflicts[basepath]])
            raise RecipeFileError, '\n'.join(errlist)
        self.prepSources()
        files = []
        for src in self._sources:
            f = src.fetch()
            if f:
                if type(f) in (tuple, list):
                    files.extend(f)
                else:
                    files.append(f)
        return files

    def checkBuildRequirements(self, cfg, sourceVersion, ignoreDeps=False):
        """ Checks to see if the build requirements for the recipe
            are installed
        """
        def _filterBuildReqsByVersionStr(versionStr, troves):
            # keep only troves whose version labels match the buildreq's
            # ':tag' or '@namespace:tag' version qualifier
            if not versionStr:
                return troves
            versionMatches = []
            if versionStr.find('@') == -1:
                if versionStr.find(':') == -1:
                    log.warning('Deprecated buildreq format. Use '
                                ' foo=:tag, not foo=tag')
                    versionStr = ':' + versionStr
            # we don't allow full version strings or just releases
            if versionStr[0] not in ':@':
                raise RecipeFileError("Unsupported buildReq format")

            for trove in troves:
                labels = trove.getVersion().iterLabels()
                if versionStr[0] == ':':
                    branchTag = versionStr[1:]
                    branchTags = [ x.getLabel() for x in labels ]
                    if branchTag in branchTags:
                        versionMatches.append(trove)
                else:
                    # versionStr must begin with an @
                    branchNames = []
                    for label in labels:
                        branchNames.append('@%s:%s' % (label.getNamespace(),
                                                       label.getLabel()))
                    if versionStr in branchNames:
                        versionMatches.append(trove)
            return versionMatches

        def _filterBuildReqsByFlavor(flavor, troves):
            # newest version first; without a flavor the newest trove wins
            troves.sort(lambda a, b: a.getVersion().__cmp__(b.getVersion()))
            if not flavor:
                return troves[-1]
            # NOTE(review): this reads the enclosing scope's versionMatches
            # (which is what gets passed in as troves) rather than the
            # troves parameter -- works only by accident of closure scoping
            for trove in reversed(versionMatches):
                troveFlavor = trove.getFlavor()
                if troveFlavor.stronglySatisfies(flavor):
                    return trove

        db = database.Database(cfg.root, cfg.dbPath)
        # NOTE(review): 'time' is computed but never used (and shadows the
        # time module name)
        time = sourceVersion.timeStamps()[-1]
        reqMap = {}
        missingReqs = []
        for buildReq in self.buildRequires:
            (name, versionStr, flavor) = updatecmd.parseTroveSpec(buildReq)
            # XXX move this to use more of db.findTrove's features, instead
            # of hand parsing
            try:
                troves = db.trovesByName(name)
                troves = db.getTroves(troves)
            except repository.TroveNotFound:
                missingReqs.append(buildReq)
                continue
                # NOTE(review): this 'break' is unreachable after 'continue'
                # -- dead code? confirm against original indentation
                break
            versionMatches = _filterBuildReqsByVersionStr(versionStr, troves)
            if not versionMatches:
                missingReqs.append(buildReq)
                continue
            match = _filterBuildReqsByFlavor(flavor, versionMatches)
            if match:
                reqMap[buildReq] = match
            else:
                missingReqs.append(buildReq)

        if missingReqs:
            if not ignoreDeps:
                log.error("Could not find the following troves "
                          "needed to cook this recipe:\n"
                          "%s" % '\n'.join(sorted(missingReqs)))
                raise cook.CookError, 'unresolved build dependencies'
        self.buildReqMap = reqMap

    def extraSource(self, action):
        """
        extraSource allows you to append a source list item that is
        not a part of source.py.  Be aware when writing these source
        list items that you are writing conary internals!  In particular,
        anything that needs to add a source file to the repository will
        need to implement fetch(), and all source files will have to be
        sought using the lookaside cache.
        """
        self._sources.append(action)

    def prepSources(self):
        """Run doPrep on every source action."""
        for source in self._sources:
            source.doPrep()

    def processResumeList(self, resume):
        """Parse a resume spec like '10:20,30' into [[begin, end], ...]
        pairs of line numbers and store it on self.resumeList."""
        resumelist = []
        if resume:
            lines = resume.split(',')
            for line in lines:
                if ':' in line:
                    begin, end = line.split(':')
                    if begin:
                        begin = int(begin)
                    if end:
                        end = int(end)
                    resumelist.append([begin, end])
                else:
                    # a single bare number resumes from that line onwards;
                    # with multiple entries it means just that one line
                    if len(lines) == 1:
                        resumelist.append([int(line), False])
                    else:
                        resumelist.append([int(line), int(line)])
        self.resumeList = resumelist

    def iterResumeList(self, actions):
        """Yield only the actions whose recipe line numbers fall inside the
        parsed resume ranges (self.resumeList, which must be in order)."""
        resume = self.resumeList
        resumeBegin = resume[0][0]
        resumeEnd = resume[0][1]
        for action in actions:
            if not resumeBegin or action.linenum >= resumeBegin:
                if not resumeEnd or action.linenum <= resumeEnd:
                    yield action
                elif resumeEnd:
                    # past the current range: advance to the next one
                    resume = resume[1:]
                    if not resume:
                        return
                    resumeBegin = resume[0][0]
                    resumeEnd = resume[0][1]
                    if action.linenum == resumeBegin:
                        yield action

    def unpackSources(self, builddir, destdir, resume=None):
        """Prep and run the source actions (all of them, or only the resume
        subset; resume == 'policy' skips sources entirely)."""
        self.macros.builddir = builddir
        self.macros.destdir = destdir
        if resume == 'policy':
            return
        elif resume:
            log.debug("Resuming on line(s) %s" % resume)
            # note resume lines must be in order
            self.processResumeList(resume)
            for source in self.iterResumeList(self._sources):
                source.doPrep()
                source.doAction()
        else:
            for source in self._sources:
                source.doPrep()
                source.doAction()

    def extraBuild(self, action):
        """
        extraBuild allows you to append a build list item that is
        not a part of build.py.  Be aware when writing these build
        list items that you are writing conary internals!
        """
        self._build.append(action)

    def doBuild(self, buildPath, resume=None):
        """Run the build actions (or the resume subset) inside the recipe's
        main directory under buildPath."""
        builddir = os.sep.join((buildPath, self.mainDir()))
        self.macros.builddir = builddir
        self.magic = magic.magicCache(self.macros.destdir)
        if resume == 'policy':
            return
        if resume:
            for bld in self.iterResumeList(self._build):
                bld.doAction()
        else:
            for bld in self._build:
                bld.doAction()

    def doDestdirProcess(self):
        """Run every destdir policy against this recipe."""
        for post in self.destdirPolicy:
            post.doProcess(self)

    def getPackages(self):
        """Run package policy and return the built components."""
        # policies look at the recipe instance for all information
        for policy in self.packagePolicy:
            policy.doProcess(self)
        return self.autopkg.getComponents()

    def setByDefaultOn(self, includeSet):
        # components that should be installed by default
        self.byDefaultIncludeSet = includeSet

    def setByDefaultOff(self, excludeSet):
        # components that should not be installed by default
        self.byDefaultExcludeSet = excludeSet

    def byDefault(self, compName):
        """Return whether component *compName* is installed by default;
        a full 'foo:bar' entry overrides a bare ':bar' entry."""
        c = compName[compName.index(':'):]
        if compName in self.byDefaultIncludeSet:
            # intended for foo:bar overrides :bar in excludelist
            return True
        if compName in self.byDefaultExcludeSet:
            # explicitly excluded
            return False
        if c in self.byDefaultIncludeSet:
            return True
        if c in self.byDefaultExcludeSet:
            return False
        return True

    def disableParallelMake(self):
        """Clear the parallel make flags macro for this recipe."""
        self.macros._override('parallelmflags', '')

    def populateLcache(self):
        """
        Populate a repository lookaside cache
        """
        recipeClass = self.__class__
        repos = self.laReposCache.repos

        # build a list containing this recipe class and any ancestor class
        # from which it descends
        classes = [ recipeClass ]
        bases = list(recipeClass.__bases__)
        while bases:
            parent = bases.pop()
            bases.extend(list(parent.__bases__))
            if issubclass(parent, PackageRecipe):
                classes.append(parent)

        # reverse the class list, this way the files will be found in the
        # youngest descendant first
        classes.reverse()

        # populate the repository source lookaside cache from the :source
        # components
        for rclass in classes:
            if not rclass._trove:
                continue
            srcName = rclass._trove.getName()
            srcVersion = rclass._trove.getVersion()
            for f in repos.iterFilesInTrove(srcName, srcVersion,
                                            deps.DependencySet(),
                                            withFiles=True):
                pathId, path, fileId, version, fileObj = f
                assert(path[0] != "/")
                # we might need to retrieve this source file
                # to enable a build, so we need to find the
                # sha1 hash of it since that's how it's indexed
                # in the file store
                if isinstance(fileObj, files.RegularFile):
                    # it only makes sense to fetch regular files, skip
                    # anything that isn't
                    self.laReposCache.addFileHash(srcName, srcVersion, pathId,
                        path, fileId, version, fileObj.contents.sha1())

    def isatty(self, value=None):
        """Get (or, with an argument, set) the recipe's tty flag."""
        if value is not None:
            self._tty = value
        return self._tty

    def __getattr__(self, name):
        """
        Allows us to dynamically suck in namespace of other modules
        with modifications.
         - The public namespace of the build module is accessible,
           and build objects are created and put on the build list
           automatically when they are referenced.
         - The public namespaces of the policy modules are accessible;
           policy objects already on their respective lists are returned,
           policy objects not on their respective lists are added to
           the end of their respective lists like build objects are
           added to the build list.
        """
        if not name.startswith('_'):
            if name.startswith('add'):
                # r.addFoo(...) -> source.Foo action appended to _sources
                return _sourceHelper(source.__dict__[name[3:]], self)
            if name in build.__dict__:
                # r.Foo(...) -> build.Foo action appended to _build
                return _recipeHelper(self._build, self, build.__dict__[name])
            for (policy, list) in (
                (destdirpolicy, self.destdirPolicy),
                (packagepolicy, self.packagePolicy)):
                if name in policy.__dict__:
                    policyClass = policy.__dict__[name]
                    # existing policy instances are updated in place...
                    for policyObj in list:
                        if isinstance(policyObj, policyClass):
                            return _policyUpdater(policyObj)
                    # ...new ones are appended to the policy list
                    return _recipeHelper(list, self, policyClass)
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError, name

    def __delattr__(self, name):
        """
        Allows us to delete policy items from their respective lists
        by deleting a name in the recipe self namespace.  For example,
        to remove the EtcConfig package policy from the package policy
        list, one could do::
         del self.EtcConfig
        This would prevent the EtcConfig package policy from being
        executed.  The policy objects are carefully ordered in the
        default policy lists; deleting a policy object and then
        referencing it again will cause it to show up at the end of
        the list.  Don't do that.

        In general, delete policy only as a last resort; you can
        usually disable policy entirely with the keyword argument::
         exceptions='.*'
        """
        for (policy, list) in (
            (destdirpolicy, self.destdirPolicy),
            (packagepolicy, self.packagePolicy)):
            if name in policy.__dict__:
                policyClass = policy.__dict__[name]
                for index in range(len(list)):
                    policyObj = list[index]
                    if isinstance(policyObj, policyClass):
                        del list[index]
                        return
        del self.__dict__[name]

    def _includeSuperClassBuildReqs(self):
        """ Include build requirements from super classes by searching
            up the class hierarchy for buildRequires.  You can only
            override this currenly by calling
            <superclass>.buildRequires.remove()
        """
        buildReqs = set()
        for base in inspect.getmro(self.__class__):
            buildReqs.update(getattr(base, 'buildRequires', []))
        self.buildRequires = list(buildReqs)

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
        # NOTE(review): mutable default for extraMacros -- only read here,
        # never mutated, so the shared default is harmless
        Recipe.__init__(self)
        self._sources = []
        self._build = []
        self._includeSuperClassBuildReqs()
        self.destdirPolicy = destdirpolicy.DefaultPolicy(self)
        self.packagePolicy = packagepolicy.DefaultPolicy(self)
        self.byDefaultIncludeSet = frozenset()
        self.byDefaultExcludeSet = frozenset()
        self.cfg = cfg
        self.laReposCache = laReposCache
        self.srcdirs = srcdirs
        self.macros = macros.Macros()
        self.macros.update(baseMacros)
        self.macros.update(use.Arch._getMacros())
        # allow for architecture not to be set -- this could happen
        # when storing the recipe e.g.
        for key in cfg.macroKeys():
            self.macros._override(key, cfg['macros.' + key])
        self.macros.name = self.name
        self.macros.version = self.version
        self.packages = { self.name : True }
        if extraMacros:
            self.macros.update(extraMacros)
        self.mainDir(self.nameVer(), explicit=False)
        self.sourcePathMap = {}
        self.pathConflicts = {}
class UserGroupInfoRecipe(PackageRecipe):
    """Common base for info- recipes that build a single user or group
    definition component instead of a normal package."""
    # abstract base class
    ignore = 1

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
        PackageRecipe.__init__(self, cfg, laReposCache, srcdirs, extraMacros)
        # info recipes run no destdir/package policy
        self.destdirPolicy = []
        self.packagePolicy = []
        # (name, 'user'|'group') pairs added via requiresUser/requiresGroup
        self.requires = []
        # set by subclass actions: destination path and real file to package
        self.infofilename = None
        self.realfilename = None

    def getPackages(self):
        """Build and return the single info-<name>:<type> component with its
        provides/requires dependency sets attached."""
        comp = buildpackage.BuildComponent(
            'info-%s:%s' %(self.infoname, self.type), self)
        f = comp.addFile(self.infofilename, self.realfilename)
        f.tags.set("%s-info" %self.type)
        self.addProvides(f)
        self.addRequires(f)
        comp.provides.union(f.provides())
        comp.requires.union(f.requires())
        return [comp]

    def addProvides(self, f):
        # subclasses attach the user/group provides to f
        pass

    def addRequires(self, f):
        """Attach user/group info dependencies for everything recorded via
        requiresUser/requiresGroup."""
        if not self.requires:
            return
        depSet = deps.DependencySet()
        for info, type in self.requires:
            if type == 'user':
                depClass = deps.UserInfoDependencies
            else:
                depClass = deps.GroupInfoDependencies
            depSet.addDep(depClass, deps.Dependency(info, []))
        f.requires.set(depSet)

    def requiresUser(self, user):
        """Record a dependency on the given user's info package."""
        self.requires.append((user, 'user'))

    def requiresGroup(self, group):
        """Record a dependency on the given group's info package."""
        self.requires.append((group, 'group'))

    def __getattr__(self, name):
        # expose the usergroup module's action classes as recipe methods
        if not name.startswith('_'):
            if name in usergroup.__dict__:
                return _recipeHelper(self._build, self,
                                     usergroup.__dict__[name])
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError, name
class UserInfoRecipe(UserGroupInfoRecipe):
    """Info recipe type for packages that define a user."""
    type = 'user'
    # abstract base class
    ignore = 1

    def addProvides(self, f):
        """Mark *f* as providing both the user and its primary group."""
        provided = deps.DependencySet()
        provided.addDep(deps.UserInfoDependencies,
                        deps.Dependency(self.infoname, []))
        provided.addDep(deps.GroupInfoDependencies,
                        deps.Dependency(self.groupname, []))
        f.provides.set(provided)
class GroupInfoRecipe(UserGroupInfoRecipe):
    """Info recipe type for packages that define a group."""
    type = 'group'
    # abstract base class
    ignore = 1

    def addProvides(self, f):
        """Mark *f* as providing the group."""
        provided = deps.DependencySet()
        provided.addDep(deps.GroupInfoDependencies,
                        deps.Dependency(self.infoname, []))
        f.provides.set(provided)
# XXX the next four classes will probably migrate to the repository
# somehow, but not until we have figured out how to do this without
# requiring that every recipe have a loadSuperClass line in it.
class DistroPackageRecipe(PackageRecipe):
    """
    Most packages in the distribution should descend from this class,
    directly or indirectly, except for direct build requirements of
    this class.  This package differs from the C{PackageRecipe}
    class only by providing explicit build requirements.
    """
    # :lib in here is only for runtime, not to link against.
    # Any package that needs to link should still specify the :devellib
    buildRequires = [
        'filesystem:runtime',
        'setup:runtime',
        'python:runtime',
        'python:lib',
        'conary:runtime',
        'conary:lib',
        'conary:python',
        'sqlite:lib',
        'bzip2:runtime',
        'gzip:runtime',
        'tar:runtime',
        'cpio:runtime',
        'patch:runtime',
    ]
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# NOTE(review): registers this abstract class with RecipeLoader -- presumably
# copied into each loaded recipe module's namespace; confirm in addRecipeToCopy
RecipeLoader.addRecipeToCopy(DistroPackageRecipe)
class BuildPackageRecipe(DistroPackageRecipe):
    """
    Packages that need to be built with the make utility and basic standard
    shell tools should descend from this recipe in order to automatically
    have a reasonable set of build requirements.  This package differs
    from the C{PackageRecipe} class only by providing explicit build
    requirements.
    """
    # Again, no :devellib here
    buildRequires = [
        'coreutils:runtime',
        'make:runtime',
        'mktemp:runtime',
        # all the rest of these are for configure
        'findutils:runtime',
        'gawk:runtime',
        'grep:runtime',
        'sed:runtime',
        'diffutils:runtime',
    ]
    # inherit the DistroPackageRecipe requirements as well
    buildRequires.extend(DistroPackageRecipe.buildRequires)
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# register this abstract class with the loader (see note above DistroPackageRecipe usage)
RecipeLoader.addRecipeToCopy(BuildPackageRecipe)
class CPackageRecipe(BuildPackageRecipe):
    """
    Most packages should descend from this recipe in order to automatically
    have a reasonable set of build requirements for a package that builds
    C source code to binaries.  This package differs from the
    C{PackageRecipe} class only by providing explicit build requirements.
    """
    buildRequires = [
        'binutils:runtime',
        'binutils:lib',
        'binutils:devellib',
        'gcc:runtime',
        'gcc:lib',
        'gcc:devellib',
        'glibc:runtime',
        'glibc:lib',
        'glibc:devellib',
        'glibc:devel',
        'debugedit:runtime',
    ]
    # inherit the BuildPackageRecipe requirements as well
    buildRequires.extend(BuildPackageRecipe.buildRequires)
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# register this abstract class with the loader
RecipeLoader.addRecipeToCopy(CPackageRecipe)
class AutoPackageRecipe(CPackageRecipe):
    """
    Recipe class for simple packages built with auto* tools.  Child
    classes should provide the C{unpack()} method for populating the
    source list.  To call policy, implement the C{policy()} method and
    put any necessary policy invocations there.  Next mostly likely is
    to provide a C{makeinstall()} method if C{MakeInstall()} is
    insufficient for the package.  Least likely to need overriding
    are C{configure()} if C{Configure()} is insufficient, and
    C{make()} if C{Make()} is insufficient.
    """
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1

    def setup(r):
        # canonical autotools skeleton; subclasses override the steps below
        r.unpack()
        r.configure()
        r.make()
        r.makeinstall()
        r.policy()

    def unpack(r):
        # subclasses must add their source actions here
        pass
    def configure(r):
        r.Configure()
    def make(r):
        r.Make()
    def makeinstall(r):
        r.MakeInstall()
    def policy(r):
        # optional extra policy invocations; none by default
        pass
# register this abstract class with the loader
RecipeLoader.addRecipeToCopy(AutoPackageRecipe)
class SingleGroup:
def addTrove(self, name, versionStr = None, flavor = None, source = None,
byDefault = None, ref = None):
self.addTroveList.append((name, versionStr, flavor, source,
byDefault, ref))
def removeTrove(self, name, versionStr = None, flavor = None):
self.removeTroveList.append((name, versionStr, flavor))
def addAllTroves(self, reference, byDefault = None):
self.addReferenceList.append((reference, byDefault))
def addNewGroup(self, name, byDefault = None):
self.newGroupList.append([ name, byDefault ])
def setByDefault(self, byDefault):
assert(isinstance(byDefault, bool))
self.byDefault = byDefault
def _foundTrove(self, troveTup, size, byDefault, isRedirect):
self.troves[troveTup] = (size, byDefault)
if isRedirect:
# we check later to ensure that all redirects added
# by addTrove lines (or other means) are removed
# by removeTrove lines later.
self.redirects.add(troveTup)
def findTroves(self, troveMap, repos):
self._findTroves(troveMap)
self._removeTroves(repos)
self._checkForRedirects()
def autoResolveDeps(self, cfg, repos, labelPath, includedTroves):
if self.autoResolve:
self._resolveDependencies(cfg, repos, labelPath, includedTroves)
def checkDependencies(self, cfg, includedTroves):
if self.depCheck:
failedDeps = self._checkDependencies(cfg, includedTroves)
if failedDeps:
return failedDeps
def calcSize(self):
self.size = 0
validSize = True
for (n,v,f), (size, byDefault) in self.troves.iteritems():
if size is None:
validSize = False
self.size = None
if validSize:
self.size += size
l = self.troveVersionFlavors.setdefault(n,[])
l.append((v,f,byDefault))
def _findTroves(self, troveMap):
""" given a trove map which already contains a dict for all queries
needed for all groups cooked, pick out those troves that
are relevant to this group.
"""
validSize = True
self.troves = {}
for (name, versionStr, flavor, source, byDefault, refSource) \
in self.addTroveList:
troveList = troveMap[refSource][name, versionStr, flavor]
if byDefault is None:
byDefault = self.byDefault
for (troveTup, size, isRedirect) in troveList:
self._foundTrove(troveTup, size, byDefault, isRedirect)
# these are references which were used in addAllTroves() commands
for refSource, byDefault in self.addReferenceList:
troveList = refSource.getSourceTroves()
troveTups = [ x for x in chain(
*[x.iterTroveList() for x in troveList])]
troveList = refSource.getTroves(troveTups, withFiles=False)
if byDefault is None:
byDefault = self.byDefault
for (troveTup, trv) in izip(troveTups, troveList):
self._foundTrove(troveTup, trv.getSize(), byDefault,
trv.isRedirect())
def getDefaultTroves(self):
return [ x[0] for x in self.troves.iteritems() if x[1][1] ]
def _resolveDependencies(self, cfg, repos, labelPath, includedTroves):
""" adds the troves needed to to resolve all open dependencies
in this group. Will raise an error if not all dependencies
can be resolved.
"""
#FIXME: this should probably be able to resolve against
# other trove source than the repository.
# set up configuration
oldDbPath = cfg.dbPath
cfg.setValue('dbPath', ':memory:')
oldRoot = cfg.root
cfg.setValue('root', ':memory:')
oldInstallLabelPath = cfg.installLabelPath
resolveLabelPath = labelPath
cfg.installLabelPath = labelPath
oldAutoResolve = cfg.autoResolve
cfg.autoResolve = True
# set up a conaryclient to do the dep solving
client = conaryclient.ConaryClient(cfg)
if self.checkOnlyByDefaultDeps:
troveList = self.getDefaultTroves() + includedTroves
else:
troveList = list(self.troves) + includedTroves
# build a list of the troves that we're checking so far
troves = [ (n, (None, None), (v, f), True) for (n,v,f) in troveList]
updJob, suggMap = client.updateChangeSet(troves, recurse = True,
resolveDeps = True,
test = True)
# restore config
cfg.setValue('dbPath', oldDbPath)
cfg.setValue('root', oldRoot)
cfg.installLabelPath = oldInstallLabelPath
cfg.autoResolve = oldAutoResolve
neededTups = set(chain(*suggMap.itervalues()))
troves = repos.getTroves(neededTups, withFiles=False)
for troveTup, trv in izip(neededTups, troves):
self._foundTrove(troveTup, trv.getSize(), self.byDefault,
trv.isRedirect())
def _checkDependencies(self, cfg, includedTroves):
if self.checkOnlyByDefaultDeps:
troveList = self.getDefaultTroves()
else:
troveList = list(self.troves)
troveList += includedTroves
troves = [ (n, (None, None), (v, f), True) for (n,v,f) in troveList]
oldDbPath = cfg.dbPath
cfg.setValue('dbPath', ':memory:')
oldRoot = cfg.root
cfg.setValue('root', ':memory:')
client = conaryclient.ConaryClient(cfg)
if self.checkOnlyByDefaultDeps:
depCs = client.updateChangeSet(troves, recurse = True,
resolveDeps=False, split=False)[0]
cs = depCs.csList[0]
else:
cs = client.repos.createChangeSet(troves,
recurse = True, withFiles=False)
failedDeps = client.db.depCheck(cs)[0]
cfg.setValue('dbPath', oldDbPath)
cfg.setValue('root', oldRoot)
return failedDeps
def _removeTroves(self, source):
groupSource = trovesource.GroupRecipeSource(source, self)
groupSource.searchAsDatabase()
results = groupSource.findTroves(None, self.removeTroveList)
troveTups = chain(*results.itervalues())
for troveTup in troveTups:
del self.troves[troveTup]
self.redirects.discard(troveTup)
def _checkForRedirects(self):
if self.redirects:
redirects = [('%s=%s[%s]' % (n,v.asString(),deps.formatFlavor(f))) \
for (n,v,f) in sorted(self.redirects)]
raise RecipeFileError, \
"found redirects, which are not allowed in groups: \n%s" \
% '\n'.join(redirects)
    def getRequires(self):
        """ Return the DependencySet of requirements added via Requires(). """
        return self.requires
    def getTroveList(self):
        """ Return the trove name -> version/flavor mapping for this group
            (populated elsewhere; empty dict until troves are found). """
        return self.troveVersionFlavors
    def getNewGroupList(self):
        """ Return the list of (groupName, byDefault) subgroups added to
            this group. """
        return self.newGroupList
    def __init__(self, depCheck, autoResolve, checkOnlyByDefaultDeps,
                 byDefault = True):
        """ State for one group assembled by a group recipe.

            @param depCheck: whether to verify the group is dependency
                   complete
            @param autoResolve: whether to automatically pull in troves
                   needed to close dependencies
            @param checkOnlyByDefaultDeps: whether only byDefault troves
                   count during dependency checking/resolution
            @param byDefault: default install-by-default value for troves
                   added to this group
        """
        # redirect troves seen so far; redirects are not allowed in groups
        self.redirects = set()
        self.addTroveList = []       # queued addTrove() requests
        self.addReferenceList = []   # (refSource, byDefault) pairs from addAllTroves()
        self.removeTroveList = []    # trove specs queued for removal
        self.newGroupList = []       # (groupName, byDefault) subgroups
        self.requires = deps.DependencySet()
        self.troveVersionFlavors = {}
        self.depCheck = depCheck
        self.autoResolve = autoResolve
        self.checkOnlyByDefaultDeps = checkOnlyByDefaultDeps
        self.byDefault = byDefault
    def Requires(self, requirement):
        """ Attach an extra trove dependency (by name) to this group. """
        self.requires.addDep(deps.TroveDependencies,
                             deps.Dependency(requirement))
class _GroupReference:
    """ A reference to a set of troves, created by a trove spec, that
        can be searched like a repository using findTrove.  Hashable
        by the trove spec(s) given.  References may be recursive: a
        reference can be relative to another reference, passed in as
        upstreamSource.
    """
    def __init__(self, troveSpecs, upstreamSource=None):
        self.troveSpecs = troveSpecs
        self.upstreamSource = upstreamSource

    def __hash__(self):
        return hash((self.troveSpecs, self.upstreamSource))

    def findSources(self, repos, labelPath, flavorPath):
        """ Resolve the trove specs and set up the searchable source
            for this reference. """
        baseSource = self.upstreamSource
        if baseSource is None:
            # no upstream reference: search the repository directly
            baseSource = repos

        found = baseSource.findTroves(labelPath, self.troveSpecs, flavorPath)
        tupList = list(chain(*found.itervalues()))
        self.sourceTups = tupList
        self.source = trovesource.TroveListTroveSource(baseSource, tupList)
        self.source.searchAsRepository()

    def findTroves(self, *args, **kw):
        # delegate searching to the resolved trove source
        return self.source.findTroves(*args, **kw)

    def getTroves(self, *args, **kw):
        # delegate retrieval to the resolved trove source
        return self.source.getTroves(*args, **kw)

    def getSourceTroves(self):
        """ Returns the list of troves that form this reference
            (without their children).
        """
        return self.getTroves(self.sourceTups, withFiles=False)
class GroupRecipe(Recipe):
Flags = use.LocalFlags
depCheck = False
autoResolve = False
checkOnlyByDefaultDeps = True
def Requires(self, requirement, groupName = None):
if requirement[0] == '/':
raise RecipeFileError, 'file requirements not allowed in groups'
if groupName is None: groupName = self.name
self.groups[groupName].Requires(requirement)
def _parseFlavor(self, flavor):
assert(flavor is None or isinstance(flavor, str))
if flavor is None:
return None
flavorObj = deps.parseFlavor(flavor)
if flavorObj is None:
raise ValueError, 'invalid flavor: %s' % flavor
return flavorObj
def _parseGroupNames(self, groupName):
if groupName is None:
return [self.name]
elif not isinstance(groupName, (list, tuple)):
return [groupName]
else:
return groupName
def addTrove(self, name, versionStr = None, flavor = None, source = None,
byDefault = None, groupName = None, ref=None):
groupNames = self._parseGroupNames(groupName)
flavor = self._parseFlavor(flavor)
# track this trove in the GroupRecipe so that it can be found
# as a group with the rest of the troves.
self.toFind.setdefault(ref, set()).add((name, versionStr, flavor))
if ref is not None:
self.sources.add(ref)
for groupName in groupNames:
self.groups[groupName].addTrove(name, versionStr = versionStr,
flavor = flavor,
source = source,
byDefault = byDefault,
ref = ref)
def setByDefault(self, byDefault=True, groupName=None):
""" Set whether troves added to this group are installed by default
or not. (This default value can be overridden by the byDefault
parameter to individual addTrove commands). If you set the
byDefault value for the main group, you set it for any
future groups created.
"""
groupNames = self._parseGroupNames()
for groupName in groupNames:
self.groups[groupName].setByDefault(byDefault)
def addAllTroves(self, reference, groupName=None):
""" Add all of the troves directly contained in the given
reference to groupName. For example, if the cooked group-foo
contains references to the troves
foo1=<version>[flavor] and foo2=<version>[flavor],
the lines
ref = r.addReference('group-foo')
followed by
r.addAllTroves(ref)
would be equivalent to you having added the addTrove lines
r.addTrove('foo1', <version>)
r.addTrove('foo2', <version>)
"""
assert(reference is not None)
self.sources.add(reference)
groupNames = self._parseGroupNames(groupName)
for groupName in groupNames:
self.groups[groupName].addAllTroves(reference)
def removeTrove(self, name, versionStr=None, flavor=None,
groupName=None):
""" Remove a trove added to this group, either by an addAllTroves
line or by an addTrove line.
"""
groupNames = self._parseGroupNames(groupName)
flavor = self._parseFlavor(flavor)
for groupName in groupNames:
self.groups[groupName].removeTrove(name, versionStr, flavor)
def addReference(self, name, versionStr=None, flavor=None, ref=None):
flavor = self._parseFlavor(flavor)
return _GroupReference(((name, versionStr, flavor),), ref)
def addNewGroup(self, name, groupName = None, byDefault = True):
if groupName is None:
groupName = self.name
if not self.groups.has_key(name):
raise RecipeFileError, 'group %s has not been created' % name
self.groups[groupName].addNewGroup(name, byDefault)
def getRequires(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getRequires()
def getTroveList(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getTroveList()
def getNewGroupList(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getNewGroupList()
def getSize(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].size
def setLabelPath(self, *path):
self.labelPath = [ versions.Label(x) for x in path ]
def createGroup(self, groupName, depCheck = False, autoResolve = False,
byDefault = None, checkOnlyByDefaultDeps = None):
if self.groups.has_key(groupName):
raise RecipeFileError, 'group %s was already created' % groupName
if not groupName.startswith('group-'):
raise RecipeFileError, 'group names must start with "group-"'
if byDefault is None:
byDefault = self.groups[self.name].byDefault
if checkOnlyByDefaultDeps is None:
checkOnlyByDefaultDeps = self.groups[self.name].checkOnlyByDefaultDeps
self.groups[groupName] = SingleGroup(depCheck, autoResolve,
checkOnlyByDefaultDeps, byDefault)
def getGroupNames(self):
return self.groups.keys()
def _orderGroups(self):
""" Order the groups so that each group is after any group it
contains. Raises an error if a cycle is found.
"""
# boy using a DFS for such a small graph seems like overkill.
# but its handy since we're also trying to find a cycle at the same
# time.
children = {}
groupNames = self.getGroupNames()
for groupName in groupNames:
children[groupName] = \
set([x[0] for x in self.getNewGroupList(groupName)])
timeStamp = 0
# the different items in the seen dict
VISITED = 0 # True if we've added this node to the stack
START = 1 # time at which the node was first visited
FINISH = 2 # time at which all the nodes child nodes were finished
# with
PATH = 3 # path to get to this node from wherever it was
# started.
seen = dict((x, [False, None, None, []]) for x in groupNames)
for groupName in groupNames:
if seen[groupName][VISITED]: continue
stack = [groupName]
seen[groupName][VISITED] = True
while stack:
timeStamp += 1
node = stack[-1]
if not seen[node][START]:
seen[node][START] = timeStamp
childList = []
if children[node]:
path = seen[node][PATH] + [node]
for child in children[node]:
if child in path:
cycle = path[path.index(child):] + [child]
raise RecipeFileError('cycle in groups: %s' % cycle)
if not seen[child][VISITED]:
childList.append(child)
if not childList:
# we've finished with all this nodes children
# mark it as done
seen[node][FINISH] = timeStamp
stack = stack[:-1]
else:
path = seen[node][PATH] + [node]
for child in childList:
seen[child] = [True, None, None, path]
stack.append(child)
groupsByLastSeen = ( (seen[x][FINISH], x) for x in groupNames)
return [x[1] for x in sorted(groupsByLastSeen)]
def _getIncludedTroves(self, groupName, checkOnlyByDefaultDeps):
"""
Returns the troves in all subGroups included by this trove.
If checkOnlyByDefaultDeps is False, exclude troves that are
not included by default.
"""
allTroves = []
childGroups = []
for childGroup, byDefault in self.groups[groupName].getNewGroupList():
if byDefault or not checkOnlyByDefaultDeps:
childGroups.append(childGroup)
while childGroups:
childGroup = childGroups.pop()
groupObj = self.groups[childGroup]
if checkOnlyByDefaultDeps:
allTroves.extend(groupObj.getDefaultTroves())
else:
allTroves.extend(groupObj.troves)
for childGroup, byDft in self.groups[childGroup].getNewGroupList():
if byDft or not checkOnlyByDefaultDeps:
childGroups.append(childGroup)
return allTroves
def findAllTroves(self):
if self.toFind is not None:
# find all troves needed by all included groups together, at
# once. We then pass that information into the individual
# groups.
self._findSources()
self._findTroves()
self.toFind = None
groupNames = self._orderGroups()
for groupName in groupNames:
groupObj = self.groups[groupName]
# assign troves to this group
groupObj.findTroves(self.troveSpecMap, self.repos)
# if ordering is right, we now should be able to recurse through
# the groups included by this group and get all recursively
# included troves
includedTroves = self._getIncludedTroves(groupName,
groupObj.checkOnlyByDefaultDeps)
# include those troves when doing dependency resolution/checking
groupObj.autoResolveDeps(self.cfg, self.repos, self.labelPath,
includedTroves)
failedDeps = groupObj.checkDependencies(self.cfg, includedTroves)
if failedDeps:
return groupName, failedDeps
groupObj.calcSize()
def _findSources(self):
for troveSource in self.sources:
if troveSource is None:
continue
troveSource.findSources(self.repos, self.labelPath, self.flavor)
def _findTroves(self):
""" Finds all the troves needed by all groups, and then
stores the information for retrieval by the individual
groups (stored in troveSpecMap).
"""
repos = self.repos
cfg = self.cfg
troveTups = set()
results = {}
for troveSource, toFind in self.toFind.iteritems():
try:
if troveSource is None:
source = repos
else:
source = troveSource
results[troveSource] = source.findTroves(self.labelPath,
toFind,
cfg.buildFlavor)
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
for result in results.itervalues():
troveTups.update(chain(*result.itervalues()))
troveTups = list(troveTups)
troves = repos.getTroves(troveTups, withFiles=False)
foundTroves = dict(izip(troveTups, troves))
troveSpecMap = {}
# store the pertinent information in troveSpecMap
# keyed off of source, then troveSpec
# note - redirect troves are not allowed in group recipes.
# we track whether a trove is a redirect because it's possible
# it could be added at one point (say, by an overly general
# addTrove line) and then removed afterwards by a removeTrove.
for troveSource, toFind in self.toFind.iteritems():
d = {}
for troveSpec in toFind:
d[troveSpec] = [ (x,
foundTroves[x].getSize(),
foundTroves[x].isRedirect())
for x in results[troveSource][troveSpec] ]
troveSpecMap[troveSource] = d
self.troveSpecMap = troveSpecMap
def __init__(self, repos, cfg, label, flavor, extraMacros={}):
self.repos = repos
self.cfg = cfg
self.labelPath = [ label ]
self.flavor = flavor
self.macros = macros.Macros()
self.macros.update(extraMacros)
self.toFind = {}
self.troveSpecMap = {}
self.foundTroves = {}
self.sources = set()
self.groups = {}
self.groups[self.name] = SingleGroup(self.depCheck, self.autoResolve,
self.checkOnlyByDefaultDeps)
class RedirectRecipe(Recipe):
Flags = use.LocalFlags
def addRedirect(self, name, versionStr = None, flavorStr = None,
fromTrove = None):
if flavorStr is not None:
flavor = deps.parseFlavor(flavorStr)
if flavor is None:
raise ValueError, 'invalid flavor %s' % flavorStr
else:
flavor = None
if fromTrove is None:
fromTrove = self.name
elif fromTrove.find(":") != -1:
raise ValueError, 'components cannot be individually redirected'
self.addTroveList.append((name, versionStr, flavor, fromTrove))
def findTroves(self):
self.size = 0
validSize = True
troveList = []
packageSet = {}
for (name, versionStr, flavor, fromName) in self.addTroveList:
try:
desFlavor = self.cfg.buildFlavor.copy()
if flavor is not None:
desFlavor.union(flavor, deps.DEP_MERGE_TYPE_OVERRIDE)
pkgList = self.repos.findTrove(self.label,
(name, versionStr, desFlavor))
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
assert(len(pkgList) == 1)
packageSet[pkgList[0]] = fromName
troveList.append(pkgList[0])
troves = self.repos.getTroves(troveList, withFiles = False)
redirections = {}
for topLevelTrove in troves:
topName = topLevelTrove.getName()
topVersion = topLevelTrove.getVersion()
topFlavor = topLevelTrove.getFlavor()
fromName = packageSet[(topName, topVersion, topFlavor)]
d = self.redirections.setdefault(fromName, {})
# this redirects from oldTrove -> newTrove
d[(topName, topVersion, topFlavor)] = True
for (name, version, flavor) in topLevelTrove.iterTroveList():
# redirect from oldTrove -> referencedPackage
d[(name, version, flavor)] = True
if name.find(":") != -1:
compName = fromName + ":" + name.split(":")[1]
# redirect from oldTrove -> oldTrove:component. we
# leave version/flavor alone; they get filled in later
d[(compName, None, None)] = True
# redirect from oldTrove:component -> newTrove:component
d2 = self.redirections.setdefault(compName, {})
d2[(name, version, flavor)] = True
for name,d in redirections.iteritems():
self.redirections[name] = [ (x[0], x[1], x[2]) for x in d ]
def getRedirections(self):
return self.redirections
def __init__(self, repos, cfg, label, flavor, extraMacros={}):
self.repos = repos
self.cfg = cfg
self.redirections = {}
self.label = label
self.flavor = flavor
self.addTroveList = []
self.macros = macros.Macros()
self.macros.update(extraMacros)
class FilesetRecipe(Recipe):
    """ Recipe type that assembles a fileset: a trove built by picking
        individual files, by glob pattern, out of existing components. """
    # XXX need to work on adding files from different flavors of troves
    def addFileFromPackage(self, pattern, pkg, recurse, remapList):
        """ Add all files in pkg that match pattern to this fileset,
            applying remapList path rewrites.  Returns True if at least
            one file matched, False otherwise.  Raises RecipeFileError
            when a path would be included twice. """
        # map path -> (pathId, fileId, version) for every file in the trove
        pathMap = {}
        for (pathId, pkgPath, fileId, version) in pkg.iterFileList():
            pathMap[pkgPath] = (pathId, fileId, version)

        patternList = util.braceExpand(pattern)
        matches = {}
        for pattern in patternList:
            if not recurse:
                matchList = [ n for n in pathMap.keys() if
                                    fnmatchcase(n, pattern)]
            else:
                matchList = []
                dirCount = pattern.count("/")
                for n in pathMap.iterkeys():
                    i = n.count("/")
                    if i > dirCount:
                        # file is deeper than the pattern; match if its
                        # leading directory components match the pattern
                        dirName = os.sep.join(n.split(os.sep)[:dirCount + 1])
                        match = fnmatchcase(dirName, pattern)
                    elif i == dirCount:
                        match = fnmatchcase(n, pattern)
                    else:
                        match = False

                    if match: matchList.append(n)

            for path in matchList:
                matches[path] = pathMap[path]

        if not matches:
            return False

        for path in matches.keys():
            (pathId, fileId, version) = matches[path]

            # apply the first remap rule whose old path matches exactly
            # or as a leading directory component
            for (old, new) in remapList:
                if path == old:
                    path = new
                    break
                elif len(path) > len(old) and path.startswith(old) and \
                     path[len(old)] == "/":
                    path = new + path[len(old):]
                    break

            if self.paths.has_key(path):
                raise RecipeFileError, "%s has been included multiple times" \
                    % path

            self.files[pathId] = (path, fileId, version)
            self.paths[path] = 1

        return True

    def addFile(self, pattern, component, versionStr = None, recurse = True,
                remap = []):
        """
        Adds files which match pattern from version versionStr of component.
        Pattern is glob-style, with brace expansion. If recurse is set,
        anything below a directory which matches pattern is also included,
        and the directory itself does not have to be part of the trove.
        Remap is a list of (oldPath, newPath) tuples. The first oldPath
        which matches the start of a matched pattern is rewritten as
        newPath.
        """
        if type(remap) == tuple:
            # a single (old, new) pair was given; normalize to a list
            remap = [ remap ]

        try:
            pkgList = self.repos.findTrove(self.label,
                                           (component, versionStr, None),
                                           self.flavor)
        except repository.TroveNotFound, e:
            raise RecipeFileError, str(e)

        if len(pkgList) == 0:
            raise RecipeFileError, "no packages match %s" % component
        elif len(pkgList) > 1:
            raise RecipeFileError, "too many packages match %s" % component

        foundIt = False
        pkg = self.repos.getTrove(*pkgList[0])
        # walk the trove and all its subtroves, collecting matching files
        for sub in self.repos.walkTroveSet(pkg):
            foundIt = foundIt or self.addFileFromPackage(pattern, sub, recurse,
                                                         remap)

        if not foundIt:
            raise RecipeFileError, "%s does not exist in version %s of %s" % \
                (pattern, pkg.getVersion().asString(), pkg.getName())

    def iterFileList(self):
        """ Yield (pathId, path, fileId, version) for every collected
            file. """
        for (pathId, (path, fileId, version)) in self.files.iteritems():
            yield (pathId, path, fileId, version)

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        self.repos = repos
        self.cfg = cfg
        self.files = {}     # pathId -> (path, fileId, version)
        self.paths = {}     # path -> 1, used for duplicate detection
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
class RecipeFileError(Exception):
    """ Error raised for any problem found while loading or processing a
        recipe file; the message is available as .msg and via str(). """
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg

    def __repr__(self):
        return str(self)
# TODO: expose the scoring mechanism for loadRecipe lines
#
# Copyright (c) 2004-2005 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Contains the base Recipe class, default macros, and miscellaneous
components used by conary .recipe files
"""
#stdlib
import errno
from fnmatch import fnmatchcase
import imp
import inspect
from itertools import chain,izip
import new
import os
import string
import sys
import tempfile
import types
#conary
import build
import buildpackage
import usergroup
import conaryclient
import cook
from deps import deps
import destdirpolicy
import files
from lib import log
from lib import magic
from lib import util
from local import database
import macros
import packagepolicy
from repository import repository,trovesource
import source
import use
import updatecmd
import versions
# default macro values available to every recipe; recipes reference these
# as %(name)s and may override them
baseMacros = {
    # paths
    'prefix'		: '/usr',
    'sysconfdir'	: '/etc',
    'initdir'		: '%(sysconfdir)s/init.d',
    'lib'               : 'lib',  # may be overridden with 'lib64'
    'exec_prefix'	: '%(prefix)s',
    'bindir'		: '%(exec_prefix)s/bin',
    'essentialbindir'	: '/bin',
    'sbindir'		: '%(exec_prefix)s/sbin',
    'essentialsbindir'	: '/sbin',
    'libdir'		: '%(exec_prefix)s/%(lib)s',
    'essentiallibdir'	: '/%(lib)s',
    'libexecdir'	: '%(exec_prefix)s/libexec',
    'localstatedir'	: '/var',
    'servicedir'        : '/srv',
    'cachedir'		: '%(localstatedir)s/cache',
    'sharedstatedir'	: '%(prefix)s/com',
    'includedir'	: '%(prefix)s/include',
    'datadir'		: '%(prefix)s/share',
    'mandir'		: '%(datadir)s/man',
    'infodir'		: '%(datadir)s/info',
    'docdir'		: '%(datadir)s/doc',
    'thisdocdir'        : '%(docdir)s/%(name)s-%(version)s',
    'tagdescriptiondir' : '%(sysconfdir)s/conary/tags',
    'taghandlerdir'     : '%(libexecdir)s/conary/tags',
    'tagdatadir'        : '%(datadir)s/conary/tags',
    'testdir'	        : '%(localstatedir)s/conary/tests',
    'thistestdir'	: '%(testdir)s/%(name)s-%(version)s',
    'debuglibdir'       : '/usr/lib/debug', # no %(prefix)s or %(lib)s!
    'debugsrcdir'       : '/usr/src/debug', # no %(prefix)s!
    'userinfodir'       : '%(sysconfdir)s/conary/userinfo',
    'groupinfodir'      : '%(sysconfdir)s/conary/groupinfo',
    'buildlogpath'      : '%(debugsrcdir)s/buildlogs/%(name)s-%(version)s-log.bz2',
    # special component prefixes that the whole system needs to share
    'krbprefix'		: '%(exec_prefix)s/kerberos',
    'x11prefix'		: '%(exec_prefix)s/X11R6',
    # programs/options (empty ones are for documentation)
    'cc'		: 'gcc',
    'cxx'		: 'g++',
    'cxxflags'          : '',    # cxx specific flags
    'optflags'          : '-O2',
    'dbgflags'          : '-g',  # for debuginfo
    'cflags'            : '%(optflags)s %(dbgflags)s',
    'cppflags'		: '',  # just for providing in recipes
    'ldflags'		: '%(dbgflags)s',
    'mflags'		: '',  # make flags
    'parallelmflags'    : '',
    'sysroot'		: '',
    'os'		: 'linux',
    'target'		: '%(targetarch)s-unknown-linux',
    'debugedit'         : 'debugedit',
    'strip'             : 'eu-strip', # eu-strip for debuginfo, "strip -g" else
    'strip-archive'     : 'strip -g', # eu-strip segfaults on ar
    'monodis'           : '%(bindir)s/monodis',
    # filled in at cook time
    'buildbranch'       : '',
    'buildlabel'        : '',
}

# overrides layered on top of baseMacros when cross-compiling
crossMacros = {
    # set crossdir from cook, directly or indirectly, before adding the rest
    #'crossdir'		: 'cross-target',
    'prefix'		: '/opt/%(crossdir)s',
    'sysroot'		: '%(prefix)s/sys-root',
    'headerpath'	: '%(sysroot)s/usr/include',
}
def localImport(d, package, modules=()):
    """
    import a package into a non-global context.

    @param d: the context to import the module
    @type d: dict
    @param package: the name of the module to import
    @type package: str
    @param modules: a sequence of modules to import from the package.
    If a 2-tuple is in the sequence, rename the imported module to
    the second value in the tuple.
    @type modules: sequence of strings or tuples, or empty tuple

    Examples of translated import statements::
      from foo import bar as baz:
          localImport(d, "foo", (("bar", "baz"),))
      from bar import fred, george:
          localImport(d, "bar", ("fred", "george"))
      import os
          localImport(d, "os")
    """
    imported = __import__(package, d, {}, modules)
    if not modules:
        # plain "import package"
        d[package] = imported
    else:
        for entry in modules:
            if type(entry) is tuple:
                # "from package import src as target"
                srcName, targetName = entry
            else:
                srcName = targetName = entry
            d[targetName] = getattr(imported, srcName)
    # save a reference to the module inside this context, so it won't
    # be garbage collected until the context is deleted.
    d.setdefault('__localImportModules', []).append(imported)
def setupRecipeDict(d, filename):
    """ Populate d (a freshly created recipe module's namespace) with the
        standard set of names every recipe file may use: the build
        actions, the recipe base classes, util, a few stdlib modules,
        the flavor/use flags, and the recipe's own filename. """
    localImport(d, 'build', ('build', 'action'))
    localImport(d, 'build.recipe', ('PackageRecipe', 'GroupRecipe',
                                    'RedirectRecipe', 'FilesetRecipe',
                                    'DistroPackageRecipe',
                                    'BuildPackageRecipe',
                                    'CPackageRecipe',
                                    'AutoPackageRecipe',
                                    'UserInfoRecipe',
                                    'GroupInfoRecipe',
                                    'loadSuperClass', 'loadInstalled',
                                    'clearBuildReqs',
                                    # XXX when all recipes have been migrated
                                    # we can get rid of loadRecipe
                                    ('loadSuperClass', 'loadRecipe')))
    localImport(d, 'lib', ('util',))
    for x in ('os', 're', 'sys', 'stat'):
        localImport(d, x)
    localImport(d, 'build.use', ('Arch', 'Use', ('LocalFlags', 'Flags')))
    d['filename'] = filename
class RecipeLoader:
    """ Loads a recipe file into a new module of its own, executes it, and
        locates the recipe class it defines.  The chosen class (the one
        with both a name and a version attribute) is available via
        getRecipe(); every recipe class found is available via
        allRecipes(). """
    # recipe superclasses registered to be copied into each new module
    _recipesToCopy = []

    @classmethod
    def addRecipeToCopy(class_, recipeClass):
        """ Register a superclass recipe to be copied into every module
            created by this loader. """
        class_._recipesToCopy.append(recipeClass)

    def _copyReusedRecipes(self, moduleDict):
        # XXX HACK - get rid of this when we move the
        # recipe classes to the repository.
        # makes copies of some of the superclass recipes that are
        # created in this module. (specifically, the ones with buildreqs)
        for recipeClass in self._recipesToCopy:
            name = recipeClass.__name__
            # when we create a new class object, it needs its superclasses.
            # get the original superclass list and substitute in any
            # copies
            mro = list(inspect.getmro(recipeClass)[1:])
            newMro = []
            for superClass in mro:
                superName = superClass.__name__
                newMro.append(moduleDict.get(superName, superClass))
            recipeCopy = new.classobj(name, tuple(newMro),
                                      recipeClass.__dict__.copy())
            # copy buildRequires so edits to the copy don't leak back into
            # the original class
            recipeCopy.buildRequires = recipeCopy.buildRequires[:]
            moduleDict[name] = recipeCopy

    def __init__(self, filename, cfg=None, repos=None, component=None,
                 branch=None, ignoreInstalled=False):
        """ Load and execute the recipe at filename, then find the recipe
            class it defines.  Raises RecipeFileError on any problem with
            the recipe file's contents. """
        self.recipes = {}

        if filename[0] != "/":
            raise IOError, "recipe file names must be absolute paths"

        # the expected package name comes from the component name when
        # given, otherwise from the recipe file's basename
        if component:
            pkgname = component.split(':')[0]
        else:
            pkgname = filename.split('/')[-1]
            pkgname = pkgname[:-len('.recipe')]
        basename = os.path.basename(filename)
        self.file = basename.replace('.', '-')
        self.module = imp.new_module(self.file)
        sys.modules[self.file] = self.module
        # NOTE(review): f is never explicitly closed; the handle is only
        # released when it is garbage collected -- confirm intended
        f = open(filename)

        setupRecipeDict(self.module.__dict__, filename)

        # store cfg and repos, so that the recipe can load
        # recipes out of the repository
        self.module.__dict__['cfg'] = cfg
        self.module.__dict__['repos'] = repos
        self.module.__dict__['component'] = component
        self.module.__dict__['branch'] = branch
        self.module.__dict__['name'] = pkgname
        self.module.__dict__['ignoreInstalled'] = ignoreInstalled
        self.module.__dict__['loadedTroves'] = []
        self.module.__dict__['loadedSpecs'] = {}

        self._copyReusedRecipes(self.module.__dict__)

        # create the recipe class by executing the code in the recipe
        try:
            code = compile(f.read(), filename, 'exec')
        except SyntaxError, err:
            msg = ('Error in recipe file "%s": %s\n' %(basename, err))
            if err.offset is not None:
                # point a caret at the offending column
                msg += '%s%s^\n' %(err.text, ' ' * (err.offset-1))
            else:
                msg += err.text
            raise RecipeFileError(msg)

        use.resetUsed()
        exec code in self.module.__dict__

        # all recipes that could be loaded by loadRecipe are loaded;
        # get rid of our references to cfg and repos
        del self.module.__dict__['cfg']
        del self.module.__dict__['repos']
        del self.module.__dict__['component']
        del self.module.__dict__['branch']
        del self.module.__dict__['name']
        del self.module.__dict__['ignoreInstalled']

        found = False
        for (name, obj) in self.module.__dict__.items():
            if type(obj) is not types.ClassType:
                continue
            # if a recipe has been marked to be ignored (for example, if
            # it was loaded from another recipe by loadRecipe()
            # (don't use hasattr here, we want to check only the recipe
            # class itself, not any parent class
            if 'ignore' in obj.__dict__:
                continue
            recipename = getattr(obj, 'name', '')
            # make sure the class is derived from Recipe
            if ((issubclass(obj, PackageRecipe)
                        and obj is not PackageRecipe
                        and not issubclass(obj, UserGroupInfoRecipe)) or
                (issubclass(obj, RedirectRecipe)
                        and obj is not RedirectRecipe)):
                # package/redirect recipes: validate the name's first
                # character and reserved prefixes
                if recipename[0] not in string.ascii_letters + string.digits:
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name must start '
                        'with an ascii letter or digit.' %basename)
                if recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "group-"' %basename)
                if recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "fileset-"' %basename)
                if recipename.startswith('info-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": package name cannot '
                        'begin with "info-"' %basename)
            elif issubclass(obj, GroupRecipe) and obj is not GroupRecipe:
                if recipename and not recipename.startswith('group-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": group name must '
                        'begin with "group-"' %basename)
            elif issubclass(obj, FilesetRecipe) and obj is not FilesetRecipe:
                if recipename and not recipename.startswith('fileset-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": fileset name must '
                        'begin with "fileset-"' %basename)
            elif issubclass(obj, UserGroupInfoRecipe) and obj is not UserGroupInfoRecipe:
                if recipename and not recipename.startswith('info-'):
                    raise RecipeFileError(
                        'Error in recipe file "%s": info name must '
                        'begin with "info-"' %basename)
            else:
                # not a recipe class at all; skip it
                continue
            self.recipes[name] = obj
            obj.filename = filename
            if hasattr(obj, 'name') and hasattr(obj, 'version'):
                # only one class per file may carry both name and version;
                # that class becomes the recipe
                if found:
                    raise RecipeFileError(
                        'Error in recipe file "%s": multiple recipe classes '
                        'with both name and version exist' %basename)
                self.recipe = obj
                if '-' in obj.version:
                    raise RecipeFileError(
                        "Version string %s has illegal '-' character"
                        %obj.version)
                if obj.name != pkgname:
                    raise RecipeFileError(
                        "Recipe object name '%s' does not match "
                        "file/component name '%s'"
                        % (obj.name, pkgname))
                found = True
            else:
                raise RecipeFileError(
                    "Recipe in file/component '%s' did not contain both a name"
                    " and a version attribute." % pkgname)
        if found:
            # inherit any tracked flags that we found while loading parent
            # classes. Also inherit the list of recipes classes needed to load
            # this recipe.
            self.recipe._loadedTroves = self.module.__dict__['loadedTroves']
            self.recipe._loadedSpecs = self.module.__dict__['loadedSpecs']
            if self.recipe._trackedFlags is not None:
                use.setUsed(self.recipe._trackedFlags)
            self.recipe._trackedFlags = use.getUsed()
        else:
            # we'll get this if the recipe file is empty
            raise RecipeFileError(
                "file/component '%s' did not contain a valid recipe" % pkgname)

    def allRecipes(self):
        """ Return every recipe class found in the file, keyed by class
            name. """
        return self.recipes

    def getRecipe(self):
        """ Return the recipe class selected during loading. """
        return self.recipe

    def __del__(self):
        # remove our synthetic module from sys.modules; ignore errors if
        # it was already removed or never registered
        try:
            del sys.modules[self.file]
        except:
            pass
def _scoreLoadRecipeChoice(labelPath, version):
""" These labels all match the given labelPath.
We score them based on the number of matching labels in
the label path, and return the one that's "best".
The following rules should apply:
* if the labelPath is [bar, foo] and you are choosing between
/foo/bar/ and /foo/blah/bar, choose /foo/bar. Assumption
is that any other shadow/branch in the path may be from a
maintenance branch.
* if the labelPath is [bar] and you are choosing between
/foo/bar/ and /foo/blah/bar, choose /foo/bar.
"""
# FIXME I'm quite sure this heuristic will get replaced with
# something smarter/more sane as time progresses
score = 0
labelPath = [ x for x in reversed(labelPath)]
branch = version.branch()
while True:
label = branch.label()
try:
index = labelPath.index(label)
except ValueError:
index = -1
score += index
if not branch.hasParentBranch():
break
branch = branch.parentBranch()
return score
def getBestLoadRecipeChoices(labelPath, troveTups):
    """ Return the trove tuples whose versions score best against
        labelPath (see _scoreLoadRecipeChoice). """
    scored = [ (_scoreLoadRecipeChoice(labelPath, tup[1]), tup)
               for tup in troveTups ]
    bestScore = max(scored)[0]
    return [ tup for (score, tup) in scored if score == bestScore ]
def recipeLoaderFromSourceComponent(name, cfg, repos,
versionStr=None, labelPath=None,
ignoreInstalled=False,
filterVersions=False):
name = name.split(':')[0]
component = name + ":source"
filename = name + '.recipe'
if not labelPath:
labelPath = [cfg.buildLabel]
try:
pkgs = repos.findTrove(labelPath,
(component, versionStr, deps.DependencySet()))
except repository.TroveMissing:
raise RecipeFileError, 'cannot find source component %s' % component
if filterVersions:
pkgs = getBestLoadRecipeChoices(labelPath, pkgs)
if len(pkgs) > 1:
raise RecipeFileError("source component %s has multiple versions "
"on labelPath %s: %s" %(component,
', '.join(x.asString() for x in labelPath),
pkgs))
sourceComponent = repos.getTrove(*pkgs[0])
(fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name)
outF = os.fdopen(fd, "w")
inF = None
for (pathId, filePath, fileId, fileVersion) in sourceComponent.iterFileList():
if filePath == filename:
inF = repos.getFileContents([ (fileId, fileVersion) ])[0].get()
break
if not inF:
raise RecipeFileError("version %s of %s does not contain %s" %
(sourceComponent.getName(),
sourceComponent.getVersion().asString(),
filename))
util.copyfileobj(inF, outF)
del inF
del outF
try:
loader = RecipeLoader(recipeFile, cfg, repos, component,
sourceComponent.getVersion().branch(),
ignoreInstalled=ignoreInstalled)
finally:
os.unlink(recipeFile)
recipe = loader.getRecipe()
recipe._trove = sourceComponent.copy()
return (loader, sourceComponent.getVersion())
def loadSuperClass(troveSpec, label=None):
    """
    Load a recipe so that its class/data can be used as a super class for
    this recipe.

    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.

    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
    """
    # grab the caller's module globals; stack()[1] is depth-sensitive, so
    # this call must stay directly in this function body
    callerGlobals = inspect.stack()[1][0].f_globals
    # findInstalled=False: superclasses are located in the repository, not
    # resolved against the installed system.  (A dead local
    # 'ignoreInstalled = True' was removed here -- _loadRecipe reads that
    # flag from callerGlobals, never from this scope.)
    _loadRecipe(troveSpec, label, callerGlobals, False)
def loadInstalled(troveSpec, label=None):
    """
    Load a recipe so that its data about the installed system can be used
    in this recipe.

    If a complete version is not specified in the trovespec, the version of
    the recipe to load will be based on what is installed on the system.
    For example, if C{loadRecipe('foo')} is called, and package C{foo} with
    version C{/bar.org@bar:devel/4.1-1-1} is installed on the system, then
    C{foo:source} with version C{/bar.org@bar:devel/4.1-1} will be loaded.
    The recipe will also be loaded with the installed package's flavor.

    If the package is not installed anywhere on the system, the C{labelPath}
    will be searched without reference to the installed system.

    @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
    specification of the trove to load.  The flavor given will be used
    to find the given recipe and also to set the flavor of the loaded recipe.
    @param label: label string to search for the given recipe in place of
    using the default C{labelPath}.
    If not specified, the labels listed in the version in the including
    recipe will be used as the c{labelPath} to search.
    For example, if called from recipe with version
    C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
    the default C{labelPath} that would be constructed would be:
    C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
    """
    # globals of the calling recipe module; stack()[1] is depth-sensitive,
    # so this lookup must remain inline in this function
    callerGlobals = inspect.stack()[1][0].f_globals
    # findInstalled=True: prefer the version/flavor installed on the system
    _loadRecipe(troveSpec, label, callerGlobals, True)
def _loadRecipe(troveSpec, label, callerGlobals, findInstalled):
    """ See docs for loadInstalledPackage and loadSuperClass. """

    def _findInstalledVersion(db, labelPath, name, versionStr, flavor):
        """ Specialized search of the installed system along a labelPath,
            defaulting to searching the whole system if the trove is not
            found along the label path.

            The version and flavor of the first found installed trove is
            returned, or C{None} if no trove is found.
        """
        # first search on the labelPath.
        try:
            troves = db.findTrove(labelPath, (name, versionStr, flavor))
            if len(troves) > 1:
                raise RuntimeError, (
                                'Multiple troves could match loadInstalled'
                                ' request %s' % troveSpec)
            if troves:
                # (source version, flavor) of the installed match
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        if labelPath is None:
            # labelPath=None already meant "search everywhere" above
            return None
        # fall back to searching the entire installed system
        try:
            troves = db.findTrove(None, (name, versionStr, flavor))
            if len(troves) > 1:
                raise RuntimeError, (
                                'Multiple troves could match loadRecipe'
                                ' request for %s' % name)
            if troves:
                return troves[0][1].getSourceVersion(), troves[0][2]
        except repository.TroveNotFound:
            pass
        return None

    # context supplied by the recipe module that invoked load*()
    cfg = callerGlobals['cfg']
    repos = callerGlobals['repos']
    branch = callerGlobals['branch']
    parentPackageName = callerGlobals['name']
    if 'ignoreInstalled' in callerGlobals:
        alwaysIgnoreInstalled = callerGlobals['ignoreInstalled']
    else:
        alwaysIgnoreInstalled = False

    # remember which use flags were tracked so we can restore them at the end
    oldUsed = use.getUsed()
    name, versionStr, flavor = updatecmd.parseTroveSpec(troveSpec)

    if name.endswith('.recipe'):
        file = name
        name = name[:-len('.recipe')]
    else:
        file = name + '.recipe'

    #first check to see if a filename was specified, and if that
    #recipe actually exists.
    loader = None
    if not (label or versionStr or flavor):
        if name[0] != '/':
            # relative path: resolve next to the recipe that called us
            parentFilePath = callerGlobals['filename']
            localfile = os.path.dirname(parentFilePath) + '/' + file
        else:
            localfile = name + '.recipe'
        if os.path.exists(localfile):
            if flavor:
                # NOTE(review): flavor is always falsy inside this branch
                # (the enclosing 'if' requires "not flavor"), so this
                # appears to be dead code -- confirm
                oldBuildFlavor = cfg.buildFlavor
                cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
                use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
            loader = RecipeLoader(localfile, cfg,
                                  ignoreInstalled=alwaysIgnoreInstalled)

    if not loader:
        # build the labelPath to search: explicit label, else the labels
        # walking up the calling recipe's branch, else None (search all)
        if label:
            labelPath = [versions.Label(label)]
        elif branch:
            # if no labelPath was specified, search backwards through the
            # labels on the current branch.
            labelPath = [branch.label()]
            while branch.hasParentBranch():
                branch = branch.parentBranch()
                labelPath.append(branch.label())
        else:
            labelPath = None

        if findInstalled and not alwaysIgnoreInstalled:
            # look on the local system to find a trove that is installed that
            # matches this loadrecipe request. Use that trove's version
            # and flavor information to grab the source out of the repository
            db = database.Database(cfg.root, cfg.dbPath)
            parts = _findInstalledVersion(db, labelPath, name,
                                          versionStr, flavor)
            if parts:
                version, flavor = parts
                # NOTE(review): 'version' already came from
                # getSourceVersion(); the second getSourceVersion() call is
                # presumably a harmless no-op -- confirm
                versionStr = version.getSourceVersion().asString()
        if flavor:
            # override the current flavor with the flavor found in the
            # installed trove (or the troveSpec flavor, if no installed
            # trove was found.
            oldBuildFlavor = cfg.buildFlavor
            cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
            use.setBuildFlagsFromFlavor(name, cfg.buildFlavor)
        loader = recipeLoaderFromSourceComponent(name, cfg, repos,
                                     labelPath=labelPath,
                                     versionStr=versionStr,
                                     ignoreInstalled=alwaysIgnoreInstalled,
                                     filterVersions=True)[0]

    # inject every class the loader found into the caller's namespace
    for name, recipe in loader.allRecipes().items():
        # hide all recipes from RecipeLoader - we don't want to return
        # a recipe that has been loaded by loadRecipe
        recipe.ignore = 1
        callerGlobals[name] = recipe
        if recipe._trove:
            # create a tuple with the version and flavor information needed to
            # load this trove again. You might be able to rely on the
            # flavor that the trove was built with, but when you load a
            # recipe that is not a superclass of the current recipe,
            # its flavor is not assumed to be relevant to the resulting
            # package (otherwise you might have completely irrelevant flavors
            # showing up for any package that loads the python recipe, e.g.)
            usedFlavor = use.createFlavor(name, recipe._trackedFlags)
            troveTuple = (recipe._trove.getName(), recipe._trove.getVersion(),
                          usedFlavor)
            callerGlobals['loadedTroves'].extend(recipe._loadedTroves)
            callerGlobals['loadedTroves'].append(troveTuple)
            callerGlobals['loadedSpecs'][troveSpec] = (troveTuple, recipe)
    if flavor:
        # undo the temporary buildFlavor override from above
        cfg.buildFlavor = oldBuildFlavor
        # must set this flavor back after the above use.createFlavor()
        use.setBuildFlagsFromFlavor(parentPackageName, cfg.buildFlavor)
    # stash a reference to the module in the namespace
    # of the recipe that loaded it, or else it will be destroyed
    callerGlobals[os.path.basename(file).replace('.', '-')] = loader
    # return the tracked flags to their state before loading this recipe
    use.resetUsed()
    use.setUsed(oldUsed)
class _sourceHelper:
def __init__(self, theclass, recipe):
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.recipe._sources.append(self.theclass(self.recipe, *args, **keywords))
def clearBuildReqs(*buildReqs):
    """ Clears inherited build requirement lists of a given set of packages,
        or all packages if none listed.
    """
    def _removePackages(class_, pkgs):
        # with no names given, wipe the whole list; otherwise remove only
        # the named requirements that are actually present
        if not pkgs:
            class_.buildRequires = []
        else:
            for pkg in pkgs:
                if pkg in class_.buildRequires:
                    class_.buildRequires.remove(pkg)

    # operate on the calling recipe module's namespace; stack()[1] is
    # depth-sensitive, keep this call at function-body level
    callerGlobals = inspect.stack()[1][0].f_globals
    classes = []
    for value in callerGlobals.itervalues():
        if inspect.isclass(value) and issubclass(value, PackageRecipe):
            classes.append(value)

    # strip the requirements from each class and every PackageRecipe
    # ancestor in its MRO (inherited lists live on the bases)
    for class_ in classes:
        _removePackages(class_, buildReqs)

        for base in inspect.getmro(class_):
            if issubclass(base, PackageRecipe):
                _removePackages(base, buildReqs)
class _recipeHelper:
def __init__(self, list, recipe, theclass):
self.list = list
self.theclass = theclass
self.recipe = recipe
def __call__(self, *args, **keywords):
self.list.append(self.theclass(self.recipe, *args, **keywords))
class _policyUpdater:
def __init__(self, theobject):
self.theobject = theobject
def __call__(self, *args, **keywords):
self.theobject.updateArgs(*args, **keywords)
class Recipe:
    """Abstract base class for all recipe types.

    Instantiating Recipe itself trips an assertion; only subclasses may be
    created.  The bookkeeping attributes below deliberately live on the
    class so recipe-loading code can populate them per generated class.
    """
    _trove = None
    _trackedFlags = None
    _loadedTroves = []
    _loadedSpecs = {}

    def __init__(self):
        # abstract: direct instantiation is a programming error
        assert(self.__class__ is not Recipe)

    @classmethod
    def getLoadedTroves(class_):
        """Return the trove tuples recorded for recipes this class loaded."""
        return class_._loadedTroves

    @classmethod
    def getLoadedSpecs(class_):
        """Return the mapping of trove specs to loaded (tuple, recipe) pairs."""
        return class_._loadedSpecs

    def __repr__(self):
        return "<%s Object>" % self.__class__
class PackageRecipe(Recipe):
    """Base class for recipes that cook packages: tracks sources, build
    actions, and policy, and drives the unpack/build/policy phases."""
    buildRequires = []
    Flags = use.LocalFlags
    explicitMainDir = False

    def mainDir(self, new=None, explicit=True):
        """Get or set the main build directory (macro-interpolated)."""
        if new:
            self.theMainDir = new % self.macros
            self.macros.maindir = self.theMainDir
            # once any caller sets it explicitly, it stays explicit
            self.explicitMainDir |= explicit
        return self.theMainDir

    def nameVer(self):
        # canonical "<name>-<version>" string, used as the default mainDir
        return '-'.join((self.name, self.version))

    def cleanup(self, builddir, destdir):
        # honor cfg.noClean (if set) by leaving the build dir in place
        if 'noClean' in self.cfg.__dict__ and self.cfg.noClean:
            pass
        else:
            util.rmtree(builddir)

    def sourceMap(self, path):
        """Record a source path by basename, tracking conflicts between
        distinct paths that share the same basename."""
        basepath = os.path.basename(path)
        if basepath in self.sourcePathMap:
            if basepath == path:
                # we only care about truly different source locations with the
                # same basename
                return
            if basepath in self.pathConflicts:
                self.pathConflicts[basepath].append(path)
            else:
                self.pathConflicts[basepath] = [
                    # previous (first) instance
                    self.sourcePathMap[basepath],
                    # this instance
                    path
                ]
        else:
            self.sourcePathMap[basepath] = path

    def fetchAllSources(self):
        """
        returns a list of file locations for all the sources in
        the package recipe
        """
        # first make sure we had no path conflicts:
        if self.pathConflicts:
            errlist = []
            for basepath in self.pathConflicts.keys():
                errlist.extend([x for x in self.pathConflicts[basepath]])
            raise RecipeFileError, '\n'.join(errlist)
        self.prepSources()
        files = []
        for src in self._sources:
            f = src.fetch()
            if f:
                # fetch() may return one location or a list of them
                if type(f) in (tuple, list):
                    files.extend(f)
                else:
                    files.append(f)
        return files

    def checkBuildRequirements(self, cfg, sourceVersion, ignoreDeps=False):
        """ Checks to see if the build requirements for the recipe
            are installed
        """

        def _filterBuildReqsByVersionStr(versionStr, troves):
            # keep only troves whose version labels match the buildreq's
            # ":tag" or "@namespace:tag" restriction
            if not versionStr:
                return troves
            versionMatches = []
            if versionStr.find('@') == -1:
                if versionStr.find(':') == -1:
                    log.warning('Deprecated buildreq format. Use '
                                ' foo=:tag, not foo=tag')
                    versionStr = ':' + versionStr
            # we don't allow full version strings or just releases
            if versionStr[0] not in ':@':
                raise RecipeFileError("Unsupported buildReq format")

            for trove in troves:
                labels = trove.getVersion().iterLabels()
                if versionStr[0] == ':':
                    branchTag = versionStr[1:]
                    branchTags = [ x.getLabel() for x in labels ]
                    if branchTag in branchTags:
                        versionMatches.append(trove)
                else:
                    # versionStr must begin with an @
                    branchNames = []
                    for label in labels:
                        branchNames.append('@%s:%s' % (label.getNamespace(),
                                                       label.getLabel()))
                    if versionStr in branchNames:
                        versionMatches.append(trove)
            return versionMatches

        def _filterBuildReqsByFlavor(flavor, troves):
            # newest-first scan for a trove whose flavor strongly
            # satisfies the requested one; no flavor -> newest wins
            troves.sort(lambda a, b: a.getVersion().__cmp__(b.getVersion()))
            if not flavor:
                return troves[-1]
            # NOTE(review): iterates the enclosing-scope 'versionMatches'
            # rather than the 'troves' parameter; at the only call site
            # they are the same object, but this is fragile -- confirm
            for trove in reversed(versionMatches):
                troveFlavor = trove.getFlavor()
                if troveFlavor.stronglySatisfies(flavor):
                    return trove

        db = database.Database(cfg.root, cfg.dbPath)
        # newest timestamp of the source version being cooked
        time = sourceVersion.timeStamps()[-1]
        reqMap = {}
        missingReqs = []
        for buildReq in self.buildRequires:
            (name, versionStr, flavor) = updatecmd.parseTroveSpec(buildReq)
            # XXX move this to use more of db.findTrove's features, instead
            # of hand parsing
            try:
                troves = db.trovesByName(name)
                troves = db.getTroves(troves)
            except repository.TroveNotFound:
                missingReqs.append(buildReq)
                continue
                # NOTE(review): unreachable 'break' after 'continue';
                # looks vestigial -- confirm against upstream history
                break
            versionMatches = _filterBuildReqsByVersionStr(versionStr, troves)

            if not versionMatches:
                missingReqs.append(buildReq)
                continue
            match = _filterBuildReqsByFlavor(flavor, versionMatches)
            if match:
                reqMap[buildReq] = match
            else:
                missingReqs.append(buildReq)

        if missingReqs:
            if not ignoreDeps:
                log.error("Could not find the following troves "
                          "needed to cook this recipe:\n"
                          "%s" % '\n'.join(sorted(missingReqs)))
                raise cook.CookError, 'unresolved build dependencies'
        self.buildReqMap = reqMap

    def extraSource(self, action):
        """
        extraSource allows you to append a source list item that is
        not a part of source.py.  Be aware when writing these source
        list items that you are writing conary internals!  In particular,
        anything that needs to add a source file to the repository will
        need to implement fetch(), and all source files will have to be
        sought using the lookaside cache.
        """
        self._sources.append(action)

    def prepSources(self):
        for source in self._sources:
            source.doPrep()

    def processResumeList(self, resume):
        """Parse a resume spec like '10', '10:20', ':20,30:' into
        [begin, end] line-number pairs stored on self.resumeList."""
        resumelist = []
        if resume:
            lines = resume.split(',')
            for line in lines:
                if ':' in line:
                    begin, end = line.split(':')
                    # empty begin/end mean "from the start"/"to the end"
                    if begin:
                        begin = int(begin)
                    if end:
                        end = int(end)
                    resumelist.append([begin, end])
                else:
                    if len(lines) == 1:
                        # single number: resume from that line onwards
                        resumelist.append([int(line), False])
                    else:
                        # one of several: resume exactly that line
                        resumelist.append([int(line), int(line)])
        self.resumeList = resumelist

    def iterResumeList(self, actions):
        """Yield only the actions whose source line numbers fall within
        the ranges prepared by processResumeList()."""
        resume = self.resumeList
        resumeBegin = resume[0][0]
        resumeEnd = resume[0][1]
        for action in actions:
            if not resumeBegin or action.linenum >= resumeBegin:
                if not resumeEnd or action.linenum <= resumeEnd:
                    yield action
                elif resumeEnd:
                    # passed the end of the current range: advance to the
                    # next range (if any) and re-test this action
                    resume = resume[1:]
                    if not resume:
                        return
                    resumeBegin = resume[0][0]
                    resumeEnd = resume[0][1]
                    if action.linenum == resumeBegin:
                        yield action

    def unpackSources(self, builddir, destdir, resume=None):
        self.macros.builddir = builddir
        self.macros.destdir = destdir

        if resume == 'policy':
            # policy-only resume: skip source handling entirely
            return
        elif resume:
            log.debug("Resuming on line(s) %s" % resume)
            # note resume lines must be in order
            self.processResumeList(resume)
            for source in self.iterResumeList(self._sources):
                source.doPrep()
                source.doAction()
        else:
            for source in self._sources:
                source.doPrep()
                source.doAction()

    def extraBuild(self, action):
        """
        extraBuild allows you to append a build list item that is
        not a part of build.py.  Be aware when writing these build
        list items that you are writing conary internals!
        """
        self._build.append(action)

    def doBuild(self, buildPath, resume=None):
        builddir = os.sep.join((buildPath, self.mainDir()))
        self.macros.builddir = builddir
        self.magic = magic.magicCache(self.macros.destdir)
        if resume == 'policy':
            return
        if resume:
            for bld in self.iterResumeList(self._build):
                bld.doAction()
        else:
            for bld in self._build:
                bld.doAction()

    def doDestdirProcess(self):
        for post in self.destdirPolicy:
            post.doProcess(self)

    def getPackages(self):
        # policies look at the recipe instance for all information
        for policy in self.packagePolicy:
            policy.doProcess(self)
        return self.autopkg.getComponents()

    def setByDefaultOn(self, includeSet):
        self.byDefaultIncludeSet = includeSet

    def setByDefaultOff(self, excludeSet):
        self.byDefaultExcludeSet = excludeSet

    def byDefault(self, compName):
        """Return whether component compName is installed by default,
        checking full names before bare ':component' names."""
        c = compName[compName.index(':'):]
        if compName in self.byDefaultIncludeSet:
            # intended for foo:bar overrides :bar in excludelist
            return True
        if compName in self.byDefaultExcludeSet:
            # explicitly excluded
            return False
        if c in self.byDefaultIncludeSet:
            return True
        if c in self.byDefaultExcludeSet:
            return False
        return True

    def disableParallelMake(self):
        self.macros._override('parallelmflags', '')

    def populateLcache(self):
        """
        Populate a repository lookaside cache
        """
        recipeClass = self.__class__
        repos = self.laReposCache.repos

        # build a list containing this recipe class and any ancestor class
        # from which it descends
        classes = [ recipeClass ]
        bases = list(recipeClass.__bases__)
        while bases:
            parent = bases.pop()
            bases.extend(list(parent.__bases__))
            if issubclass(parent, PackageRecipe):
                classes.append(parent)

        # reverse the class list, this way the files will be found in the
        # youngest descendant first
        classes.reverse()

        # populate the repository source lookaside cache from the :source
        # components
        for rclass in classes:
            if not rclass._trove:
                continue
            srcName = rclass._trove.getName()
            srcVersion = rclass._trove.getVersion()
            for f in repos.iterFilesInTrove(srcName, srcVersion,
                                            deps.DependencySet(),
                                            withFiles=True):
                pathId, path, fileId, version, fileObj = f
                assert(path[0] != "/")
                # we might need to retrieve this source file
                # to enable a build, so we need to find the
                # sha1 hash of it since that's how it's indexed
                # in the file store
                if isinstance(fileObj, files.RegularFile):
                    # it only makes sense to fetch regular files, skip
                    # anything that isn't
                    self.laReposCache.addFileHash(srcName, srcVersion, pathId,
                        path, fileId, version, fileObj.contents.sha1())

    def isatty(self, value=None):
        # getter/setter: with an argument, records it; always returns the
        # current tty flag
        if value is not None:
            self._tty = value
        return self._tty

    def __getattr__(self, name):
        """
        Allows us to dynamically suck in namespace of other modules
        with modifications.
         - The public namespace of the build module is accessible,
           and build objects are created and put on the build list
           automatically when they are referenced.
         - The public namespaces of the policy modules are accessible;
           policy objects already on their respective lists are returned,
           policy objects not on their respective lists are added to
           the end of their respective lists like build objects are
           added to the build list.
        """
        if not name.startswith('_'):
            if name.startswith('add'):
                # addFoo -> source.Foo wrapped to append to self._sources
                return _sourceHelper(source.__dict__[name[3:]], self)
            if name in build.__dict__:
                return _recipeHelper(self._build, self, build.__dict__[name])
            for (policy, list) in (
                (destdirpolicy, self.destdirPolicy),
                (packagepolicy, self.packagePolicy)):
                if name in policy.__dict__:
                    policyClass = policy.__dict__[name]
                    # existing policy object gets updated in place;
                    # otherwise a new one is appended to the list
                    for policyObj in list:
                        if isinstance(policyObj, policyClass):
                            return _policyUpdater(policyObj)
                    return _recipeHelper(list, self, policyClass)
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError, name

    def __delattr__(self, name):
        """
        Allows us to delete policy items from their respective lists
        by deleting a name in the recipe self namespace.  For example,
        to remove the EtcConfig package policy from the package policy
        list, one could do::
         del self.EtcConfig
        This would prevent the EtcConfig package policy from being
        executed.  The policy objects are carefully ordered in the
        default policy lists; deleting a policy object and then
        referencing it again will cause it to show up at the end of
        the list.  Don't do that.

        In general, delete policy only as a last resort; you can
        usually disable policy entirely with the keyword argument::
         exceptions='.*'
        """
        for (policy, list) in (
            (destdirpolicy, self.destdirPolicy),
            (packagepolicy, self.packagePolicy)):
            if name in policy.__dict__:
                policyClass = policy.__dict__[name]
                for index in range(len(list)):
                    policyObj = list[index]
                    if isinstance(policyObj, policyClass):
                        del list[index]
                        return
        del self.__dict__[name]

    def _includeSuperClassBuildReqs(self):
        """ Include build requirements from super classes by searching
            up the class hierarchy for buildRequires.  You can only
            override this currenly by calling
            <superclass>.buildRequires.remove()
        """
        buildReqs = set()
        for base in inspect.getmro(self.__class__):
            buildReqs.update(getattr(base, 'buildRequires', []))
        self.buildRequires = list(buildReqs)

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
        # NOTE: the mutable default extraMacros={} is only read, never
        # mutated, so it is safe here
        Recipe.__init__(self)
        self._sources = []
        self._build = []
        self._includeSuperClassBuildReqs()
        self.destdirPolicy = destdirpolicy.DefaultPolicy(self)
        self.packagePolicy = packagepolicy.DefaultPolicy(self)
        self.byDefaultIncludeSet = frozenset()
        self.byDefaultExcludeSet = frozenset()
        self.cfg = cfg
        self.laReposCache = laReposCache
        self.srcdirs = srcdirs
        self.macros = macros.Macros()
        self.macros.update(baseMacros)
        self.macros.update(use.Arch._getMacros())
        # allow for architecture not to be set -- this could happen
        # when storing the recipe e.g.
        for key in cfg.macroKeys():
            self.macros._override(key, cfg['macros.' + key])
        self.macros.name = self.name
        self.macros.version = self.version
        self.packages = { self.name : True }
        if extraMacros:
            self.macros.update(extraMacros)
        self.mainDir(self.nameVer(), explicit=False)
        self.sourcePathMap = {}
        self.pathConflicts = {}
class UserGroupInfoRecipe(PackageRecipe):
    """Base recipe for user/group "info" packages, which produce a single
    synthetic info-<name>:<type> component containing one tagged file."""
    # abstract base class
    ignore = 1

    def __init__(self, cfg, laReposCache, srcdirs, extraMacros={}):
        PackageRecipe.__init__(self, cfg, laReposCache, srcdirs, extraMacros)
        # info recipes carry no ordinary files, so normal policy is skipped
        self.destdirPolicy = []
        self.packagePolicy = []
        self.requires = []
        # expected to be filled in before getPackages() runs
        self.infofilename = None
        self.realfilename = None

    def getPackages(self):
        # build the single synthetic component and attach its deps
        comp = buildpackage.BuildComponent(
            'info-%s:%s' %(self.infoname, self.type), self)
        f = comp.addFile(self.infofilename, self.realfilename)
        f.tags.set("%s-info" %self.type)
        self.addProvides(f)
        self.addRequires(f)
        comp.provides.union(f.provides())
        comp.requires.union(f.requires())
        return [comp]

    def addProvides(self, f):
        # overridden by subclasses to attach provides to the info file
        pass

    def addRequires(self, f):
        if not self.requires:
            return
        depSet = deps.DependencySet()
        for info, type in self.requires:
            if type == 'user':
                depClass = deps.UserInfoDependencies
            else:
                depClass = deps.GroupInfoDependencies
            depSet.addDep(depClass, deps.Dependency(info, []))
        f.requires.set(depSet)

    def requiresUser(self, user):
        self.requires.append((user, 'user'))

    def requiresGroup(self, group):
        self.requires.append((group, 'group'))

    def __getattr__(self, name):
        # expose the usergroup module's public build actions as helpers
        # that append to this recipe's build list
        if not name.startswith('_'):
            if name in usergroup.__dict__:
                return _recipeHelper(self._build, self,
                                     usergroup.__dict__[name])
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError, name
class UserInfoRecipe(UserGroupInfoRecipe):
    """Info recipe describing a single user (abstract base)."""
    type = 'user'
    # abstract base class
    ignore = 1

    def addProvides(self, f):
        # the info file provides both the user and its primary group
        provided = deps.DependencySet()
        provided.addDep(deps.UserInfoDependencies,
                        deps.Dependency(self.infoname, []))
        provided.addDep(deps.GroupInfoDependencies,
                        deps.Dependency(self.groupname, []))
        f.provides.set(provided)
class GroupInfoRecipe(UserGroupInfoRecipe):
    """Info recipe describing a single group (abstract base)."""
    type = 'group'
    # abstract base class
    ignore = 1

    def addProvides(self, f):
        # the info file provides exactly one group dependency
        provided = deps.DependencySet()
        provided.addDep(deps.GroupInfoDependencies,
                        deps.Dependency(self.infoname, []))
        f.provides.set(provided)
# XXX the next four classes will probably migrate to the repository
# somehow, but not until we have figured out how to do this without
# requiring that every recipe have a loadSuperClass line in it.
class DistroPackageRecipe(PackageRecipe):
    """
    Most packages in the distribution should descend from this class,
    directly or indirectly, except for direct build requirements of
    this class.  This package differs from the C{PackageRecipe}
    class only by providing explicit build requirements.
    """
    # :lib in here is only for runtime, not to link against.
    # Any package that needs to link should still specify the :devellib
    buildRequires = [
        'filesystem:runtime',
        'setup:runtime',
        'python:runtime',
        'python:lib',
        'conary:runtime',
        'conary:lib',
        'conary:python',
        'sqlite:lib',
        'bzip2:runtime',
        'gzip:runtime',
        'tar:runtime',
        'cpio:runtime',
        'patch:runtime',
    ]
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# NOTE(review): presumably registers this class to be copied into each
# loaded recipe's namespace -- confirm against RecipeLoader.addRecipeToCopy
RecipeLoader.addRecipeToCopy(DistroPackageRecipe)
class BuildPackageRecipe(DistroPackageRecipe):
    """
    Packages that need to be built with the make utility and basic standard
    shell tools should descend from this recipe in order to automatically
    have a reasonable set of build requirements.  This package differs
    from the C{PackageRecipe} class only by providing explicit build
    requirements.
    """
    # Again, no :devellib here
    buildRequires = [
        'coreutils:runtime',
        'make:runtime',
        'mktemp:runtime',
        # all the rest of these are for configure
        'findutils:runtime',
        'gawk:runtime',
        'grep:runtime',
        'sed:runtime',
        'diffutils:runtime',
    ]
    # inherit the parent class's requirements on top of our own
    buildRequires.extend(DistroPackageRecipe.buildRequires)
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# register with RecipeLoader so loaded recipes can use this base class
RecipeLoader.addRecipeToCopy(BuildPackageRecipe)
class CPackageRecipe(BuildPackageRecipe):
    """
    Most packages should descend from this recipe in order to automatically
    have a reasonable set of build requirements for a package that builds
    C source code to binaries.  This package differs from the
    C{PackageRecipe} class only by providing explicit build requirements.
    """
    buildRequires = [
        'binutils:runtime',
        'binutils:lib',
        'binutils:devellib',
        'gcc:runtime',
        'gcc:lib',
        'gcc:devellib',
        'glibc:runtime',
        'glibc:lib',
        'glibc:devellib',
        'glibc:devel',
        'debugedit:runtime',
    ]
    # inherit the parent class's requirements on top of our own
    buildRequires.extend(BuildPackageRecipe.buildRequires)
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1
# register with RecipeLoader so loaded recipes can use this base class
RecipeLoader.addRecipeToCopy(CPackageRecipe)
class AutoPackageRecipe(CPackageRecipe):
    """
    Recipe class for simple packages built with auto* tools.  Child
    classes should provide the C{unpack()} method for populating the
    source list.  To call policy, implement the C{policy()} method and
    put any necessary policy invocations there.  Next mostly likely is
    to provide a C{makeinstall()} method if C{MakeInstall()} is
    insufficient for the package.  Least likely to need overriding
    are C{configure()} if C{Configure()} is insufficient, and
    C{make()} if C{Make()} is insufficient.
    """
    Flags = use.LocalFlags
    # abstract base class
    ignore = 1

    def setup(r):
        # template method: fixed cook order; steps overridden by subclasses
        r.unpack()
        r.configure()
        r.make()
        r.makeinstall()
        r.policy()

    def unpack(r):
        # subclasses must populate the source list here
        pass
    def configure(r):
        r.Configure()
    def make(r):
        r.Make()
    def makeinstall(r):
        r.MakeInstall()
    def policy(r):
        # optional extra policy invocations; default is none
        pass
# register with RecipeLoader so loaded recipes can use this base class
RecipeLoader.addRecipeToCopy(AutoPackageRecipe)
class SingleGroup:
    """Accumulates the contents of one group being cooked: trove specs to
    add/remove, nested groups, dependency settings, and -- after
    findTroves()/calcSize() -- the resolved trove list and total size."""

    def addTrove(self, name, versionStr = None, flavor = None, source = None,
                 byDefault = None, ref = None):
        self.addTroveList.append((name, versionStr, flavor, source,
                                  byDefault, ref))

    def removeTrove(self, name, versionStr = None, flavor = None):
        self.removeTroveList.append((name, versionStr, flavor))

    def addAllTroves(self, reference, byDefault = None):
        # queue every trove contained in the given reference for inclusion
        self.addReferenceList.append((reference, byDefault))

    def addNewGroup(self, name, byDefault = None):
        # include another group defined in this same recipe, by name
        self.newGroupList.append([ name, byDefault ])

    def setByDefault(self, byDefault):
        assert(isinstance(byDefault, bool))
        self.byDefault = byDefault

    def _foundTrove(self, troveTup, size, byDefault, isRedirect):
        self.troves[troveTup] = (size, byDefault)
        if isRedirect:
            # we check later to ensure that all redirects added
            # by addTrove lines (or other means) are removed
            # by removeTrove lines later.
            self.redirects.add(troveTup)

    def findTroves(self, troveMap, repos):
        # resolve adds, apply removes, then reject leftover redirects
        self._findTroves(troveMap)
        self._removeTroves(repos)
        self._checkForRedirects()

    def autoResolveDeps(self, cfg, repos, labelPath, includedTroves):
        if self.autoResolve:
            self._resolveDependencies(cfg, repos, labelPath, includedTroves)

    def checkDependencies(self, cfg, includedTroves):
        if self.depCheck:
            failedDeps = self._checkDependencies(cfg, includedTroves)
            if failedDeps:
                return failedDeps

    def calcSize(self):
        """Total the member sizes (None if any member size is unknown) and
        build the name -> [(version, flavor, byDefault)] mapping."""
        self.size = 0
        validSize = True
        for (n,v,f), (size, byDefault) in self.troves.iteritems():
            if size is None:
                # one unknown size makes the whole total unknown
                validSize = False
                self.size = None
            if validSize:
                self.size += size
            l = self.troveVersionFlavors.setdefault(n,[])
            l.append((v,f,byDefault))

    def _findTroves(self, troveMap):
        """ given a trove map which already contains a dict for all queries
            needed for all groups cooked, pick out those troves that
            are relevant to this group.
        """
        validSize = True  # NOTE(review): assigned but never used here
        self.troves = {}
        for (name, versionStr, flavor, source, byDefault, refSource) \
                                                    in self.addTroveList:
            troveList = troveMap[refSource][name, versionStr, flavor]
            if byDefault is None:
                # fall back to the group-wide default
                byDefault = self.byDefault
            for (troveTup, size, isRedirect) in troveList:
                self._foundTrove(troveTup, size, byDefault, isRedirect)
        # these are references which were used in addAllTroves() commands
        for refSource, byDefault in self.addReferenceList:
            troveList = refSource.getSourceTroves()
            troveTups = [ x for x in chain(
                                *[x.iterTroveList() for x in troveList])]
            troveList = refSource.getTroves(troveTups, withFiles=False)
            if byDefault is None:
                byDefault = self.byDefault
            for (troveTup, trv) in izip(troveTups, troveList):
                self._foundTrove(troveTup, trv.getSize(), byDefault,
                                 trv.isRedirect())

    def getDefaultTroves(self):
        # trove tuples whose byDefault flag is set
        return [ x[0] for x in self.troves.iteritems() if x[1][1] ]

    def _resolveDependencies(self, cfg, repos, labelPath, includedTroves):
        """ adds the troves needed to to resolve all open dependencies
            in this group. Will raise an error if not all dependencies
            can be resolved.
        """
        #FIXME: this should probably be able to resolve against
        # other trove source than the repository.

        # set up configuration: solve against an empty in-memory db/root
        # NOTE(review): the restore below is not in a finally block, so an
        # exception leaves cfg mutated -- confirm whether that matters here
        oldDbPath = cfg.dbPath
        cfg.setValue('dbPath', ':memory:')
        oldRoot = cfg.root
        cfg.setValue('root', ':memory:')
        oldInstallLabelPath = cfg.installLabelPath
        resolveLabelPath = labelPath  # NOTE(review): assigned but unused
        cfg.installLabelPath = labelPath
        oldAutoResolve = cfg.autoResolve
        cfg.autoResolve = True

        # set up a conaryclient to do the dep solving
        client = conaryclient.ConaryClient(cfg)

        if self.checkOnlyByDefaultDeps:
            troveList = self.getDefaultTroves() + includedTroves
        else:
            troveList = list(self.troves) + includedTroves

        # build a list of the troves that we're checking so far
        troves = [ (n, (None, None), (v, f), True) for (n,v,f) in troveList]
        updJob, suggMap = client.updateChangeSet(troves, recurse = True,
                                                 resolveDeps = True,
                                                 test = True)
        # restore config
        cfg.setValue('dbPath', oldDbPath)
        cfg.setValue('root', oldRoot)
        cfg.installLabelPath = oldInstallLabelPath
        cfg.autoResolve = oldAutoResolve

        # everything the resolver suggested becomes part of the group
        neededTups = set(chain(*suggMap.itervalues()))
        troves = repos.getTroves(neededTups, withFiles=False)
        for troveTup, trv in izip(neededTups, troves):
            self._foundTrove(troveTup, trv.getSize(), self.byDefault,
                             trv.isRedirect())

    def _checkDependencies(self, cfg, includedTroves):
        if self.checkOnlyByDefaultDeps:
            troveList = self.getDefaultTroves()
        else:
            troveList = list(self.troves)
        troveList += includedTroves

        troves = [ (n, (None, None), (v, f), True) for (n,v,f) in troveList]

        # check against an empty in-memory database/root
        oldDbPath = cfg.dbPath
        cfg.setValue('dbPath', ':memory:')
        oldRoot = cfg.root
        cfg.setValue('root', ':memory:')

        client = conaryclient.ConaryClient(cfg)

        if self.checkOnlyByDefaultDeps:
            depCs = client.updateChangeSet(troves, recurse = True,
                                           resolveDeps=False, split=False)[0]
            cs = depCs.csList[0]
        else:
            cs = client.repos.createChangeSet(troves,
                                              recurse = True, withFiles=False)
        failedDeps = client.db.depCheck(cs)[0]
        cfg.setValue('dbPath', oldDbPath)
        cfg.setValue('root', oldRoot)
        return failedDeps

    def _removeTroves(self, source):
        # resolve the removeTrove() specs against this group's contents
        # (database-style search) and drop each match
        groupSource = trovesource.GroupRecipeSource(source, self)
        groupSource.searchAsDatabase()
        results = groupSource.findTroves(None, self.removeTroveList)
        troveTups = chain(*results.itervalues())
        for troveTup in troveTups:
            del self.troves[troveTup]
            self.redirects.discard(troveTup)

    def _checkForRedirects(self):
        # any redirect that survived removeTrove processing is an error
        if self.redirects:
            redirects = [('%s=%s[%s]' % (n,v.asString(),deps.formatFlavor(f))) \
                                        for (n,v,f) in sorted(self.redirects)]
            raise RecipeFileError, \
                    "found redirects, which are not allowed in groups: \n%s" \
                    % '\n'.join(redirects)

    def getRequires(self):
        return self.requires

    def getTroveList(self):
        return self.troveVersionFlavors

    def getNewGroupList(self):
        return self.newGroupList

    def __init__(self, depCheck, autoResolve, checkOnlyByDefaultDeps,
                 byDefault = True):
        self.redirects = set()
        self.addTroveList = []
        self.addReferenceList = []
        self.removeTroveList = []
        self.newGroupList = []
        self.requires = deps.DependencySet()
        self.troveVersionFlavors = {}

        self.depCheck = depCheck
        self.autoResolve = autoResolve
        self.checkOnlyByDefaultDeps = checkOnlyByDefaultDeps
        self.byDefault = byDefault

    def Requires(self, requirement):
        # add a trove-level requirement carried by the cooked group
        self.requires.addDep(deps.TroveDependencies,
                             deps.Dependency(requirement))
class _GroupReference:
    """ A reference to a set of troves, created by a trove spec, that
        can be searched like a repository using findTrove.  Hashable
        by the trove spec(s) given.  Note the references can be
        recursive -- This reference could be relative to another
        reference, passed in as the upstreamSource.
    """
    def __init__(self, troveSpecs, upstreamSource=None):
        self.troveSpecs = troveSpecs
        self.upstreamSource = upstreamSource

    def __hash__(self):
        # NOTE(review): __hash__ is defined but __eq__ is not, so distinct
        # instances with equal specs still compare unequal (identity);
        # confirm whether set/dict deduplication of references is intended
        return hash((self.troveSpecs, self.upstreamSource))

    def findSources(self, repos, labelPath, flavorPath):
        """ Find the troves that make up this trove reference """
        # search the upstream reference when nested, else the repository
        if self.upstreamSource is None:
            source = repos
        else:
            source = self.upstreamSource

        results = source.findTroves(labelPath, self.troveSpecs, flavorPath)
        troveTups = [ x for x in chain(*results.itervalues())]
        self.sourceTups = troveTups
        # wrap the matches so later findTroves/getTroves calls are scoped
        # to exactly this set
        self.source = trovesource.TroveListTroveSource(source, troveTups)
        self.source.searchAsRepository()

    def findTroves(self, *args, **kw):
        return self.source.findTroves(*args, **kw)

    def getTroves(self, *args, **kw):
        return self.source.getTroves(*args, **kw)

    def getSourceTroves(self):
        """ Returns the list of troves that form this reference
            (without their children).
        """
        return self.getTroves(self.sourceTups, withFiles=False)
class GroupRecipe(Recipe):
Flags = use.LocalFlags
depCheck = False
autoResolve = False
checkOnlyByDefaultDeps = True
def Requires(self, requirement, groupName = None):
if requirement[0] == '/':
raise RecipeFileError, 'file requirements not allowed in groups'
if groupName is None: groupName = self.name
self.groups[groupName].Requires(requirement)
def _parseFlavor(self, flavor):
assert(flavor is None or isinstance(flavor, str))
if flavor is None:
return None
flavorObj = deps.parseFlavor(flavor)
if flavorObj is None:
raise ValueError, 'invalid flavor: %s' % flavor
return flavorObj
def _parseGroupNames(self, groupName):
if groupName is None:
return [self.name]
elif not isinstance(groupName, (list, tuple)):
return [groupName]
else:
return groupName
def addTrove(self, name, versionStr = None, flavor = None, source = None,
byDefault = None, groupName = None, ref=None):
groupNames = self._parseGroupNames(groupName)
flavor = self._parseFlavor(flavor)
# track this trove in the GroupRecipe so that it can be found
# as a group with the rest of the troves.
self.toFind.setdefault(ref, set()).add((name, versionStr, flavor))
if ref is not None:
self.sources.add(ref)
for groupName in groupNames:
self.groups[groupName].addTrove(name, versionStr = versionStr,
flavor = flavor,
source = source,
byDefault = byDefault,
ref = ref)
def setByDefault(self, byDefault=True, groupName=None):
""" Set whether troves added to this group are installed by default
or not. (This default value can be overridden by the byDefault
parameter to individual addTrove commands). If you set the
byDefault value for the main group, you set it for any
future groups created.
"""
groupNames = self._parseGroupNames()
for groupName in groupNames:
self.groups[groupName].setByDefault(byDefault)
def addAllTroves(self, reference, groupName=None):
""" Add all of the troves directly contained in the given
reference to groupName. For example, if the cooked group-foo
contains references to the troves
foo1=<version>[flavor] and foo2=<version>[flavor],
the lines
ref = r.addReference('group-foo')
followed by
r.addAllTroves(ref)
would be equivalent to you having added the addTrove lines
r.addTrove('foo1', <version>)
r.addTrove('foo2', <version>)
"""
assert(reference is not None)
self.sources.add(reference)
groupNames = self._parseGroupNames(groupName)
for groupName in groupNames:
self.groups[groupName].addAllTroves(reference)
def removeTrove(self, name, versionStr=None, flavor=None,
groupName=None):
""" Remove a trove added to this group, either by an addAllTroves
line or by an addTrove line.
"""
groupNames = self._parseGroupNames(groupName)
flavor = self._parseFlavor(flavor)
for groupName in groupNames:
self.groups[groupName].removeTrove(name, versionStr, flavor)
def addReference(self, name, versionStr=None, flavor=None, ref=None):
flavor = self._parseFlavor(flavor)
return _GroupReference(((name, versionStr, flavor),), ref)
def addNewGroup(self, name, groupName = None, byDefault = True):
if groupName is None:
groupName = self.name
if not self.groups.has_key(name):
raise RecipeFileError, 'group %s has not been created' % name
self.groups[groupName].addNewGroup(name, byDefault)
def getRequires(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getRequires()
def getTroveList(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getTroveList()
def getNewGroupList(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].getNewGroupList()
def getSize(self, groupName = None):
if groupName is None: groupName = self.name
return self.groups[groupName].size
def setLabelPath(self, *path):
self.labelPath = [ versions.Label(x) for x in path ]
def createGroup(self, groupName, depCheck = False, autoResolve = False,
byDefault = None, checkOnlyByDefaultDeps = None):
if self.groups.has_key(groupName):
raise RecipeFileError, 'group %s was already created' % groupName
if not groupName.startswith('group-'):
raise RecipeFileError, 'group names must start with "group-"'
if byDefault is None:
byDefault = self.groups[self.name].byDefault
if checkOnlyByDefaultDeps is None:
checkOnlyByDefaultDeps = self.groups[self.name].checkOnlyByDefaultDeps
self.groups[groupName] = SingleGroup(depCheck, autoResolve,
checkOnlyByDefaultDeps, byDefault)
def getGroupNames(self):
return self.groups.keys()
def _orderGroups(self):
""" Order the groups so that each group is after any group it
contains. Raises an error if a cycle is found.
"""
# boy using a DFS for such a small graph seems like overkill.
# but its handy since we're also trying to find a cycle at the same
# time.
children = {}
groupNames = self.getGroupNames()
for groupName in groupNames:
children[groupName] = \
set([x[0] for x in self.getNewGroupList(groupName)])
timeStamp = 0
# the different items in the seen dict
VISITED = 0 # True if we've added this node to the stack
START = 1 # time at which the node was first visited
FINISH = 2 # time at which all the nodes child nodes were finished
# with
PATH = 3 # path to get to this node from wherever it was
# started.
seen = dict((x, [False, None, None, []]) for x in groupNames)
for groupName in groupNames:
if seen[groupName][VISITED]: continue
stack = [groupName]
seen[groupName][VISITED] = True
while stack:
timeStamp += 1
node = stack[-1]
if not seen[node][START]:
seen[node][START] = timeStamp
childList = []
if children[node]:
path = seen[node][PATH] + [node]
for child in children[node]:
if child in path:
cycle = path[path.index(child):] + [child]
raise RecipeFileError('cycle in groups: %s' % cycle)
if not seen[child][VISITED]:
childList.append(child)
if not childList:
# we've finished with all this nodes children
# mark it as done
seen[node][FINISH] = timeStamp
stack = stack[:-1]
else:
path = seen[node][PATH] + [node]
for child in childList:
seen[child] = [True, None, None, path]
stack.append(child)
groupsByLastSeen = ( (seen[x][FINISH], x) for x in groupNames)
return [x[1] for x in sorted(groupsByLastSeen)]
def _getIncludedTroves(self, groupName, checkOnlyByDefaultDeps):
"""
Returns the troves in all subGroups included by this trove.
If checkOnlyByDefaultDeps is False, exclude troves that are
not included by default.
"""
allTroves = []
childGroups = []
for childGroup, byDefault in self.groups[groupName].getNewGroupList():
if byDefault or not checkOnlyByDefaultDeps:
childGroups.append(childGroup)
while childGroups:
childGroup = childGroups.pop()
groupObj = self.groups[childGroup]
if checkOnlyByDefaultDeps:
allTroves.extend(groupObj.getDefaultTroves())
else:
allTroves.extend(groupObj.troves)
for childGroup, byDft in self.groups[childGroup].getNewGroupList():
if byDft or not checkOnlyByDefaultDeps:
childGroups.append(childGroup)
return allTroves
def findAllTroves(self):
if self.toFind is not None:
# find all troves needed by all included groups together, at
# once. We then pass that information into the individual
# groups.
self._findSources()
self._findTroves()
self.toFind = None
groupNames = self._orderGroups()
for groupName in groupNames:
groupObj = self.groups[groupName]
# assign troves to this group
groupObj.findTroves(self.troveSpecMap, self.repos)
# if ordering is right, we now should be able to recurse through
# the groups included by this group and get all recursively
# included troves
includedTroves = self._getIncludedTroves(groupName,
groupObj.checkOnlyByDefaultDeps)
# include those troves when doing dependency resolution/checking
groupObj.autoResolveDeps(self.cfg, self.repos, self.labelPath,
includedTroves)
failedDeps = groupObj.checkDependencies(self.cfg, includedTroves)
if failedDeps:
return groupName, failedDeps
groupObj.calcSize()
def _findSources(self):
for troveSource in self.sources:
if troveSource is None:
continue
troveSource.findSources(self.repos, self.labelPath, self.flavor)
def _findTroves(self):
""" Finds all the troves needed by all groups, and then
stores the information for retrieval by the individual
groups (stored in troveSpecMap).
"""
repos = self.repos
cfg = self.cfg
troveTups = set()
results = {}
for troveSource, toFind in self.toFind.iteritems():
try:
if troveSource is None:
source = repos
else:
source = troveSource
results[troveSource] = source.findTroves(self.labelPath,
toFind,
cfg.buildFlavor)
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
for result in results.itervalues():
troveTups.update(chain(*result.itervalues()))
troveTups = list(troveTups)
troves = repos.getTroves(troveTups, withFiles=False)
foundTroves = dict(izip(troveTups, troves))
troveSpecMap = {}
# store the pertinent information in troveSpecMap
# keyed off of source, then troveSpec
# note - redirect troves are not allowed in group recipes.
# we track whether a trove is a redirect because it's possible
# it could be added at one point (say, by an overly general
# addTrove line) and then removed afterwards by a removeTrove.
for troveSource, toFind in self.toFind.iteritems():
d = {}
for troveSpec in toFind:
d[troveSpec] = [ (x,
foundTroves[x].getSize(),
foundTroves[x].isRedirect())
for x in results[troveSource][troveSpec] ]
troveSpecMap[troveSource] = d
self.troveSpecMap = troveSpecMap
def __init__(self, repos, cfg, label, flavor, extraMacros={}):
self.repos = repos
self.cfg = cfg
self.labelPath = [ label ]
self.flavor = flavor
self.macros = macros.Macros()
self.macros.update(extraMacros)
self.toFind = {}
self.troveSpecMap = {}
self.foundTroves = {}
self.sources = set()
self.groups = {}
self.groups[self.name] = SingleGroup(self.depCheck, self.autoResolve,
self.checkOnlyByDefaultDeps)
class RedirectRecipe(Recipe):
Flags = use.LocalFlags
def addRedirect(self, name, versionStr = None, flavorStr = None,
fromTrove = None):
if flavorStr is not None:
flavor = deps.parseFlavor(flavorStr)
if flavor is None:
raise ValueError, 'invalid flavor %s' % flavorStr
else:
flavor = None
if fromTrove is None:
fromTrove = self.name
elif fromTrove.find(":") != -1:
raise ValueError, 'components cannot be individually redirected'
self.addTroveList.append((name, versionStr, flavor, fromTrove))
def findTroves(self):
self.size = 0
validSize = True
troveList = []
packageSet = {}
for (name, versionStr, flavor, fromName) in self.addTroveList:
try:
desFlavor = self.cfg.buildFlavor.copy()
if flavor is not None:
desFlavor.union(flavor, deps.DEP_MERGE_TYPE_OVERRIDE)
pkgList = self.repos.findTrove(self.label,
(name, versionStr, desFlavor))
except repository.TroveNotFound, e:
raise RecipeFileError, str(e)
assert(len(pkgList) == 1)
packageSet[pkgList[0]] = fromName
troveList.append(pkgList[0])
troves = self.repos.getTroves(troveList, withFiles = False)
redirections = {}
for topLevelTrove in troves:
topName = topLevelTrove.getName()
topVersion = topLevelTrove.getVersion()
topFlavor = topLevelTrove.getFlavor()
fromName = packageSet[(topName, topVersion, topFlavor)]
d = self.redirections.setdefault(fromName, {})
# this redirects from oldTrove -> newTrove
d[(topName, topVersion, topFlavor)] = True
for (name, version, flavor) in topLevelTrove.iterTroveList():
# redirect from oldTrove -> referencedPackage
d[(name, version, flavor)] = True
if name.find(":") != -1:
compName = fromName + ":" + name.split(":")[1]
# redirect from oldTrove -> oldTrove:component. we
# leave version/flavor alone; they get filled in later
d[(compName, None, None)] = True
# redirect from oldTrove:component -> newTrove:component
d2 = self.redirections.setdefault(compName, {})
d2[(name, version, flavor)] = True
for name,d in redirections.iteritems():
self.redirections[name] = [ (x[0], x[1], x[2]) for x in d ]
def getRedirections(self):
return self.redirections
def __init__(self, repos, cfg, label, flavor, extraMacros={}):
self.repos = repos
self.cfg = cfg
self.redirections = {}
self.label = label
self.flavor = flavor
self.addTroveList = []
self.macros = macros.Macros()
self.macros.update(extraMacros)
class FilesetRecipe(Recipe):
    # XXX need to work on adding files from different flavors of troves
    def addFileFromPackage(self, pattern, pkg, recurse, remapList):
        # Collect files from one trove whose paths match the brace-expanded
        # glob 'pattern', applying the (old, new) path remaps.  Returns True
        # when at least one file matched.
        pathMap = {}
        for (pathId, pkgPath, fileId, version) in pkg.iterFileList():
            pathMap[pkgPath] = (pathId, fileId, version)

        patternList = util.braceExpand(pattern)
        matches = {}
        for pattern in patternList:
            if not recurse:
                matchList = [ n for n in pathMap.keys() if
                                    fnmatchcase(n, pattern)]
            else:
                matchList = []
                dirCount = pattern.count("/")
                for n in pathMap.iterkeys():
                    i = n.count("/")
                    if i > dirCount:
                        # Deeper paths match if their directory prefix at the
                        # pattern's depth matches, so everything under a
                        # matching directory is included.
                        dirName = os.sep.join(n.split(os.sep)[:dirCount + 1])
                        match = fnmatchcase(dirName, pattern)
                    elif i == dirCount:
                        match = fnmatchcase(n, pattern)
                    else:
                        match = False

                    if match: matchList.append(n)

            for path in matchList:
                matches[path] = pathMap[path]

        if not matches:
            return False

        for path in matches.keys():
            (pathId, fileId, version) = matches[path]

            # First matching remap wins: an exact match replaces the whole
            # path; a prefix match rewrites the leading directory.
            for (old, new) in remapList:
                if path == old:
                    path = new
                    break
                elif len(path) > len(old) and path.startswith(old) and \
                                              path[len(old)] == "/":
                    path = new + path[len(old):]
                    break

            if self.paths.has_key(path):
                raise RecipeFileError, "%s has been included multiple times" \
                        % path

            self.files[pathId] = (path, fileId, version)
            self.paths[path] = 1

        return True

    def addFile(self, pattern, component, versionStr = None, recurse = True,
                remap = []):
        """
        Adds files which match pattern from version versionStr of component.
        Pattern is glob-style, with brace expansion. If recurse is set,
        anything below a directory which matches pattern is also included,
        and the directory itself does not have to be part of the trove.
        Remap is a list of (oldPath, newPath) tuples. The first oldPath
        which matches the start of a matched pattern is rewritten as
        newPath.
        """
        # Allow a single (old, new) tuple as shorthand for a one-element list.
        if type(remap) == tuple:
            remap = [ remap ]

        try:
            pkgList = self.repos.findTrove(self.label,
                                           (component, versionStr, None),
                                           self.flavor)
        except repository.TroveNotFound, e:
            raise RecipeFileError, str(e)

        if len(pkgList) == 0:
            raise RecipeFileError, "no packages match %s" % component
        elif len(pkgList) > 1:
            raise RecipeFileError, "too many packages match %s" % component

        foundIt = False
        pkg = self.repos.getTrove(*pkgList[0])
        # Walk the trove and all its referenced troves looking for matches.
        for sub in self.repos.walkTroveSet(pkg):
            foundIt = foundIt or self.addFileFromPackage(pattern, sub, recurse,
                                                         remap)

        if not foundIt:
            raise RecipeFileError, "%s does not exist in version %s of %s" % \
                (pattern, pkg.getVersion().asString(), pkg.getName())

    def iterFileList(self):
        # Yield (pathId, path, fileId, version) for every collected file.
        for (pathId, (path, fileId, version)) in self.files.iteritems():
            yield (pathId, path, fileId, version)

    def __init__(self, repos, cfg, label, flavor, extraMacros={}):
        self.repos = repos
        self.cfg = cfg
        # pathId -> (path, fileId, version) for files added so far.
        self.files = {}
        # Paths already claimed, to reject duplicate inclusions.
        self.paths = {}
        self.label = label
        self.flavor = flavor
        self.macros = macros.Macros()
        self.macros.update(extraMacros)
class RecipeFileError(Exception):
    """Signals a problem found while processing a recipe file."""

    def __init__(self, msg):
        # Keep the message on the instance for callers that inspect it.
        self.msg = msg

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return self.msg
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# copied from harvest_template.py which is due to:
# (C) 2013 Multichill, Amir
# (C) 2013 Pywikipediabot team
#
# Distributed under the terms of MIT License.
#
import re
import json
import pywikibot
from pywikibot import pagegenerators as pg
from pywikibot import textlib
import mwparserfromhell
import xisbn
from collections import defaultdict
from pprint import pprint
import copy
import pyisbn
# Harvesting writes to the Wikidata repository associated with English
# Wikipedia; log in to both up front.
en_wikipedia = pywikibot.Site('en', 'wikipedia')
wikidata = en_wikipedia.data_repository()
if not wikidata.logged_in(): wikidata.login()
if not en_wikipedia.logged_in(): en_wikipedia.login()
# Prebuilt "imported from" (P143) source claim pointing at enwiki (Q328).
# NOTE(review): this module-level 'source' is not referenced below
# (compareClaims builds its own source claim) -- confirm before removing.
source = pywikibot.Claim(wikidata, 'p143')
source.setTarget(pywikibot.ItemPage(wikidata,'q328'))
# Wikidata property ids for each harvested infobox field.
properties = {'isbn':'P212',
              'ocn':'P243',
              'illustrator': 'P110',
              'author': 'P50',
              'lang': 'P364',
              'genre': 'P136',
              'translator': 'P655'}
# Wikidata item ids for each Wikipedia language edition (used as the target
# of "imported from" source claims).
# NOTE(review): 'imported_from' maps to a property id (P143), not a language
# item, and 'pt' is harvested below but has no entry here -- verify.
wplangs = {'en':'Q328',
           'de':'Q48183',
           'fr':'Q8447',
           'it':'Q11920',
           "es": "q8449",
           "ja": "q177837",
           "ru": "q206855",
           "pl": "q1551807",
           "sv": "q169514",
           'imported_from': 'P143'}
# Per-language infobox parameter names; None marks a field the template
# does not have (lookups against None never match a real parameter).
# NOTE(review): the 'sv' entries duplicate the Portuguese parameter names,
# and the 'ru' illustrator value looks Swedish and carries a trailing
# space -- confirm against the actual templates.
wpsites = {'en': {'isbn': u'isbn',
                  'oclc': u'oclc',
                  'author': u'author',
                  'illustrator': u'illustrator',
                  'translator': u'translator',
                  'language': u'language',
                  'published': u'published',
                  'genre': u'genre',
                  'dewey': u'dewey'},
           'it': {'isbn': None,
                  'oclc': None,
                  'author': u'autore',
                  'illustrator': None,
                  'translator': None,
                  'language': u'lingua',
                  'published': u'annoorig',
                  'genre': u'genere',
                  'dewey': None},
           'fr': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'auteur',
                  'illustrator': u'dessinateur',
                  'translator': u'traducteur',
                  'language': u'langue',
                  'published': u'dateparution_orig',
                  'genre': u'genere',
                  'dewey': None},
           'es': {'isbn': u'isbn',
                  'oclc': u'oclc',
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'traductor',
                  'language': u'idioma original',
                  'published': u'publicación original',
                  'genre': u'género',
                  'dewey': None},
           'ja': {'isbn': None,
                  'oclc': None,
                  'author': u'author',
                  'illustrator': u'illustrator',
                  'translator': u'translator',
                  'language': u'language',
                  'published': u'published',
                  'genre': u'genre',
                  'dewey': None},
           'pl': {'isbn': None,
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': None,
                  'translator': u'tłumacz',
                  'language': u'język oryg wyd',
                  'published': u'data I wyd oryg',
                  'genre': u'tematyka',
                  'dewey': None},
           'pt': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'tradutor_br',
                  'language': u'idioma',
                  'published': u'lançamento',
                  'genre': u'gênero',
                  'dewey': None},
           'sv': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'tradutor_br',
                  'language': u'idioma',
                  'published': u'lançamento',
                  'genre': u'gênero',
                  'dewey': None},
           'ru': {'isbn': u'isbni',
                  'oclc': None,
                  'author': u'Автор',
                  'illustrator': u'illustratör ',
                  'translator': u'Переводчик',
                  'language': u'Язык',
                  'published': u'Оригинал выпуска',
                  'genre': u'Жанр',
                  'dewey': None}
           }
# Book infobox template title per language edition.
templateTitleDict = {'en': u'Infobox book',
                     'it': u'Libro',
                     'fr': u'Infobox Livre',
                     'es': u'Ficha de libro',
                     'ja': u'基礎情報 書籍',
                     'pl': u'Książka infobox',
                     'pt': u'Info/Livro',
                     'sv': u'Bokfakta',
                     'ru': u'Издание'}
# Localized "Template:" namespace prefix per language edition.
templateNSDict = {'en': u'Template:',
                  'it': u'Template:',
                  'fr': u'Modèle:',
                  'es': u'Plantilla:',
                  'ja': u'Template:',
                  'pl': u'Szablon:',
                  'pt': u'Predefinição:',
                  'sv': u'Mall:',
                  'ru': u'Шаблон:'}
def makeGenerator(lang):
    """Return a generator of pages transcluding the book infobox on the
    given language's Wikipedia."""
    site = pywikibot.Site(lang, 'wikipedia')
    templateTitle = templateNSDict[lang] + templateTitleDict[lang]
    template = pywikibot.Page(site, templateTitle)
    return pg.ReferringPageGenerator(template, followRedirects=False,
                                     withTemplateInclusion=True,
                                     onlyTemplateInclusion=True,
                                     step=None, total=None, content=False)
def logVIAFstats(remoteClaims):
    # Count pre-existing VIAF (P214) identifiers on the item, purely for the
    # 'cases' statistics; does not modify anything.
    for remoteClaimList in remoteClaims.itervalues():
        for remoteClaim in remoteClaimList:
            if remoteClaim.id == 'P214':
                print 'VIAF Author', str(remoteClaim.target)
                cases['hadVIAF'] += 1
class bookdata:
    """Accumulator for everything harvested from one book infobox page."""

    def __init__(self, wditem):
        # Associated Wikidata ItemPage for the article.
        self.wditem = wditem
        # Values harvested from the infobox; x-prefixed lists hold related
        # identifiers fetched from the xISBN service.
        self.isbns = list()
        self.xisbns = list()
        self.ocns = list()
        self.xocns = list()
        self.deweys = list()
        self.authors = list()
        self.illustrators = list()
        self.translators = list()
        self.langs = list()
        self.publishdate = list()
        self.genres = list()

    def dictify(self):
        """Return a JSON-serializable snapshot of this record (all values
        stringified)."""
        bookdict = dict()
        for k, v in self.__dict__.iteritems():
            try:
                bookdict[k] = str(v)
            except pywikibot.exceptions.NoPage:
                bookdict[k] = 'pywikibot.exceptions.NoPage'
        try:
            bookdict['wditem'] = self.wditem.getID()
        except pywikibot.exceptions.NoPage:
            # BUG FIX: record the failure under 'wditem' instead of under
            # whichever key the loop above happened to end on.
            bookdict['wditem'] = 'pywikibot.exceptions.NoPage'
        return bookdict
def incorp_xdata(book):
    """Fall back to an xISBN-derived OCLC number when the infobox had none."""
    if book.ocns or not book.xocns:
        return
    book.xocns.sort()
    book.ocns.append(book.xocns[0])
    cases['got_ocn_from_xisbn'] += 1
def checkISBN13(book):
def ISBNsize(isbn, isnblen):
justnums = filter(lambda x: x in '1234567890Xx', isbn)
if len(justnums) == isbnlen:
return True
else:
return False
isbnlists ={13: list(), 10:list() }
for isbnlen in isbnlists.iterkeys():
for isbn in book.isbns:
if ISBNsize(isbn, isbnlen):
isbnlists[isbnlen].append(isbn)
#no isbn13s
if not isbnlists[13] and not isbnlists[10]:
if book.xisbns:
book.xisbns.sort()
book.isbns.append(book.xisbns[0])
print 'using an xisbn here'
cases['put_in_a_isbn13'] += 1
if isbnlists[10] and not isbnlists[13]:
for isbn in isbnlists[10]:
converted = pyisbn.convert(isbn)
print 'conversion', isbn, converted
book.isbns.append(converted)
def processRE(param, rx):
    """Regex-scan the template parameter value, ignoring wikitext that is
    commented out or otherwise disabled."""
    visible = textlib.removeDisabledParts(str(param.value.strip()))
    return re.findall(rx, visible)
def processLinks(param, wpsitelang):
    # Resolve each wikilink in the parameter value to its Wikidata ItemPage.
    itempagelist = list()
    tsite = pywikibot.Site(wpsitelang, 'wikipedia')
    for mwnode in param.value.filter():
        if type(mwnode) == mwparserfromhell.nodes.wikilink.Wikilink:
            try:
                paramLinkRedir = pywikibot.Page(tsite, mwnode.title).isRedirectPage()
            # NOTE(review): bare except also hides real errors (network, bad
            # titles) -- confirm this is intentional best-effort behavior.
            except:
                paramLinkRedir = False
            if paramLinkRedir:
                # Follow the redirect so the item lookup uses the real title.
                redirpage = pywikibot.Page(tsite, mwnode.title).getRedirectTarget()
                pagetitle = redirpage.title()
            else:
                pagetitle = mwnode.title
            #hopefully here you can see im trying to add to the returnlist a Wikdata ItemPage associated with a mwparerfromhell wikilink
            try:
                itempagelist.append(pywikibot.ItemPage.fromPage(pywikibot.Page(tsite, pagetitle)))
            # Pages without a Wikidata item are silently skipped.
            except:
                continue
    return itempagelist
def processISBNs(param, book, wpsitelang=None):
    """Harvest ISBNs from the template parameter and query the xISBN service
    for related ISBNs and OCLC numbers.

    wpsitelang is accepted for call-signature parity with the other
    process* helpers (processPage passes it positionally) but is unused.

    BUG FIX: the function previously took only (param, book) while the call
    site passes three arguments, which raised TypeError at runtime.
    """
    isbns = processRE(param=param, rx="[0-9][--–\ 0-9]{9,16}[xX]?")
    isbns = map(lambda x: x.replace(' ', ''), isbns)
    xisbns = set()
    xocns = set()
    for isbn in isbns:
        try:
            metadata = xisbn.xisbn(isbn, metadata=True)
            xisbns.update(metadata['isbn'])
            xocns.update(metadata['oclcnum'])
        except xisbn.isbnError:
            pywikibot.output('xisbn error')
    book.isbns.extend(isbns)
    book.xisbns.extend(list(xisbns))
    book.xocns.extend(list(xocns))
def processOCNs(param, book, wpsitelang):
    """Collect OCLC numbers (runs of digits) from the parameter."""
    book.ocns.extend(processRE(param=param, rx="\d+"))
def processDewey(param, book, wpsitelang):
    """Collect Dewey classifications (comma-separated values)."""
    book.deweys.extend(processRE(param=param, rx="[^,]+"))
def processAuthors(param, book, wpsitelang):
    """Resolve author wikilinks to Wikidata items and record them."""
    for item in processLinks(param, wpsitelang):
        book.authors.append(item)
def processIllustrators(param, book, wpsitelang):
    """Resolve illustrator wikilinks to Wikidata items and record them."""
    for item in processLinks(param, wpsitelang):
        book.illustrators.append(item)
def processTranslators(param, book, wpsitelang):
    """Resolve translator wikilinks to Wikidata items and record them."""
    for item in processLinks(param, wpsitelang):
        book.translators.append(item)
def processGenre(param, book, wpsitelang):
    """Resolve genre wikilinks to Wikidata items and record them."""
    for item in processLinks(param, wpsitelang):
        book.genres.append(item)
def processLanguage(param, book, wpsitelang):
    """Resolve language wikilinks to Wikidata items and record them."""
    for item in processLinks(param, wpsitelang):
        book.langs.append(item)
def processPublished(param, book, wpsitelang):
    # Publication dates are not harvested yet; kept so the dispatch table in
    # processPage stays uniform.
    pass
def processPage(page, wpsitelang):
    """
    Process a single page
    """
    # Per-language parameter-name table; entries that are None never match a
    # real parameter name, so those fields are skipped for that language.
    paramdict = wpsites[wpsitelang]
    wditem = pywikibot.ItemPage.fromPage(page)
    book = bookdata(wditem)
    pywikibot.output('Processing %s' % page)
    pagetext = page.get()
    wikicode = mwparserfromhell.parse(pagetext)
    for template in wikicode.filter_templates():
        # startswith also catches title variants like "Infobox book/..." or
        # trailing whitespace in the template name.
        if template.name.startswith(templateTitleDict[wpsitelang]):
            for param in template.params:
                paramname = param.name.strip()
                # Dispatch each recognized infobox field to its harvester.
                if paramname == paramdict['isbn']:
                    processISBNs(param, book, wpsitelang)
                if paramname == paramdict['oclc']:
                    processOCNs(param, book, wpsitelang)
                if paramname == paramdict['author']:
                    processAuthors(param, book, wpsitelang)
                if paramname == paramdict['illustrator']:
                    processIllustrators(param, book, wpsitelang)
                if paramname == paramdict['translator']:
                    processTranslators(param, book, wpsitelang)
                if paramname == paramdict['language']:
                    processLanguage(param, book, wpsitelang)
                if paramname == paramdict['published']:
                    processPublished(param, book, wpsitelang)
                if paramname == paramdict['genre']:
                    processGenre(param, book, wpsitelang)
                if paramname == paramdict['dewey']:
                    processDewey(param, book, wpsitelang)
    return book
def propertiesToClaims(book, lang):
    # Turn harvested book attributes into unsaved pywikibot Claim objects.
    # NOTE(review): 'lang' is unused, and book.translators/deweys/publishdate
    # are not mapped here even though 'translator' has a property id --
    # confirm whether that is intentional.
    localClaims = list() #we're returning this
    bookattrs = {'isbn': book.isbns,
                 'ocn': book.ocns,
                 'illustrator': book.illustrators,
                 'author': book.authors,
                 'lang': book.langs,
                 'genre': book.genres}
    for book_k, book_v in bookattrs.iteritems():
        if book_v:
            for attr in book_v:
                claimObj = pywikibot.Claim(site=wikidata, pid=properties[book_k])
                claimObj.setTarget(attr)
                localClaims.append(claimObj)
    return localClaims
def compareClaims(book, sourcelang):
    """Merge the book's harvested claims into its Wikidata item, adding a
    claim and/or an "imported from" source only where missing."""
    qid = book.wditem.getID()
    try:
        pageToEdit = pywikibot.ItemPage(wikidata, qid)
        page_parts = pageToEdit.get()
    except pywikibot.data.api.APIError:
        #maybe there's no associated wikidata page
        return
    localClaims = propertiesToClaims(book, sourcelang)
    remoteClaims = page_parts['claims']
    logVIAFstats(remoteClaims)
    #we'll need this for every claim
    localSource = pywikibot.Claim(site=wikidata, pid=wplangs['imported_from'])
    localSource.setTarget(pywikibot.ItemPage(wikidata, wplangs[sourcelang]))
    for localClaim in localClaims:
        '''there are three states
           noMatchingClaim, so we add our claim
           matchingClaimUnsourced, so we add our source
           matchingClaimSurced, claim was already present and had the same source, do nothing
        '''
        noMatchingClaim = False
        matchingClaimUnsourced = False
        matchingClaimSourced = False
        # Scan every remote claim with the same property id for one that
        # already carries our "imported from" source.
        for remoteClaimList in remoteClaims.itervalues():
            for remoteClaim in remoteClaimList:
                if localClaim.id == remoteClaim.id:
                    #now we see if a our source is there
                    for remoteSourceDict in remoteClaim.getSources():
                        for remoteSourceList in remoteSourceDict.itervalues():
                            for remoteSource in remoteSourceList:
                                if remoteSource.id == localSource.id:
                                    if remoteSource.getTarget() == localSource.getTarget():
                                        matchingClaimSourced = True
                    if not matchingClaimSourced:
                        # Remember the claim itself so we can source it below.
                        matchingClaimUnsourced = remoteClaim
        if not matchingClaimUnsourced:
            noMatchingClaim = True
        if matchingClaimSourced:
            cases[str(localClaim.id)+'present'] += 1
            continue
        if matchingClaimUnsourced:
            matchingClaimUnsourced.addSource(localSource)
            cases[str(localSource.id)+'source'] += 1
            continue
        if noMatchingClaim:
            try:
                pageToEdit.addClaim(localClaim)
                localClaim.addSource(localSource)
                cases[str(localClaim.id)+'claim'] += 1
            # NOTE(review): bare except swallows all write failures; the
            # error is only printed, not recorded in 'cases'.
            except:
                print 'Error:'
                pprint(localClaim)
                continue
# Load persisted run statistics, falling back to fresh counters (including
# the resume cursor 'prevtouched') on the first run.
try:
    casesJSON = open('cases.JSON','r')
    cases = defaultdict(int)
    savedcases = json.load(casesJSON)
    for k, v in savedcases.iteritems():
        cases[k] = v
    casesJSON.close()
except IOError:
    cases = defaultdict(int)
    cases["prevtouched"] = 0
# Load previously harvested book dumps.
# NOTE(review): the saver writes 'allbooks.json' (lower case); on a
# case-sensitive filesystem this read of 'allbooks.JSON' never finds it --
# verify the two filenames agree.
try:
    allbooksJSON = open('allbooks.JSON','r')
    allbooks = json.load(allbooksJSON)
    allbooksJSON.close()
except IOError:
    allbooks = defaultdict(list)
def savecases():
    """Persist the statistics counters and the harvested book dumps."""
    casesJSON = open('cases.JSON', 'w')
    json.dump(cases, casesJSON, indent=4)
    casesJSON.close()
    # BUG FIX: write 'allbooks.JSON' (capitalized) so the loader at module
    # top, which reads 'allbooks.JSON', can find the data again on
    # case-sensitive filesystems.
    allbooksJSON = open('allbooks.JSON', 'w')
    json.dump(allbooks, allbooksJSON, indent=4)
    allbooksJSON.close()
def run(wpsitelang):
    """Harvest every page transcluding the book infobox on one language
    edition, resuming from the last saved position."""
    touched = 0
    generator = makeGenerator(wpsitelang)
    for page in generator:
        touched += 1
        # Leftover dry-run switch; always False.
        fake = False
        if not fake:
            # Resume support: skip pages already handled in a previous run.
            if cases['prevtouched'] >= touched:
                continue
            if page.namespace() == 0:
                book = processPage(page, wpsitelang)
                allbooks[wpsitelang].append(book.dictify())
                incorp_xdata(book)
                checkISBN13(book)
                #pprint (vars(book))
                # NOTE(review): the source language is hard-coded to 'en'
                # even when harvesting other wikis; note that passing
                # wpsitelang instead would KeyError for 'pt', which is absent
                # from wplangs -- confirm intent before changing.
                compareClaims(book, 'en')
                cases['prevtouched'] = touched
                savecases()
if __name__ == "__main__":
    # Harvest every configured language edition in turn.
    for lang in wpsites.iterkeys():
        print 'running now on language: ', lang
        run(lang)
# processISBNs takes wpsitelang argument, whoops
#!/usr/bin/python
# -*- coding: utf-8 -*-
# copied from harvest_template.py which is due to:
# (C) 2013 Multichill, Amir
# (C) 2013 Pywikipediabot team
#
# Distributed under the terms of MIT License.
#
import re
import json
import pywikibot
from pywikibot import pagegenerators as pg
from pywikibot import textlib
import mwparserfromhell
import xisbn
from collections import defaultdict
from pprint import pprint
import copy
import pyisbn
# Harvesting writes to the Wikidata repository associated with English
# Wikipedia; log in to both up front.
en_wikipedia = pywikibot.Site('en', 'wikipedia')
wikidata = en_wikipedia.data_repository()
if not wikidata.logged_in(): wikidata.login()
if not en_wikipedia.logged_in(): en_wikipedia.login()
# Prebuilt "imported from" (P143) source claim pointing at enwiki (Q328).
# NOTE(review): this module-level 'source' is not referenced elsewhere in
# this script -- confirm before removing.
source = pywikibot.Claim(wikidata, 'p143')
source.setTarget(pywikibot.ItemPage(wikidata,'q328'))
# Wikidata property ids for each harvested infobox field.
properties = {'isbn':'P212',
              'ocn':'P243',
              'illustrator': 'P110',
              'author': 'P50',
              'lang': 'P364',
              'genre': 'P136',
              'translator': 'P655'}
# Wikidata item ids for each Wikipedia language edition (used as the target
# of "imported from" source claims).
# NOTE(review): 'imported_from' maps to a property id (P143), not a language
# item, and 'pt' appears in wpsites but has no entry here -- verify.
wplangs = {'en':'Q328',
           'de':'Q48183',
           'fr':'Q8447',
           'it':'Q11920',
           "es": "q8449",
           "ja": "q177837",
           "ru": "q206855",
           "pl": "q1551807",
           "sv": "q169514",
           'imported_from': 'P143'}
# Per-language infobox parameter names; None marks a field the template
# does not have (lookups against None never match a real parameter).
# NOTE(review): the 'sv' entries duplicate the Portuguese parameter names,
# and the 'ru' illustrator value looks Swedish and carries a trailing
# space -- confirm against the actual templates.
wpsites = {'en': {'isbn': u'isbn',
                  'oclc': u'oclc',
                  'author': u'author',
                  'illustrator': u'illustrator',
                  'translator': u'translator',
                  'language': u'language',
                  'published': u'published',
                  'genre': u'genre',
                  'dewey': u'dewey'},
           'it': {'isbn': None,
                  'oclc': None,
                  'author': u'autore',
                  'illustrator': None,
                  'translator': None,
                  'language': u'lingua',
                  'published': u'annoorig',
                  'genre': u'genere',
                  'dewey': None},
           'fr': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'auteur',
                  'illustrator': u'dessinateur',
                  'translator': u'traducteur',
                  'language': u'langue',
                  'published': u'dateparution_orig',
                  'genre': u'genere',
                  'dewey': None},
           'es': {'isbn': u'isbn',
                  'oclc': u'oclc',
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'traductor',
                  'language': u'idioma original',
                  'published': u'publicación original',
                  'genre': u'género',
                  'dewey': None},
           'ja': {'isbn': None,
                  'oclc': None,
                  'author': u'author',
                  'illustrator': u'illustrator',
                  'translator': u'translator',
                  'language': u'language',
                  'published': u'published',
                  'genre': u'genre',
                  'dewey': None},
           'pl': {'isbn': None,
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': None,
                  'translator': u'tłumacz',
                  'language': u'język oryg wyd',
                  'published': u'data I wyd oryg',
                  'genre': u'tematyka',
                  'dewey': None},
           'pt': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'tradutor_br',
                  'language': u'idioma',
                  'published': u'lançamento',
                  'genre': u'gênero',
                  'dewey': None},
           'sv': {'isbn': u'isbn',
                  'oclc': None,
                  'author': u'autor',
                  'illustrator': u'ilustrador',
                  'translator': u'tradutor_br',
                  'language': u'idioma',
                  'published': u'lançamento',
                  'genre': u'gênero',
                  'dewey': None},
           'ru': {'isbn': u'isbni',
                  'oclc': None,
                  'author': u'Автор',
                  'illustrator': u'illustratör ',
                  'translator': u'Переводчик',
                  'language': u'Язык',
                  'published': u'Оригинал выпуска',
                  'genre': u'Жанр',
                  'dewey': None}
           }
# Book infobox template title per language edition.
templateTitleDict = {'en': u'Infobox book',
                     'it': u'Libro',
                     'fr': u'Infobox Livre',
                     'es': u'Ficha de libro',
                     'ja': u'基礎情報 書籍',
                     'pl': u'Książka infobox',
                     'pt': u'Info/Livro',
                     'sv': u'Bokfakta',
                     'ru': u'Издание'}
# Localized "Template:" namespace prefix per language edition.
templateNSDict = {'en': u'Template:',
                  'it': u'Template:',
                  'fr': u'Modèle:',
                  'es': u'Plantilla:',
                  'ja': u'Template:',
                  'pl': u'Szablon:',
                  'pt': u'Predefinição:',
                  'sv': u'Mall:',
                  'ru': u'Шаблон:'}
def makeGenerator(lang):
    """Return a generator of pages transcluding the book infobox on the
    given language's Wikipedia."""
    site = pywikibot.Site(lang, 'wikipedia')
    fullTitle = templateNSDict[lang] + templateTitleDict[lang]
    infobox = pywikibot.Page(site, fullTitle)
    return pg.ReferringPageGenerator(infobox, followRedirects=False,
                                     withTemplateInclusion=True,
                                     onlyTemplateInclusion=True,
                                     step=None, total=None, content=False)
def logVIAFstats(remoteClaims):
    # Count pre-existing VIAF (P214) identifiers on the item, purely for the
    # 'cases' statistics; does not modify anything.
    for remoteClaimList in remoteClaims.itervalues():
        for remoteClaim in remoteClaimList:
            if remoteClaim.id == 'P214':
                print 'VIAF Author', str(remoteClaim.target)
                cases['hadVIAF'] += 1
class bookdata:
    """Accumulator for everything harvested from one book infobox page."""

    def __init__(self, wditem):
        # Associated Wikidata ItemPage for the article.
        self.wditem = wditem
        # Values harvested from the infobox; x-prefixed lists hold related
        # identifiers fetched from the xISBN service.
        self.isbns = list()
        self.xisbns = list()
        self.ocns = list()
        self.xocns = list()
        self.deweys = list()
        self.authors = list()
        self.illustrators = list()
        self.translators = list()
        self.langs = list()
        self.publishdate = list()
        self.genres = list()

    def dictify(self):
        """Return a JSON-serializable snapshot of this record (all values
        stringified)."""
        bookdict = dict()
        for k, v in self.__dict__.iteritems():
            try:
                bookdict[k] = str(v)
            except pywikibot.exceptions.NoPage:
                bookdict[k] = 'pywikibot.exceptions.NoPage'
        try:
            bookdict['wditem'] = self.wditem.getID()
        except pywikibot.exceptions.NoPage:
            # BUG FIX: record the failure under 'wditem' instead of under
            # whichever key the loop above happened to end on.
            bookdict['wditem'] = 'pywikibot.exceptions.NoPage'
        return bookdict
def incorp_xdata(book):
    """If the infobox yielded no OCLC number, fall back to the
    lowest-sorting OCLC number reported by the xISBN service."""
    if book.ocns or not book.xocns:
        return
    book.xocns.sort()
    book.ocns.append(book.xocns[0])
    cases['got_ocn_from_xisbn'] += 1
def checkISBN13(book):
    """Ensure book.isbns ends up containing an ISBN-13.

    - If no ISBN of either length was found, fall back to the
      lowest-sorting xISBN result (when available).
    - If only ISBN-10s were found, append their ISBN-13 conversions.
    """
    def ISBNsize(isbn, isbnlen):
        # BUG FIX: the parameter was misspelled 'isnblen', so the body
        # silently closed over the outer loop variable instead (it only
        # worked because the call site happened to pass that same value).
        # A list comprehension (not filter()) keeps len() valid on Py2/Py3.
        justnums = [c for c in isbn if c in '1234567890Xx']
        return len(justnums) == isbnlen

    isbnlists = {13: list(), 10: list()}
    for isbnlen in isbnlists.keys():
        for isbn in book.isbns:
            if ISBNsize(isbn, isbnlen):
                isbnlists[isbnlen].append(isbn)
    # No ISBNs of either length: fall back to xISBN data if available.
    if not isbnlists[13] and not isbnlists[10]:
        if book.xisbns:
            book.xisbns.sort()
            book.isbns.append(book.xisbns[0])
            print('using an xisbn here')
            cases['put_in_a_isbn13'] += 1
    # Only ISBN-10s present: append each one's ISBN-13 form.
    if isbnlists[10] and not isbnlists[13]:
        for isbn in isbnlists[10]:
            converted = pyisbn.convert(isbn)
            print('conversion %s %s' % (isbn, converted))
            book.isbns.append(converted)
def processRE(param, rx):
    """Strip the template parameter's value, drop disabled wiki markup
    (comments, nowiki, ...), and return every match of regex rx."""
    visible_text = textlib.removeDisabledParts(str(param.value.strip()))
    return re.findall(rx, visible_text)
def processLinks(param, wpsitelang):
    """Resolve every wikilink in a template parameter to a Wikidata ItemPage.

    Redirect pages are followed to their target before the item lookup;
    links whose page has no Wikidata item are skipped silently.
    """
    itempagelist = list()
    tsite = pywikibot.Site(wpsitelang, 'wikipedia')
    for mwnode in param.value.filter():
        if type(mwnode) == mwparserfromhell.nodes.wikilink.Wikilink:
            try:
                paramLinkRedir = pywikibot.Page(tsite, mwnode.title).isRedirectPage()
            except:
                # NOTE(review): bare except — any failure here is treated
                # as "not a redirect".
                paramLinkRedir = False
            if paramLinkRedir:
                redirpage = pywikibot.Page(tsite, mwnode.title).getRedirectTarget()
                pagetitle = redirpage.title()
            else:
                pagetitle = mwnode.title
            # Append the Wikidata ItemPage associated with this
            # mwparserfromhell wikilink.
            try:
                itempagelist.append(pywikibot.ItemPage.fromPage(pywikibot.Page(tsite, pagetitle)))
            except:
                # NOTE(review): bare except — pages without a Wikidata
                # item (or any other failure) are silently skipped.
                continue
    return itempagelist
def processISBNs(param, book, wpsitelang):
    """Extract ISBNs from the parameter and enrich them via the xISBN
    service (related ISBNs plus OCLC numbers)."""
    # 10-17 character digit runs, allowing dashes/spaces and a trailing
    # X check digit.
    isbns = processRE(param=param, rx="[0-9][--–\ 0-9]{9,16}[xX]?")
    # NOTE(review): under Python 3 map() returns an iterator that the loop
    # below would exhaust before book.isbns.extend(isbns); fine on Python 2.
    isbns = map(lambda x: x.replace(' ', ''), isbns)
    xisbns = set()
    xocns = set()
    for isbn in isbns:
        try:
            metadata = xisbn.xisbn(isbn, metadata=True)
            xisbns.update(metadata['isbn'])
            xocns.update(metadata['oclcnum'])
        except xisbn.isbnError:
            pywikibot.output('xisbn error')
    book.isbns.extend(isbns)
    book.xisbns.extend(list(xisbns))
    book.xocns.extend(list(xocns))
# One handler per recognized infobox field; each appends parsed values to
# the given bookdata instance.
def processOCNs(param, book, wpsitelang):
    # OCLC control numbers are plain digit runs.
    ocns = processRE(param=param, rx="\d+")
    book.ocns.extend(ocns)
def processDewey(param, book, wpsitelang):
    # Dewey classifications: comma-separated values.
    deweys = processRE(param=param, rx="[^,]+")
    book.deweys.extend(deweys)
def processAuthors(param, book, wpsitelang):
    book.authors.extend(processLinks(param, wpsitelang))
def processIllustrators(param, book, wpsitelang):
    book.illustrators.extend(processLinks(param, wpsitelang))
def processTranslators(param, book, wpsitelang):
    book.translators.extend(processLinks(param, wpsitelang))
def processGenre(param, book, wpsitelang):
    book.genres.extend(processLinks(param, wpsitelang))
def processLanguage(param, book, wpsitelang):
    book.langs.extend(processLinks(param, wpsitelang))
def processPublished(param, book, wpsitelang):
    # Publication dates are not handled yet.
    pass
def processPage(page, wpsitelang):
    """Parse one article's book infobox into a bookdata instance.

    Walks every template on the page, and for the book infobox dispatches
    each recognized (localized) field name to its handler above.
    """
    # Localized infobox field names for this language edition.
    paramdict = wpsites[wpsitelang]
    wditem = pywikibot.ItemPage.fromPage(page)
    book = bookdata(wditem)
    pywikibot.output('Processing %s' % page)
    pagetext = page.get()
    wikicode = mwparserfromhell.parse(pagetext)
    for template in wikicode.filter_templates():
        # startswith also matches names with trailing whitespace/suffixes.
        if template.name.startswith(templateTitleDict[wpsitelang]):
            for param in template.params:
                paramname = param.name.strip()
                if paramname == paramdict['isbn']:
                    processISBNs(param, book, wpsitelang)
                if paramname == paramdict['oclc']:
                    processOCNs(param, book, wpsitelang)
                if paramname == paramdict['author']:
                    processAuthors(param, book, wpsitelang)
                if paramname == paramdict['illustrator']:
                    processIllustrators(param, book, wpsitelang)
                if paramname == paramdict['translator']:
                    processTranslators(param, book, wpsitelang)
                if paramname == paramdict['language']:
                    processLanguage(param, book, wpsitelang)
                if paramname == paramdict['published']:
                    processPublished(param, book, wpsitelang)
                if paramname == paramdict['genre']:
                    processGenre(param, book, wpsitelang)
                if paramname == paramdict['dewey']:
                    processDewey(param, book, wpsitelang)
    return book
def propertiesToClaims(book, lang):
    """Build unsaved pywikibot Claim objects from the harvested book
    attributes — one claim per collected value."""
    localClaims = list()  # we're returning this
    bookattrs = {'isbn': book.isbns,
                 'ocn': book.ocns,
                 'illustrator': book.illustrators,
                 'author': book.authors,
                 'lang': book.langs,
                 'genre': book.genres}
    for book_k, book_v in bookattrs.iteritems():  # Python 2 dict API
        if book_v:
            for attr in book_v:
                # 'properties' maps our attribute names to Wikidata P-ids.
                claimObj = pywikibot.Claim(site=wikidata, pid=properties[book_k])
                claimObj.setTarget(attr)
                localClaims.append(claimObj)
    return localClaims
def compareClaims(book, sourcelang):
    """Sync the book's harvested claims into its Wikidata item.

    For each locally built claim: add it if absent, attach our
    "imported from <sourcelang> Wikipedia" source if the claim exists
    without it, or do nothing when the claim already carries that source.
    """
    qid = book.wditem.getID()
    try:
        pageToEdit = pywikibot.ItemPage(wikidata, qid)
        page_parts = pageToEdit.get()
    except pywikibot.data.api.APIError:
        # maybe there's no associated wikidata page
        return
    localClaims = propertiesToClaims(book, sourcelang)
    remoteClaims = page_parts['claims']
    logVIAFstats(remoteClaims)
    # we'll need this for every claim: the "imported from" reference
    # pointing at the source-language Wikipedia.
    localSource = pywikibot.Claim(site=wikidata, pid=wplangs['imported_from'])
    localSource.setTarget(pywikibot.ItemPage(wikidata, wplangs[sourcelang]))
    for localClaim in localClaims:
        '''there are three states
        noMatchingClaim, so we add our claim
        matchingClaimUnsourced, so we add our source
        matchingClaimSurced, claim was already present and had the same source, do nothing
        '''
        noMatchingClaim = False
        matchingClaimUnsourced = False
        matchingClaimSourced = False
        for remoteClaimList in remoteClaims.itervalues():  # Python 2 dict API
            for remoteClaim in remoteClaimList:
                if localClaim.id == remoteClaim.id:
                    # now we see if our source is there
                    for remoteSourceDict in remoteClaim.getSources():
                        for remoteSourceList in remoteSourceDict.itervalues():
                            for remoteSource in remoteSourceList:
                                if remoteSource.id == localSource.id:
                                    if remoteSource.getTarget() == localSource.getTarget():
                                        matchingClaimSourced = True
                    if not matchingClaimSourced:
                        matchingClaimUnsourced = remoteClaim
        if not matchingClaimUnsourced:
            noMatchingClaim = True
        if matchingClaimSourced:
            cases[str(localClaim.id)+'present'] += 1
            continue
        if matchingClaimUnsourced:
            matchingClaimUnsourced.addSource(localSource)
            cases[str(localSource.id)+'source'] += 1
            continue
        if noMatchingClaim:
            try:
                pageToEdit.addClaim(localClaim)
                localClaim.addSource(localSource)
                cases[str(localClaim.id)+'claim'] += 1
            except:
                # NOTE(review): bare except swallows the real failure;
                # only the claim object is printed for debugging.
                print 'Error:'
                pprint(localClaim)
                continue
# Load persisted run statistics ('cases'); start fresh if the file is
# missing. Counters default to 0 via defaultdict.
try:
    casesJSON = open('cases.JSON','r')
    cases = defaultdict(int)
    savedcases = json.load(casesJSON)
    for k, v in savedcases.iteritems():  # Python 2 dict API
        cases[k] = v
    casesJSON.close()
except IOError:
    cases = defaultdict(int)
    # prevtouched tracks how many generator pages were already processed,
    # enabling resume after a crash.
    cases["prevtouched"] = 0
# Load previously harvested book data; start fresh if missing.
try:
    allbooksJSON = open('allbooks.JSON','r')
    allbooks = json.load(allbooksJSON)
    allbooksJSON.close()
except IOError:
    allbooks = defaultdict(list)
def savecases():
    """Persist the statistics counters and the harvested book data.

    Uses context managers so files are closed even if json.dump raises.
    """
    with open('cases.JSON', 'w') as casesJSON:
        json.dump(cases, casesJSON, indent=4)
    # BUG FIX: this previously wrote 'allbooks.json' (lowercase) while the
    # loader above reads 'allbooks.JSON', so on case-sensitive filesystems
    # saved book data was never reloaded on the next run.
    with open('allbooks.JSON', 'w') as allbooksJSON:
        json.dump(allbooks, allbooksJSON, indent=4)
def run(wpsitelang):
    """Process every article transcluding the book infobox on the given
    language's Wikipedia, syncing harvested metadata to Wikidata.

    Progress is checkpointed after each processed article so an
    interrupted run can resume where it left off.
    """
    touched = 0
    generator = makeGenerator(wpsitelang)
    for page in generator:
        touched += 1
        # Resume support: skip pages handled in a previous run.
        # (The always-true 'fake' debug scaffold was removed.)
        if cases['prevtouched'] >= touched:
            continue
        if page.namespace() == 0:  # main/article namespace only
            book = processPage(page, wpsitelang)
            allbooks[wpsitelang].append(book.dictify())
            incorp_xdata(book)
            checkISBN13(book)
            # BUG FIX: the source language was hard-coded to 'en', so
            # claims harvested from any other wiki were attributed to
            # English Wikipedia in the "imported from" reference.
            compareClaims(book, wpsitelang)
            cases['prevtouched'] = touched
            savecases()
if __name__ == "__main__":
    # Run the bot once for every configured language edition.
    for lang in wpsites.iterkeys():  # Python 2 dict API
        print 'running now on language: ', lang
        run(lang)
|
# Copyright 2020 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import copy
import json
from http import HTTPStatus
import logging
import math
import os
from pathlib import Path
import httpx
import pytest
import respx
from planet import OrdersClient, clients, exceptions
TEST_URL = 'http://MockNotRealURL/'
LOGGER = logging.getLogger(__name__)
@pytest.fixture
def order_descriptions(order_description):
    '''Three order descriptions differing only in their ids.'''
    descriptions = [order_description,
                    copy.deepcopy(order_description),
                    copy.deepcopy(order_description)]
    for index, description in enumerate(descriptions, start=1):
        description['id'] = 'oid' + str(index)
    return descriptions
@pytest.fixture
def oid2():
    '''A second, fixed order id for multi-order tests.'''
    # obtained from uuid.uuid1()
    return '5ece1dc0-ea81-11eb-837c-acde48001122'
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_basic(order_descriptions, session):
    '''Orders spread over paged responses are all returned, in order.'''
    list_url = TEST_URL + 'orders/v2/'
    next_page_url = list_url + 'blob/?page_marker=IAmATest'
    order1, order2, order3 = order_descriptions
    # First page: two orders plus a link to a second page.
    respx.get(list_url).return_value = httpx.Response(
        HTTPStatus.OK,
        json={"_links": {"_self": "string", "next": next_page_url},
              "orders": [order1, order2]})
    # Second page: the final order, no further link.
    respx.get(next_page_url).return_value = httpx.Response(
        HTTPStatus.OK,
        json={"_links": {"_self": next_page_url},
              "orders": [order3]})
    client = OrdersClient(session, base_url=TEST_URL)
    orders = await client.list_orders()
    assert [o.id for o in orders] == ['oid1', 'oid2', 'oid3']
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_state(order_descriptions, session):
    '''The state filter is forwarded as a query parameter.'''
    list_url = TEST_URL + 'orders/v2/?state=failed'
    order1, order2, _ = order_descriptions
    respx.get(list_url).return_value = httpx.Response(
        HTTPStatus.OK,
        json={"_links": {"_self": "string"},
              "orders": [order1, order2]})
    client = OrdersClient(session, base_url=TEST_URL)
    orders = await client.list_orders(state='failed')
    assert [o.id for o in orders] == ['oid1', 'oid2']
@pytest.mark.asyncio
async def test_list_orders_state_invalid_state(session):
    '''An unknown state value is rejected client-side, no request made.'''
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.list_orders(state='invalidstate')
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_limit(order_descriptions, session):
    '''list_orders(limit=1) must not fetch the next page.'''
    # check that the client doesn't try to get the next page when the
    # limit is already reached by providing link to next page but not
    # registering a response. if the client tries to get the next
    # page, an error will occur
    list_url = TEST_URL + 'orders/v2/'
    nono_page_url = list_url + '?page_marker=OhNoNo'
    order1, order2, order3 = order_descriptions
    page1_response = {
        "_links": {
            "_self": "string",
            "next": nono_page_url},
        "orders": [order1, order2]
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=page1_response)
    page2_response = {
        "_links": {
            "_self": "string",
        },
        "orders": [order3]
    }
    mock_resp2 = httpx.Response(HTTPStatus.OK, json=page2_response)
    # url__eq keeps the two routes distinct despite the shared prefix.
    respx.route(method="GET", url__eq=list_url).mock(return_value=mock_resp)
    nono_route = respx.route(method="GET", url__eq=nono_page_url).mock(
        return_value=mock_resp2)
    cl = OrdersClient(session, base_url=TEST_URL)
    orders = await cl.list_orders(limit=1)
    # The second page route must never have been hit.
    assert not nono_route.called
    oids = [o.id for o in orders]
    assert oids == ['oid1']
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_asjson(order_descriptions, session):
    '''as_json=True yields plain dicts instead of Order objects.'''
    list_url = TEST_URL + 'orders/v2/'
    first_order = order_descriptions[0]
    respx.get(list_url).return_value = httpx.Response(
        HTTPStatus.OK,
        json={"_links": {"_self": "string"}, "orders": [first_order]})
    client = OrdersClient(session, base_url=TEST_URL)
    orders = await client.list_orders(as_json=True)
    assert orders[0]['id'] == 'oid1'
@respx.mock
@pytest.mark.asyncio
async def test_create_order(oid, order_description, order_request, session):
    '''A successful creation returns the order description unchanged.'''
    respx.post(TEST_URL + 'orders/v2/').return_value = httpx.Response(
        HTTPStatus.OK, json=order_description)
    client = OrdersClient(session, base_url=TEST_URL)
    order = await client.create_order(order_request)
    assert order.json == order_description
@respx.mock
@pytest.mark.asyncio
async def test_create_order_bad_item_type(order_request, session):
    '''A 400 with field + general errors raises BadQuery with both
    messages combined.'''
    create_url = TEST_URL + 'orders/v2/'
    resp = {
        "field": {
            "Products": [
                {
                    "message": ("Bad item type 'invalid' for bundle type " +
                                "'analytic'")
                }
            ]
        },
        "general": [
            {
                "message": "Unable to accept order"
            }
        ]
    }
    mock_resp = httpx.Response(400, json=resp)
    respx.post(create_url).return_value = mock_resp
    order_request['products'][0]['item_type'] = 'invalid'
    cl = OrdersClient(session, base_url=TEST_URL)
    expected_msg = (
        "Unable to accept order - Bad item type 'invalid' for bundle type " +
        "'analytic'")
    with pytest.raises(exceptions.BadQuery, match=expected_msg):
        _ = await cl.create_order(order_request)
@respx.mock
@pytest.mark.asyncio
async def test_create_order_item_id_does_not_exist(
        order_request, session, match_pytest_raises):
    '''A 400 for a nonexistent item id raises BadQuery with the combined
    error message.'''
    create_url = TEST_URL + 'orders/v2/'
    resp = {
        "field": {
            "Details": [
                {
                    "message": ("Item ID 4500474_2133707_2021-05-20_2419 / " +
                                "Item Type PSScene3Band doesn't exist")
                }
            ]
        },
        "general": [
            {
                "message": "Unable to accept order"
            }
        ]
    }
    mock_resp = httpx.Response(400, json=resp)
    respx.post(create_url).return_value = mock_resp
    order_request['products'][0]['item_ids'] = \
        '4500474_2133707_2021-05-20_2419'
    cl = OrdersClient(session, base_url=TEST_URL)
    expected_msg = (
        "Unable to accept order - Item ID 4500474_2133707_2021-05-20_2419 " +
        "/ Item Type PSScene3Band doesn't exist")
    with match_pytest_raises(exceptions.BadQuery, expected_msg):
        _ = await cl.create_order(order_request)
@respx.mock
@pytest.mark.asyncio
async def test_get_order(oid, order_description, session):
    '''get_order() returns an order whose state mirrors the response.'''
    respx.get(TEST_URL + 'orders/v2/' + oid).return_value = httpx.Response(
        HTTPStatus.OK, json=order_description)
    client = OrdersClient(session, base_url=TEST_URL)
    order = await client.get_order(oid)
    assert order.state == 'queued'
@pytest.mark.asyncio
async def test_get_order_invalid_id(session):
    '''A malformed order id is rejected before any request is made.'''
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.get_order('-')
@respx.mock
@pytest.mark.asyncio
async def test_get_order_id_doesnt_exist(
        oid, session, match_pytest_raises):
    '''A 404 from the orders endpoint surfaces as MissingResource.'''
    get_url = TEST_URL + 'orders/v2/' + oid
    msg = f'Could not load order ID: {oid}.'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(404, json=resp)
    respx.get(get_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.MissingResource, msg):
        _ = await cl.get_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order(oid, order_description, session):
    '''Cancelling an order issues a PUT and completes without error.'''
    cancel_url = TEST_URL + 'orders/v2/' + oid
    order_description['state'] = 'cancelled'
    mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
    respx.put(cancel_url).return_value = mock_resp
    # TODO: the api says cancel order returns the order details but as
    # far as I can test thus far, it returns nothing. follow up on this
    cl = OrdersClient(session, base_url=TEST_URL)
    await cl.cancel_order(oid)
@pytest.mark.asyncio
async def test_cancel_order_invalid_id(session):
    '''A malformed order id is rejected before any request is made.'''
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.cancel_order('invalid_order_id')
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order_id_doesnt_exist(
        oid, session, match_pytest_raises):
    '''A 404 on cancel surfaces as MissingResource.'''
    cancel_url = TEST_URL + 'orders/v2/' + oid
    msg = f'No such order ID: {oid}.'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(404, json=resp)
    respx.put(cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.MissingResource, msg):
        _ = await cl.cancel_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order_id_cannot_be_cancelled(
        oid, session, match_pytest_raises):
    '''A 409 (not in a cancellable state) surfaces as Conflict.'''
    cancel_url = TEST_URL + 'orders/v2/' + oid
    msg = 'Order not in a cancellable state'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(409, json=resp)
    respx.put(cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.Conflict, msg):
        _ = await cl.cancel_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_orders_by_ids(session, oid, oid2):
    '''Bulk cancel posts the given ids and returns the raw result.'''
    bulk_cancel_url = TEST_URL + 'bulk/orders/v2/cancel'
    test_ids = [oid, oid2]
    # One order cancels, the other fails — the result is passed through.
    example_result = {
        "result": {
            "succeeded": {"count": 1},
            "failed": {
                "count": 1,
                "failures": [
                    {
                        "order_id": oid2,
                        "message": "Order not in a cancellable state",
                    }
                ]
            }
        }
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=example_result)
    respx.post(bulk_cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    res = await cl.cancel_orders(test_ids)
    assert res == example_result
    # The request body must carry exactly the requested ids.
    expected_body = {
        "order_ids": test_ids
    }
    actual_body = json.loads(respx.calls.last.request.content)
    assert actual_body == expected_body
@pytest.mark.asyncio
async def test_cancel_orders_by_ids_invalid_id(session, oid):
    '''One malformed id in the batch fails the whole call client-side.'''
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.cancel_orders([oid, "invalid_oid"])
@respx.mock
@pytest.mark.asyncio
async def test_cancel_orders_all(session):
    '''cancel_orders() with no ids sends an empty request body.'''
    bulk_cancel_url = TEST_URL + 'bulk/orders/v2/cancel'
    example_result = {
        "result": {
            "succeeded": {"count": 2},
            "failed": {
                "count": 0,
                "failures": []
            }
        }
    }
    respx.post(bulk_cancel_url).return_value = httpx.Response(
        HTTPStatus.OK, json=example_result)
    client = OrdersClient(session, base_url=TEST_URL)
    result = await client.cancel_orders()
    assert result == example_result
    sent_body = json.loads(respx.calls.last.request.content)
    assert sent_body == {}
@respx.mock
@pytest.mark.asyncio
async def test_poll(oid, order_description, session):
    '''poll() keeps fetching until a final (or requested) state.'''
    get_url = TEST_URL + 'orders/v2/' + oid
    order_description2 = copy.deepcopy(order_description)
    order_description2['state'] = 'running'
    order_description3 = copy.deepcopy(order_description)
    order_description3['state'] = 'success'
    cl = OrdersClient(session, base_url=TEST_URL)
    # Successive GETs walk queued -> running -> success.
    route = respx.get(get_url)
    route.side_effect = [
        httpx.Response(HTTPStatus.OK, json=order_description),
        httpx.Response(HTTPStatus.OK, json=order_description2),
        httpx.Response(HTTPStatus.OK, json=order_description3)
    ]
    state = await cl.poll(oid, wait=0)
    assert state == 'success'
    # Re-register the route so the side-effect sequence starts over,
    # then poll only until the 'running' state is reached.
    route = respx.get(get_url)
    route.side_effect = [
        httpx.Response(HTTPStatus.OK, json=order_description),
        httpx.Response(HTTPStatus.OK, json=order_description2),
        httpx.Response(HTTPStatus.OK, json=order_description3)
    ]
    state = await cl.poll(oid, state='running', wait=0)
    assert state == 'running'
@pytest.mark.asyncio
async def test_poll_invalid_oid(session):
    '''A malformed order id is rejected before polling starts.'''
    cl = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        _ = await cl.poll("invalid_oid", wait=0)
@pytest.mark.asyncio
async def test_poll_invalid_state(oid, session):
    '''An unknown target state is rejected before polling starts.'''
    cl = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        _ = await cl.poll(oid, state="invalid_state", wait=0)
@respx.mock
@pytest.mark.asyncio
async def test_aggegated_order_stats(session):
    '''The stats endpoint's JSON is returned untouched.'''
    # NOTE(review): "aggegated" in the test name is a typo for
    # "aggregated"; renaming would change the collected test id, so it is
    # only flagged here.
    stats_url = TEST_URL + 'stats/orders/v2/'
    LOGGER.debug(f'url: {stats_url}')
    example_stats = {
        "organization": {
            "queued_orders": 0,
            "running_orders": 6
        },
        "user": {
            "queued_orders": 0,
            "running_orders": 0
        }
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=example_stats)
    respx.get(stats_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    res = await cl.aggregated_order_stats()
    assert res == example_stats
@respx.mock
@pytest.mark.asyncio
async def test_download_asset_md(tmpdir, session):
    '''A JSON asset is saved under its Content-Disposition filename.'''
    dl_url = TEST_URL + 'download/?token=IAmAToken'
    headers = {
        'Content-Type': 'application/json',
        'Content-Disposition': 'attachment; filename="metadata.json"'
    }
    respx.get(dl_url).return_value = httpx.Response(
        HTTPStatus.OK, json={'key': 'value'}, headers=headers)
    client = OrdersClient(session, base_url=TEST_URL)
    filename = await client.download_asset(dl_url, directory=str(tmpdir))
    assert json.loads(open(filename).read()) == {'key': 'value'}
    assert Path(filename).name == 'metadata.json'
@respx.mock
@pytest.mark.asyncio
async def test_download_asset_img(tmpdir, open_test_img, session):
    '''A binary asset streamed in chunks lands on disk under its
    Content-Disposition filename.'''
    dl_url = TEST_URL + 'download/?token=IAmAToken'
    img_headers = {
        'Content-Type': 'image/tiff',
        'Content-Length': '527',
        'Content-Disposition': 'attachment; filename="img.tif"'
    }
    async def _stream_img():
        # Serve the test image in 100-byte chunks; memoryview avoids
        # copying each slice.
        data = open_test_img.read()
        v = memoryview(data)
        chunksize = 100
        for i in range(math.ceil(len(v)/(chunksize))):
            yield v[i*chunksize:min((i+1)*chunksize, len(v))]
    # populate request parameter to avoid respx cloning, which throws
    # an error caused by respx and not this code
    # https://github.com/lundberg/respx/issues/130
    mock_resp = httpx.Response(HTTPStatus.OK, stream=_stream_img(),
                               headers=img_headers,
                               request='donotcloneme')
    respx.get(dl_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    filename = await cl.download_asset(dl_url, directory=str(tmpdir))
    assert Path(filename).name == 'img.tif'
    assert os.path.isfile(filename)
@respx.mock
@pytest.mark.asyncio
async def test_download_order(tmpdir, order_description, oid, session):
    '''All result files listed in an order's _links are downloaded.'''
    dl_url1 = TEST_URL + 'download/1?token=IAmAToken'
    dl_url2 = TEST_URL + 'download/2?token=IAmAnotherToken'
    # The order advertises two downloadable results.
    order_description['_links']['results'] = [
        {'location': dl_url1},
        {'location': dl_url2}
    ]
    get_url = TEST_URL + 'orders/v2/' + oid
    mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
    respx.get(get_url).return_value = mock_resp
    mock_resp1 = httpx.Response(
        HTTPStatus.OK,
        json={'key': 'value'},
        headers={
            'Content-Type': 'application/json',
            'Content-Disposition': 'attachment; filename="m1.json"'
        })
    respx.get(dl_url1).return_value = mock_resp1
    mock_resp2 = httpx.Response(
        HTTPStatus.OK,
        json={'key2': 'value2'},
        headers={
            'Content-Type': 'application/json',
            'Content-Disposition': 'attachment; filename="m2.json"'
        })
    respx.get(dl_url2).return_value = mock_resp2
    cl = OrdersClient(session, base_url=TEST_URL)
    filenames = await cl.download_order(oid, directory=str(tmpdir))
    assert len(filenames) == 2
    assert json.loads(open(filenames[0]).read()) == {'key': 'value'}
    assert Path(filenames[0]).name == 'm1.json'
    assert json.loads(open(filenames[1]).read()) == {'key2': 'value2'}
    assert Path(filenames[1]).name == 'm2.json'
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_true_preexisting_data(
        tmpdir, order_description, oid, session):
    '''
    Test if download_order() overwrites pre-existing data with
    overwrite flag set to True.
    '''
    # Create a pre-existing JSON file in tmpdir.
    original_content = {'key': 'original_file'}
    with open(Path(tmpdir, 'file.json'), 'a') as f:
        f.write(json.dumps(original_content))
    downloaded_content = {'key': 'downloaded_file'}
    cl = await get_orders_client(downloaded_content, order_description, oid,
                                 session)
    # Download order and overwrite data
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=True)
    # BUG FIX: the assertion had been mangled into '!= ...' with the
    # expected value stranded on the next line as a no-op expression,
    # making it vacuously true. With overwrite=True the file must hold
    # the downloaded content.
    assert json.loads(open(Path(tmpdir, 'file.json')).read()) == \
        downloaded_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_false_preexisting_data(
        tmpdir, order_description, oid, session):
    '''
    Test if download_order() does not overwrite pre-existing
    data with overwrite flag set to False.
    '''
    # Create a pre-existing JSON file in tmpdir.
    original_content = {'key': 'original_file'}
    with open(Path(tmpdir, 'file.json'), 'a') as f:
        f.write(json.dumps(original_content))
    downloaded_content = {'key': 'downloaded_file'}
    cl = await get_orders_client(downloaded_content, order_description, oid,
                                 session)
    # Download order without overwriting
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=False)
    # BUG FIX: the assertion had been mangled into '!= ...' with the
    # expected value stranded on the next line, making it vacuously true.
    # With overwrite=False the pre-existing content must survive.
    assert json.loads(open(Path(tmpdir, 'file.json')).read()) == \
        original_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_true_nonexisting_data(
        tmpdir, order_description, oid, session):
    '''
    Test if download_order() downloads data with overwrite flag
    set to true without pre-existing data.
    '''
    downloaded_content = {'key': 'downloaded_file'}
    cl = await get_orders_client(downloaded_content, order_description, oid,
                                 session)
    # Download order into an empty directory
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=True)
    # BUG FIX: the assertion had been mangled into '!= ...' with the
    # expected value stranded on the next line, making it vacuously true.
    assert json.loads(open(Path(tmpdir, 'file.json')).read()) == \
        downloaded_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_false_nonexisting_data(
        tmpdir, order_description, oid, session):
    '''
    Test if download_order() downloads data with overwrite flag
    set to false without pre-existing data.
    '''
    downloaded_content = {'key': 'downloaded_file'}
    cl = await get_orders_client(downloaded_content, order_description, oid,
                                 session)
    # Download order into an empty directory
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=False)
    # BUG FIX: the assertion had been mangled into '!= ...' with the
    # expected value stranded on the next line, making it vacuously true.
    # With no pre-existing file, the downloaded content must be written
    # regardless of the overwrite flag.
    assert json.loads(open(Path(tmpdir, 'file.json')).read()) == \
        downloaded_content
async def get_orders_client(
        downloaded_content, order_description, oid, session):
    '''
    Register mock HTTP responses for an order with a single JSON result
    named "file.json", and return an Orders client.
    '''
    download_url = TEST_URL + 'download/1?token=IAmAToken'
    order_description['_links']['results'] = [{'location': download_url}]
    # The order description itself.
    respx.get(TEST_URL + 'orders/v2/' + oid).return_value = httpx.Response(
        HTTPStatus.OK, json=order_description)
    # The single downloadable result.
    respx.get(download_url).return_value = httpx.Response(
        HTTPStatus.OK,
        json=downloaded_content,
        headers={
            'Content-Type': 'application/json',
            'Content-Disposition': 'attachment; filename="file.json"'
        })
    return OrdersClient(session, base_url=TEST_URL)
# Moved a function and some variables to pytest fixtures. Removed the Orders client from the pytest fixture and moved it back into the tests. Added documentation to the code.
# Copyright 2020 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import copy
import json
from http import HTTPStatus
import logging
import math
import os
from pathlib import Path
import httpx
import pytest
import respx
from planet import OrdersClient, clients, exceptions
TEST_URL = 'http://MockNotRealURL/'
LOGGER = logging.getLogger(__name__)
@pytest.fixture
def order_descriptions(order_description):
    '''Three order descriptions differing only in their ids.'''
    order1 = order_description
    order1['id'] = 'oid1'
    order2 = copy.deepcopy(order_description)
    order2['id'] = 'oid2'
    order3 = copy.deepcopy(order_description)
    order3['id'] = 'oid3'
    return [order1, order2, order3]
@pytest.fixture
def oid2():
    '''A second, fixed order id for multi-order tests.'''
    # obtained from uuid.uuid1()
    return '5ece1dc0-ea81-11eb-837c-acde48001122'
@pytest.fixture
def downloaded_content():
    '''Content served by the mocked download endpoint.'''
    return {'key': 'downloaded_file'}
@pytest.fixture
def original_content():
    '''Content of a pre-existing local file.'''
    return {'key': 'original_file'}
@pytest.fixture
def create_download_mock(downloaded_content, order_description, oid):
    '''
    Mock an HTTP response for download.
    '''
    # Returned as a callable so the test controls when the respx routes
    # are registered (inside its own @respx.mock scope).
    def f():
        # Create mock HTTP response
        dl_url = TEST_URL + 'download/1?token=IAmAToken'
        # The order advertises a single downloadable result.
        order_description['_links']['results'] = [
            {'location': dl_url},
        ]
        get_url = TEST_URL + 'orders/v2/' + oid
        mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
        respx.get(get_url).return_value = mock_resp
        mock_resp = httpx.Response(
            HTTPStatus.OK,
            json=downloaded_content,
            headers={
                'Content-Type': 'application/json',
                'Content-Disposition': 'attachment; filename="file.json"'
            })
        respx.get(dl_url).return_value = mock_resp
    return f
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_basic(order_descriptions, session):
    '''Orders spread over paged responses are all returned, in order.'''
    list_url = TEST_URL + 'orders/v2/'
    next_page_url = list_url + 'blob/?page_marker=IAmATest'
    order1, order2, order3 = order_descriptions
    # First page: two orders plus a link to a second page.
    page1_response = {
        "_links": {
            "_self": "string",
            "next": next_page_url},
        "orders": [order1, order2]
    }
    mock_resp1 = httpx.Response(HTTPStatus.OK, json=page1_response)
    respx.get(list_url).return_value = mock_resp1
    # Second page: the final order, no further link.
    page2_response = {
        "_links": {
            "_self": next_page_url},
        "orders": [order3]
    }
    mock_resp2 = httpx.Response(HTTPStatus.OK, json=page2_response)
    respx.get(next_page_url).return_value = mock_resp2
    cl = OrdersClient(session, base_url=TEST_URL)
    orders = await cl.list_orders()
    oids = list(o.id for o in orders)
    assert oids == ['oid1', 'oid2', 'oid3']
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_state(order_descriptions, session):
    '''The state filter is forwarded as a query parameter.'''
    list_url = TEST_URL + 'orders/v2/?state=failed'
    order1, order2, _ = order_descriptions
    page1_response = {
        "_links": {
            "_self": "string"
        },
        "orders": [order1, order2]
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=page1_response)
    respx.get(list_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    orders = await cl.list_orders(state='failed')
    oids = list(o.id for o in orders)
    assert oids == ['oid1', 'oid2']
@pytest.mark.asyncio
async def test_list_orders_state_invalid_state(session):
    '''An unknown state value is rejected client-side.'''
    cl = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        _ = await cl.list_orders(state='invalidstate')
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_limit(order_descriptions, session):
    '''list_orders(limit=1) must not fetch the next page.'''
    # check that the client doesn't try to get the next page when the
    # limit is already reached by providing link to next page but not
    # registering a response. if the client tries to get the next
    # page, an error will occur
    list_url = TEST_URL + 'orders/v2/'
    nono_page_url = list_url + '?page_marker=OhNoNo'
    order1, order2, order3 = order_descriptions
    page1_response = {
        "_links": {
            "_self": "string",
            "next": nono_page_url},
        "orders": [order1, order2]
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=page1_response)
    page2_response = {
        "_links": {
            "_self": "string",
        },
        "orders": [order3]
    }
    mock_resp2 = httpx.Response(HTTPStatus.OK, json=page2_response)
    # url__eq keeps the two routes distinct despite the shared prefix.
    respx.route(method="GET", url__eq=list_url).mock(return_value=mock_resp)
    nono_route = respx.route(method="GET", url__eq=nono_page_url).mock(
        return_value=mock_resp2)
    cl = OrdersClient(session, base_url=TEST_URL)
    orders = await cl.list_orders(limit=1)
    # The second page route must never have been hit.
    assert not nono_route.called
    oids = [o.id for o in orders]
    assert oids == ['oid1']
@respx.mock
@pytest.mark.asyncio
async def test_list_orders_asjson(order_descriptions, session):
    '''as_json=True yields plain dicts instead of Order objects.'''
    list_url = TEST_URL + 'orders/v2/'
    order1, order2, order3 = order_descriptions
    page1_response = {
        "_links": {"_self": "string"},
        "orders": [order1]
    }
    mock_resp1 = httpx.Response(HTTPStatus.OK, json=page1_response)
    respx.get(list_url).return_value = mock_resp1
    cl = OrdersClient(session, base_url=TEST_URL)
    orders = await cl.list_orders(as_json=True)
    assert orders[0]['id'] == 'oid1'
@respx.mock
@pytest.mark.asyncio
async def test_create_order(oid, order_description, order_request, session):
create_url = TEST_URL + 'orders/v2/'
mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
respx.post(create_url).return_value = mock_resp
cl = OrdersClient(session, base_url=TEST_URL)
order = await cl.create_order(order_request)
assert order.json == order_description
@respx.mock
@pytest.mark.asyncio
async def test_create_order_bad_item_type(order_request, session):
create_url = TEST_URL + 'orders/v2/'
resp = {
"field": {
"Products": [
{
"message": ("Bad item type 'invalid' for bundle type " +
"'analytic'")
}
]
},
"general": [
{
"message": "Unable to accept order"
}
]
}
mock_resp = httpx.Response(400, json=resp)
respx.post(create_url).return_value = mock_resp
order_request['products'][0]['item_type'] = 'invalid'
cl = OrdersClient(session, base_url=TEST_URL)
expected_msg = (
"Unable to accept order - Bad item type 'invalid' for bundle type " +
"'analytic'")
with pytest.raises(exceptions.BadQuery, match=expected_msg):
_ = await cl.create_order(order_request)
@respx.mock
@pytest.mark.asyncio
async def test_create_order_item_id_does_not_exist(
        order_request, session, match_pytest_raises):
    """An unknown item id yields BadQuery carrying both API error messages."""
    create_url = TEST_URL + 'orders/v2/'
    resp = {
        "field": {
            "Details": [
                {
                    "message": ("Item ID 4500474_2133707_2021-05-20_2419 / " +
                                "Item Type PSScene3Band doesn't exist")
                }
            ]
        },
        "general": [
            {
                "message": "Unable to accept order"
            }
        ]
    }
    mock_resp = httpx.Response(400, json=resp)
    respx.post(create_url).return_value = mock_resp
    order_request['products'][0]['item_ids'] = \
        '4500474_2133707_2021-05-20_2419'
    cl = OrdersClient(session, base_url=TEST_URL)
    # Expected message is "<general> - <field>" joined by the client.
    expected_msg = (
        "Unable to accept order - Item ID 4500474_2133707_2021-05-20_2419 " +
        "/ Item Type PSScene3Band doesn't exist")
    with match_pytest_raises(exceptions.BadQuery, expected_msg):
        _ = await cl.create_order(order_request)
@respx.mock
@pytest.mark.asyncio
async def test_get_order(oid, order_description, session):
    """Fetching an order by id returns an Order in the 'queued' state."""
    url = TEST_URL + 'orders/v2/' + oid
    respx.get(url).return_value = httpx.Response(
        HTTPStatus.OK, json=order_description)
    client = OrdersClient(session, base_url=TEST_URL)
    fetched = await client.get_order(oid)
    assert fetched.state == 'queued'
@pytest.mark.asyncio
async def test_get_order_invalid_id(session):
    """An obviously malformed order id is rejected client-side, with no request made."""
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.get_order('-')
@respx.mock
@pytest.mark.asyncio
async def test_get_order_id_doesnt_exist(
        oid, session, match_pytest_raises):
    """A 404 from the orders endpoint surfaces as MissingResource with the API message."""
    get_url = TEST_URL + 'orders/v2/' + oid
    msg = f'Could not load order ID: {oid}.'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(404, json=resp)
    respx.get(get_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.MissingResource, msg):
        _ = await cl.get_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order(oid, order_description, session):
    """Cancelling a cancellable order completes without raising."""
    cancel_url = TEST_URL + 'orders/v2/' + oid
    order_description['state'] = 'cancelled'
    mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
    respx.put(cancel_url).return_value = mock_resp
    # TODO: the api says cancel order returns the order details but as
    # far as I can test thus far, it returns nothing. follow up on this
    cl = OrdersClient(session, base_url=TEST_URL)
    # No assertion on a return value for now (see TODO above).
    await cl.cancel_order(oid)
@pytest.mark.asyncio
async def test_cancel_order_invalid_id(session):
    """An obviously malformed order id is rejected client-side, with no request made."""
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.cancel_order('invalid_order_id')
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order_id_doesnt_exist(
        oid, session, match_pytest_raises):
    """Cancelling an unknown order id raises MissingResource (404)."""
    cancel_url = TEST_URL + 'orders/v2/' + oid
    msg = f'No such order ID: {oid}.'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(404, json=resp)
    respx.put(cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.MissingResource, msg):
        _ = await cl.cancel_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_order_id_cannot_be_cancelled(
        oid, session, match_pytest_raises):
    """A 409 (order not cancellable) surfaces as exceptions.Conflict."""
    cancel_url = TEST_URL + 'orders/v2/' + oid
    msg = 'Order not in a cancellable state'
    resp = {
        "message": msg
    }
    mock_resp = httpx.Response(409, json=resp)
    respx.put(cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    with match_pytest_raises(exceptions.Conflict, msg):
        _ = await cl.cancel_order(oid)
@respx.mock
@pytest.mark.asyncio
async def test_cancel_orders_by_ids(session, oid, oid2):
    """Bulk cancel POSTs the given ids and returns the API result verbatim."""
    bulk_cancel_url = TEST_URL + 'bulk/orders/v2/cancel'
    test_ids = [oid, oid2]
    example_result = {
        "result": {
            "succeeded": {"count": 1},
            "failed": {
                "count": 1,
                "failures": [
                    {
                        "order_id": oid2,
                        "message": "Order not in a cancellable state",
                    }
                ]
            }
        }
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=example_result)
    respx.post(bulk_cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    res = await cl.cancel_orders(test_ids)
    assert res == example_result
    # The request body must list exactly the ids asked for.
    expected_body = {
        "order_ids": test_ids
    }
    actual_body = json.loads(respx.calls.last.request.content)
    assert actual_body == expected_body
@pytest.mark.asyncio
async def test_cancel_orders_by_ids_invalid_id(session, oid):
    """Bulk cancel rejects the whole request when any id is malformed."""
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.cancel_orders([oid, "invalid_oid"])
@respx.mock
@pytest.mark.asyncio
async def test_cancel_orders_all(session):
    """With no ids given, bulk cancel POSTs an empty body (cancel everything)."""
    bulk_cancel_url = TEST_URL + 'bulk/orders/v2/cancel'
    example_result = {
        "result": {
            "succeeded": {"count": 2},
            "failed": {
                "count": 0,
                "failures": []
            }
        }
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=example_result)
    respx.post(bulk_cancel_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    res = await cl.cancel_orders()
    assert res == example_result
    # An empty body signals "cancel all" to the API.
    actual_body = json.loads(respx.calls.last.request.content)
    assert actual_body == {}
@respx.mock
@pytest.mark.asyncio
async def test_poll(oid, order_description, session):
    """poll() walks queued -> running -> success and honours an early-stop state."""
    get_url = TEST_URL + 'orders/v2/' + oid
    order_description2 = copy.deepcopy(order_description)
    order_description2['state'] = 'running'
    order_description3 = copy.deepcopy(order_description)
    order_description3['state'] = 'success'
    cl = OrdersClient(session, base_url=TEST_URL)
    # side_effect serves one response per GET, in order.
    route = respx.get(get_url)
    route.side_effect = [
        httpx.Response(HTTPStatus.OK, json=order_description),
        httpx.Response(HTTPStatus.OK, json=order_description2),
        httpx.Response(HTTPStatus.OK, json=order_description3)
    ]
    state = await cl.poll(oid, wait=0)
    assert state == 'success'
    # Re-register the route so the response sequence starts over for the
    # second poll, this time stopping as soon as 'running' is reached.
    route = respx.get(get_url)
    route.side_effect = [
        httpx.Response(HTTPStatus.OK, json=order_description),
        httpx.Response(HTTPStatus.OK, json=order_description2),
        httpx.Response(HTTPStatus.OK, json=order_description3)
    ]
    state = await cl.poll(oid, state='running', wait=0)
    assert state == 'running'
@pytest.mark.asyncio
async def test_poll_invalid_oid(session):
    """poll() validates the order id before making any request."""
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.poll("invalid_oid", wait=0)
@pytest.mark.asyncio
async def test_poll_invalid_state(oid, session):
    """poll() rejects a target state that is not a known order state."""
    client = OrdersClient(session, base_url=TEST_URL)
    with pytest.raises(clients.orders.OrdersClientException):
        await client.poll(oid, state="invalid_state", wait=0)
@respx.mock
@pytest.mark.asyncio
async def test_aggregated_order_stats(session):
    """aggregated_order_stats() returns the stats JSON unchanged.

    Fix: renamed from test_aggegated_order_stats to correct the typo; the
    test body and the client method under test are unchanged.
    """
    stats_url = TEST_URL + 'stats/orders/v2/'
    LOGGER.debug(f'url: {stats_url}')
    example_stats = {
        "organization": {
            "queued_orders": 0,
            "running_orders": 6
        },
        "user": {
            "queued_orders": 0,
            "running_orders": 0
        }
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=example_stats)
    respx.get(stats_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    res = await cl.aggregated_order_stats()
    assert res == example_stats
@respx.mock
@pytest.mark.asyncio
async def test_download_asset_md(tmpdir, session):
    """A JSON asset is written to disk under its Content-Disposition filename."""
    dl_url = TEST_URL + 'download/?token=IAmAToken'
    md_json = {'key': 'value'}
    md_headers = {
        'Content-Type': 'application/json',
        'Content-Disposition': 'attachment; filename="metadata.json"'
    }
    mock_resp = httpx.Response(HTTPStatus.OK, json=md_json, headers=md_headers)
    respx.get(dl_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    filename = await cl.download_asset(dl_url, directory=str(tmpdir))
    assert json.loads(open(filename).read()) == {'key': 'value'}
    assert Path(filename).name == 'metadata.json'
@respx.mock
@pytest.mark.asyncio
async def test_download_asset_img(tmpdir, open_test_img, session):
    """A chunked binary asset is streamed to disk with its original filename."""
    dl_url = TEST_URL + 'download/?token=IAmAToken'
    img_headers = {
        'Content-Type': 'image/tiff',
        'Content-Length': '527',
        'Content-Disposition': 'attachment; filename="img.tif"'
    }
    async def _stream_img():
        # Serve the test image in 100-byte chunks to exercise streaming.
        data = open_test_img.read()
        v = memoryview(data)
        chunksize = 100
        for i in range(math.ceil(len(v)/(chunksize))):
            yield v[i*chunksize:min((i+1)*chunksize, len(v))]
    # populate request parameter to avoid respx cloning, which throws
    # an error caused by respx and not this code
    # https://github.com/lundberg/respx/issues/130
    mock_resp = httpx.Response(HTTPStatus.OK, stream=_stream_img(),
                               headers=img_headers,
                               request='donotcloneme')
    respx.get(dl_url).return_value = mock_resp
    cl = OrdersClient(session, base_url=TEST_URL)
    filename = await cl.download_asset(dl_url, directory=str(tmpdir))
    assert Path(filename).name == 'img.tif'
    assert os.path.isfile(filename)
@respx.mock
@pytest.mark.asyncio
async def test_download_order_success(tmpdir, order_description, oid, session):
    '''
    Test if download_order() successfully downloads an order with two files,
    given a singular order ID. Each result location is mocked separately.
    '''
    # Mock an HTTP response for download
    dl_url1 = TEST_URL + 'download/1?token=IAmAToken'
    dl_url2 = TEST_URL + 'download/2?token=IAmAnotherToken'
    order_description['_links']['results'] = [
        {'location': dl_url1},
        {'location': dl_url2}
    ]
    get_url = TEST_URL + 'orders/v2/' + oid
    mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
    respx.get(get_url).return_value = mock_resp
    mock_resp1 = httpx.Response(
        HTTPStatus.OK,
        json={'key': 'value'},
        headers={
            'Content-Type': 'application/json',
            'Content-Disposition': 'attachment; filename="m1.json"'
        })
    respx.get(dl_url1).return_value = mock_resp1
    mock_resp2 = httpx.Response(
        HTTPStatus.OK,
        json={'key2': 'value2'},
        headers={
            'Content-Type': 'application/json',
            'Content-Disposition': 'attachment; filename="m2.json"'
        })
    respx.get(dl_url2).return_value = mock_resp2
    # Create Orders client
    cl = OrdersClient(session, base_url=TEST_URL)
    # Download order and return the filenames of the downloaded files
    filenames = await cl.download_order(oid, directory=str(tmpdir))
    # Check there are as many files as expected
    assert len(filenames) == 2
    # Check that the downloaded files have the correct filename and contents
    assert json.load(open(filenames[0])) == {'key': 'value'}
    assert Path(filenames[0]).name == 'm1.json'
    assert json.load(open(filenames[1])) == {'key2': 'value2'}
    assert Path(filenames[1]).name == 'm2.json'
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_true_preexisting_data(
        tmpdir, oid, session, create_download_mock, original_content,
        downloaded_content):
    '''
    Test if download_order() overwrites pre-existing data with
    overwrite flag set to True.
    '''
    # Seed tmpdir with a pre-existing file.json holding original_content
    with open(Path(tmpdir, 'file.json'), "a") as out_file:
        json.dump(original_content, out_file)
    # Create mock response for downloading
    create_download_mock()
    # Create Orders client
    cl = OrdersClient(session, base_url=TEST_URL)
    # Download order and overwrite data
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=True)
    # Check that the data downloaded has overwritten the original data
    assert json.load(open(Path(tmpdir, 'file.json'))) == downloaded_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_false_preexisting_data(
        tmpdir, oid, session, create_download_mock, original_content):
    '''
    Test if download_order() does not overwrite pre-existing
    data with overwrite flag set to False.
    '''
    # Seed tmpdir with a pre-existing file.json holding original_content
    with open(Path(tmpdir, 'file.json'), "a") as out_file:
        json.dump(original_content, out_file)
    # Create mock response for downloading
    create_download_mock()
    # Create Orders client
    cl = OrdersClient(session, base_url=TEST_URL)
    # Download order without overwriting
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=False)
    # Check that the original data has not been overwritten
    assert json.load(open(Path(tmpdir, 'file.json'))) == original_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_true_nonexisting_data(
        tmpdir, oid, session, create_download_mock, downloaded_content):
    '''
    Test if download_order() downloads data with overwrite flag
    set to true without pre-existing data.
    '''
    # No pre-existing file is written; only the download mock is set up
    create_download_mock()
    # Create Orders client
    cl = OrdersClient(session, base_url=TEST_URL)
    # Download order and overwrite data
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=True)
    # Check that the data downloaded has the correct contents
    assert json.load(open(Path(tmpdir, 'file.json'))) == downloaded_content
@respx.mock
@pytest.mark.asyncio
async def test_download_order_overwrite_false_nonexisting_data(
        tmpdir, oid, session, create_download_mock, downloaded_content):
    '''
    Test if download_order() downloads data with overwrite flag
    set to false without pre-existing data.
    '''
    # No pre-existing file is written; only the download mock is set up
    create_download_mock()
    # Create Orders client
    cl = OrdersClient(session, base_url=TEST_URL)
    # Download order without overwriting
    _ = await cl.download_order(oid, directory=str(tmpdir), overwrite=False)
    # Check that the data downloaded has the correct contents
    assert json.load(open(Path(tmpdir, 'file.json'))) == downloaded_content
|
import re
from collections import OrderedDict
import json
import xmltodict
from share.transform.base import BaseTransformer
from share.transform.chain.links import Context
# NOTE: Context is a thread-local singleton.
# It is assigned to ctx here just to keep a familiar module-level interface.
ctx = Context()
class ChainTransformer(BaseTransformer):
    """Transformer that turns raw XML/JSON records into a JSON-LD graph via chain parsers."""
    # Strings that are effectively empty: '', whitespace, 'none', 'empty' (case-insensitive).
    EMPTY_RE = re.compile(r'\s*(|none|empty)\s*', flags=re.I)
    # Default xmltodict namespace map; a None value collapses that prefix.
    NAMESPACES = {
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://www.openarchives.org/OAI/2.0/': None,
        'http://www.openarchives.org/OAI/2.0/oai_dc/': None,
    }
    REMOVE_EMPTY = True
    # Subclasses set a parser class here, or override get_root_parser.
    root_parser = None
    def __init__(self, *args, clean_up=True, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, the thread-local ctx is cleared after each transform.
        self.clean_up = clean_up
    @property
    def allowed_roots(self):
        """Names of the creative-work types a root parser may produce."""
        # Imported lazily; importing at module level would be circular.
        from share.models import AbstractCreativeWork
        return set(t.__name__ for t in AbstractCreativeWork.get_type_classes())
    def do_transform(self, data):
        """Parse raw data and return (jsonld, root_ref) built in the thread-local ctx."""
        # Parsed data will be loaded into ctx
        ctx.clear()  # Just in case a previous transform left state behind
        ctx._config = self.config
        unwrapped = self.unwrap_data(data)
        if self.REMOVE_EMPTY:
            # NOTE(review): remove_empty_values builds and returns a new
            # structure without mutating its argument, so discarding the
            # result here appears to make REMOVE_EMPTY a no-op — confirm.
            self.remove_empty_values(unwrapped)
        parser = self.get_root_parser(unwrapped)
        root_ref = parser(unwrapped).parse()
        jsonld = ctx.jsonld
        if self.clean_up:
            ctx.clear()
        return jsonld, root_ref
    def unwrap_data(self, data):
        """Parse raw data: XML via xmltodict when it starts with '<', else JSON."""
        if data.startswith('<'):
            return xmltodict.parse(data, process_namespaces=True, namespaces=self.kwargs.get('namespaces', self.NAMESPACES))
        else:
            return json.loads(data, object_pairs_hook=OrderedDict)
    def get_root_parser(self, unwrapped):
        """Return the parser class to apply to the unwrapped document root."""
        if self.root_parser:
            return self.root_parser
        raise NotImplementedError('ChainTransformers must implement root_parser or get_root_parser')
    def remove_empty_values(self, parsed):
        """Return a copy of parsed with EMPTY_RE-matching strings removed, recursively."""
        if isinstance(parsed, dict):
            ret = OrderedDict()
            for k, v in parsed.items():
                if isinstance(v, (dict, list)):
                    v = self.remove_empty_values(v)
                if isinstance(v, str) and self.EMPTY_RE.fullmatch(v):
                    continue
                ret[k] = v
            return ret
        # Non-dict input is treated as a list.
        ret = []
        for v in parsed:
            if isinstance(v, (dict, list)):
                v = self.remove_empty_values(v)
            if isinstance(v, str) and self.EMPTY_RE.fullmatch(v):
                continue
            ret.append(v)
        return ret
[Fix] Clean up after adding the source identifier
import re
from collections import OrderedDict
import json
import xmltodict
from share.transform.base import BaseTransformer
from share.transform.chain.links import Context
# NOTE: Context is a thread-local singleton.
# It is assigned to ctx here just to keep a familiar module-level interface.
ctx = Context()
class ChainTransformer(BaseTransformer):
    """Transformer that turns raw XML/JSON records into a JSON-LD graph via chain parsers.

    Fix: do_transform previously discarded the return value of
    remove_empty_values(), which builds and returns a new structure rather
    than mutating its argument — so REMOVE_EMPTY had no effect. The cleaned
    structure is now assigned back before parsing.
    """
    # Strings that are effectively empty: '', whitespace, 'none', 'empty' (case-insensitive).
    EMPTY_RE = re.compile(r'\s*(|none|empty)\s*', flags=re.I)
    # Default xmltodict namespace map; a None value collapses that prefix.
    NAMESPACES = {
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://www.openarchives.org/OAI/2.0/': None,
        'http://www.openarchives.org/OAI/2.0/oai_dc/': None,
    }
    REMOVE_EMPTY = True
    # Subclasses set a parser class here, or override get_root_parser.
    root_parser = None
    def __init__(self, *args, clean_up=True, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, the thread-local ctx is cleared after each transform.
        self.clean_up = clean_up
    @property
    def allowed_roots(self):
        """Names of the creative-work types a root parser may produce."""
        # Imported lazily; importing at module level would be circular.
        from share.models import AbstractCreativeWork
        return set(t.__name__ for t in AbstractCreativeWork.get_type_classes())
    def do_transform(self, data):
        """Parse raw data and return (jsonld, root_ref) built in the thread-local ctx."""
        # Parsed data will be loaded into ctx
        ctx.clear()  # Just in case a previous transform left state behind
        ctx._config = self.config
        unwrapped = self.unwrap_data(data)
        if self.REMOVE_EMPTY:
            # remove_empty_values returns a new structure; use it.
            unwrapped = self.remove_empty_values(unwrapped)
        parser = self.get_root_parser(unwrapped)
        root_ref = parser(unwrapped).parse()
        jsonld = ctx.jsonld
        return jsonld, root_ref
    def transform(self, datum):
        """Run the base transform, then clear the thread-local ctx if requested."""
        ret = super().transform(datum)
        if self.clean_up:
            ctx.clear()
        return ret
    def unwrap_data(self, data):
        """Parse raw data: XML via xmltodict when it starts with '<', else JSON."""
        if data.startswith('<'):
            return xmltodict.parse(data, process_namespaces=True, namespaces=self.kwargs.get('namespaces', self.NAMESPACES))
        else:
            return json.loads(data, object_pairs_hook=OrderedDict)
    def get_root_parser(self, unwrapped):
        """Return the parser class to apply to the unwrapped document root."""
        if self.root_parser:
            return self.root_parser
        raise NotImplementedError('ChainTransformers must implement root_parser or get_root_parser')
    def remove_empty_values(self, parsed):
        """Return a copy of parsed with EMPTY_RE-matching strings removed, recursively."""
        if isinstance(parsed, dict):
            ret = OrderedDict()
            for k, v in parsed.items():
                if isinstance(v, (dict, list)):
                    v = self.remove_empty_values(v)
                if isinstance(v, str) and self.EMPTY_RE.fullmatch(v):
                    continue
                ret[k] = v
            return ret
        # Non-dict input is treated as a list.
        ret = []
        for v in parsed:
            if isinstance(v, (dict, list)):
                v = self.remove_empty_values(v)
            if isinstance(v, str) and self.EMPTY_RE.fullmatch(v):
                continue
            ret.append(v)
        return ret
|
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
import random
import sys
import time
import uuid
import requests
# SECURITY NOTE(review): hard-coded JWT bearer tokens committed to source.
# These grant API access until expiry; they should be loaded from the
# environment or a secrets store, and the leaked values revoked.
_APITOKEN = '''
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6IjI2Y2QzNmU3LTBiNWEtNGUwMC1h
NTc5LWE5ZWYxYzMxOGJiNiIsInVzZXJfaWQiOiJhZmQ0MTdhNi02YjUwLTQ3ODEtYWNjMC00M
mJlNjhiYmEyZGYiLCJ1c2VybmFtZSI6Im1hbmNoZXN0ZXJfdW5pX2FwaSIsImV4cCI6MTYxMT
QxMzUwNH0.WtfTiuBhWWxxQCgqzk5v8uoY3bbWKYoAfKlobDw9gvs
'''
_EUTOKEN = '''
eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpZCI6ImE0azBtMDAwMDAwOFJFYUFBTSIs
ImVtYWlsIjoibmVpbC5zd2FpbnN0b25AbWFuY2hlc3Rlci5hYy51ayIsImFjY291bnQiOiIwM
DEzMTAwMDAxY1NDRVRBQTQiLCJhY2NvdW50X2FkbWluIjp0cnVlLCJyZWFkIjp0cnVlLCJ3cm
l0ZSI6dHJ1ZSwiZXhwIjoxNjAyNzczNzM0fQ.Ix1BMjpfufnqXMd8VXotm4Pimq10IBk9sZgA
N-29bBo
'''
# Staging host; all endpoint paths are appended to this base URL.
_HOST = 'https://twist-api.twistbioscience-staging.com/'
class TwistClient():
    '''Client for the Twist Bioscience ordering API.
    Wraps a requests.Session whose headers carry the module-level JWT API
    token and end-user token, and exposes the account, pricing, quoting
    and ordering endpoints.
    '''
    def __init__(self, email, password, username='manchester_uni_api'):
        # email is substituted into per-user endpoint URLs; password and
        # username are only read by __get_token (currently unused).
        self.__password = password
        self.__email = email
        self.__username = username
        self.__session = requests.Session()
        # The tokens are wrapped triple-quoted strings, so join(split())
        # strips all embedded whitespace before use.
        self.__session.headers.update(
            {'Authorization': 'JWT ' + ''.join(_APITOKEN.split()),
             'X-End-User-Token': ''.join(_EUTOKEN.split()),
             'Accept-Encoding': 'json'})
    def get_accounts(self):
        '''Return the accounts visible to the current user.'''
        return self.__get(self.__get_email_url('v1/accounts/'))
    def get_prices(self):
        '''Return the price list.'''
        return self.__get('v1/prices/')
    def get_user_data(self):
        '''Return the user record for the configured email.'''
        return self.__get(self.__get_email_url('v1/users/{}/'))
    def get_addresses(self):
        '''Return the user's shipping addresses.'''
        return self.__get(self.__get_email_url('v1/users/{}/addresses/'))
    def get_payments(self):
        '''Return the user's registered payment methods.'''
        return self.__get(self.__get_email_url('v1/users/{}/payments/'))
    def get_vectors(self):
        '''Return the user's vectors, or [] if none are present.'''
        return self.get_user_data().get('vectors', [])
    def submit_constructs(self, sequences, names, typ='NON_CLONED_GENE'):
        '''Submit constructs; the API answers 201 on success.'''
        constructs = _get_constructs(sequences, names, typ)
        return self.__post(self.__get_email_url('v1/users/{}/constructs/'),
                           constructs, target=201)
    def get_scores(self, ids, max_errors=8):
        '''Poll once a second until every submitted construct id is scored.
        Transient TwistErrors are tolerated up to max_errors times before
        being re-raised.
        '''
        resp = None
        errors = 0
        while True:
            url = self.__get_email_url('v1/users/{}/constructs/describe/')
            try:
                resp = self.__get(url, {'scored': 'True',
                                        'id__in': ','.join(ids)})
                # Finished only when the response covers every requested id.
                if {datum['id'] for datum in resp} == set(ids):
                    break
            except TwistError as exc:
                errors += 1
                if errors == max_errors:
                    raise exc
            time.sleep(1)
        return resp
    def get_quote(self, construct_ids, external_id, address_id,
                  first_name, last_name,
                  typ='96_WELL_PLATE', fill_method='VERTICAL',
                  shipment_method='MULTIPLE_SHIPMENTS',
                  vectors=None, cloning_strategies=None):
        '''Request a quote for the given constructs and return the quote id.'''
        json = {'external_id': external_id,
                'containers': [{'constructs': [
                    {'index': index, 'id': id_}
                    for index, id_ in enumerate(construct_ids)],
                    'type': typ,
                    'fill_method': fill_method}],
                'shipment': {'recipient_address_id': address_id,
                             'first_name': first_name,
                             'last_name': last_name,
                             'preferences': {
                                 'shipment_method': shipment_method}},
                'vectors': vectors or [],
                'cloning_strategies': cloning_strategies or [],
                'advanced_options': {}}
        url = self.__get_email_url('v1/users/{}/quotes/')
        resp = self.__post(url, json=json, target=201)
        return resp['id']
    def check_quote(self, quote_id):
        '''Poll a quote (every 100s) until it leaves PENDING.
        Returns the quote on SUCCESS; raises ValueError with the final
        status otherwise.
        '''
        resp = None
        while True:
            # The template is formatted with the email first; the quote id
            # then fills the remaining %s placeholder.
            url = self.__get_email_url('v1/users/{}/quotes/%s/') % quote_id
            resp = self.__get(url)
            if resp['status_info']['status'] != 'PENDING':
                break
            time.sleep(100)
        if resp['status_info']['status'] == 'SUCCESS':
            return resp
        raise ValueError(resp['status_info']['status'])
    def submit_order(self, quote_id, payment_id):
        '''Turn an approved quote into an order.'''
        return self.__post(self.__get_email_url('v1/users/{}/orders/'),
                           json={'quote_id': quote_id,
                                 'payment_method_id': payment_id})
    def __get_token(self):
        '''Exchange username/password for a JWT. Currently unused.'''
        json = self.__post('/api-token-auth/',
                           {'username': self.__username,
                            'password': self.__password})
        return json['token']
    def __get_email_url(self, url):
        '''Format the user's email into a URL template.'''
        return url.format(self.__email)
    def __get(self, url, params=None):
        '''GET _HOST + url; raise TwistError unless the status is 200.'''
        if not params:
            params = {}
        resp = self.__session.get(_HOST + url, params=params)
        return check_response(resp, 200)
    def __post(self, url, json, target=200):
        '''POST _HOST + url; raise TwistError unless the status matches target.'''
        resp = self.__session.post(_HOST + url, json=json)
        return check_response(resp, target)
class TwistError(Exception):
    '''Raised when the Twist API answers with an unexpected status code.'''
    def __init__(self, message, status_code):
        super().__init__('{}: {}'.format(message, status_code))
def check_response(resp, target):
    '''Return the JSON body of resp, raising TwistError unless its status matches target.'''
    if resp.status_code != target:
        raise TwistError(resp.content, resp.status_code)
    return resp.json()
def _get_constructs(sequences, names, typ='NON_CLONED_GENE'):
'''Get constructs.'''
constructs = []
for idx, (seq, name) in enumerate(zip(sequences, names)):
construct = {'sequences': seq,
'name': name,
'type': typ,
'insertion_point_mes_uid': 'na',
'vector_mes_uid': 'na',
'column': idx / 8,
'row': idx % 8,
'plate': idx / 96}
constructs.append(construct)
return constructs
def main(args):
    '''Smoke-test the Twist client and place a dummy order.
    args: [email, password, first_name, last_name]
    '''
    client = TwistClient(args[0], args[1])
    addresses = client.get_addresses()
    payments = client.get_payments()
    print('Accounts\t' + str(client.get_accounts()))
    print('Prices\t' + str(client.get_prices()))
    print('User data\t' + str(client.get_user_data()))
    print('Addresses\t' + str(addresses))
    print('Payments\t' + str(payments))
    print('Vectors\t' + str(client.get_vectors()))
    # Produce dummy order: five random sequences of 300-1800 bases.
    sequences = []
    names = []
    for i in range(0, 5):
        sequences.append(''.join(
            [random.choice('ACTG')
             for _ in range(0, random.randint(300, 1800))]))
        names.append('seq{}'.format(i + 1))
    resp = client.submit_constructs(sequences, names)
    ids = [i['id'] for i in resp]
    print('Scores\t' + str(client.get_scores(ids)))
    quote_id = client.get_quote(ids,
                                external_id=str(uuid.uuid4()),
                                address_id=addresses[0]['id'],
                                first_name=args[2],
                                last_name=args[3])
    print('Quote\t' + str(client.check_quote(quote_id)))
    # Only submit the order if a payment method is registered.
    if payments:
        print('Submission\t' +
              str(client.submit_order(quote_id, payments[0]['id'])))
if __name__ == '__main__':
    main(sys.argv[1:])
Minor update.
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
import random
import sys
import time
import uuid
import requests
# Tokens are redacted; '*' is a placeholder — real JWTs must be injected
# (e.g. from the environment) before the client can authenticate.
_APITOKEN = '''
*
'''
_EUTOKEN = '''
*
'''
# Staging host; all endpoint paths are appended to this base URL.
_HOST = 'https://twist-api.twistbioscience-staging.com/'
class TwistClient():
    '''Client for the Twist Bioscience ordering API.
    Wraps a requests.Session whose headers carry the module-level JWT API
    token and end-user token, and exposes the account, pricing, quoting
    and ordering endpoints.
    '''
    def __init__(self, email, password, username='manchester_uni_api'):
        # email is substituted into per-user endpoint URLs; password and
        # username are only read by __get_token (currently unused).
        self.__password = password
        self.__email = email
        self.__username = username
        self.__session = requests.Session()
        # The tokens are wrapped triple-quoted strings, so join(split())
        # strips all embedded whitespace before use.
        self.__session.headers.update(
            {'Authorization': 'JWT ' + ''.join(_APITOKEN.split()),
             'X-End-User-Token': ''.join(_EUTOKEN.split()),
             'Accept-Encoding': 'json'})
    def get_accounts(self):
        '''Return the accounts visible to the current user.'''
        return self.__get(self.__get_email_url('v1/accounts/'))
    def get_prices(self):
        '''Return the price list.'''
        return self.__get('v1/prices/')
    def get_user_data(self):
        '''Return the user record for the configured email.'''
        return self.__get(self.__get_email_url('v1/users/{}/'))
    def get_addresses(self):
        '''Return the user's shipping addresses.'''
        return self.__get(self.__get_email_url('v1/users/{}/addresses/'))
    def get_payments(self):
        '''Return the user's registered payment methods.'''
        return self.__get(self.__get_email_url('v1/users/{}/payments/'))
    def get_vectors(self):
        '''Return the user's vectors, or [] if none are present.'''
        return self.get_user_data().get('vectors', [])
    def submit_constructs(self, sequences, names, typ='NON_CLONED_GENE'):
        '''Submit constructs; the API answers 201 on success.'''
        constructs = _get_constructs(sequences, names, typ)
        return self.__post(self.__get_email_url('v1/users/{}/constructs/'),
                           constructs, target=201)
    def get_scores(self, ids, max_errors=8):
        '''Poll once a second until every submitted construct id is scored.
        Transient TwistErrors are tolerated up to max_errors times before
        being re-raised.
        '''
        resp = None
        errors = 0
        while True:
            url = self.__get_email_url('v1/users/{}/constructs/describe/')
            try:
                resp = self.__get(url, {'scored': 'True',
                                        'id__in': ','.join(ids)})
                # Finished only when the response covers every requested id.
                if {datum['id'] for datum in resp} == set(ids):
                    break
            except TwistError as exc:
                errors += 1
                if errors == max_errors:
                    raise exc
            time.sleep(1)
        return resp
    def get_quote(self, construct_ids, external_id, address_id,
                  first_name, last_name,
                  typ='96_WELL_PLATE', fill_method='VERTICAL',
                  shipment_method='MULTIPLE_SHIPMENTS',
                  vectors=None, cloning_strategies=None):
        '''Request a quote for the given constructs and return the quote id.'''
        json = {'external_id': external_id,
                'containers': [{'constructs': [
                    {'index': index, 'id': id_}
                    for index, id_ in enumerate(construct_ids)],
                    'type': typ,
                    'fill_method': fill_method}],
                'shipment': {'recipient_address_id': address_id,
                             'first_name': first_name,
                             'last_name': last_name,
                             'preferences': {
                                 'shipment_method': shipment_method}},
                'vectors': vectors or [],
                'cloning_strategies': cloning_strategies or [],
                'advanced_options': {}}
        url = self.__get_email_url('v1/users/{}/quotes/')
        resp = self.__post(url, json=json, target=201)
        return resp['id']
    def check_quote(self, quote_id):
        '''Poll a quote (every 100s) until it leaves PENDING.
        Returns the quote on SUCCESS; raises ValueError with the final
        status otherwise.
        '''
        resp = None
        while True:
            # The template is formatted with the email first; the quote id
            # then fills the remaining %s placeholder.
            url = self.__get_email_url('v1/users/{}/quotes/%s/') % quote_id
            resp = self.__get(url)
            if resp['status_info']['status'] != 'PENDING':
                break
            time.sleep(100)
        if resp['status_info']['status'] == 'SUCCESS':
            return resp
        raise ValueError(resp['status_info']['status'])
    def submit_order(self, quote_id, payment_id):
        '''Turn an approved quote into an order.'''
        return self.__post(self.__get_email_url('v1/users/{}/orders/'),
                           json={'quote_id': quote_id,
                                 'payment_method_id': payment_id})
    def __get_token(self):
        '''Exchange username/password for a JWT. Currently unused.'''
        json = self.__post('/api-token-auth/',
                           {'username': self.__username,
                            'password': self.__password})
        return json['token']
    def __get_email_url(self, url):
        '''Format the user's email into a URL template.'''
        return url.format(self.__email)
    def __get(self, url, params=None):
        '''GET _HOST + url; raise TwistError unless the status is 200.'''
        if not params:
            params = {}
        resp = self.__session.get(_HOST + url, params=params)
        return check_response(resp, 200)
    def __post(self, url, json, target=200):
        '''POST _HOST + url; raise TwistError unless the status matches target.'''
        resp = self.__session.post(_HOST + url, json=json)
        return check_response(resp, target)
class TwistError(Exception):
    '''Raised when the Twist API answers with an unexpected status code.'''
    def __init__(self, message, status_code):
        super().__init__('{}: {}'.format(message, status_code))
def check_response(resp, target):
    '''Return the JSON body of resp, raising TwistError unless its status matches target.'''
    if resp.status_code != target:
        raise TwistError(resp.content, resp.status_code)
    return resp.json()
def _get_constructs(sequences, names, typ='NON_CLONED_GENE'):
'''Get constructs.'''
constructs = []
for idx, (seq, name) in enumerate(zip(sequences, names)):
construct = {'sequences': seq,
'name': name,
'type': typ,
'insertion_point_mes_uid': 'na',
'vector_mes_uid': 'na',
'column': idx / 8,
'row': idx % 8,
'plate': idx / 96}
constructs.append(construct)
return constructs
def main(args):
    '''Smoke-test the Twist client and place a dummy order.
    args: [email, password, first_name, last_name]
    '''
    client = TwistClient(args[0], args[1])
    addresses = client.get_addresses()
    payments = client.get_payments()
    print('Accounts\t' + str(client.get_accounts()))
    print('Prices\t' + str(client.get_prices()))
    print('User data\t' + str(client.get_user_data()))
    print('Addresses\t' + str(addresses))
    print('Payments\t' + str(payments))
    print('Vectors\t' + str(client.get_vectors()))
    # Produce dummy order: five random sequences of 300-1800 bases.
    sequences = []
    names = []
    for i in range(0, 5):
        sequences.append(''.join(
            [random.choice('ACTG')
             for _ in range(0, random.randint(300, 1800))]))
        names.append('seq{}'.format(i + 1))
    resp = client.submit_constructs(sequences, names)
    ids = [i['id'] for i in resp]
    print('Scores\t' + str(client.get_scores(ids)))
    quote_id = client.get_quote(ids,
                                external_id=str(uuid.uuid4()),
                                address_id=addresses[0]['id'],
                                first_name=args[2],
                                last_name=args[3])
    print('Quote\t' + str(client.check_quote(quote_id)))
    # Only submit the order if a payment method is registered.
    if payments:
        print('Submission\t' +
              str(client.submit_order(quote_id, payments[0]['id'])))
if __name__ == '__main__':
    main(sys.argv[1:])
|
import os
import re
import sys
import json
import math
import crayons
import requests
import collections
import better_exceptions
from dateutil import parser
from bs4 import BeautifulSoup
from operator import itemgetter
from datetime import date, datetime
# Show full, untruncated tracebacks from better_exceptions.
better_exceptions.MAX_LENGTH = None
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Tracks the previous log group so log() can insert a blank line between groups.
last_group = ''
def log(group, text):
    """Overwrite the current console line with a timestamped, coloured status message.

    A blank line is printed whenever the group changes, visually separating
    runs of related messages.
    """
    global last_group
    if group != last_group:
        print('')
        last_group = group
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # \x1b[2K\r clears the line and returns the cursor to column 0.
    sys.stdout.write('\x1b[2K\r[{} - {}] {}'.format(
        crayons.blue(timestamp), crayons.blue(group), crayons.green(text)))
def pretty(text):
    """Collapse whitespace artifacts out of a scraped string.

    Removes tabs, newlines and their escaped ('\\t', '\\n') forms, squeezes
    runs of spaces to one, strips the ends, and drops a single surrounding
    pair of double quotes if present.
    """
    for junk in ('\t', '\n', '\\t', '\\n'):
        text = text.replace(junk, '')
    text = re.sub(' +', ' ', text).strip()
    if text.startswith('"'):
        text = text[1:]
    if text.endswith('"'):
        text = text[:-1]
    return text
def fix_showname(show_name):
    """Normalize a handful of known-bad show names from the schedule feed.

    Unknown names are returned unchanged.
    """
    fixes = {
        'MOVIE:': 'MOVIE',  # Remove the colon plz
        'SPECIAL:': 'SPECIAL',  # Remove the colon plz
        'Cloudy': 'Cloudy with a Chance of Meatballs',
        'Cloudy With a Chance of Meatballs': 'Cloudy with a Chance of Meatballs',
        'The Amazing World of Gumball': 'Amazing World of Gumball',
    }
    return fixes.get(show_name, show_name)
def grab_cnschedule(url):
    """Scrape the Cartoon Network "backdoor" schedule for every available day.

    Returns a dict keyed by 'YYYY-MM-DD'; each value holds the source name and
    a list of schedule entries (time, timestamps, show, title, slots).

    BUGFIX: episode titles wrapped in a link came back as None because
    ``el.string`` is None when the cell has child elements; we now fall back
    to the text inside the ``<a>`` tag.
    """
    schedule = {}
    available_dates = []
    log('cn', 'Will get today\'s page')
    # Get the file for today and let BS parse it
    d = date.today()
    now = '%02d/%02d/%d' % (d.month, d.day, d.year)
    r = requests.get(url % now)
    log('cn', 'Parsing...')
    soup = BeautifulSoup(r.text, 'html5lib')
    # We'll now save all the available dates (value '0' is the placeholder option)
    for select_date in soup.find_all('select')[1].find_all('option'):
        if select_date['value'] != '0':
            available_dates.append(select_date['value'])
    log('cn', 'Parsed. Available dates: ' + ', '.join(available_dates))
    # Now, we'll go through each day
    for available_date in available_dates:
        # Get a YYYY-MM-DD version of the date
        parsed_date = parser.parse(available_date).strftime('%Y-%m-%d')
        schedule[parsed_date] = {
            'source': 'Cartoon Network',
            'schedule': []
        }
        log('cn', 'Downloading and parsing ' + parsed_date)
        # Get the day's page
        r = requests.get(url % available_date)
        soup = BeautifulSoup(r.text, 'html5lib')
        log('cn', 'Parsed. Extracting data...')
        # And let's go directly to where the list is (yup, it's a bit complicated)
        trs = soup.find_all('table')[2].find('tr').find_all('td', recursive=False)[1]\
            .find_all('table', recursive=False)[1].find_all('td')[3]\
            .find_all('table', recursive=False)[2].find('tbody').find_all('tr', attrs={'valign': 'top'})
        # And now, for each row...
        for i, tr in enumerate(trs):
            alt_table = False
            el_time = tr.find(class_='timecell')  # Get the element that contains the time
            if el_time is None:
                el_time = tr.find(class_='timeoncell')
                alt_table = True
            # Skip rows whose "time" doesn't end in am/pm (headers and the like)
            if not pretty(''.join(el_time.strings)).endswith('m'):
                continue
            try:
                # Get the "time" and "show" details
                if alt_table:  # Sometimes, it's using '[...]oncell' instead of just '[...]cell'
                    el_time = tr.find(class_='timeoncell')  # Get the element that contains the time
                    el_show = tr.find_all(class_='showoncell')  # Get the element**s** that contains show details
                    el_show_name = el_show[0].find('a')  # Get the element that contains the show name
                    show_name = pretty(''.join(el_show_name.strings))  # Get the actual show name
                    episode_name = el_show[2].string  # Get the episode title
                    # Show might not be under a link, so we try to get the first string in the element then
                    if show_name == 'See All Showings':
                        show_name = pretty(el_show[0].contents[0])
                    # Episode title might itself be under a link (.string is None then)
                    if episode_name is None:
                        el_episode_name = el_show[2].find('a')
                        episode_name = pretty(''.join(el_episode_name.strings))
                else:  # If it's not, let's get our values
                    el_show = tr.find_all(class_='showcell')  # Get the element**s** that contains show details
                    el_show_name = el_show[1].find('a')  # Get the element that contains the show name
                    show_name = pretty(''.join(el_show_name.strings))  # Get the actual show name
                    episode_name = el_show[3].string  # Get the episode title
                    # Show might not be under a link, so we try to get the first string in the element then
                    if show_name == 'See All Showings':
                        show_name = pretty(el_show[1].contents[0])
                    # Episode title might itself be under a link (.string is None then)
                    if episode_name is None:
                        el_episode_name = el_show[3].find('a')
                        episode_name = pretty(''.join(el_episode_name.strings))
                show_name = fix_showname(show_name)
                if episode_name is None:  # Still nothing: fall back to an empty title
                    episode_name = ''
                try:
                    el_next_time = trs[i + 2].find(class_='timecell')
                    if el_next_time is None:
                        # NOTE(review): this falls back to *this* row's timeoncell,
                        # not trs[i + 2]'s — looks like a bug, kept as-is; confirm.
                        el_next_time = tr.find(class_='timeoncell')
                    next_time = pretty(''.join(el_next_time.strings))
                except IndexError:
                    # Past the last row: assume the day wraps at 6am
                    next_time = '6:00 am'
                time = pretty(''.join(el_time.strings))  # Get the air time
                # NOTE(review): fixed -0400 offset assumes EDT year-round — confirm
                parsed_datetime = parser.parse('%s %s -0400' % (parsed_date, time))
                parsed_next_datetime = parser.parse('%s %s -0400' % (parsed_date, next_time))
                # Before 6am / from 8pm onwards it's the [adult swim] block: skip it
                if parsed_datetime.hour < 6 or parsed_datetime.hour >= 20:
                    continue
                # Number of 15-minute slots the programme occupies
                slots = math.ceil(((int(parsed_next_datetime.timestamp()) - int(parsed_datetime.timestamp())) / 60) / 15)
                if slots < 1:
                    slots = 2
                # Add all the details to our schedule list
                schedule[parsed_date]['schedule'].append({
                    'date': parsed_date,
                    'time': time,
                    'timestamp': int(parsed_datetime.timestamp()),
                    'timestamp_end': int(parsed_next_datetime.timestamp()),
                    'show': show_name,
                    'title': episode_name,
                    'slots': slots
                })
            except:
                # If something goes wrong, show the <tr> and raise the error
                print(tr)
                raise
        log('cn', 'Data extracted.')
    log('cn', 'Done!')
    return schedule
def grab_zapschedule(url):
    """Scrape Zap2it's Cartoon Network listings for every available day.

    Returns a dict keyed by 'YYYY-MM-DD'; each value holds the source name and
    a list of schedule entries (time, timestamps, show, title, slots).
    """
    schedule = {}
    available_dates = []
    log('zap', 'Will get today\'s page')
    # Get the file for today and let BS parse it
    r = requests.get(url)
    log('zap', 'Parsing...')
    soup = BeautifulSoup(r.text, 'html5lib')
    # We'll now save all the available dates (label + link) from the day picker
    for select_date in soup.find(id='zc-ssl-dayNav-popup').find_all(class_='zc-topbar-dropdown-item'):
        available_dates.append({'date': select_date.string, 'url': select_date['href']})
    log('zap', 'Parsed. Found ' + str(len(available_dates)) + ' days available')
    # Now, we'll go through each day
    for available_date in available_dates:
        # Get a YYYY-MM-DD version of the date
        parsed_date = parser.parse(available_date['date']).strftime('%Y-%m-%d')
        schedule[parsed_date] = {
            'source': 'Zap2it',
            'schedule': []
        }
        log('zap', 'Downloading and parsing ' + parsed_date)
        # Get the day's page
        r = requests.get(available_date['url'])
        soup = BeautifulSoup(r.text, 'html5lib')
        log('zap', 'Parsed. Extracting data...')
        # Every programme row carries the zc-ssl-pg class
        trs = soup.find_all(class_='zc-ssl-pg')
        # And now, for each row...
        for i, tr in enumerate(trs):
            # Only the 'row1*' rows hold the data we want
            if tr['id'].startswith('row1'):
                try:
                    # Get the "time" and "show" details
                    el_time = tr.find(class_='zc-ssl-pg-time')  # Get the element that contains the time
                    el_show_name = tr.find(class_='zc-ssl-pg-title')
                    el_episodenumber = tr.find(class_='zc-ssl-pg-ep')
                    try:
                        el_next_time = trs[i + 1].find(class_='zc-ssl-pg-time')
                        next_time = pretty(''.join(el_next_time.strings))
                    except IndexError:
                        # Past the last row: assume the day wraps at 6am
                        next_time = '6:00 am'
                    show_name = pretty(''.join(el_show_name.strings))  # Get the actual show name
                    try:
                        episode_name = pretty(''.join(el_episodenumber.strings))
                    except AttributeError:
                        # el_episodenumber is None when the row has no episode cell.
                        # (Was a bare `except:` that hid unrelated errors too.)
                        episode_name = ''
                    show_name = fix_showname(show_name)
                    time = pretty(''.join(el_time.strings)).lower()  # Get the air time
                    # NOTE(review): fixed -0500 offset assumes EST year-round — confirm
                    parsed_datetime = parser.parse('%s %s -0500' % (parsed_date, time))
                    parsed_next_datetime = parser.parse('%s %s -0500' % (parsed_date, next_time))
                    # Before 6am / from 8pm onwards it's the [adult swim] block: skip it
                    if parsed_datetime.hour < 6 or parsed_datetime.hour >= 20:
                        continue
                    # Number of 15-minute slots the programme occupies
                    slots = math.ceil(((int(parsed_next_datetime.timestamp()) - int(parsed_datetime.timestamp())) / 60) / 15)
                    if slots < 1:
                        slots = 2
                    # Add all the details to our schedule list
                    schedule[parsed_date]['schedule'].append({
                        'date': parsed_date,
                        'time': time,
                        'timestamp': int(parsed_datetime.timestamp()),
                        'timestamp_end': int(parsed_next_datetime.timestamp()),
                        'show': show_name,
                        'title': episode_name,
                        'slots': slots
                    })
                except:
                    # If something goes wrong, show the <tr> and raise the error
                    print(tr)
                    raise
        log('zap', 'Data extracted.')
    log('zap', 'Done!')
    return schedule
def merge_schedules(cn_sch, zap_sch):
    """Backfill the CN schedule with Zap2it data and return it ordered by date."""
    log('merge', 'Fixing empty schedules...')
    # Days CN returned empty are replaced wholesale by the Zap2it version
    for s_date in cn_sch:
        if not cn_sch[s_date]['schedule']:
            cn_sch[s_date] = zap_sch[s_date]
    log('merge', 'Adding missing days from Zap2it...')
    # Days CN doesn't know about at all are copied over too
    for s_date in zap_sch:
        if s_date not in cn_sch:
            cn_sch[s_date] = zap_sch[s_date]
    log('merge', 'Ordering...')
    # Sort each day's entries chronologically, then the days themselves
    for day in cn_sch:
        cn_sch[day]['schedule'] = sorted(cn_sch[day]['schedule'], key=itemgetter('timestamp'))
    ordered = collections.OrderedDict(sorted(cn_sch.items()))
    log('merge', 'Done!')
    return ordered
def gen_api(sch):
    """Serialize the merged schedule to api/cnschedule.json next to this script."""
    # Save our schedule in a json file
    log('json', 'Generating json...')
    # Stamp the payload (UTC epoch seconds) so consumers can tell how fresh it is
    sch['_'] = {'generated': int(datetime.utcnow().timestamp())}
    with open(os.path.join(THIS_DIR, 'api', 'cnschedule.json'), 'w') as f:
        f.write(json.dumps(sch, indent=2, sort_keys=False))
    log('json', 'Done!')
if __name__ == '__main__':
    # Script entry point: scrape both sources, merge them, write the JSON API.
    # Using the [as] "backdoor", where we can put a date in the URL
    source_url = 'http://schedule.adultswim.com/servlet/ScheduleServlet?action=GO&theDate=%s&timeZone=EST'
    # First we grab CN's schedule and put it in a list
    cn_schedule = grab_cnschedule(source_url)
    # Using Zap2it's schedule list for CN to get the list
    source_url = 'http://tvschedule.zap2it.com/tvlistings/ZCSGrid.do?sgt=list&stnNum=12131&aid=tvschedule'
    # Now we grab Zap2it's schedule and put it in a list
    zap_schedule = grab_zapschedule(source_url)
    # We merge and order the schedules
    saved_schedule = merge_schedules(cn_schedule, zap_schedule)
    # Then we save what we've got
    gen_api(saved_schedule)
Fix the bug where episode titles nested under a link were extracted as None
import os
import re
import sys
import json
import math
import crayons
import requests
import collections
import better_exceptions
from dateutil import parser
from bs4 import BeautifulSoup
from operator import itemgetter
from datetime import date, datetime
better_exceptions.MAX_LENGTH = None
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
last_group = ''
def log(group, text):
    """Write a one-line status update, opening a new line whenever *group* changes.

    Uses an ANSI erase-line + carriage return so successive messages for the
    same group overwrite each other in place.
    """
    global last_group
    if group != last_group:
        print('')
        last_group = group
    stamp = crayons.blue(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    sys.stdout.write('\x1b[2K\r[{} - {}] {}'.format(stamp, crayons.blue(group), crayons.green(text)))
def pretty(text):
    """Collapse whitespace artifacts in *text* and trim surrounding quotes.

    Removes tab/newline characters and their escaped two-character forms,
    squeezes runs of spaces down to one, strips the ends, and drops a single
    leading and/or trailing double quote.
    """
    for junk in ('\t', '\n', '\\t', '\\n'):
        text = text.replace(junk, '')
    text = re.sub(' +', ' ', text).strip()
    if text.startswith('"'):
        text = text[1:]
    if text.endswith('"'):
        text = text[:-1]
    return text
def fix_showname(show_name):
    """Normalize known-bad show names coming from the schedule pages."""
    fixes = {
        'MOVIE:': 'MOVIE',  # Drop the trailing colon
        'SPECIAL:': 'SPECIAL',  # Drop the trailing colon
        'Cloudy': 'Cloudy with a Chance of Meatballs',
        'Cloudy With a Chance of Meatballs': 'Cloudy with a Chance of Meatballs',
        'The Amazing World of Gumball': 'Amazing World of Gumball',
    }
    return fixes.get(show_name, show_name)
def grab_cnschedule(url):
    """Scrape the Cartoon Network "backdoor" schedule for every available day.

    Returns a dict keyed by 'YYYY-MM-DD'; each value holds the source name and
    a list of schedule entries (time, timestamps, show, title, slots).
    """
    # Get the schedule from the "backdoor" and parse it for every available day
    schedule = {}
    available_dates = []
    log('cn', 'Will get today\'s page')
    # Get the file for today and let BS parse it
    d = date.today()
    now = '%02d/%02d/%d' % (d.month, d.day, d.year)
    r = requests.get(url % now)
    log('cn', 'Parsing...')
    soup = BeautifulSoup(r.text, 'html5lib')
    # We'll now save all the available dates (value '0' is the placeholder option)
    for select_date in soup.find_all('select')[1].find_all('option'):
        if select_date['value'] != '0':
            available_dates.append(select_date['value'])
    log('cn', 'Parsed. Available dates: ' + ', '.join(available_dates))
    # Now, we'll go throught each day
    for available_date in available_dates:
        # Get a YYYY-MM-DD version of the date
        parsed_date = parser.parse(available_date).strftime('%Y-%m-%d')
        # Init the list and the increment counter
        schedule[parsed_date] = {
            'source': 'Cartoon Network',
            'schedule': []
        }
        i = 0
        log('cn', 'Downloading and parsing ' + parsed_date)
        # Get the day's page
        r = requests.get(url % available_date)
        soup = BeautifulSoup(r.text, 'html5lib')
        log('cn', 'Parsed. Extracting data...')
        # And let's go directly to where the list is (yup, it's a bit complicated)
        trs = soup.find_all('table')[2].find('tr').find_all('td', recursive=False)[1]\
            .find_all('table', recursive=False)[1].find_all('td')[3]\
            .find_all('table', recursive=False)[2].find('tbody').find_all('tr', attrs={'valign': 'top'})
        # And now, for each lines...
        for i, tr in enumerate(trs):
            alt_table = False
            el_time = tr.find(class_='timecell') # Get the element that contains the time
            if el_time is None:
                el_time = tr.find(class_='timeoncell')
                alt_table = True
            # Rows whose "time" doesn't end in am/pm are headers etc.: skip them
            if not pretty(''.join(el_time.strings)).endswith('m'):
                continue
            try:
                # Get the "time" and "show" details
                if alt_table: # Sometimes, it's using '[...]oncell' instead of just '[...]cell'
                    el_time = tr.find(class_='timeoncell') # Get the element that contains the time
                    el_show = tr.find_all(class_='showoncell') # Get the element**s** that contains show details
                    el_show_name = el_show[0].find('a') # Get the element that contains the show name
                    show_name = pretty(''.join(el_show_name.strings)) # Get the actual show name
                    episode_name = el_show[2].string # Get the episode title
                    # Show might not be under a link, so we try to get the first string in the element then
                    if show_name == 'See All Showings':
                        show_name = pretty(el_show[0].contents[0])
                    # Episode name might be under a link
                    if episode_name is None:
                        el_episode_name = el_show[2].find('a')
                        episode_name = pretty(''.join(el_episode_name.strings))
                else: # If it's not, let's get our values
                    el_show = tr.find_all(class_='showcell') # Get the element**s** that contains show details
                    el_show_name = el_show[1].find('a') # Get the element that contains the show name
                    show_name = pretty(''.join(el_show_name.strings)) # Get the actual show name
                    episode_name = el_show[3].string # Get the episode title
                    # Show might not be under a link, so we try to get the first string in the element then
                    if show_name == 'See All Showings':
                        show_name = pretty(el_show[1].contents[0])
                    # Episode name might be under a link
                    if episode_name is None:
                        el_episode_name = el_show[3].find('a')
                        episode_name = pretty(''.join(el_episode_name.strings))
                show_name = fix_showname(show_name)
                if episode_name is None: # If no episode title is found, return an empty string
                    episode_name = ''
                try:
                    el_next_time = trs[i + 2].find(class_='timecell')
                    if el_next_time is None:
                        # NOTE(review): falls back to *this* row's timeoncell, not
                        # trs[i + 2]'s — looks like a bug; confirm before changing.
                        el_next_time = tr.find(class_='timeoncell') # Get the element that contains the time
                    next_time = pretty(''.join(el_next_time.strings))
                except IndexError:
                    # Past the last row: assume the day wraps at 6am
                    next_time = '6:00 am'
                time = pretty(''.join(el_time.strings)) # Get the air time
                # NOTE(review): fixed -0400 offset assumes EDT year-round — confirm
                parsed_datetime = parser.parse('%s %s -0400' % (parsed_date, time)) # And let's parse it
                parsed_next_datetime = parser.parse('%s %s -0400' % (parsed_date, next_time)) # And let's parse it
                # If it's before 6am and after (including) 8pm, it's the adultswim part
                if parsed_datetime.hour < 6 or parsed_datetime.hour >= 20:
                    continue
                # Number of 15-minute slots the programme occupies
                slots = math.ceil(((int(parsed_next_datetime.timestamp()) - int(parsed_datetime.timestamp())) / 60) / 15)
                if slots < 1:
                    slots = 2
                # Add all the details to our schedule list
                schedule[parsed_date]['schedule'].append({
                    'date': parsed_date,
                    'time': time,
                    'timestamp': int(parsed_datetime.timestamp()),
                    'timestamp_end': int(parsed_next_datetime.timestamp()),
                    'show': show_name,
                    'title': episode_name,
                    'slots': slots
                })
            except:
                # If something goes wrong, show the <tr> and raise the error
                print(tr)
                raise
        log('cn', 'Data extracted.')
    # And now that we have everything we need,
    # let's return our list
    log('cn', 'Done!')
    return schedule
def grab_zapschedule(url):
    """Scrape Zap2it's Cartoon Network listings for every available day.

    Returns a dict keyed by 'YYYY-MM-DD'; each value holds the source name and
    a list of schedule entries (time, timestamps, show, title, slots).
    """
    schedule = {}
    available_dates = []
    log('zap', 'Will get today\'s page')
    # Get the file for today and let BS parse it
    r = requests.get(url)
    log('zap', 'Parsing...')
    soup = BeautifulSoup(r.text, 'html5lib')
    # We'll now save all the available dates (label + link) from the day picker
    for select_date in soup.find(id='zc-ssl-dayNav-popup').find_all(class_='zc-topbar-dropdown-item'):
        available_dates.append({'date': select_date.string, 'url': select_date['href']})
    log('zap', 'Parsed. Found ' + str(len(available_dates)) + ' days available')
    # Now, we'll go through each day
    for available_date in available_dates:
        # Get a YYYY-MM-DD version of the date
        parsed_date = parser.parse(available_date['date']).strftime('%Y-%m-%d')
        schedule[parsed_date] = {
            'source': 'Zap2it',
            'schedule': []
        }
        log('zap', 'Downloading and parsing ' + parsed_date)
        # Get the day's page
        r = requests.get(available_date['url'])
        soup = BeautifulSoup(r.text, 'html5lib')
        log('zap', 'Parsed. Extracting data...')
        # Every programme row carries the zc-ssl-pg class
        trs = soup.find_all(class_='zc-ssl-pg')
        # And now, for each row...
        for i, tr in enumerate(trs):
            # Only the 'row1*' rows hold the data we want
            if tr['id'].startswith('row1'):
                try:
                    # Get the "time" and "show" details
                    el_time = tr.find(class_='zc-ssl-pg-time')  # Get the element that contains the time
                    el_show_name = tr.find(class_='zc-ssl-pg-title')
                    el_episodenumber = tr.find(class_='zc-ssl-pg-ep')
                    try:
                        el_next_time = trs[i + 1].find(class_='zc-ssl-pg-time')
                        next_time = pretty(''.join(el_next_time.strings))
                    except IndexError:
                        # Past the last row: assume the day wraps at 6am
                        next_time = '6:00 am'
                    show_name = pretty(''.join(el_show_name.strings))  # Get the actual show name
                    try:
                        episode_name = pretty(''.join(el_episodenumber.strings))
                    except AttributeError:
                        # el_episodenumber is None when the row has no episode cell.
                        # (Was a bare `except:` that hid unrelated errors too.)
                        episode_name = ''
                    show_name = fix_showname(show_name)
                    time = pretty(''.join(el_time.strings)).lower()  # Get the air time
                    # NOTE(review): fixed -0500 offset assumes EST year-round — confirm
                    parsed_datetime = parser.parse('%s %s -0500' % (parsed_date, time))
                    parsed_next_datetime = parser.parse('%s %s -0500' % (parsed_date, next_time))
                    # Before 6am / from 8pm onwards it's the [adult swim] block: skip it
                    if parsed_datetime.hour < 6 or parsed_datetime.hour >= 20:
                        continue
                    # Number of 15-minute slots the programme occupies
                    slots = math.ceil(((int(parsed_next_datetime.timestamp()) - int(parsed_datetime.timestamp())) / 60) / 15)
                    if slots < 1:
                        slots = 2
                    # Add all the details to our schedule list
                    schedule[parsed_date]['schedule'].append({
                        'date': parsed_date,
                        'time': time,
                        'timestamp': int(parsed_datetime.timestamp()),
                        'timestamp_end': int(parsed_next_datetime.timestamp()),
                        'show': show_name,
                        'title': episode_name,
                        'slots': slots
                    })
                except:
                    # If something goes wrong, show the <tr> and raise the error
                    print(tr)
                    raise
        log('zap', 'Data extracted.')
    log('zap', 'Done!')
    return schedule
def merge_schedules(cn_sch, zap_sch):
    """Backfill the CN schedule with Zap2it data and return it ordered by date."""
    log('merge', 'Fixing empty schedules...')
    # Days CN returned empty are replaced wholesale by the Zap2it version
    for s_date in cn_sch:
        if not cn_sch[s_date]['schedule']:
            cn_sch[s_date] = zap_sch[s_date]
    log('merge', 'Adding missing days from Zap2it...')
    # Days CN doesn't know about at all are copied over too
    for s_date in zap_sch:
        if s_date not in cn_sch:
            cn_sch[s_date] = zap_sch[s_date]
    log('merge', 'Ordering...')
    # Sort each day's entries chronologically, then the days themselves
    for day in cn_sch:
        cn_sch[day]['schedule'] = sorted(cn_sch[day]['schedule'], key=itemgetter('timestamp'))
    ordered = collections.OrderedDict(sorted(cn_sch.items()))
    log('merge', 'Done!')
    return ordered
def gen_api(sch):
    """Serialize the merged schedule to api/cnschedule.json next to this script."""
    # Save our schedule in a json file
    log('json', 'Generating json...')
    # Stamp the payload (UTC epoch seconds) so consumers can tell how fresh it is
    sch['_'] = {'generated': int(datetime.utcnow().timestamp())}
    with open(os.path.join(THIS_DIR, 'api', 'cnschedule.json'), 'w') as f:
        f.write(json.dumps(sch, indent=2, sort_keys=False))
    log('json', 'Done!')
if __name__ == '__main__':
    # Script entry point: scrape both sources, merge them, write the JSON API.
    # Using the [as] "backdoor", where we can put a date in the URL
    source_url = 'http://schedule.adultswim.com/servlet/ScheduleServlet?action=GO&theDate=%s&timeZone=EST'
    # First we grab CN's schedule and put it in a list
    cn_schedule = grab_cnschedule(source_url)
    # Using Zap2it's schedule list for CN to get the list
    source_url = 'http://tvschedule.zap2it.com/tvlistings/ZCSGrid.do?sgt=list&stnNum=12131&aid=tvschedule'
    # Now we grab Zap2it's schedule and put it in a list
    zap_schedule = grab_zapschedule(source_url)
    # We merge and order the schedules
    saved_schedule = merge_schedules(cn_schedule, zap_schedule)
    # Then we save what we've got
    gen_api(saved_schedule)
|
import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.libs.permissions import (
OwnerRole, ReadOnlyRole, ManagerRole, DataEntryRole, EditorRole)
from onadata.apps.api.models import Project
class TestProjectViewset(TestAbstractViewSet):
    """Tests for the project API: list/retrieve/create, form publishing and
    assignment, sharing roles, owner filtering, and metadata updates."""

    def setUp(self):
        # BUGFIX: super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed — name the class explicitly.
        super(TestProjectViewset, self).setUp()
        self.view = ProjectViewSet.as_view({
            'get': 'list',
            'post': 'create'
        })

    def test_projects_list(self):
        """Listing projects returns the created project."""
        self._project_create()
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [self.project_data])

    def test_projects_get(self):
        """Retrieving a single project by pk returns its data."""
        self._project_create()
        view = ProjectViewSet.as_view({
            'get': 'retrieve'
        })
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.project.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, self.project_data)

    def test_projects_create(self):
        """Project creation succeeds (details asserted by the fixture helper)."""
        self._project_create()

    def test_publish_xls_form_to_project(self):
        """An XLS form can be published into a project."""
        self._publish_xls_form_to_project()

    def test_view_xls_form(self):
        """The forms endpoint lists forms and retrieves one by formid."""
        self._publish_xls_form_to_project()
        view = ProjectViewSet.as_view({
            'get': 'forms'
        })
        request = self.factory.get('/', **self.extra)
        response = view(request, pk=self.project.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [self.form_data])
        response = view(request, pk=self.project.pk, formid=self.xform.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, self.form_data)

    def test_assign_form_to_project(self):
        """POSTing a formid moves the form from its old project to the new one."""
        view = ProjectViewSet.as_view({
            'post': 'forms'
        })
        self._publish_xls_form_to_project()
        formid = self.xform.pk
        old_project = self.project
        project_name = u'another project'
        self._project_create({'name': project_name})
        self.assertTrue(self.project.name == project_name)
        project_id = self.project.pk
        post_data = {'formid': formid}
        request = self.factory.post('/', data=post_data, **self.extra)
        response = view(request, pk=project_id)
        self.assertEqual(response.status_code, 201)
        self.assertTrue(self.project.projectxform_set.filter(xform=self.xform))
        self.assertFalse(old_project.projectxform_set.filter(xform=self.xform))

    def test_project_share_endpoint(self):
        """Sharing grants each role; an empty role name is rejected with 400."""
        self._project_create()
        alice_data = {'username': 'alice', 'email': 'alice@localhost.com'}
        alice_profile = self._create_user_profile(alice_data)
        view = ProjectViewSet.as_view({
            'post': 'share'
        })
        projectid = self.project.pk
        ROLES = [ReadOnlyRole,
                 DataEntryRole,
                 EditorRole,
                 ManagerRole,
                 OwnerRole]
        for role_class in ROLES:
            self.assertFalse(role_class.has_role(alice_profile.user,
                                                 self.project))
            data = {'username': 'alice', 'role': role_class.name}
            request = self.factory.post('/', data=data, **self.extra)
            response = view(request, pk=projectid)
            self.assertEqual(response.status_code, 204)
            self.assertTrue(role_class.has_role(alice_profile.user,
                                                self.project))
        data = {'username': 'alice', 'role': ''}
        request = self.factory.post('/', data=data, **self.extra)
        response = view(request, pk=projectid)
        self.assertEqual(response.status_code, 400)

    def test_project_filter_by_owner(self):
        """The ?owner= query filters the listing; unknown owners yield []."""
        self._project_create()
        default_project_data = self.project_data
        alice_data = {'username': 'alice', 'email': 'alice@localhost.com'}
        self._login_user_and_profile(alice_data)
        ReadOnlyRole.add(self.user, self.project)
        self._project_create({'name': 'another project'})
        # both bob's and alice's projects
        request = self.factory.get('/', **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertIn(default_project_data, response.data)
        self.assertIn(self.project_data, response.data)
        # only bob's project
        request = self.factory.get('/', {'owner': 'bob'}, **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertIn(default_project_data, response.data)
        self.assertNotIn(self.project_data, response.data)
        # only alice's project
        request = self.factory.get('/', {'owner': 'alice'}, **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(default_project_data, response.data)
        self.assertIn(self.project_data, response.data)
        # none existent user
        request = self.factory.get('/', {'owner': 'noone'}, **self.extra)
        response = self.view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])

    def test_project_partial_updates(self):
        """PATCHing metadata replaces the stored metadata dict."""
        self._project_create()
        view = ProjectViewSet.as_view({
            'patch': 'partial_update'
        })
        projectid = self.project.pk
        metadata = '{"description": "Lorem ipsum",' \
                   '"location": "Nakuru, Kenya",' \
                   '"category": "water"' \
                   '}'
        json_metadata = json.loads(metadata)
        data = {'metadata': metadata}
        request = self.factory.patch('/', data=data, **self.extra)
        response = view(request, pk=projectid)
        project = Project.objects.get(pk=projectid)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(project.metadata, json_metadata)

    def test_project_put_updates(self):
        """PUT replaces the project; the response echoes the new values."""
        self._project_create()
        view = ProjectViewSet.as_view({
            'put': 'update'
        })
        projectid = self.project.pk
        data = {
            'name': u'updated name',
            'owner': 'http://testserver/api/v1/users/%s' % self.user.username,
            'metadata': {'description': 'description',
                         'location': 'Nairobi, Kenya',
                         'category': 'health'}
        }
        data.update({'metadata': json.dumps(data.get('metadata'))})
        request = self.factory.put('/', data=data, **self.extra)
        response = view(request, pk=projectid)
        data.update({'metadata': json.loads(data.get('metadata'))})
        self.assertDictContainsSubset(data, response.data)

    def test_project_partial_updates_to_existing_metadata(self):
        """PATCHing one metadata key merges with what is already stored."""
        self._project_create()
        view = ProjectViewSet.as_view({
            'patch': 'partial_update'
        })
        projectid = self.project.pk
        metadata = '{"description": "Changed description"}'
        json_metadata = json.loads(metadata)
        data = {'metadata': metadata}
        request = self.factory.patch('/', data=data, **self.extra)
        response = view(request, pk=projectid)
        project = Project.objects.get(pk=projectid)
        json_metadata.update(project.metadata)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(project.metadata, json_metadata)
PLD: add more project viewset tests
import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.libs.permissions import (
OwnerRole, ReadOnlyRole, ManagerRole, DataEntryRole, EditorRole)
from onadata.apps.api.models import Project
class TestProjectViewSet(TestAbstractViewSet):
def setUp(self):
super(self.__class__, self).setUp()
self.view = ProjectViewSet.as_view({
'get': 'list',
'post': 'create'
})
def test_projects_list(self):
self._project_create()
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [self.project_data])
def test_projects_get(self):
self._project_create()
view = ProjectViewSet.as_view({
'get': 'retrieve'
})
request = self.factory.get('/', **self.extra)
response = view(request, pk=self.project.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.project_data)
def test_projects_create(self):
self._project_create()
self.assertIsNotNone(self.project)
self.assertIsNotNone(self.project_data)
projects = Project.objects.all()
self.assertEqual(len(projects), 1)
for project in projects:
self.assertEqual(self.user, project.created_by)
self.assertEqual(self.user, project.organization)
def test_projects_create_many_users(self):
self._project_create()
alice_data = {'username': 'alice', 'email': 'alice@localhost.com'}
self._login_user_and_profile(alice_data)
self._project_create()
projects = Project.objects.filter(created_by=self.user)
self.assertEqual(len(projects), 1)
for project in projects:
self.assertEqual(self.user, project.created_by)
self.assertEqual(self.user, project.organization)
def test_publish_xls_form_to_project(self):
self._publish_xls_form_to_project()
def test_view_xls_form(self):
self._publish_xls_form_to_project()
view = ProjectViewSet.as_view({
'get': 'forms'
})
request = self.factory.get('/', **self.extra)
response = view(request, pk=self.project.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [self.form_data])
response = view(request, pk=self.project.pk, formid=self.xform.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, self.form_data)
def test_assign_form_to_project(self):
view = ProjectViewSet.as_view({
'post': 'forms'
})
self._publish_xls_form_to_project()
formid = self.xform.pk
old_project = self.project
project_name = u'another project'
self._project_create({'name': project_name})
self.assertTrue(self.project.name == project_name)
project_id = self.project.pk
post_data = {'formid': formid}
request = self.factory.post('/', data=post_data, **self.extra)
response = view(request, pk=project_id)
self.assertEqual(response.status_code, 201)
self.assertTrue(self.project.projectxform_set.filter(xform=self.xform))
self.assertFalse(old_project.projectxform_set.filter(xform=self.xform))
def test_project_share_endpoint(self):
self._project_create()
alice_data = {'username': 'alice', 'email': 'alice@localhost.com'}
alice_profile = self._create_user_profile(alice_data)
view = ProjectViewSet.as_view({
'post': 'share'
})
projectid = self.project.pk
ROLES = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
for role_class in ROLES:
self.assertFalse(role_class.has_role(alice_profile.user,
self.project))
data = {'username': 'alice', 'role': role_class.name}
request = self.factory.post('/', data=data, **self.extra)
response = view(request, pk=projectid)
self.assertEqual(response.status_code, 204)
self.assertTrue(role_class.has_role(alice_profile.user,
self.project))
data = {'username': 'alice', 'role': ''}
request = self.factory.post('/', data=data, **self.extra)
response = view(request, pk=projectid)
self.assertEqual(response.status_code, 400)
def test_project_filter_by_owner(self):
self._project_create()
default_project_data = self.project_data
alice_data = {'username': 'alice', 'email': 'alice@localhost.com'}
self._login_user_and_profile(alice_data)
ReadOnlyRole.add(self.user, self.project)
self._project_create({'name': 'another project'})
# both bob's and alice's projects
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertIn(default_project_data, response.data)
self.assertIn(self.project_data, response.data)
# only bob's project
request = self.factory.get('/', {'owner': 'bob'}, **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertIn(default_project_data, response.data)
self.assertNotIn(self.project_data, response.data)
# only alice's project
request = self.factory.get('/', {'owner': 'alice'}, **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertNotIn(default_project_data, response.data)
self.assertIn(self.project_data, response.data)
# none existent user
request = self.factory.get('/', {'owner': 'noone'}, **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [])
def test_project_partial_updates(self):
self._project_create()
view = ProjectViewSet.as_view({
'patch': 'partial_update'
})
projectid = self.project.pk
metadata = '{"description": "Lorem ipsum",' \
'"location": "Nakuru, Kenya",' \
'"category": "water"' \
'}'
json_metadata = json.loads(metadata)
data = {'metadata': metadata}
request = self.factory.patch('/', data=data, **self.extra)
response = view(request, pk=projectid)
project = Project.objects.get(pk=projectid)
self.assertEqual(response.status_code, 200)
self.assertEqual(project.metadata, json_metadata)
def test_project_put_updates(self):
self._project_create()
view = ProjectViewSet.as_view({
'put': 'update'
})
projectid = self.project.pk
data = {
'name': u'updated name',
'owner': 'http://testserver/api/v1/users/%s' % self.user.username,
'metadata': {'description': 'description',
'location': 'Nairobi, Kenya',
'category': 'health'}
}
data.update({'metadata': json.dumps(data.get('metadata'))})
request = self.factory.put('/', data=data, **self.extra)
response = view(request, pk=projectid)
data.update({'metadata': json.loads(data.get('metadata'))})
self.assertDictContainsSubset(data, response.data)
def test_project_partial_updates_to_existing_metadata(self):
    """PATCH merges new metadata keys into what the project already has."""
    self._project_create()
    view = ProjectViewSet.as_view({'patch': 'partial_update'})
    pk = self.project.pk
    raw_metadata = '{"description": "Changed description"}'
    expected = json.loads(raw_metadata)
    request = self.factory.patch(
        '/', data={'metadata': raw_metadata}, **self.extra)
    response = view(request, pk=pk)
    project = Project.objects.get(pk=pk)
    # Fold the stored metadata into the expectation so pre-existing keys
    # are tolerated; only the changed key must match exactly.
    expected.update(project.metadata)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(project.metadata, expected)
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
    """RML parser for the 'salary by employee and month' payroll report.

    Builds, per selected employee, one row of net salary amounts covering
    up to twelve months, plus a single shared per-month totals row and a
    grand total.
    """

    def __init__(self, cr, uid, name, context):
        super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_periods': self.get_periods,
            'get_months_tol': self.get_months_tol,
            'get_total': self.get_total,
        })
        self.context = context
        # 'm-yyyy' keys for each reported month ('None' pads to 12 slots).
        self.mnths = []
        # Per-month totals rows collected by get_employee.
        self.mnths_total = []
        # Grand total across all months, computed by get_total.
        self.total = 0.0

    def get_periods(self, form):
        """Return the month-name header row and record the month keys."""
        # Get start year-month-date and end year-month-date
        first_year = int(form['start_date'][0:4])
        last_year = int(form['end_date'][0:4])
        first_month = int(form['start_date'][5:7])
        last_month = int(form['end_date'][5:7])
        no_months = (last_year - first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year
        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            if current_month == 12:
                current_month = 0
                # NOTE(review): assumes the period spans at most two
                # calendar years; a longer span would mislabel years.
                current_year = last_year
            current_month = current_month + 1
        # Pad the header row (and the month keys) out to twelve columns.
        for c in range(0, (12 - no_months)):
            mnth_name.append('None')
            self.mnths.append('None')
        return [mnth_name]

    def get_salary(self, form, emp_id, emp_salary, total_mnths=None):
        """Append one employee's monthly net amounts to emp_salary.

        Returns (emp_salary, total, total_mnths) where total is the
        employee's net over the period.  total_mnths is the shared
        per-month totals row, accumulated across calls; when omitted a
        fresh row is created (backward compatible with the old signature).
        """
        if total_mnths is None:
            total_mnths = ['Total', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) as net \
                        from hr_payslip_line as pl \
                        left join hr_payslip as p on pl.slip_id = p.id \
                        left join hr_employee as emp on emp.id = p.employee_id \
                        left join resource_resource as r on r.id = emp.resource_id \
                        where pl.code = 'NET' and p.state = 'done' and p.employee_id in %s \
                        group by r.name, p.date_to,emp.id", (tuple([emp_id]),))
        salary = dict(self.cr.fetchall())
        total = 0.0
        cnt = 1
        for month in self.mnths:
            if month != 'None':
                # Zero-pad the key so it matches the SQL 'mm-yyyy' format.
                if len(month) != 7:
                    month = '0' + str(month)
                if month in salary and salary[month]:
                    emp_salary.append(salary[month])
                    total += salary[month]
                    total_mnths[cnt] = total_mnths[cnt] + salary[month]
                else:
                    emp_salary.append(0.00)
            else:
                emp_salary.append('')
                total_mnths[cnt] = ''
            cnt = cnt + 1
        return emp_salary, total, total_mnths

    def get_employee(self, form):
        """Return one [name, month amounts..., total] row per employee.

        Fixes two defects of the previous version: the employee total was
        appended twice per row, and the per-month totals row was rebuilt
        for every employee (and recorded once per employee) instead of
        being accumulated once across all of them.
        """
        salary_list = []
        total_mnths = ['Total', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        emp_obj = self.pool.get('hr.employee')
        emp_ids = form.get('employee_ids', [])
        employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
        for emp_id in employees:
            emp_salary = [emp_id.name]
            emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
            emp_salary.append(total)
            salary_list.append(emp_salary)
        self.mnths_total.append(total_mnths)
        return salary_list

    def get_months_tol(self):
        """Return the accumulated per-month totals rows."""
        return self.mnths_total

    def get_total(self):
        """Return the grand total over every month of every totals row."""
        for item in self.mnths_total:
            for count in range(1, len(item)):
                if item[count] == '':
                    continue
                self.total += item[count]
        return self.total
# Register the parser against the 'hr.salary.employee.month' model so the
# RML template is rendered through report_hr_salary_employee_bymonth.
report_sxw.report_sxw('report.salary.employee.bymonth', 'hr.salary.employee.month', 'l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.rml', parser=report_hr_salary_employee_bymonth, header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[FIX] l10n_in_hr_payroll: fix the computation of the salary totals

bzr revid: kbh@tinyerp.com-20120725060814-jech1omb9vquin7v
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
    """RML parser for the 'salary by employee and month' payroll report.

    Builds one row of monthly net salary amounts per selected employee,
    plus a shared per-month totals row and a grand total.
    """

    def __init__(self, cr, uid, name, context):
        super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_periods': self.get_periods,
            'get_months_tol': self.get_months_tol,
            'get_total': self.get_total,
        })
        self.context = context
        # 'm-yyyy' keys for each reported month ('None' pads to 12 slots).
        self.mnths = []
        # Per-month totals rows collected by get_employee.
        self.mnths_total = []
        # Grand total across all months, computed by get_total.
        self.total = 0.0

    def get_periods(self, form):
        """Return the month-name header row and record the month keys."""
        # Get start year-month-date and end year-month-date
        first_year = int(form['start_date'][0:4])
        last_year = int(form['end_date'][0:4])
        first_month = int(form['start_date'][5:7])
        last_month = int(form['end_date'][5:7])
        no_months = (last_year-first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year
        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            if current_month == 12:
                current_month = 0
                # NOTE(review): assumes the period spans at most two
                # calendar years — confirm for longer ranges.
                current_year = last_year
            current_month = current_month + 1
        # Pad the header row (and the month keys) out to twelve columns.
        for c in range(0, (12-no_months)):
            mnth_name.append('None')
            self.mnths.append('None')
        return [mnth_name]

    def get_salary(self, form, emp_id, emp_salary, total_mnths):
        # Get salary of the employee
        #
        # Appends one amount per reported month to emp_salary, adds each
        # amount into the shared total_mnths row, and returns
        # (emp_salary, employee_total, total_mnths).
        #
        # NOTE(review): emp_obj/emp_ids/date_from/date_to/employees below
        # are computed but never used — candidates for removal.
        emp_obj = self.pool.get('hr.employee')
        emp_ids = form.get('employee_ids', [])
        date_from = form.get('start_date', [])
        date_to = form.get('end_date', [])
        employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
        self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) as net \
                        from hr_payslip_line as pl \
                        left join hr_payslip as p on pl.slip_id = p.id \
                        left join hr_employee as emp on emp.id = p.employee_id \
                        left join resource_resource as r on r.id = emp.resource_id \
                        where pl.code = 'NET' and p.state = 'done' and p.employee_id in %s \
                        group by r.name, p.date_to,emp.id",(tuple([emp_id]),))
        sal = self.cr.fetchall()
        salary = dict(sal)
        total = 0.0
        cnt = 1
        for month in self.mnths:
            if month <> 'None':
                # Zero-pad the key so it matches the SQL 'mm-yyyy' format.
                if len(month) != 7:
                    month = '0' + str(month)
                if month in salary and salary[month]:
                    emp_salary.append(salary[month])
                    total += salary[month]
                    total_mnths[cnt] = total_mnths[cnt] + salary[month]
                else:
                    emp_salary.append(0.00)
            else:
                emp_salary.append('')
                total_mnths[cnt] = ''
            cnt = cnt + 1
        return emp_salary,total,total_mnths

    def get_employee(self, form):
        """Return one [name, month amounts..., total] row per employee.

        The per-month totals row is accumulated across employees and
        recorded once in self.mnths_total after the loop.
        """
        emp_salary = []
        salary_list = []
        total_mnths=['Total', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        emp_obj = self.pool.get('hr.employee')
        emp_ids = form.get('employee_ids', [])
        employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
        for emp_id in employees:
            emp_salary.append(emp_id.name)
            total = 0.0
            emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
            emp_salary.append(total)
            salary_list.append(emp_salary)
            emp_salary = []
        self.mnths_total.append(total_mnths)
        return salary_list

    def get_months_tol(self):
        """Return the accumulated per-month totals rows."""
        return self.mnths_total

    def get_total(self):
        """Return the grand total over every month of every totals row."""
        for item in self.mnths_total:
            for count in range(1, len(item)):
                if item[count] == '':
                    continue
                self.total += item[count]
        return self.total
# Register the parser against the 'hr.salary.employee.month' model so the
# RML template is rendered through report_hr_salary_employee_bymonth.
report_sxw.report_sxw('report.salary.employee.bymonth', 'hr.salary.employee.month', 'l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.rml', parser=report_hr_salary_employee_bymonth, header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
from base64 import b64encode
import json
import pytest
from six import text_type
from openapi_core.schema.media_types.exceptions import (
InvalidContentType, InvalidMediaTypeValue,
)
from openapi_core.extensions.models.models import BaseModel
from openapi_core.schema.operations.exceptions import InvalidOperation
from openapi_core.schema.parameters.exceptions import MissingRequiredParameter
from openapi_core.schema.parameters.exceptions import InvalidParameterValue
from openapi_core.schema.paths.exceptions import InvalidPath
from openapi_core.schema.request_bodies.exceptions import MissingRequestBody
from openapi_core.schema.responses.exceptions import (
MissingResponseContent, InvalidResponse,
)
from openapi_core.schema.servers.exceptions import InvalidServer
from openapi_core.shortcuts import create_spec
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator
from openapi_core.wrappers.mock import MockRequest, MockResponse
class TestRequestValidator(object):
    """RequestValidator tests against the petstore v3.0 example spec.

    The spec fixture is loaded from data/v3.0/petstore.yaml through the
    suite-level `factory` fixture.
    """

    host_url = 'http://petstore.swagger.io'
    api_key = '12345'

    @property
    def api_key_encoded(self):
        """Return the api key as base64-encoded text, as sent in headers."""
        api_key_bytes = self.api_key.encode('utf8')
        api_key_bytes_enc = b64encode(api_key_bytes)
        return text_type(api_key_bytes_enc, 'utf8')

    @pytest.fixture
    def spec_dict(self, factory):
        return factory.spec_from_file("data/v3.0/petstore.yaml")

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return RequestValidator(spec)

    def test_request_server_error(self, validator):
        """A URL matching no declared server yields InvalidServer."""
        request = MockRequest('http://petstore.invalid.net/v1', 'get', '/')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidServer
        assert result.body is None
        assert result.parameters == {}

    def test_invalid_path(self, validator):
        """A path not present in the spec yields InvalidPath."""
        request = MockRequest(self.host_url, 'get', '/v1')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidPath
        assert result.body is None
        assert result.parameters == {}

    def test_invalid_operation(self, validator):
        """A method not declared for the path yields InvalidOperation."""
        request = MockRequest(self.host_url, 'patch', '/v1/pets')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidOperation
        assert result.body is None
        assert result.parameters == {}

    def test_missing_parameter(self, validator):
        """A missing required query param is reported; defaults still apply."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        result = validator.validate(request)
        assert type(result.errors[0]) == MissingRequiredParameter
        assert result.body is None
        assert result.parameters == {
            'query': {
                'page': 1,
                'search': '',
            },
        }

    def test_get_pets(self, validator):
        """Valid query args are cast to their schema types."""
        request = MockRequest(
            self.host_url, 'get', '/v1/pets',
            path_pattern='/v1/pets', args={'limit': '10'},
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.body is None
        assert result.parameters == {
            'query': {
                'limit': 10,
                'page': 1,
                'search': '',
            },
        }

    def test_missing_body(self, validator):
        """POST without a body yields MissingRequestBody; params still parse."""
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets',
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == MissingRequestBody
        assert result.body is None
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }

    def test_invalid_content_type(self, validator):
        """A request mimetype outside the spec yields InvalidContentType."""
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets', mimetype='text/csv',
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidContentType
        assert result.body is None
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }

    def test_post_pets(self, validator, spec_dict):
        """A valid POST body is unmarshaled into generated model objects."""
        pet_name = 'Cat'
        pet_tag = 'cats'
        pet_street = 'Piekna'
        pet_city = 'Warsaw'
        data_json = {
            'name': pet_name,
            'tag': pet_tag,
            'position': 2,
            'address': {
                'street': pet_street,
                'city': pet_city,
            },
            'ears': {
                'healthy': True,
            }
        }
        data = json.dumps(data_json)
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets', data=data,
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }
        # Model class names come from the x-model extension in the spec.
        schemas = spec_dict['components']['schemas']
        pet_model = schemas['PetCreate']['x-model']
        address_model = schemas['Address']['x-model']
        assert result.body.__class__.__name__ == pet_model
        assert result.body.name == pet_name
        assert result.body.tag == pet_tag
        assert result.body.position == 2
        assert result.body.address.__class__.__name__ == address_model
        assert result.body.address.street == pet_street
        assert result.body.address.city == pet_city

    def test_get_pet(self, validator):
        """Path template variables are extracted and cast."""
        request = MockRequest(
            self.host_url, 'get', '/v1/pets/1',
            path_pattern='/v1/pets/{petId}', view_args={'petId': '1'},
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.body is None
        assert result.parameters == {
            'path': {
                'petId': 1,
            },
        }
class TestPathItemParamsValidator(object):
    """Validation of parameters declared at the path-item level."""

    @pytest.fixture
    def spec_dict(self):
        required_param = {
            "name": "resId",
            "in": "query",
            "required": True,
            "schema": {
                "type": "integer",
            },
        }
        return {
            "openapi": "3.0.0",
            "info": {
                "title": "Test path item parameter validation",
                "version": "0.1",
            },
            "paths": {
                "/resource": {
                    "parameters": [required_param],
                    "get": {
                        "responses": {
                            "default": {
                                "description": "Return the resource."
                            }
                        }
                    }
                }
            }
        }

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return RequestValidator(spec)

    def test_request_missing_param(self, validator):
        """Omitting the required path-item query param is an error."""
        req = MockRequest('http://example.com', 'get', '/resource')
        res = validator.validate(req)

        assert len(res.errors) == 1
        assert type(res.errors[0]) == MissingRequiredParameter
        assert res.body is None
        assert res.parameters == {}

    def test_request_invalid_param(self, validator):
        """A non-integer value for the query param is rejected."""
        req = MockRequest(
            'http://example.com', 'get', '/resource',
            args={'resId': 'invalid'},
        )
        res = validator.validate(req)

        assert len(res.errors) == 1
        assert type(res.errors[0]) == InvalidParameterValue
        assert res.body is None
        assert res.parameters == {}

    def test_request_valid_param(self, validator):
        """A castable value is unmarshaled into the query parameters."""
        req = MockRequest(
            'http://example.com', 'get', '/resource',
            args={'resId': '10'},
        )
        res = validator.validate(req)

        assert len(res.errors) == 0
        assert res.body is None
        assert res.parameters == {'query': {'resId': 10}}

    @pytest.mark.xfail
    def test_request_override_param(self, spec_dict):
        """An operation-level parameter overrides the path-item one."""
        # override parameter path parameter on operation
        # (name and in property must match)
        spec_dict["paths"]["/resource"]["get"]["parameters"] = [
            {
                # full valid parameter object required
                "name": "resId",
                "in": "query",
                "required": False,
                "schema": {
                    "type": "integer",
                },
            }
        ]
        validator = RequestValidator(create_spec(spec_dict))
        res = validator.validate(
            MockRequest('http://example.com', 'get', '/resource'))

        assert len(res.errors) == 0
        assert res.body is None
        assert res.parameters == {}

    @pytest.mark.xfail
    def test_request_override_invalid_param(self, spec_dict):
        """Same name but different location should invalidate the spec."""
        # This here should result in an invalid spec object, because there
        # are now two parameters with the same name, but different location.
        # (The openapi3 spec is also not very explicit about this case)
        spec_dict["paths"]["/resource"]["get"]["parameters"] = [
            {
                # full valid parameter object required
                "name": "resId",
                "in": "path",
                "required": False,
                "schema": {
                    "type": "integer",
                },
            }
        ]
        from openapi_spec_validator.exceptions import OpenAPIValidationError
        with pytest.raises(OpenAPIValidationError):
            create_spec(spec_dict)
class TestResponseValidator(object):
    """ResponseValidator tests against the petstore v3.0 example spec."""

    host_url = 'http://petstore.swagger.io'

    @pytest.fixture
    def spec_dict(self, factory):
        return factory.spec_from_file("data/v3.0/petstore.yaml")

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return ResponseValidator(spec)

    def test_invalid_server(self, validator):
        """A request URL matching no declared server yields InvalidServer."""
        request = MockRequest('http://petstore.invalid.net/v1', 'get', '/')
        response = MockResponse('Not Found', status_code=404)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidServer
        assert result.data is None
        assert result.headers is None

    def test_invalid_operation(self, validator):
        """A request that resolves to no operation yields InvalidOperation."""
        request = MockRequest(self.host_url, 'get', '/v1')
        response = MockResponse('Not Found', status_code=404)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidOperation
        assert result.data is None
        assert result.headers is None

    def test_invalid_response(self, validator):
        """A status code not declared for the operation yields InvalidResponse."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse('Not Found', status_code=409)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidResponse
        assert result.data is None
        assert result.headers is None

    def test_invalid_content_type(self, validator):
        """A response mimetype outside the spec yields InvalidContentType."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse('Not Found', mimetype='text/csv')
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidContentType
        assert result.data is None
        assert result.headers == {}

    def test_missing_body(self, validator):
        """A response without content yields MissingResponseContent."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse(None)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == MissingResponseContent
        assert result.data is None
        assert result.headers == {}

    def test_invalid_media_type_value(self, validator):
        """A JSON body not matching the response schema is rejected."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse("{}")
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidMediaTypeValue
        assert result.data is None
        assert result.headers == {}

    def test_invalid_value(self, validator):
        """A payload violating the /v1/tags response schema is rejected."""
        # NOTE(review): presumably the tags schema forbids this object
        # shape — verify against data/v3.0/petstore.yaml.
        request = MockRequest(self.host_url, 'get', '/v1/tags')
        response_json = {
            'data': [
                {
                    'id': 1,
                    'name': 'Sparky'
                },
            ],
        }
        response_data = json.dumps(response_json)
        response = MockResponse(response_data)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidMediaTypeValue
        assert result.data is None
        assert result.headers == {}

    def test_get_pets(self, validator):
        """A valid response body unmarshals into model objects."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response_json = {
            'data': [
                {
                    'id': 1,
                    'name': 'Sparky'
                },
            ],
        }
        response_data = json.dumps(response_json)
        response = MockResponse(response_data)
        result = validator.validate(request, response)
        assert result.errors == []
        assert isinstance(result.data, BaseModel)
        assert len(result.data.data) == 1
        assert result.data.data[0].id == 1
        assert result.data.data[0].name == 'Sparky'
        assert result.headers == {}
Parameters are unique as long as name *and* location are unique.
from base64 import b64encode
import json
import pytest
from six import text_type
from openapi_core.schema.media_types.exceptions import (
InvalidContentType, InvalidMediaTypeValue,
)
from openapi_core.extensions.models.models import BaseModel
from openapi_core.schema.operations.exceptions import InvalidOperation
from openapi_core.schema.parameters.exceptions import MissingRequiredParameter
from openapi_core.schema.parameters.exceptions import InvalidParameterValue
from openapi_core.schema.paths.exceptions import InvalidPath
from openapi_core.schema.request_bodies.exceptions import MissingRequestBody
from openapi_core.schema.responses.exceptions import (
MissingResponseContent, InvalidResponse,
)
from openapi_core.schema.servers.exceptions import InvalidServer
from openapi_core.shortcuts import create_spec
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator
from openapi_core.wrappers.mock import MockRequest, MockResponse
class TestRequestValidator(object):
    """RequestValidator tests against the petstore v3.0 example spec.

    The spec fixture is loaded from data/v3.0/petstore.yaml through the
    suite-level `factory` fixture.
    """

    host_url = 'http://petstore.swagger.io'
    api_key = '12345'

    @property
    def api_key_encoded(self):
        """Return the api key as base64-encoded text, as sent in headers."""
        api_key_bytes = self.api_key.encode('utf8')
        api_key_bytes_enc = b64encode(api_key_bytes)
        return text_type(api_key_bytes_enc, 'utf8')

    @pytest.fixture
    def spec_dict(self, factory):
        return factory.spec_from_file("data/v3.0/petstore.yaml")

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return RequestValidator(spec)

    def test_request_server_error(self, validator):
        """A URL matching no declared server yields InvalidServer."""
        request = MockRequest('http://petstore.invalid.net/v1', 'get', '/')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidServer
        assert result.body is None
        assert result.parameters == {}

    def test_invalid_path(self, validator):
        """A path not present in the spec yields InvalidPath."""
        request = MockRequest(self.host_url, 'get', '/v1')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidPath
        assert result.body is None
        assert result.parameters == {}

    def test_invalid_operation(self, validator):
        """A method not declared for the path yields InvalidOperation."""
        request = MockRequest(self.host_url, 'patch', '/v1/pets')
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidOperation
        assert result.body is None
        assert result.parameters == {}

    def test_missing_parameter(self, validator):
        """A missing required query param is reported; defaults still apply."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        result = validator.validate(request)
        assert type(result.errors[0]) == MissingRequiredParameter
        assert result.body is None
        assert result.parameters == {
            'query': {
                'page': 1,
                'search': '',
            },
        }

    def test_get_pets(self, validator):
        """Valid query args are cast to their schema types."""
        request = MockRequest(
            self.host_url, 'get', '/v1/pets',
            path_pattern='/v1/pets', args={'limit': '10'},
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.body is None
        assert result.parameters == {
            'query': {
                'limit': 10,
                'page': 1,
                'search': '',
            },
        }

    def test_missing_body(self, validator):
        """POST without a body yields MissingRequestBody; params still parse."""
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets',
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == MissingRequestBody
        assert result.body is None
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }

    def test_invalid_content_type(self, validator):
        """A request mimetype outside the spec yields InvalidContentType."""
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets', mimetype='text/csv',
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidContentType
        assert result.body is None
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }

    def test_post_pets(self, validator, spec_dict):
        """A valid POST body is unmarshaled into generated model objects."""
        pet_name = 'Cat'
        pet_tag = 'cats'
        pet_street = 'Piekna'
        pet_city = 'Warsaw'
        data_json = {
            'name': pet_name,
            'tag': pet_tag,
            'position': 2,
            'address': {
                'street': pet_street,
                'city': pet_city,
            },
            'ears': {
                'healthy': True,
            }
        }
        data = json.dumps(data_json)
        headers = {
            'api_key': self.api_key_encoded,
        }
        cookies = {
            'user': '123',
        }
        request = MockRequest(
            self.host_url, 'post', '/v1/pets',
            path_pattern='/v1/pets', data=data,
            headers=headers, cookies=cookies,
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.parameters == {
            'header': {
                'api_key': self.api_key,
            },
            'cookie': {
                'user': 123,
            },
        }
        # Model class names come from the x-model extension in the spec.
        schemas = spec_dict['components']['schemas']
        pet_model = schemas['PetCreate']['x-model']
        address_model = schemas['Address']['x-model']
        assert result.body.__class__.__name__ == pet_model
        assert result.body.name == pet_name
        assert result.body.tag == pet_tag
        assert result.body.position == 2
        assert result.body.address.__class__.__name__ == address_model
        assert result.body.address.street == pet_street
        assert result.body.address.city == pet_city

    def test_get_pet(self, validator):
        """Path template variables are extracted and cast."""
        request = MockRequest(
            self.host_url, 'get', '/v1/pets/1',
            path_pattern='/v1/pets/{petId}', view_args={'petId': '1'},
        )
        result = validator.validate(request)
        assert result.errors == []
        assert result.body is None
        assert result.parameters == {
            'path': {
                'petId': 1,
            },
        }
class TestPathItemParamsValidator(object):
    """Validation of parameters declared at the path-item level."""

    @pytest.fixture
    def spec_dict(self):
        required_param = {
            "name": "resId",
            "in": "query",
            "required": True,
            "schema": {
                "type": "integer",
            },
        }
        return {
            "openapi": "3.0.0",
            "info": {
                "title": "Test path item parameter validation",
                "version": "0.1",
            },
            "paths": {
                "/resource": {
                    "parameters": [required_param],
                    "get": {
                        "responses": {
                            "default": {
                                "description": "Return the resource."
                            }
                        }
                    }
                }
            }
        }

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return RequestValidator(spec)

    def test_request_missing_param(self, validator):
        """Omitting the required path-item query param is an error."""
        req = MockRequest('http://example.com', 'get', '/resource')
        res = validator.validate(req)

        assert len(res.errors) == 1
        assert type(res.errors[0]) == MissingRequiredParameter
        assert res.body is None
        assert res.parameters == {}

    def test_request_invalid_param(self, validator):
        """A non-integer value for the query param is rejected."""
        req = MockRequest(
            'http://example.com', 'get', '/resource',
            args={'resId': 'invalid'},
        )
        res = validator.validate(req)

        assert len(res.errors) == 1
        assert type(res.errors[0]) == InvalidParameterValue
        assert res.body is None
        assert res.parameters == {}

    def test_request_valid_param(self, validator):
        """A castable value is unmarshaled into the query parameters."""
        req = MockRequest(
            'http://example.com', 'get', '/resource',
            args={'resId': '10'},
        )
        res = validator.validate(req)

        assert len(res.errors) == 0
        assert res.body is None
        assert res.parameters == {'query': {'resId': 10}}

    @pytest.mark.xfail
    def test_request_override_param(self, spec_dict):
        """An operation-level parameter overrides the path-item one."""
        # override parameter path parameter on operation
        # (name and in property must match)
        spec_dict["paths"]["/resource"]["get"]["parameters"] = [
            {
                # full valid parameter object required
                "name": "resId",
                "in": "query",
                "required": False,
                "schema": {
                    "type": "integer",
                },
            }
        ]
        validator = RequestValidator(create_spec(spec_dict))
        res = validator.validate(
            MockRequest('http://example.com', 'get', '/resource'))

        assert len(res.errors) == 0
        assert res.body is None
        assert res.parameters == {}
class TestResponseValidator(object):
    """ResponseValidator tests against the petstore v3.0 example spec."""

    host_url = 'http://petstore.swagger.io'

    @pytest.fixture
    def spec_dict(self, factory):
        return factory.spec_from_file("data/v3.0/petstore.yaml")

    @pytest.fixture
    def spec(self, spec_dict):
        return create_spec(spec_dict)

    @pytest.fixture
    def validator(self, spec):
        return ResponseValidator(spec)

    def test_invalid_server(self, validator):
        """A request URL matching no declared server yields InvalidServer."""
        request = MockRequest('http://petstore.invalid.net/v1', 'get', '/')
        response = MockResponse('Not Found', status_code=404)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidServer
        assert result.data is None
        assert result.headers is None

    def test_invalid_operation(self, validator):
        """A request that resolves to no operation yields InvalidOperation."""
        request = MockRequest(self.host_url, 'get', '/v1')
        response = MockResponse('Not Found', status_code=404)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidOperation
        assert result.data is None
        assert result.headers is None

    def test_invalid_response(self, validator):
        """A status code not declared for the operation yields InvalidResponse."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse('Not Found', status_code=409)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidResponse
        assert result.data is None
        assert result.headers is None

    def test_invalid_content_type(self, validator):
        """A response mimetype outside the spec yields InvalidContentType."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse('Not Found', mimetype='text/csv')
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidContentType
        assert result.data is None
        assert result.headers == {}

    def test_missing_body(self, validator):
        """A response without content yields MissingResponseContent."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse(None)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == MissingResponseContent
        assert result.data is None
        assert result.headers == {}

    def test_invalid_media_type_value(self, validator):
        """A JSON body not matching the response schema is rejected."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response = MockResponse("{}")
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidMediaTypeValue
        assert result.data is None
        assert result.headers == {}

    def test_invalid_value(self, validator):
        """A payload violating the /v1/tags response schema is rejected."""
        # NOTE(review): presumably the tags schema forbids this object
        # shape — verify against data/v3.0/petstore.yaml.
        request = MockRequest(self.host_url, 'get', '/v1/tags')
        response_json = {
            'data': [
                {
                    'id': 1,
                    'name': 'Sparky'
                },
            ],
        }
        response_data = json.dumps(response_json)
        response = MockResponse(response_data)
        result = validator.validate(request, response)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidMediaTypeValue
        assert result.data is None
        assert result.headers == {}

    def test_get_pets(self, validator):
        """A valid response body unmarshals into model objects."""
        request = MockRequest(self.host_url, 'get', '/v1/pets')
        response_json = {
            'data': [
                {
                    'id': 1,
                    'name': 'Sparky'
                },
            ],
        }
        response_data = json.dumps(response_json)
        response = MockResponse(response_data)
        result = validator.validate(request, response)
        assert result.errors == []
        assert isinstance(result.data, BaseModel)
        assert len(result.data.data) == 1
        assert result.data.data[0].id == 1
        assert result.data.data[0].name == 'Sparky'
        assert result.headers == {}
|
#!/usr/bin/env python
# build_script.py - Build, install, and test XCTest -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import fnmatch
import os
import subprocess
import sys
import tempfile
import textwrap
import platform
import errno
SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
def note(msg):
    """Print *msg* to stdout prefixed with this script's name."""
    print("xctest-build: {}".format(msg))
def run(command):
    """Echo *command* via note(), then execute it through the shell.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    note(command)
    subprocess.check_call(command, shell=True)
def _mkdirp(path):
    """Create the directory *path* (and parents) unless it already exists."""
    if os.path.exists(path):
        return
    run("mkdir -p {}".format(path))
def _find_files_with_extension(path, extension):
"""
In Python 3.5 and above, glob supports recursive patterns such as
'**/*.swift'. This function backports that functionality to Python 3.4
and below.
"""
paths = []
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.{}'.format(extension)):
paths.append(os.path.join(root, file_name))
return paths
def symlink_force(target, link_name):
    """Create a symlink to *target* at *link_name*, replacing any existing one.

    If *link_name* is a directory, the link is created inside it using
    target's basename.
    """
    if os.path.isdir(link_name):
        link_name = os.path.join(link_name, os.path.basename(target))
    try:
        os.symlink(target, link_name)
    except OSError as error:
        # Only "already exists" is recoverable: replace the old entry.
        if error.errno != errno.EEXIST:
            raise error
        os.remove(link_name)
        os.symlink(target, link_name)
class DarwinStrategy:
    """Build/test strategy used on Darwin (macOS).

    All work is delegated to xcodebuild and the XCTest Xcode workspace;
    installation is deliberately unsupported on this platform.
    """

    @staticmethod
    def requires_foundation_build_dir():
        # The Foundation build directory is not required on Darwin because the
        # Xcode workspace implicitly builds Foundation when building the XCTest
        # schemes.
        return False

    @staticmethod
    def build(args):
        """
        Build XCTest and place the built products in the given 'build_dir'.
        If 'test' is specified, also executes the 'test' subcommand.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)

        # Map the script's debug/release flag onto Xcode configuration names.
        if args.build_style == "debug":
            style_options = "Debug"
        else:
            style_options = "Release"

        run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
            "-scheme SwiftXCTest "
            "-configuration {style_options} "
            "SWIFT_EXEC=\"{swiftc}\" "
            "SWIFT_LINK_OBJC_RUNTIME=YES "
            "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\"".format(
                swiftc=swiftc,
                build_dir=build_dir,
                style_options=style_options,
                source_dir=SOURCE_DIR))

        if args.test:
            # Execute main() using the arguments necessary to run the tests.
            main(args=["test",
                       "--swiftc", swiftc,
                       build_dir])

    @staticmethod
    def test(args):
        """
        Test SwiftXCTest.framework, using the given 'swiftc' compiler, looking
        for it in the given 'build_dir'.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)

        # Same configuration mapping as in build().
        if args.build_style == "debug":
            style_options = "Debug"
        else:
            style_options = "Release"

        run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
            "-scheme SwiftXCTestFunctionalTests "
            "-configuration {style_options} "
            "SWIFT_EXEC=\"{swiftc}\" "
            "SWIFT_LINK_OBJC_RUNTIME=YES "
            "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\" ".format(
                swiftc=swiftc,
                build_dir=build_dir,
                style_options=style_options,
                source_dir=SOURCE_DIR))

    @staticmethod
    def install(args):
        """
        Installing XCTest is not supported on Darwin.
        """
        note("error: The install command is not supported on this platform")
        exit(1)
class GenericUnixStrategy:
    """Build/test/install strategy for non-Darwin Unix platforms.

    Invokes swiftc directly (no Xcode) and links against pre-built
    swift-corelibs-foundation — and, optionally, libdispatch — products.
    """

    @staticmethod
    def requires_foundation_build_dir():
        # This script does not know how to build Foundation in Unix environments,
        # so we need the path to a pre-built Foundation library.
        return True

    @staticmethod
    def build(args):
        """
        Build XCTest and place the built products in the given 'build_dir'.
        If 'test' is specified, also executes the 'test' subcommand.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)
        foundation_build_dir = os.path.abspath(args.foundation_build_dir)
        core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
            foundation_build_dir, args.foundation_install_prefix)
        # libdispatch paths are optional; both must be supplied (checked below)
        # for dispatch include flags to be passed to swiftc.
        if args.libdispatch_build_dir:
            libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
        if args.libdispatch_src_dir:
            libdispatch_src_dir = os.path.abspath(args.libdispatch_src_dir)
        _mkdirp(build_dir)

        sourcePaths = _find_files_with_extension(
            os.path.join(SOURCE_DIR, 'Sources', 'XCTest'),
            'swift')

        # -g for debug builds, -O for release.
        if args.build_style == "debug":
            style_options = "-g"
        else:
            style_options = "-O"

        # Not incremental..
        # Build library
        if args.libdispatch_build_dir and args.libdispatch_src_dir:
            libdispatch_args = "-I {libdispatch_build_dir}/src -I {libdispatch_src_dir} ".format(
                libdispatch_build_dir=libdispatch_build_dir,
                libdispatch_src_dir=libdispatch_src_dir)
        else:
            libdispatch_args = ""

        # Compile every source file in a single frontend invocation, emitting
        # one object file plus the swiftmodule alongside it.
        run("{swiftc} -Xcc -fblocks -c {style_options} -emit-object -emit-module "
            "-module-name XCTest -module-link-name XCTest -parse-as-library "
            "-emit-module-path {build_dir}/XCTest.swiftmodule "
            "-force-single-frontend-invocation "
            "-I {foundation_build_dir} -I {core_foundation_build_dir} "
            "{libdispatch_args} "
            "{source_paths} -o {build_dir}/XCTest.o".format(
                swiftc=swiftc,
                style_options=style_options,
                build_dir=build_dir,
                foundation_build_dir=foundation_build_dir,
                core_foundation_build_dir=core_foundation_build_dir,
                libdispatch_args=libdispatch_args,
                source_paths=" ".join(sourcePaths)))

        # Link the object file into the shared library libXCTest.so.
        run("{swiftc} -emit-library {build_dir}/XCTest.o "
            "-L {foundation_build_dir} -lswiftGlibc -lswiftCore -lFoundation -lm "
            # We embed an rpath of `$ORIGIN` to ensure other referenced
            # libraries (like `Foundation`) can be found solely via XCTest.
            "-Xlinker -rpath=\\$ORIGIN "
            "-o {build_dir}/libXCTest.so".format(
                swiftc=swiftc,
                build_dir=build_dir,
                foundation_build_dir=foundation_build_dir))

        if args.test:
            # Execute main() using the arguments necessary to run the tests.
            main(args=["test",
                       "--swiftc", swiftc,
                       "--foundation-build-dir", foundation_build_dir,
                       build_dir])

        # If --module-install-path and --library-install-path were specified,
        # we also install the built XCTest products.
        if args.module_path is not None and args.lib_path is not None:
            # Execute main() using the arguments necessary for installation.
            main(args=["install", build_dir,
                       "--module-install-path", args.module_path,
                       "--library-install-path", args.lib_path])

        note('Done.')

    @staticmethod
    def test(args):
        """
        Test the built XCTest.so library at the given 'build_dir', using the
        given 'swiftc' compiler.
        """
        lit_path = os.path.abspath(args.lit)
        if not os.path.exists(lit_path):
            raise IOError(
                'Could not find lit tester tool at path: "{}". This tool is '
                'requred to run the test suite. Unless you specified a custom '
                'path to the tool using the "--lit" option, the lit tool will be '
                'found in the LLVM source tree, which is expected to be checked '
                'out in the same directory as swift-corelibs-xctest. If you do '
                'not have LLVM checked out at this path, you may follow the '
                'instructions for "Getting Sources for Swift and Related '
                'Projects" from the Swift project README in order to fix this '
                'error.'.format(lit_path))

        # FIXME: Allow these to be specified by the Swift build script.
        lit_flags = "-sv --no-progress-bar"
        tests_path = os.path.join(SOURCE_DIR, "Tests", "Functional")
        foundation_build_dir = os.path.abspath(args.foundation_build_dir)
        core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
            foundation_build_dir, args.foundation_install_prefix)
        if args.libdispatch_build_dir:
            libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
            # Make libdispatch.so discoverable next to the Foundation products.
            symlink_force(os.path.join(args.libdispatch_build_dir, "src", ".libs", "libdispatch.so"),
                          foundation_build_dir)

        if args.libdispatch_src_dir and args.libdispatch_build_dir:
            libdispatch_src_args = (
                "LIBDISPATCH_SRC_DIR={libdispatch_src_dir} "
                "LIBDISPATCH_BUILD_DIR={libdispatch_build_dir} "
                "LIBDISPATCH_OVERLAY_DIR={libdispatch_overlay_dir}".format(
                    libdispatch_src_dir=os.path.abspath(args.libdispatch_src_dir),
                    libdispatch_build_dir=os.path.join(args.libdispatch_build_dir, 'src', '.libs'),
                    libdispatch_overlay_dir=os.path.join(args.libdispatch_build_dir, 'src', 'swift')))
        else:
            libdispatch_src_args = ""

        # The environment variables are consumed by the lit test configuration.
        run('SWIFT_EXEC={swiftc} '
            'BUILT_PRODUCTS_DIR={built_products_dir} '
            'FOUNDATION_BUILT_PRODUCTS_DIR={foundation_build_dir} '
            'CORE_FOUNDATION_BUILT_PRODUCTS_DIR={core_foundation_build_dir} '
            '{libdispatch_src_args} '
            '{lit_path} {lit_flags} '
            '{tests_path}'.format(
                swiftc=os.path.abspath(args.swiftc),
                built_products_dir=args.build_dir,
                foundation_build_dir=foundation_build_dir,
                core_foundation_build_dir=core_foundation_build_dir,
                libdispatch_src_args=libdispatch_src_args,
                lit_path=lit_path,
                lit_flags=lit_flags,
                tests_path=tests_path))

    @staticmethod
    def install(args):
        """
        Install the XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc build
        products into the given module and library paths.
        """
        build_dir = os.path.abspath(args.build_dir)
        module_install_path = os.path.abspath(args.module_install_path)
        library_install_path = os.path.abspath(args.library_install_path)
        _mkdirp(module_install_path)
        _mkdirp(library_install_path)

        xctest_so = "libXCTest.so"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_so),
            os.path.join(library_install_path, xctest_so)))

        xctest_swiftmodule = "XCTest.swiftmodule"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_swiftmodule),
            os.path.join(module_install_path, xctest_swiftmodule)))

        xctest_swiftdoc = "XCTest.swiftdoc"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_swiftdoc),
            os.path.join(module_install_path, xctest_swiftdoc)))

    @staticmethod
    def core_foundation_build_dir(foundation_build_dir, foundation_install_prefix):
        """
        Given the path to a swift-corelibs-foundation built product directory,
        return the path to CoreFoundation built products.

        When specifying a built Foundation dir such as
        '/build/foundation-linux-x86_64/Foundation', CoreFoundation dependencies
        are placed in 'usr/lib/swift'. Note that it's technically not necessary to
        include this extra path when linking the installed Swift's
        'usr/lib/swift/linux/libFoundation.so'.
        """
        return os.path.join(foundation_build_dir,
                            foundation_install_prefix.strip("/"), 'lib', 'swift')
def main(args=None):
    """
    The main entry point for this script. Based on the subcommand given,
    delegates building or testing XCTest to a sub-parser and its corresponding
    function.

    :param args: command-line arguments, excluding the program name. When
        None, sys.argv[1:] is read at call time.
    """
    # FIX: a default of `sys.argv[1:]` in the signature is evaluated once at
    # import time, so later changes to sys.argv would be ignored. Use a None
    # sentinel and read argv at call time instead.
    if args is None:
        args = sys.argv[1:]

    # Pick the platform-appropriate strategy object; each exposes build/test/
    # install static methods with a common interface.
    strategy = DarwinStrategy if platform.system() == 'Darwin' else GenericUnixStrategy
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
            Build, test, and install XCTest.

            NOTE: In general this script should not be invoked directly. The
            recommended way to build and test XCTest is via the Swift build
            script. See this project's README for details.

            The Swift build script invokes this %(prog)s script to build,
            test, and install this project. You may invoke it in the same way
            to build this project directly. For example, if you are in a Linux
            environment, your install of Swift is located at "/swift" and you
            wish to install XCTest into that same location, here is a sample
            invocation of the build script:

            $ %(prog)s \\
                --swiftc="/swift/usr/bin/swiftc" \\
                --build-dir="/tmp/XCTest_build" \\
                --foundation-build-dir "/swift/usr/lib/swift/linux" \\
                --library-install-path="/swift/usr/lib/swift/linux" \\
                --module-install-path="/swift/usr/lib/swift/linux/x86_64"

            Note that installation is not supported on Darwin as this library
            is only intended to be used as a dependency in environments where
            Apple XCTest is not available.
            """))
    subparsers = parser.add_subparsers(
        description=textwrap.dedent("""
            Use one of these to specify whether to build, test, or install
            XCTest. If you don't specify any of these, 'build' is executed as a
            default. You may also use 'build' to also test and install the
            built products. Pass the -h or --help option to any of the
            subcommands for more information."""))

    # --- 'build' subcommand -------------------------------------------------
    build_parser = subparsers.add_parser(
        "build",
        description=textwrap.dedent("""
            Build XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc using the
            given Swift compiler. This command may also test and install the
            built products."""))
    build_parser.set_defaults(func=strategy.build)
    build_parser.add_argument(
        "--swiftc",
        help="Path to the 'swiftc' compiler that will be used to build "
             "XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc. This will "
             "also be used to build the tests for those built products if the "
             "--test option is specified.",
        required=True)
    build_parser.add_argument(
        "--build-dir",
        help="Path to the output build directory. If not specified, a "
             "temporary directory is used.",
        default=tempfile.mkdtemp())
    build_parser.add_argument(
        "--foundation-build-dir",
        help="Path to swift-corelibs-foundation build products, which "
             "the built XCTest.so will be linked against.",
        required=strategy.requires_foundation_build_dir())
    build_parser.add_argument(
        "--foundation-install-prefix",
        help="Path to the installation location for swift-corelibs-foundation "
             "build products ('%(default)s' by default); CoreFoundation "
             "dependencies are expected to be found under "
             "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.",
        default="/usr")
    build_parser.add_argument(
        "--libdispatch-build-dir",
        help="Path to swift-corelibs-libdispatch build products, which "
             "the built XCTest.so will be linked against.")
    build_parser.add_argument(
        "--libdispatch-src-dir",
        help="Path to swift-corelibs-libdispatch source tree, which "
             "the built XCTest.so will be linked against.")
    build_parser.add_argument(
        "--module-install-path",
        help="Location at which to install XCTest.swiftmodule and "
             "XCTest.swiftdoc. This directory will be created if it doesn't "
             "already exist.",
        dest="module_path")
    build_parser.add_argument(
        "--library-install-path",
        help="Location at which to install XCTest.so. This directory will be "
             "created if it doesn't already exist.",
        dest="lib_path")
    build_parser.add_argument(
        "--release",
        help="builds for release",
        action="store_const",
        dest="build_style",
        const="release",
        default="debug")
    build_parser.add_argument(
        "--debug",
        help="builds for debug (the default)",
        action="store_const",
        dest="build_style",
        const="debug",
        default="debug")
    build_parser.add_argument(
        "--test",
        help="Whether to run tests after building. Note that you must have "
             "cloned https://github.com/apple/swift-llvm at {} in order to "
             "run this command.".format(os.path.join(
                 os.path.dirname(SOURCE_DIR), 'llvm')),
        action="store_true")

    # --- 'test' subcommand --------------------------------------------------
    test_parser = subparsers.add_parser(
        "test",
        description="Tests a built XCTest framework at the given path.")
    test_parser.set_defaults(func=strategy.test)
    test_parser.add_argument(
        "build_dir",
        help="An absolute path to a directory containing the built XCTest.so "
             "library.")
    test_parser.add_argument(
        "--swiftc",
        help="Path to the 'swiftc' compiler used to build and run the tests.",
        required=True)
    test_parser.add_argument(
        "--lit",
        help="Path to the 'lit' tester tool used to run the test suite. "
             "'%(default)s' by default.",
        default=os.path.join(os.path.dirname(SOURCE_DIR),
                             "llvm", "utils", "lit", "lit.py"))
    test_parser.add_argument(
        "--foundation-build-dir",
        help="Path to swift-corelibs-foundation build products, which the "
             "tests will be linked against.",
        required=strategy.requires_foundation_build_dir())
    test_parser.add_argument(
        "--foundation-install-prefix",
        help="Path to the installation location for swift-corelibs-foundation "
             "build products ('%(default)s' by default); CoreFoundation "
             "dependencies are expected to be found under "
             "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.",
        default="/usr")
    test_parser.add_argument(
        "--libdispatch-build-dir",
        help="Path to swift-corelibs-libdispatch build products, which "
             "the built XCTest.so will be linked against.")
    test_parser.add_argument(
        "--libdispatch-src-dir",
        help="Path to swift-corelibs-libdispatch source tree, which "
             "the built XCTest.so will be linked against.")
    test_parser.add_argument(
        "--release",
        help="builds the tests for release",
        action="store_const",
        dest="build_style",
        const="release",
        default="debug")
    test_parser.add_argument(
        "--debug",
        help="builds the tests for debug (the default)",
        action="store_const",
        dest="build_style",
        const="debug",
        default="debug")

    # --- 'install' subcommand -----------------------------------------------
    install_parser = subparsers.add_parser(
        "install",
        description="Installs a built XCTest framework.")
    install_parser.set_defaults(func=strategy.install)
    install_parser.add_argument(
        "build_dir",
        help="An absolute path to a directory containing a built XCTest.so, "
             "XCTest.swiftmodule, and XCTest.swiftdoc.")
    install_parser.add_argument(
        "-m", "--module-install-path",
        help="Location at which to install XCTest.swiftmodule and "
             "XCTest.swiftdoc. This directory will be created if it doesn't "
             "already exist.")
    install_parser.add_argument(
        "-l", "--library-install-path",
        help="Location at which to install XCTest.so. This directory will be "
             "created if it doesn't already exist.")

    # Many versions of Python require a subcommand must be specified.
    # We handle this here: if no known subcommand (or none of the help options)
    # is included in the arguments, then insert the default subcommand
    # argument: 'build'.
    if any(a in ("build", "test", "install", "-h", "--help") for a in args):
        parsed_args = parser.parse_args(args=args)
    else:
        parsed_args = parser.parse_args(args=["build"] + args)

    # Execute the function for the subcommand we've been given.
    parsed_args.func(parsed_args)
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()
Commit note: force use of `-swift-version 3` when building the XCTest sources.
#!/usr/bin/env python
# build_script.py - Build, install, and test XCTest -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import fnmatch
import os
import subprocess
import sys
import tempfile
import textwrap
import platform
import errno
SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
def note(msg):
    """Print *msg* to stdout prefixed with this script's name."""
    print("xctest-build: {}".format(msg))
def run(command):
    """Echo *command* via note(), then execute it through the shell.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    note(command)
    subprocess.check_call(command, shell=True)
def _mkdirp(path):
    """Create the directory *path* (and parents) unless it already exists."""
    if os.path.exists(path):
        return
    run("mkdir -p {}".format(path))
def _find_files_with_extension(path, extension):
"""
In Python 3.5 and above, glob supports recursive patterns such as
'**/*.swift'. This function backports that functionality to Python 3.4
and below.
"""
paths = []
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.{}'.format(extension)):
paths.append(os.path.join(root, file_name))
return paths
def symlink_force(target, link_name):
    """Create a symlink to *target* at *link_name*, replacing any existing one.

    If *link_name* is a directory, the link is created inside it using
    target's basename.
    """
    if os.path.isdir(link_name):
        link_name = os.path.join(link_name, os.path.basename(target))
    try:
        os.symlink(target, link_name)
    except OSError as error:
        # Only "already exists" is recoverable: replace the old entry.
        if error.errno != errno.EEXIST:
            raise error
        os.remove(link_name)
        os.symlink(target, link_name)
class DarwinStrategy:
    """Build/test strategy used on Darwin (macOS).

    All work is delegated to xcodebuild and the XCTest Xcode workspace;
    installation is deliberately unsupported on this platform.
    """

    @staticmethod
    def requires_foundation_build_dir():
        # The Foundation build directory is not required on Darwin because the
        # Xcode workspace implicitly builds Foundation when building the XCTest
        # schemes.
        return False

    @staticmethod
    def build(args):
        """
        Build XCTest and place the built products in the given 'build_dir'.
        If 'test' is specified, also executes the 'test' subcommand.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)

        # Map the script's debug/release flag onto Xcode configuration names.
        if args.build_style == "debug":
            style_options = "Debug"
        else:
            style_options = "Release"

        run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
            "-scheme SwiftXCTest "
            "-configuration {style_options} "
            "SWIFT_EXEC=\"{swiftc}\" "
            "SWIFT_LINK_OBJC_RUNTIME=YES "
            "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\"".format(
                swiftc=swiftc,
                build_dir=build_dir,
                style_options=style_options,
                source_dir=SOURCE_DIR))

        if args.test:
            # Execute main() using the arguments necessary to run the tests.
            main(args=["test",
                       "--swiftc", swiftc,
                       build_dir])

    @staticmethod
    def test(args):
        """
        Test SwiftXCTest.framework, using the given 'swiftc' compiler, looking
        for it in the given 'build_dir'.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)

        # Same configuration mapping as in build().
        if args.build_style == "debug":
            style_options = "Debug"
        else:
            style_options = "Release"

        run("xcodebuild -workspace {source_dir}/XCTest.xcworkspace "
            "-scheme SwiftXCTestFunctionalTests "
            "-configuration {style_options} "
            "SWIFT_EXEC=\"{swiftc}\" "
            "SWIFT_LINK_OBJC_RUNTIME=YES "
            "SYMROOT=\"{build_dir}\" OBJROOT=\"{build_dir}\" ".format(
                swiftc=swiftc,
                build_dir=build_dir,
                style_options=style_options,
                source_dir=SOURCE_DIR))

    @staticmethod
    def install(args):
        """
        Installing XCTest is not supported on Darwin.
        """
        note("error: The install command is not supported on this platform")
        exit(1)
class GenericUnixStrategy:
    """Build/test/install strategy for non-Darwin Unix platforms.

    Invokes swiftc directly (no Xcode) and links against pre-built
    swift-corelibs-foundation — and, optionally, libdispatch — products.
    """

    @staticmethod
    def requires_foundation_build_dir():
        # This script does not know how to build Foundation in Unix environments,
        # so we need the path to a pre-built Foundation library.
        return True

    @staticmethod
    def build(args):
        """
        Build XCTest and place the built products in the given 'build_dir'.
        If 'test' is specified, also executes the 'test' subcommand.
        """
        swiftc = os.path.abspath(args.swiftc)
        build_dir = os.path.abspath(args.build_dir)
        foundation_build_dir = os.path.abspath(args.foundation_build_dir)
        core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
            foundation_build_dir, args.foundation_install_prefix)
        # libdispatch paths are optional; both must be supplied (checked below)
        # for dispatch include flags to be passed to swiftc.
        if args.libdispatch_build_dir:
            libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
        if args.libdispatch_src_dir:
            libdispatch_src_dir = os.path.abspath(args.libdispatch_src_dir)
        _mkdirp(build_dir)

        sourcePaths = _find_files_with_extension(
            os.path.join(SOURCE_DIR, 'Sources', 'XCTest'),
            'swift')

        # -g for debug builds, -O for release.
        if args.build_style == "debug":
            style_options = "-g"
        else:
            style_options = "-O"

        # Not incremental..
        # Build library
        if args.libdispatch_build_dir and args.libdispatch_src_dir:
            libdispatch_args = "-I {libdispatch_build_dir}/src -I {libdispatch_src_dir} ".format(
                libdispatch_build_dir=libdispatch_build_dir,
                libdispatch_src_dir=libdispatch_src_dir)
        else:
            libdispatch_args = ""

        # NOTE: Force -swift-version 3 to build XCTest sources.
        # FIX: the "-swift-version 3" fragment previously had no trailing
        # space, so adjacent-string concatenation produced the malformed
        # argument "-swift-version 3-I <dir>" on the swiftc command line.
        run("{swiftc} -Xcc -fblocks -c {style_options} -emit-object -emit-module "
            "-module-name XCTest -module-link-name XCTest -parse-as-library "
            "-emit-module-path {build_dir}/XCTest.swiftmodule "
            "-force-single-frontend-invocation "
            "-swift-version 3 "
            "-I {foundation_build_dir} -I {core_foundation_build_dir} "
            "{libdispatch_args} "
            "{source_paths} -o {build_dir}/XCTest.o".format(
                swiftc=swiftc,
                style_options=style_options,
                build_dir=build_dir,
                foundation_build_dir=foundation_build_dir,
                core_foundation_build_dir=core_foundation_build_dir,
                libdispatch_args=libdispatch_args,
                source_paths=" ".join(sourcePaths)))

        # Link the object file into the shared library libXCTest.so.
        run("{swiftc} -emit-library {build_dir}/XCTest.o "
            "-L {foundation_build_dir} -lswiftGlibc -lswiftCore -lFoundation -lm "
            # We embed an rpath of `$ORIGIN` to ensure other referenced
            # libraries (like `Foundation`) can be found solely via XCTest.
            "-Xlinker -rpath=\\$ORIGIN "
            "-o {build_dir}/libXCTest.so".format(
                swiftc=swiftc,
                build_dir=build_dir,
                foundation_build_dir=foundation_build_dir))

        if args.test:
            # Execute main() using the arguments necessary to run the tests.
            main(args=["test",
                       "--swiftc", swiftc,
                       "--foundation-build-dir", foundation_build_dir,
                       build_dir])

        # If --module-install-path and --library-install-path were specified,
        # we also install the built XCTest products.
        if args.module_path is not None and args.lib_path is not None:
            # Execute main() using the arguments necessary for installation.
            main(args=["install", build_dir,
                       "--module-install-path", args.module_path,
                       "--library-install-path", args.lib_path])

        note('Done.')

    @staticmethod
    def test(args):
        """
        Test the built XCTest.so library at the given 'build_dir', using the
        given 'swiftc' compiler.
        """
        lit_path = os.path.abspath(args.lit)
        if not os.path.exists(lit_path):
            raise IOError(
                'Could not find lit tester tool at path: "{}". This tool is '
                'requred to run the test suite. Unless you specified a custom '
                'path to the tool using the "--lit" option, the lit tool will be '
                'found in the LLVM source tree, which is expected to be checked '
                'out in the same directory as swift-corelibs-xctest. If you do '
                'not have LLVM checked out at this path, you may follow the '
                'instructions for "Getting Sources for Swift and Related '
                'Projects" from the Swift project README in order to fix this '
                'error.'.format(lit_path))

        # FIXME: Allow these to be specified by the Swift build script.
        lit_flags = "-sv --no-progress-bar"
        tests_path = os.path.join(SOURCE_DIR, "Tests", "Functional")
        foundation_build_dir = os.path.abspath(args.foundation_build_dir)
        core_foundation_build_dir = GenericUnixStrategy.core_foundation_build_dir(
            foundation_build_dir, args.foundation_install_prefix)
        if args.libdispatch_build_dir:
            libdispatch_build_dir = os.path.abspath(args.libdispatch_build_dir)
            # Make libdispatch.so discoverable next to the Foundation products.
            symlink_force(os.path.join(args.libdispatch_build_dir, "src", ".libs", "libdispatch.so"),
                          foundation_build_dir)

        if args.libdispatch_src_dir and args.libdispatch_build_dir:
            libdispatch_src_args = (
                "LIBDISPATCH_SRC_DIR={libdispatch_src_dir} "
                "LIBDISPATCH_BUILD_DIR={libdispatch_build_dir} "
                "LIBDISPATCH_OVERLAY_DIR={libdispatch_overlay_dir}".format(
                    libdispatch_src_dir=os.path.abspath(args.libdispatch_src_dir),
                    libdispatch_build_dir=os.path.join(args.libdispatch_build_dir, 'src', '.libs'),
                    libdispatch_overlay_dir=os.path.join(args.libdispatch_build_dir, 'src', 'swift')))
        else:
            libdispatch_src_args = ""

        # The environment variables are consumed by the lit test configuration.
        run('SWIFT_EXEC={swiftc} '
            'BUILT_PRODUCTS_DIR={built_products_dir} '
            'FOUNDATION_BUILT_PRODUCTS_DIR={foundation_build_dir} '
            'CORE_FOUNDATION_BUILT_PRODUCTS_DIR={core_foundation_build_dir} '
            '{libdispatch_src_args} '
            '{lit_path} {lit_flags} '
            '{tests_path}'.format(
                swiftc=os.path.abspath(args.swiftc),
                built_products_dir=args.build_dir,
                foundation_build_dir=foundation_build_dir,
                core_foundation_build_dir=core_foundation_build_dir,
                libdispatch_src_args=libdispatch_src_args,
                lit_path=lit_path,
                lit_flags=lit_flags,
                tests_path=tests_path))

    @staticmethod
    def install(args):
        """
        Install the XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc build
        products into the given module and library paths.
        """
        build_dir = os.path.abspath(args.build_dir)
        module_install_path = os.path.abspath(args.module_install_path)
        library_install_path = os.path.abspath(args.library_install_path)
        _mkdirp(module_install_path)
        _mkdirp(library_install_path)

        xctest_so = "libXCTest.so"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_so),
            os.path.join(library_install_path, xctest_so)))

        xctest_swiftmodule = "XCTest.swiftmodule"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_swiftmodule),
            os.path.join(module_install_path, xctest_swiftmodule)))

        xctest_swiftdoc = "XCTest.swiftdoc"
        run("cp {} {}".format(
            os.path.join(build_dir, xctest_swiftdoc),
            os.path.join(module_install_path, xctest_swiftdoc)))

    @staticmethod
    def core_foundation_build_dir(foundation_build_dir, foundation_install_prefix):
        """
        Given the path to a swift-corelibs-foundation built product directory,
        return the path to CoreFoundation built products.

        When specifying a built Foundation dir such as
        '/build/foundation-linux-x86_64/Foundation', CoreFoundation dependencies
        are placed in 'usr/lib/swift'. Note that it's technically not necessary to
        include this extra path when linking the installed Swift's
        'usr/lib/swift/linux/libFoundation.so'.
        """
        return os.path.join(foundation_build_dir,
                            foundation_install_prefix.strip("/"), 'lib', 'swift')
def main(args=None):
    """
    The main entry point for this script. Based on the subcommand given,
    delegates building or testing XCTest to a sub-parser and its corresponding
    function.

    :param args: command-line arguments, excluding the program name. When
        None, sys.argv[1:] is read at call time.
    """
    # FIX: a default of `sys.argv[1:]` in the signature is evaluated once at
    # import time, so later changes to sys.argv would be ignored. Use a None
    # sentinel and read argv at call time instead.
    if args is None:
        args = sys.argv[1:]

    # Pick the platform-appropriate strategy object; each exposes build/test/
    # install static methods with a common interface.
    strategy = DarwinStrategy if platform.system() == 'Darwin' else GenericUnixStrategy
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
            Build, test, and install XCTest.

            NOTE: In general this script should not be invoked directly. The
            recommended way to build and test XCTest is via the Swift build
            script. See this project's README for details.

            The Swift build script invokes this %(prog)s script to build,
            test, and install this project. You may invoke it in the same way
            to build this project directly. For example, if you are in a Linux
            environment, your install of Swift is located at "/swift" and you
            wish to install XCTest into that same location, here is a sample
            invocation of the build script:

            $ %(prog)s \\
                --swiftc="/swift/usr/bin/swiftc" \\
                --build-dir="/tmp/XCTest_build" \\
                --foundation-build-dir "/swift/usr/lib/swift/linux" \\
                --library-install-path="/swift/usr/lib/swift/linux" \\
                --module-install-path="/swift/usr/lib/swift/linux/x86_64"

            Note that installation is not supported on Darwin as this library
            is only intended to be used as a dependency in environments where
            Apple XCTest is not available.
            """))
    subparsers = parser.add_subparsers(
        description=textwrap.dedent("""
            Use one of these to specify whether to build, test, or install
            XCTest. If you don't specify any of these, 'build' is executed as a
            default. You may also use 'build' to also test and install the
            built products. Pass the -h or --help option to any of the
            subcommands for more information."""))

    # --- 'build' subcommand -------------------------------------------------
    build_parser = subparsers.add_parser(
        "build",
        description=textwrap.dedent("""
            Build XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc using the
            given Swift compiler. This command may also test and install the
            built products."""))
    build_parser.set_defaults(func=strategy.build)
    build_parser.add_argument(
        "--swiftc",
        help="Path to the 'swiftc' compiler that will be used to build "
             "XCTest.so, XCTest.swiftmodule, and XCTest.swiftdoc. This will "
             "also be used to build the tests for those built products if the "
             "--test option is specified.",
        required=True)
    build_parser.add_argument(
        "--build-dir",
        help="Path to the output build directory. If not specified, a "
             "temporary directory is used.",
        default=tempfile.mkdtemp())
    build_parser.add_argument(
        "--foundation-build-dir",
        help="Path to swift-corelibs-foundation build products, which "
             "the built XCTest.so will be linked against.",
        required=strategy.requires_foundation_build_dir())
    build_parser.add_argument(
        "--foundation-install-prefix",
        help="Path to the installation location for swift-corelibs-foundation "
             "build products ('%(default)s' by default); CoreFoundation "
             "dependencies are expected to be found under "
             "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.",
        default="/usr")
    build_parser.add_argument(
        "--libdispatch-build-dir",
        help="Path to swift-corelibs-libdispatch build products, which "
             "the built XCTest.so will be linked against.")
    build_parser.add_argument(
        "--libdispatch-src-dir",
        help="Path to swift-corelibs-libdispatch source tree, which "
             "the built XCTest.so will be linked against.")
    build_parser.add_argument(
        "--module-install-path",
        help="Location at which to install XCTest.swiftmodule and "
             "XCTest.swiftdoc. This directory will be created if it doesn't "
             "already exist.",
        dest="module_path")
    build_parser.add_argument(
        "--library-install-path",
        help="Location at which to install XCTest.so. This directory will be "
             "created if it doesn't already exist.",
        dest="lib_path")
    build_parser.add_argument(
        "--release",
        help="builds for release",
        action="store_const",
        dest="build_style",
        const="release",
        default="debug")
    build_parser.add_argument(
        "--debug",
        help="builds for debug (the default)",
        action="store_const",
        dest="build_style",
        const="debug",
        default="debug")
    build_parser.add_argument(
        "--test",
        help="Whether to run tests after building. Note that you must have "
             "cloned https://github.com/apple/swift-llvm at {} in order to "
             "run this command.".format(os.path.join(
                 os.path.dirname(SOURCE_DIR), 'llvm')),
        action="store_true")

    # --- 'test' subcommand --------------------------------------------------
    test_parser = subparsers.add_parser(
        "test",
        description="Tests a built XCTest framework at the given path.")
    test_parser.set_defaults(func=strategy.test)
    test_parser.add_argument(
        "build_dir",
        help="An absolute path to a directory containing the built XCTest.so "
             "library.")
    test_parser.add_argument(
        "--swiftc",
        help="Path to the 'swiftc' compiler used to build and run the tests.",
        required=True)
    test_parser.add_argument(
        "--lit",
        help="Path to the 'lit' tester tool used to run the test suite. "
             "'%(default)s' by default.",
        default=os.path.join(os.path.dirname(SOURCE_DIR),
                             "llvm", "utils", "lit", "lit.py"))
    test_parser.add_argument(
        "--foundation-build-dir",
        help="Path to swift-corelibs-foundation build products, which the "
             "tests will be linked against.",
        required=strategy.requires_foundation_build_dir())
    test_parser.add_argument(
        "--foundation-install-prefix",
        help="Path to the installation location for swift-corelibs-foundation "
             "build products ('%(default)s' by default); CoreFoundation "
             "dependencies are expected to be found under "
             "FOUNDATION_BUILD_DIR/FOUNDATION_INSTALL_PREFIX.",
        default="/usr")
    test_parser.add_argument(
        "--libdispatch-build-dir",
        help="Path to swift-corelibs-libdispatch build products, which "
             "the built XCTest.so will be linked against.")
    test_parser.add_argument(
        "--libdispatch-src-dir",
        help="Path to swift-corelibs-libdispatch source tree, which "
             "the built XCTest.so will be linked against.")
    test_parser.add_argument(
        "--release",
        help="builds the tests for release",
        action="store_const",
        dest="build_style",
        const="release",
        default="debug")
    test_parser.add_argument(
        "--debug",
        help="builds the tests for debug (the default)",
        action="store_const",
        dest="build_style",
        const="debug",
        default="debug")

    # --- 'install' subcommand -----------------------------------------------
    install_parser = subparsers.add_parser(
        "install",
        description="Installs a built XCTest framework.")
    install_parser.set_defaults(func=strategy.install)
    install_parser.add_argument(
        "build_dir",
        help="An absolute path to a directory containing a built XCTest.so, "
             "XCTest.swiftmodule, and XCTest.swiftdoc.")
    install_parser.add_argument(
        "-m", "--module-install-path",
        help="Location at which to install XCTest.swiftmodule and "
             "XCTest.swiftdoc. This directory will be created if it doesn't "
             "already exist.")
    install_parser.add_argument(
        "-l", "--library-install-path",
        help="Location at which to install XCTest.so. This directory will be "
             "created if it doesn't already exist.")

    # Many versions of Python require a subcommand must be specified.
    # We handle this here: if no known subcommand (or none of the help options)
    # is included in the arguments, then insert the default subcommand
    # argument: 'build'.
    if any(a in ("build", "test", "install", "-h", "--help") for a in args):
        parsed_args = parser.parse_args(args=args)
    else:
        parsed_args = parser.parse_args(args=["build"] + args)

    # Execute the function for the subcommand we've been given.
    parsed_args.func(parsed_args)
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import hexchat
import requests
import sys
import threading
# HexChat plugin metadata, read by HexChat's plugin loader.
__module_name__ = "Twitch Title"
__module_author__ = "Poorchop"
__module_version__ = "1.0"
__module_description__ = "Display stream status and description for TwitchTV streams"
# t: the active threading.Timer driving the periodic update (None when idle).
t = None
# twitch_chans: maps "#channel" -> last colour-stripped topic text set for it.
twitch_chans = {}
def is_twitch():
    """Return True when the current context's server is a twitch.tv host."""
    server = hexchat.get_info("server")
    return bool(server) and "twitch.tv" in server
def set_topic(channel, display_name, status, game, title):
    """Write a colourised stream-status line into the channel's topic bar.

    Skips the update entirely when the colour-stripped text matches what
    was last recorded for this channel, avoiding redundant TOPIC events.
    """
    global twitch_chans
    key = "#{}".format(channel)
    msg = "\00318{0}\00399 - {1} | Now playing: \00318{2}\00399 | {3}".format(display_name, status, game, title)
    stripped_msg = hexchat.strip(msg, -1, 3)
    if twitch_chans[key] == stripped_msg:
        return
    twitch_chans[key] = stripped_msg
    print(msg)
    if sys.platform == "win32":
        # HexChat on Windows has poor support for colors in topic bar
        topic_text = stripped_msg
    else:
        topic_text = msg
    hexchat.command("RECV :{0}!Topic@twitch.tv TOPIC #{0} :{1}".format(channel, topic_text))
def get_stream_info(channel):
    """Query the Twitch kraken API for one channel and refresh its topic."""
    url = "https://api.twitch.tv/kraken/streams?"
    params = {"channel": channel}
    data = requests.get(url, params=params).json()
    streams = data["streams"]
    if streams:
        info = streams[0]["channel"]
        status = "\00319\002LIVE\002\00399"
        display_name = info["display_name"]
        game = info["game"]
        title = info["status"]
    else:
        # Offline defaults: bare channel name, no game, italicised notice.
        status = "\00320\002OFFLINE\002\00399"
        display_name = channel
        game = ""
        title = "\035Stream is offline\017"
    set_topic(channel, display_name, status, game, title)
def update_status():
    """Refresh stream information for every tracked Twitch channel."""
    global twitch_chans
    for chan in twitch_chans:
        # Stored keys carry a leading '#'; the API expects the bare name.
        get_stream_info(chan[1:])
def get_twitch_chans():
    """Add any newly joined Twitch channels to the tracking dict."""
    global twitch_chans
    for chan in hexchat.get_list("channels"):
        on_twitch = chan.type == 2 and chan.context.get_info("server") == "tmi.twitch.tv"
        if on_twitch and chan.channel not in twitch_chans:
            twitch_chans[chan.channel] = ""
def unload_cb(userdata):
    """
    Prevent HexChat from crashing while a thread is active.

    Cancels and joins the update timer before the plugin unloads.  Guards
    against t being None — if no Twitch channel was open when the plugin
    loaded, get_current_status() never started a timer, and the original
    unconditional t.cancel() raised AttributeError during unload.
    """
    global t
    if t is not None:
        t.cancel()
        t.join()
        t = None
def channel_check():
    """
    Check to see if there are any open Twitch channels; if so, then start/continue the threaded process.

    Returns True when at least one open channel is a Twitch room.  The
    previous version returned False from inside the loop's else branch,
    so it gave a verdict after inspecting only the first channel; the
    negative answer must wait until every channel has been examined.
    """
    for chan in hexchat.get_list("channels"):
        if chan.type == 2 and chan.context.get_info("server") == "tmi.twitch.tv":
            return True
    return False
def get_current_status():
    """
    Update the stream status every 10 minutes.
    """
    global t
    if not channel_check():
        return
    get_twitch_chans()
    update_status()
    # Reschedule ourselves; daemon=True so the timer cannot block exit.
    t = threading.Timer(600, get_current_status)
    t.daemon = True
    t.start()
# Kick off the first poll immediately; later runs are rescheduled from
# within get_current_status via threading.Timer.
get_current_status()
# Ensure the timer thread is torn down before the plugin unloads.
hexchat.hook_unload(unload_cb)
print(__module_name__ + " version " + __module_version__ + " loaded")
Fix channel_check bug and add join callback
#!/usr/bin/env python3
import hexchat
import requests
import sys
import threading
# HexChat plugin metadata, read by HexChat's plugin loader.
__module_name__ = "Twitch Title"
__module_author__ = "Poorchop"
__module_version__ = "1.0"
__module_description__ = "Display stream status and description for TwitchTV streams"
# t: the active threading.Timer driving the periodic update (None when idle).
t = None
# twitch_chans: maps "#channel" -> last colour-stripped topic text set for it.
twitch_chans = {}
def is_twitch():
    """Return True when the current context's server is a twitch.tv host."""
    server = hexchat.get_info("server")
    if not server:
        return False
    return "twitch.tv" in server
def set_topic(channel, display_name, status, game, title):
    """Write a colourised stream-status line into the channel's topic bar.

    Skips the update when the colour-stripped text matches what was last
    recorded for this channel.  Two hardening fixes: dict access uses
    .get() so an untracked channel cannot raise KeyError, and
    hexchat.find_context() is checked for None (it returns None when no
    context matches) before calling .prnt(), which previously could raise
    AttributeError.
    """
    global twitch_chans
    key = "#{}".format(channel)
    msg = "\00318{0}\00399 - {1} | Now playing: \00318{2}\00399 | {3}".format(display_name, status, game, title)
    stripped_msg = hexchat.strip(msg, -1, 3)
    if twitch_chans.get(key) == stripped_msg:
        return
    twitch_chans[key] = stripped_msg
    # try to print stream status in current channel - doesn't seem to work without Do At plugin
    current_chan = hexchat.get_info("channel")
    ctx = hexchat.find_context(channel=current_chan)
    if ctx is not None:
        ctx.prnt(msg)
    else:
        # No matching context (e.g. tab just closed); fall back to the
        # plugin's default output instead of crashing.
        print(msg)
    if sys.platform == "win32":
        # HexChat on Windows has poor support for colors in topic bar
        hexchat.command("RECV :{0}!Topic@twitch.tv TOPIC #{0} :{1}".format(channel, stripped_msg))
    else:
        hexchat.command("RECV :{0}!Topic@twitch.tv TOPIC #{0} :{1}".format(channel, msg))
def get_stream_info(channel):
    """Query the Twitch kraken API for one channel and refresh its topic."""
    response = requests.get("https://api.twitch.tv/kraken/streams?", params={"channel": channel})
    streams = response.json()["streams"]
    if not streams:
        # Offline: bare channel name, no game, italicised notice.
        set_topic(channel, channel, "\00320\002OFFLINE\002\00399", "", "\035Stream is offline\017")
        return
    chan_info = streams[0]["channel"]
    set_topic(
        channel,
        chan_info["display_name"],
        "\00319\002LIVE\002\00399",
        chan_info["game"],
        chan_info["status"],
    )
def update_status():
    """Refresh stream information for every tracked Twitch channel."""
    global twitch_chans
    # Stored keys carry a leading '#'; the API expects the bare name.
    names = [chan[1:] for chan in twitch_chans]
    for name in names:
        get_stream_info(name)
def get_twitch_chans():
global twitch_chans
for chan in hexchat.get_list("channels"):
if chan.type == 2 and chan.context.get_info("server") == "tmi.twitch.tv" and chan.channel not in twitch_chans:
twitch_chans[chan.channel] = ""
def channel_check():
    """
    Check to see if there are any open Twitch channels; if so, then start/continue the threaded process
    """
    return any(
        chan.type == 2 and chan.server == "tmi.twitch.tv"
        for chan in hexchat.get_list("channels")
    )
def get_current_status():
    """
    Poll stream status periodically while Twitch channels are open.

    Reschedules itself on a 30-second threading.Timer as long as
    channel_check() finds an open Twitch room (the old docstring said
    10 minutes, but the interval here is 30 s); otherwise tears the
    timer down so no background thread lingers.
    """
    global t
    if channel_check():
        get_twitch_chans()
        update_status()
        t = threading.Timer(30, get_current_status)
        t.daemon = True
        t.start()
    else:
        if t:
            t.cancel()
            # After the first run this function executes inside the
            # timer's own thread, and t *is* that thread; joining it
            # there raised RuntimeError ("cannot join current thread").
            # Join only when called from a different thread.
            if t is not threading.current_thread():
                t.join()
        t = None
def join_cb(word, word_eol, userdata):
    """
    Restart the threaded process if necessary.

    Fires on new contexts; when we are on Twitch and no timer is
    running, kick the polling loop back off.
    """
    global t
    if t is None and is_twitch():
        get_current_status()
def unload_cb(userdata):
    """
    Prevent HexChat from crashing while a thread is active.

    Cancels and joins the update timer before the plugin unloads.  Guards
    against t being None — get_current_status() sets t to None when no
    Twitch channel is open, so the original unconditional t.cancel()
    raised AttributeError during unload, the very crash this callback
    exists to prevent.
    """
    global t
    if t is not None:
        t.cancel()
        t.join()
        t = None
# Ensure the timer thread is torn down before the plugin unloads.
hexchat.hook_unload(unload_cb)
# "Open Context" fires when any new tab/window opens; join_cb restarts the
# poll timer if the new context turns out to be on Twitch.
hexchat.hook_print("Open Context", join_cb)
# Kick off the first poll immediately; later runs are rescheduled from
# within get_current_status via threading.Timer.
get_current_status()
print(__module_name__ + " version " + __module_version__ + " loaded")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.