code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Helper functions and data
"""
import pygtk
pygtk.require('2.0');
import gtk
import math
import os
import threading
import appconsts
from editorstate import PROJECT
# ---------------------------------- CLASSES
class EmptyClass:
    """Generic attribute container: attributes are assigned at runtime by callers."""
    pass
class Ticker:
    """
    Calls function repeatedly with given delay between calls.

    The callback runs on a daemon-less background thread until
    stop_ticker() is called (or the callback sets self.running False).
    """
    def __init__(self, action, delay):
        # action: zero-argument callable invoked once per tick
        # delay: default wait in seconds between ticks
        self.action = action
        self.delay = delay
        self.running = False
        self.exited = False

    def start_ticker(self, delay=None):
        """Start the tick thread; optional delay overrides the default."""
        self.ev = threading.Event()
        if delay is None: # If no delay specified, use default delay set at creation time
            delay = self.delay
        self.thread = threading.Thread(target=self.runner,
                                       args=(self.ev,
                                             delay,
                                             self.action))
        self.running = True
        self.thread.start()

    def stop_ticker(self):
        """Request the tick thread to exit; safe to call when never started."""
        # Flag is cleared first so a failing event access cannot leave
        # self.running stale (hazard noted in the original code).
        self.running = False
        try:
            self.ev.set()
        except Exception:
            pass # called when not running (start_ticker() never ran, no self.ev)

    def runner(self, event, delay, action):
        """Thread target: invoke action every delay seconds until stopped."""
        while True:
            if not self.running:
                break
            action()
            if not self.running:
                break
            # is_set() replaces isSet(), which is a deprecated alias.
            if event.is_set():
                break
            event.wait(delay)
        self.exited = True
# -------------------------------- UTIL FUNCTIONS
def fps():
    """Return frames per second of the current project's profile."""
    return PROJECT().profile.fps()
def clip_length_string(length):
    """
    Returns length string for length in frames.

    Output is the most compact of "<h>h<m>m<s>s", "<m>m<s>s", "<s>s",
    or "<frames>fr" for lengths under one second.
    """
    fr = length % fps() # frames past the last full second
    sec = length / fps()
    mins = sec / 60
    sec = int(math.floor(sec % 60))
    hours = int(math.floor(mins / 60))
    mins = int(math.floor(mins % 60))
    hr_str = ""
    if hours > 0:
        hr_str = str(hours) + "h"
    min_str = ""
    if mins > 0 or hours > 0:
        min_str = str(mins) + "m"
    if sec > 0 or min_str != "":
        s_str = str(sec) + "s"
    else:
        # Under one second: show plain frame count.
        s_str = str(fr) + "fr"
    return hr_str + min_str + s_str
def get_tc_string(frame):
    """
    Returns timecode string for frame using the current project's fps.
    """
    return get_tc_string_with_fps(frame, fps())
def get_tc_string_with_fps(frame, frames_per_sec):
    """Return "hh:mm:ss:ff" timecode string for frame at the given fps."""
    frames = frame % frames_per_sec
    total_seconds = frame / frames_per_sec
    total_minutes = total_seconds / 60
    seconds = total_seconds % 60
    hours = total_minutes / 60
    minutes = total_minutes % 60
    # %d truncates float field values toward zero.
    return "%02d:%02d:%02d:%02d" % (hours, minutes, seconds, frames)
def get_time_str_for_sec_float(sec):
    """
    Return a human-readable duration string for a seconds value,
    e.g. "1h 2m 3s"; days are included for durations of 24h and over.
    """
    minutes = sec / 60
    seconds = sec % 60
    hours = minutes / 60
    minutes = minutes % 60
    s_part = str(int(seconds)) + "s"
    m_part = str(int(minutes)) + "m "
    if hours >= 24.0:
        days = hours / 24
        hours = hours % 24
        return str(int(days)) + " days " + str(int(hours)) + "h " + m_part + s_part
    if hours >= 1.0:
        return str(int(hours)) + "h " + m_part + s_part
    if minutes >= 1.0:
        return m_part + s_part
    return s_part
def get_track_name(track, sequence):
    """Return user-visible track name like "V1" or "A2" for a track object."""
    if track.type == appconsts.VIDEO:
        # Video tracks are numbered for the USER as 'V1', 'V2', ... with 'V1'
        # being tracks[sequence.first_video_index]; numbering grows upwards.
        if track.id == sequence.first_video_index:
            return "V1"
        return "V" + str(track.id - sequence.first_video_index + 1)
    # Audio tracks are numbered in the *opposite* direction for the USER view:
    # with audio tracks at tracks[1] and tracks[2] the user sees tracks[1] as
    # 'A2' and tracks[2] as 'A1'. This is also compensated for in
    # Sequence.get_first_active_track().
    return "A" + str(sequence.first_video_index - track.id)
def get_media_source_file_filter():
    """
    Build and return a gtk.FileFilter accepting known media MIME types.

    The duplicated entries of the original hand-written list
    ("video/ogg", "audio/ogg", "audio/x-ms-wma") have been removed;
    the accepted set of types is unchanged.
    """
    # No idea if these actually play or not, except images mime types
    mime_types = ["image*", "video*", "audio*",
                  "video/x-theora+ogg", "video/x-sgi-movie", "video/ogg",
                  "video/x-ogm", "video/x-ogm+ogg", "video/x-ms-asf",
                  "video/x-ms-wmv", "video/x-msvideo", "video/x-matroska",
                  "video/x-flv", "video/vnd.rn-realvideo", "video/quicktime",
                  "video/mpeg", "video/mp4", "video/mp2t", "video/isivideo",
                  "video/dv", "video/annodex", "video/3gpp", "video/webm",
                  "audio/aac", "audio/ac3", "audio/AMR", "audio/ogg",
                  "audio/midi", "audio/mp2", "audio/mp3", "audio/mp4",
                  "audio/mpeg", "audio/vnd.rn-realaudio", "audio/vorbis",
                  "audio/x-adpcm", "audio/x-aifc", "audio/x-aiff",
                  "audio/x-aiffc", "audio/x-flac", "audio/x-flac+ogg",
                  "audio/x-m4b", "audio/x-matroska", "audio/x-ms-wma",
                  "audio/x-oggflac", "audio/x-ms-asx", "audio/x-gsm",
                  "audio/x-riff", "audio/x-speex", "audio/x-speex+ogg",
                  "audio/x-tta", "audio/x-voc", "audio/x-vorbis+ogg",
                  "audio/x-wav", "audio/annodex",
                  "image/bmp", "image/tiff", "image/gif", "image/x-tga",
                  "image/png", "image/jpeg", "image/svg+xml"]
    f = gtk.FileFilter()
    f.set_name("Media MIME types")
    for mime_type in mime_types:
        f.add_mime_type(mime_type)
    return f
def get_image_sequence_file_filter():
    """Build and return a gtk.FileFilter accepting still image MIME types."""
    f = gtk.FileFilter()
    f.set_name("Image files")
    for mime_type in ("image/bmp", "image/tiff", "image/gif",
                      "image/x-tga", "image/png", "image/jpeg"):
        f.add_mime_type(mime_type)
    return f
def file_extension_is_graphics_file(ext):
    """Return True if ext (with or without leading dot) is a known image type."""
    return ext.lstrip(".").lower() in _graphics_file_extensions
def get_file_type(file_path):
    """
    Classify file by extension as "video", "audio", "image" or "unknown".
    """
    ext = os.path.splitext(file_path)[1].lstrip(".").lower()
    for extensions, file_type in ((_video_file_extensions, "video"),
                                  (_audio_file_extensions, "audio"),
                                  (_graphics_file_extensions, "image")):
        if ext in extensions:
            return file_type
    return "unknown"
def hex_to_rgb(value):
    """
    Convert a hex color string such as "#rrggbb" into a tuple of channel ints.

    The digits are split into three equal-width channel fields, so 3, 6 and
    12 digit forms ("#fff", "#ffffff", gdk 16-bit "#ffffffffffff") all work.
    """
    value = value.lstrip('#')
    lv = len(value)
    # // keeps the slice bounds ints; behaves identically under Python 2
    # and stays correct under Python 3 (where / would produce a float).
    step = lv // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, lv, step))
def int_to_hex(n):
    """
    Return the hex digits of n without the '0x' prefix.

    NOTE: no zero padding — values under 16 produce a single digit.
    """
    return hex(n)[2:]
def gdk_color_str_to_mlt_color_str(gdk_color_str):
    """
    Convert a gdk color string (16 bits per channel) to a "#rrggbb" MLT
    color string.

    Fix: uses zero-padded "%02x" per channel. The previous int_to_hex()
    based version dropped leading zeros for channel values < 16, which
    produced malformed color strings shorter than 6 hex digits.
    """
    raw_r, raw_g, raw_b = hex_to_rgb(gdk_color_str)
    return "#%02x%02x%02x" % (int((float(raw_r) * 255.0) / 65535.0),
                              int((float(raw_g) * 255.0) / 65535.0),
                              int((float(raw_b) * 255.0) / 65535.0))
def gdk_color_str_to_int(gdk_color_str):
    """
    Pack gdk color string into a 32-bit RGBA int; the alpha byte is 00.
    """
    channels_16bit = hex_to_rgb(gdk_color_str)
    red, green, blue = [int((float(channel) * 255.0) / 65535.0)
                        for channel in channels_16bit]
    # Shifts are disjoint, so | is equivalent to the original additions.
    return (red << 24) | (green << 16) | (blue << 8)
def get_cairo_color_tuple_255_rgb(r, g, b):
    """Convert 0-255 channel values to a 0.0-1.0 cairo RGB tuple."""
    return tuple(float(channel) / 255.0 for channel in (r, g, b))
def cairo_color_from_gdk_color(gdk_color):
    """
    Convert a gtk.gdk.Color (16-bit channels) to a 0.0-1.0 cairo RGB tuple.

    Consistency fix: the blue divisor was the int literal 65535 while the
    others were 65535.0; now all three use 65535.0 (values are unchanged).
    """
    raw_r, raw_g, raw_b = hex_to_rgb(gdk_color.to_string())
    return (float(raw_r) / 65535.0, float(raw_g) / 65535.0, float(raw_b) / 65535.0)
def do_nothing():
    """No-op placeholder callback."""
    pass
def get_hidden_user_dir_path():
    """Return path of the hidden per-user data directory, with trailing slash."""
    home = os.getenv("HOME")
    return home + "/.flowblade/"
def get_hidden_screenshot_dir_path():
    """Return path of the directory used for timeline screenshot frames."""
    return get_hidden_user_dir_path() + "screenshot/"
# File extension lists used by get_file_type() and friends.
_audio_file_extensions = ["act", "aif", "aiff", "alfc", "aac", "alac", "amr",
                          "atrac", "awb", "dct", "dss", "dvf", "flac", "gsm",
                          "iklax", "m4a", "m4p", "mmf", "mp2", "mp3", "mpc",
                          "msv", "ogg", "oga", "opus", "pcm", "u16be", "u16le",
                          "u24be", "u24le", "u32be", "u32le", "u8", "ra", "rm",
                          "raw", "tta", "vox", "wav", "wma", "wavpack"]
# Still image extensions recognized as graphics files.
_graphics_file_extensions = ["bmp", "tiff", "tif", "gif", "tga", "png",
                             "pgm", "jpeg", "jpg", "svg"]
# Known video file extensions. The duplicate entries of the original list
# ("m2p" and "m4v" appeared twice) have been removed; membership tests are
# unaffected.
_video_file_extensions = ["avi", "dv", "flv", "mkv", "mpg", "mpeg", "m2t",
                          "mov", "mp4", "qt", "vob", "webm", "3gp", "3g2",
                          "asf", "divx", "dirac", "f4v", "h264", "hdmov",
                          "hdv", "m2p", "m2ts", "m2v", "m4e", "mjpg", "mp4v",
                          "mts", "m21", "m4v", "mj2", "m1v", "mpv", "mxf",
                          "mpegts", "mpegtsraw", "mpegvideo", "nsv", "ogv",
                          "ogx", "ps", "ts", "tsv", "tsa", "vfw", "video",
                          "wtv", "wm", "wmv", "xvid", "y4m", "yuv"]
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles user edit events for insert and over move modes.
"""
import pygtk
pygtk.require('2.0');
import gtk
import appconsts
import dialogutils
import dnd
import edit
from editorstate import current_sequence
from editorstate import get_track
from editorstate import PLAYER
import gui
import updater
import tlinewidgets
import utils
# Mouse delta in pix needed before selection is interpreted as move.
MOVE_START_LIMIT = 5

# Width of area in pixels that is interpreted as an attempt to place overwrite
# clips at an existing cut, starting from edit.
MAGNETIC_AREA_IN_PIX = 5

# Selected clips in timeline.
# Selection handling is part of this module because
# selections can only be done when editing in move modes.
# There are no area or multitrack selections in this application.
selected_track = -1
selected_range_in = -1 # clip index
selected_range_out = -1 # clip index, inclusive

# Flag for clearing selection when releasing after pressing on selected.
pressed_on_selected = True

# Blank clips can be selected but not moved.
drag_disabled = False

# Data/state for ongoing edit; None when no edit is in progress.
edit_data = None
#------------------------------ playback control
# These four buttons act differently in trim modes and move modes.
def play_pressed():
    # This handles only move modes, see trimmodes.py module for others.
    PLAYER().start_playback()

def stop_pressed():
    # This handles only move modes, see trimmodes.py module for others.
    PLAYER().stop_playback()

def prev_pressed():
    # This handles only move modes, see trimmodes.py module for others.
    PLAYER().seek_delta(-1)

def next_pressed():
    # This handles only move modes, see trimmodes.py module for others.
    PLAYER().seek_delta(1)
# ----------------------------------- selection handling
def clear_selected_clips():
    """Deselect the currently selected clip range, if any."""
    if selected_track != -1:
        set_range_selection(selected_track, selected_range_in,
                            selected_range_out, False)
    clear_selection_values()
def clear_selection_values():
    """Reset selection tracking state and update menu item sensitivities."""
    global selected_track, selected_range_in, selected_range_out
    selected_track = -1
    selected_range_in = -1
    selected_range_out = -1
    # -1 range values make transition render / edit menu items insensitive.
    updater.set_transition_render_edit_menu_items_sensitive(selected_range_in, selected_range_out)
def set_range_selection(track_index, range_in, range_out, is_selected):
    """
    Sets range of clips in track to selection value.
    range_out is inclusive.
    """
    track = get_track(track_index)
    for clip in track.clips[range_in:range_out + 1]:
        clip.selected = is_selected
def select_clip(track_index, clip_index):
    """
    Selects single clip, clearing any previous selection.
    """
    clear_selected_clips()
    set_range_selection(track_index, clip_index, clip_index, True)
    updater.set_transition_render_edit_menu_items_sensitive(clip_index, clip_index)
    global selected_track, selected_range_in, selected_range_out
    selected_track = track_index
    selected_range_in = clip_index
    selected_range_out = clip_index
def _select_multiple_clips(track_index, range_start, range_end):
    """
    Selects continuous range of clips; both ends inclusive.
    """
    clear_selected_clips()
    set_range_selection(track_index, range_start, range_end, True)
    updater.set_transition_render_edit_menu_items_sensitive(range_start, range_end)
    global selected_track, selected_range_in, selected_range_out
    selected_track = track_index
    selected_range_in = range_start
    selected_range_out = range_end
def _get_blanck_range(track, clip_index):
    """
    Return (start_index, end_index) of the continuous run of blank clips
    around clip_index; both ends inclusive.
    """
    # look backwards
    start_index = _get_blanck_range_limit(track, clip_index, -1)
    # Look forward
    end_index = _get_blanck_range_limit(track, start_index, 1)
    return (start_index, end_index)
def _get_blanck_range_limit(track, clip_index, delta):
try:
while track.clips[clip_index].is_blanck_clip:
clip_index += delta
if clip_index < 0: # It'll start looping from end other wise
return 0
except:
pass
return clip_index - delta
def select_blank_range(track, clip):
    """Select the whole run of blank clips containing the given blank clip."""
    clip_index = track.clips.index(clip)
    range_in, range_out = _get_blanck_range(track, clip_index)
    _select_multiple_clips(track.id, range_in, range_out)
# --------------------------------- INSERT MOVE EVENTS
def insert_move_press(event, frame):
    """
    User presses mouse when in insert move mode.
    """
    _move_mode_pressed(event, frame)
def insert_move_move(x, y, frame, state):
    """
    User moves mouse when in insert move mode.
    """
    global edit_data, drag_disabled
    # Nothing to do unless a movable selection was pressed.
    if drag_disabled or edit_data == None:
        return
    _move_mode_move(frame, x, y)
    updater.repaint_tline()
def insert_move_release(x, y, frame, state):
    """
    User releases mouse when in insert move mode.

    Completes the insert move edit: computes the insert point from mouse
    travel, checks target track locking, and commits either a same-track or
    a multitrack insert action. Edit mode data is always cleared at the end.
    """
    global edit_data, drag_disabled
    # A release after pressing a blank clip only ends the disabled drag.
    if drag_disabled:
        drag_disabled = False
        return
    # If mouse was not pressed on clip we can't move anything.
    if edit_data == None:
        return
    # Get attempted insert frame
    press_frame = edit_data["press_frame"]
    first_clip_start = edit_data["first_clip_start"]
    attempt_insert_frame = first_clip_start + (frame - press_frame)
    # Get tracks and insert index
    track = edit_data["track_object"]
    to_track = edit_data["to_track_object"]
    insert_index = to_track.get_clip_index_at(attempt_insert_frame)
    # Check locking of target track. Source track checked at press event.
    if _track_is_locked(to_track):
        edit_data = None
        tlinewidgets.set_edit_mode_data(edit_data)
        updater.repaint_tline()
        return
    # Update data for editmode overlay
    edit_data["current_frame"] = frame
    edit_data["insert_frame"] = track.clip_start(insert_index)
    # Collect selection data
    range_in = edit_data["selected_range_in"]
    range_out = edit_data["selected_range_out"]
    data = {"track":track,
            "insert_index":insert_index,
            "selected_range_in":range_in,
            "selected_range_out":range_out,
            "move_edit_done_func":move_edit_done}
    # Do edit. Use different actions depending on if
    # clip is moved to a different track.
    if track == to_track:
        # Do edit if we were moving and insert is not into same index.
        # Update selection after edit.
        if (edit_data["move_on"] == True
            and (insert_index < selected_range_in
                 or insert_index > selected_range_out)):
            # Remember selected range length to later find index of the
            # dropped range after edit.
            old_range_length = selected_range_out - selected_range_in
            clear_selected_clips()
            action = edit.insert_move_action(data)
            action.do_edit()
            # Move playback to first frame of dropped range
            select_index = insert_index
            if (range_in < insert_index): # when moving forward clips are removed affecting later indexes
                select_index = insert_index - (old_range_length + 1)
            PLAYER().seek_frame(track.clip_start(select_index), False)
        else:
            _move_mode_released()
    else: # insert to different track
        data["to_track"] = to_track
        clear_selected_clips()
        action = edit.multitrack_insert_move_action(data)
        action.do_edit()
        PLAYER().seek_frame(to_track.clip_start(insert_index), False)
    # Clear edit mode data
    edit_data = None
    tlinewidgets.set_edit_mode_data(edit_data)
    updater.repaint_tline()
# --------------------------------- OVERWRITE MOVE EVENTS
def overwrite_move_press(event, frame):
    """
    User presses mouse when in overwrite move mode.
    """
    _move_mode_pressed(event, frame)
    global edit_data
    if edit_data != None:
        edit_data["over_in"] = -1
        edit_data["over_out"] = -1
        # Total length of the moving clip/s
        edit_data["moving_length"] = sum(edit_data["clip_lengths"])
def overwrite_move_move(x, y, frame, state):
    """
    User moves mouse when in overwrite move mode.

    Once the drag has actually started ("move_on"), computes the timeline
    range over_in..over_out that the moved clips would overwrite, snapping
    the head to a nearby cut when within the magnetic pixel range.
    """
    global edit_data, drag_disabled
    if drag_disabled:
        return
    if edit_data == None:
        return
    _move_mode_move(frame, x, y)
    # Calculate overwrite area if moving
    if edit_data["move_on"] == True:
        # get in point
        over_in = edit_data["attempt_insert_frame"]
        # Check and do magnet
        cut_x = tlinewidgets._get_frame_x(edit_data["insert_frame"])
        clip_head_x = tlinewidgets._get_frame_x(edit_data["attempt_insert_frame"])
        # Snap to the cut when the clip head lands within the magnetic area.
        if abs(clip_head_x - cut_x) < MAGNETIC_AREA_IN_PIX:
            over_in = edit_data["insert_frame"]
        over_out = over_in + edit_data["moving_length"]
        edit_data["over_in"] = over_in
        edit_data["over_out"] = over_out
    updater.repaint_tline()
def overwrite_move_release(x, y, frame, state):
    """
    User releases mouse when in overwrite move mode.

    Commits a same-track or multitrack overwrite edit for the selected clip
    range and clears edit mode data afterwards.
    """
    global edit_data, drag_disabled
    # A release after pressing a blank clip only ends the disabled drag.
    if drag_disabled:
        drag_disabled = False
        return
    if edit_data == None:
        return
    press_frame = edit_data["press_frame"]
    first_clip_start = edit_data["first_clip_start"]
    track = edit_data["track_object"]
    to_track = edit_data["to_track_object"]
    over_in = first_clip_start + (frame - press_frame)
    over_out = over_in + edit_data["moving_length"]
    # Check locking of target track. Source track checked at press event.
    if _track_is_locked(to_track):
        edit_data = None
        tlinewidgets.set_edit_mode_data(edit_data)
        updater.repaint_tline()
        return
    # Moved clips are completely out of displayable track area, can't do edit.
    if over_out < 1:
        return
    # Autocorrect moved clips to be fully on displayable track area
    if over_in < 0:
        over_out += abs(over_in)
        over_in = 0
    # Collect data for edit action
    data = {"track":track,
            "over_in":over_in,
            "over_out":over_out,
            "selected_range_in":selected_range_in,
            "selected_range_out":selected_range_out,
            "move_edit_done_func":move_edit_done}
    # Do edit. Use different actions depending on if
    # clip is moved to a different track.
    if track == to_track:
        # Do edit if we were moving and clips have moved
        if (edit_data["move_on"] == True and (press_frame != frame)):
            clear_selected_clips()
            action = edit.overwrite_move_action(data)
            action.do_edit()
            PLAYER().seek_frame(over_in, False)
        else:
            _move_mode_released()
    else: # Moved to different track
        data["to_track"] = to_track
        clear_selected_clips()
        action = edit.multitrack_overwrite_move_action(data)
        action.do_edit()
        PLAYER().seek_frame(over_in, False)
    # Clear edit mode data
    edit_data = None
    tlinewidgets.set_edit_mode_data(edit_data)
    updater.repaint_tline()
# ------------------------------------- MOVE MODES EVENTS
def _move_mode_pressed(event, frame):
    """
    User presses mouse when in a move mode.
    Initializes move mode edit action based on user action and state.

    Updates the selection state machine (single select / ctrl-shift extend /
    blank range select) and fills module global edit_data; the data is only
    acted on if the mouse later travels far enough to start a move.
    """
    x = event.x
    y = event.y
    global edit_data, pressed_on_selected, drag_disabled
    # Clear edit data in gui module
    edit_data = None
    drag_disabled = False
    tlinewidgets.set_edit_mode_data(edit_data)
    # Get pressed track
    track = tlinewidgets.get_track(y)
    # Pressing empty area clears selection
    if track == None:
        clear_selected_clips()
        pressed_on_selected = False
        updater.repaint_tline()
        return
    # Get pressed clip index
    clip_index = current_sequence().get_clip_index(track, frame)
    # Pressing past the end of the track clears selection
    if clip_index == -1:
        clear_selected_clips()
        pressed_on_selected = False
        updater.repaint_tline()
        return
    # Check locking for pressed track
    if _track_is_locked(track):
        clear_selected_clips()
        pressed_on_selected = False
        updater.repaint_tline()
        return
    pressed_clip = track.clips[clip_index]
    # Handle pressed clip according to current selection state
    # Case: no selected clips, select single clip
    if selected_track == -1:
        if not pressed_clip.is_blanck_clip:
            select_clip(track.id, clip_index)
            pressed_on_selected = False
        else:
            # There may be multiple blank clips in an area that for the user
            # seems to be a single blank area. All of these must be
            # selected together automatically or the user will be exposed to
            # this implementation detail unnecessarily.
            range_in, range_out = _get_blanck_range(track, clip_index)
            _select_multiple_clips(track.id, range_in, range_out)
            pressed_on_selected = False
            drag_disabled = True # blank clips can be selected but not moved
    # case: CTRL or SHIFT down, combine selection with earlier selected clips
    elif ((event.state & gtk.gdk.CONTROL_MASK) or (event.state & gtk.gdk.SHIFT_MASK)):
        # CTRL pressing blank clears selection
        if pressed_clip.is_blanck_clip:
            clear_selected_clips()
            pressed_on_selected = False
            updater.repaint_tline()
            return
        # clip before range, make it start
        if clip_index < selected_range_in:
            _select_multiple_clips(track.id, clip_index,
                                   selected_range_out)
            pressed_on_selected = False
        # clip after range, make it end
        elif clip_index > selected_range_out:
            _select_multiple_clips(track.id, selected_range_in,
                                   clip_index)
            pressed_on_selected = False
        else:
            # Pressing on selected clip clears selection on release
            pressed_on_selected = True
    # case: new single clip pressed
    else:
        if selected_track != track.id:
            # Selection moves to another track: restart with a single clip.
            clear_selected_clips()
            select_clip(track.id, clip_index)
            pressed_on_selected = False
        else:
            if not pressed_clip.is_blanck_clip:
                # Pressing outside the selected range selects the new clip,
                # inside it keeps the selection unchanged until release.
                if clip_index < selected_range_in or clip_index > selected_range_out:
                    select_clip(track.id, clip_index)
                    pressed_on_selected = False
                else:
                    pressed_on_selected = True
            else:
                # Blank clip, see comment above
                range_in, range_out = _get_blanck_range(track, clip_index)
                _select_multiple_clips(track.id, range_in, range_out)
                pressed_on_selected = False
                drag_disabled = True
    # Get length info on selected clips
    clip_lengths = []
    for i in range(selected_range_in, selected_range_out + 1):
        clip = track.clips[i]
        clip_lengths.append(clip.clip_out - clip.clip_in + 1)
    # Overwrite mode ignores this
    insert_frame = track.clip_start(selected_range_in)
    # Set edit mode data. This is not used unless mouse delta is big enough
    # to initiate a move.
    edit_data = {"track_id":track.id,
                 "track_object":track,
                 "to_track_object":track,
                 "move_on":False,
                 "press_frame":frame,
                 "current_frame":frame,
                 "first_clip_start":insert_frame,
                 "insert_frame":insert_frame,
                 "clip_lengths":clip_lengths,
                 "mouse_start_x":x,
                 "mouse_start_y":y,
                 "selected_range_in":selected_range_in,    # clip index
                 "selected_range_out":selected_range_out}  # clip index
    tlinewidgets.set_edit_mode_data(edit_data)
    updater.repaint_tline()
def _move_mode_move(frame, x, y):
    """
    Updates edit data needed for doing edit and drawing overlay
    based on mouse movement.
    """
    global edit_data
    # Get frame that is the one where insert is attempted
    press_frame = edit_data["press_frame"]
    first_clip_start = edit_data["first_clip_start"]
    attempt_insert_frame = first_clip_start + (frame - press_frame)
    edit_data["attempt_insert_frame"] = attempt_insert_frame
    # Get track where insert is attempted. Track selection forced into range of editable tracks.
    to_track = tlinewidgets.get_track(y)
    if to_track == None:
        # Pointer is outside the tracks area: clamp to the nearest edge track.
        if y > tlinewidgets.REF_LINE_Y:
            to_track = get_track(1)
        else:
            to_track = get_track(len(current_sequence().tracks) - 2)
    # Tracks 0 and len-1 are not editable; clamp into the editable range.
    if to_track.id < 1:
        to_track = get_track(1)
    if to_track.id > len(current_sequence().tracks) - 2:
        to_track = get_track(len(current_sequence().tracks) - 2)
    edit_data["to_track_object"] = to_track
    # Get index for insert in target track
    insert_index = to_track.get_clip_index_at(attempt_insert_frame)
    edit_data["insert_index"] = insert_index
    edit_data["insert_frame"] = to_track.clip_start(insert_index)
    _set_current_move_frame_and_check_move_start(frame, x, y)
def _set_current_move_frame_and_check_move_start(frame, x, y):
    """
    Sets current mouse frame in edit data and starts move if mouse has
    moved far enough on either axis.
    """
    global edit_data
    edit_data["current_frame"] = frame
    delta_x = abs(x - edit_data["mouse_start_x"])
    delta_y = abs(y - edit_data["mouse_start_y"])
    if delta_x > MOVE_START_LIMIT or delta_y > MOVE_START_LIMIT:
        edit_data["move_on"] = True
def _clear_after_illegal_edit():
    """Abort the ongoing edit and clear selection after an illegal attempt."""
    global edit_data
    edit_data = None # kill current edit
    tlinewidgets.set_edit_mode_data(None)
    clear_selected_clips()
    updater.repaint_tline()
def _move_mode_released():
    """Handle a release that did not result in an edit."""
    # Pressing on selection clears it on release
    if pressed_on_selected:
        clear_selected_clips()
def move_edit_done(clips):
    """Deselect the moved clips and clear selection after an edit completes."""
    for moved_clip in clips:
        moved_clip.selected = False
    clear_selected_clips()
# ------------------------------------ track locks handling
def _track_is_locked(track):
    """
    Return True and show a warning dialog if track is locked for editing.

    Also disables the current drag so the release handler becomes a no-op.
    """
    global drag_disabled
    if track.edit_freedom == appconsts.LOCKED:
        track_name = utils.get_track_name(track, current_sequence())
        # No edits on locked tracks.
        primary_txt = _("Can't do edit on a locked track")
        secondary_txt = _("Track ") + track_name + _(" is locked. Unlock track to edit it.\n")
        dialogutils.warning_message(primary_txt, secondary_txt, gui.editor_window.window)
        drag_disabled = True
        return True
    return False
# ------------------------------------- clip d'n'd to range log
def clips_drag_out_started(event):
    """
    Start drag'n'drop of the selected clip range out of the timeline;
    aborts any ongoing move edit first.
    """
    # Abort move edit
    global edit_data, drag_disabled
    edit_data = None
    drag_disabled = True
    tlinewidgets.set_edit_mode_data(None)
    # Set dnd
    track = current_sequence().tracks[selected_track]
    clips = []
    for i in range(selected_range_in, selected_range_out + 1):
        clips.append(track.clips[i])
    dnd.start_tline_clips_out_drag(event, clips, gui.tline_canvas.widget)
    # Update timeline gui
    updater.repaint_tline()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles clips compositing gui.
"""
import copy
import pygtk
pygtk.require('2.0');
import gtk
import gui
import guicomponents
import guiutils
import edit
from editorstate import current_sequence
import editorpersistance
import propertyeditorbuilder
import propertyedit
import propertyparse
import utils
COMPOSITOR_PANEL_LEFT_WIDTH = 160

# Panel widgets are stored as attributes of this object by create_widgets().
widgets = utils.EmptyClass()

compositor = None # Compositor being edited.

# This is updated when filter panel is displayed and cleared when removed.
# Used to update kfeditors with external tline frame position changes
keyframe_editor_widgets = []
def create_widgets():
    """
    Widgets for editing compositing properties.

    Stored as attributes of module global 'widgets'.
    """
    # Left side
    widgets.compositor_info = guicomponents.CompositorInfoPanel()
    widgets.delete_b = gtk.Button(_("Delete"))
    # The extra None user-data makes "clicked" deliver two args to the lambda.
    widgets.delete_b.connect("clicked", lambda w,e: _delete_compositor_pressed(), None)
    widgets.reset_b = gtk.Button(_("Reset"))
    widgets.reset_b.connect("clicked", lambda w,e: _reset_compositor_pressed(), None)
    # Right side
    widgets.empty_label = gtk.Label(_("No Compositor"))
    widgets.value_edit_box = gtk.VBox()
    widgets.value_edit_box.pack_start(widgets.empty_label, True, True, 0)
    widgets.value_edit_frame = gtk.Frame()
    widgets.value_edit_frame.add(widgets.value_edit_box)
    widgets.value_edit_frame.set_shadow_type(gtk.SHADOW_NONE)
def get_compositor_clip_panel():
    """
    Build and return the left-side compositor panel; starts disabled.
    """
    create_widgets()
    compositor_vbox = gtk.VBox(False, 2)
    compositor_vbox.pack_start(widgets.compositor_info, False, False, 0)
    compositor_vbox.pack_start(gtk.Label(), True, True, 0) # expanding filler
    compositor_vbox.pack_start(widgets.reset_b, False, False, 0)
    compositor_vbox.pack_start(widgets.delete_b, False, False, 0)
    compositor_vbox.pack_start(guiutils.get_pad_label(5, 3), False, False, 0)
    compositor_vbox.set_size_request(COMPOSITOR_PANEL_LEFT_WIDTH, 200)
    set_enabled(False)
    return compositor_vbox
def set_compositor(new_compositor):
    """
    Sets clip to be edited in compositor editor.
    """
    global compositor
    if compositor != None and new_compositor.destroy_id != compositor.destroy_id:
        compositor.selected = False
    compositor = new_compositor
    widgets.compositor_info.display_compositor_info(compositor)
    set_enabled(True)
    _display_compositor_edit_box()
    # Bring the compositors tab visible in whichever notebook holds it.
    if editorpersistance.prefs.default_layout == True:
        gui.middle_notebook.set_current_page(3)
    else:
        gui.editor_window.right_notebook.set_current_page(2)
def clear_compositor():
    """Clear the edited compositor and show the empty editor view."""
    global compositor
    compositor = None
    widgets.compositor_info.set_no_compositor_info()
    _display_compositor_edit_box()
    set_enabled(False)
def set_enabled(value):
    """Set sensitivity of all compositor panel widgets."""
    widgets.compositor_info.set_enabled(value)
    for button in (widgets.delete_b, widgets.reset_b):
        button.set_sensitive(value)
    widgets.empty_label.set_sensitive(value)
def maybe_clear_editor(killed_compositor):
    """
    Clear the editor if the destroyed compositor is the one being edited.

    Fix: guards against no compositor being edited; comparing against
    None would raise AttributeError on compositor.destroy_id.
    """
    if compositor == None:
        return
    if killed_compositor.destroy_id == compositor.destroy_id:
        clear_compositor()
def _delete_compositor_pressed():
    """Delete the edited compositor as an undoable edit action."""
    data = {"compositor":compositor}
    action = edit.delete_compositor_action(data)
    action.do_edit()
def _reset_compositor_pressed():
    """Restore the edited compositor's properties to their default values."""
    global compositor
    # Deep copy so editing does not mutate the shared info defaults.
    compositor.transition.properties = copy.deepcopy(compositor.transition.info.properties)
    propertyparse.replace_value_keywords(compositor.transition.properties, current_sequence().profile)
    compositor.transition.update_editable_mlt_properties()
    _display_compositor_edit_box()
def _display_compositor_edit_box():
    """
    (Re)build the right-side property editor panel for the current
    compositor; with compositor == None an empty placeholder is shown.
    """
    # This gets called on startup before edit_frame is filled
    try:
        widgets.value_edit_frame.remove(widgets.value_edit_box)
    except:
        pass
    global keyframe_editor_widgets
    keyframe_editor_widgets = []
    vbox = gtk.VBox()
    # case: Empty edit frame
    global compositor
    if compositor == None:
        widgets.empty_label = gtk.Label(_("No Compositor"))
        vbox.pack_start(widgets.empty_label, True, True, 0)
        vbox.pack_start(gtk.Label(), True, True, 0)
        vbox.show_all()
        widgets.value_edit_box = vbox
        widgets.value_edit_frame.add(vbox)
        return
    compositor_name_label = gtk.Label( "<b>" + compositor.name + "</b>")
    compositor_name_label.set_use_markup(True)
    vbox.pack_start(compositor_name_label, False, False, 0)
    vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)
    # Track editor
    target_combo = guicomponents.get_compositor_track_select_combo(
        current_sequence().tracks[compositor.transition.b_track],
        current_sequence().tracks[compositor.transition.a_track],
        _target_track_changed)
    target_row = gtk.HBox()
    target_row.pack_start(guiutils.get_pad_label(5, 3), False, False, 0)
    target_row.pack_start(gtk.Label(_("Destination Track:")), False, False, 0)
    target_row.pack_start(guiutils.get_pad_label(5, 3), False, False, 0)
    target_row.pack_start(target_combo, False, False, 0)
    target_row.pack_start(gtk.Label(), True, True, 0)
    vbox.pack_start(target_row, False, False, 0)
    vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)
    # Transition editors
    t_editable_properties = propertyedit.get_transition_editable_properties(compositor)
    for ep in t_editable_properties:
        editor_row = propertyeditorbuilder.get_editor_row(ep)
        if editor_row != None: # Some properties don't have editors
            vbox.pack_start(editor_row, False, False, 0)
            vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)
            # Add keyframe editor widget to be updated for frame changes if such is created.
            try:
                editor_type = ep.args[propertyeditorbuilder.EDITOR]
            except KeyError:
                editor_type = propertyeditorbuilder.SLIDER # this is the default value
            if ((editor_type == propertyeditorbuilder.KEYFRAME_EDITOR)
                or (editor_type == propertyeditorbuilder.KEYFRAME_EDITOR_RELEASE)
                or (editor_type == propertyeditorbuilder.KEYFRAME_EDITOR_CLIP)
                or (editor_type == propertyeditorbuilder.GEOMETRY_EDITOR)):
                keyframe_editor_widgets.append(editor_row)
    # Extra editors. Editable properties have already been created with "editor=no_editor"
    # and will be looked up by editors from clip
    editor_rows = propertyeditorbuilder.get_transition_extra_editor_rows(compositor, t_editable_properties)
    for editor_row in editor_rows:
        vbox.pack_start(editor_row, False, False, 0)
        vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)
    vbox.show_all()
    scroll_window = gtk.ScrolledWindow()
    scroll_window.add_with_viewport(vbox)
    scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll_window.show_all()
    widgets.value_edit_box = scroll_window
    widgets.value_edit_frame.add(scroll_window)
def _target_track_changed(combo):
    """Update compositor destination track from the combo selection.

    The topmost combo item means the track directly below, for which the
    force flag is set on the transition.
    """
    force = (combo.get_active() == 0)
    dest_track = compositor.transition.b_track - combo.get_active() - 1
    compositor.transition.set_target_track(dest_track, force)
    widgets.compositor_info.display_compositor_info(compositor)
def display_kfeditors_tline_frame(frame):
    """Forward current timeline frame to every open keyframe editor widget."""
    for editor in keyframe_editor_widgets:
        editor.display_tline_frame(frame)
def update_kfeditors_positions():
    """Tell every open keyframe editor widget that its clip position changed."""
    for editor in keyframe_editor_widgets:
        editor.update_clip_pos()
# | Python |  (concatenation artifact marking a file boundary; commented out because bare text is not valid Python)
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains constant values that are used by multiple modules in the application.
"""
PROJECT_FILE_EXTENSION = ".flb"
# Media types for tracks or clips
UNKNOWN = 0
VIDEO = 1
AUDIO = 2
IMAGE = 3
RENDERED_VIDEO = 4 # not implemented
PATTERN_PRODUCER = 5
SYNC_AUDIO = 6
FILE_DOES_NOT_EXIST = 7
IMAGE_SEQUENCE = 8
# Mediaview filtering options
SHOW_ALL_FILES = 0
SHOW_VIDEO_FILES = 1
SHOW_AUDIO_FILES = 2
SHOW_GRAPHICS_FILES = 3
SHOW_IMAGE_SEQUENCES = 4
SHOW_PATTERN_PRODUCERS = 5
# Used to draw indicators that tell if more frames are available while trimming
ON_FIRST_FRAME = 0
ON_LAST_FRAME = 1
ON_BETWEEN_FRAME = 2
# Sync states of sync child clips
SYNC_CORRECT = 0
SYNC_OFF = 1
SYNC_PARENT_GONE = 2
# Allowed editing operations on a track
FREE = 0 # All edits allowed
SYNC_LOCKED = 1 # No insert, splice out or one roll trim.
# Allowed edits do not change positions of later clips
LOCKED = 2 # No edits allowed
# Property types of mlt filters and mlt transitions in filters.xml
# and compositors.xml
PROP_INT = 0
PROP_FLOAT = 1
PROP_EXPRESSION = 2
# Display heights for tracks.
TRACK_HEIGHT_NORMAL = 50 # track height in canvas and column
TRACK_HEIGHT_SMALL = 25 # track height in canvas and column
TRACK_HEIGHT_SMALLEST = 20 # track height in canvas and column
# Notebook widths
NOTEBOOK_WIDTH = 600 # defines app min width with MONITOR_AREA_WIDTH
NOTEBOOK_WIDTH_WIDESCREEN = 500
TOP_ROW_HEIGHT = 500
# Property editing gui consts
PROPERTY_ROW_HEIGHT = 22
PROPERTY_NAME_WIDTH = 90
# Clip mute options
MUTE_NOTHING = 0
MUTE_AUDIO = 1
MUTE_VIDEO = 2
MUTE_ALL = 3
# Track mute options
TRACK_MUTE_NOTHING = 0
TRACK_MUTE_VIDEO = 1
TRACK_MUTE_AUDIO = 2
TRACK_MUTE_ALL = 3
# XML Attribute and element names used in multiple modules
NAME = "name"
ARGS = "args"
PROPERTY = "property"
NON_MLT_PROPERTY = "propertynonmlt"
MLT_SERVICE = "mlt_service"
EXTRA_EDITOR = "extraeditor"
# Available tracks configurations for flowblade
TRACK_CONFIGURATIONS = [(5,4),(4,3),(3,2),(2,1),(8,1),(1,8)]
# Thumbnail image dimensions
THUMB_WIDTH = 116
THUMB_HEIGHT = 87
# Magic value for no pan being applied for audio producer
NO_PAN = -99
# Copy of projectdata.SAVEFILE_VERSION is here to be available at savetime without importing projectdata
# This is set at application startup in app.main()
SAVEFILE_VERSION = -1
# This color is used in two modules
MIDBAR_COLOR = "#bdbdbd"
# Media log event types
MEDIA_LOG_ALL = -1 # no MediaLogEvent has this type, this used when filtering events for display
MEDIA_LOG_INSERT = 0
MEDIA_LOG_MARKS_SET = 1
# Rendered clip types
RENDERED_DISSOLVE = 0
RENDERED_WIPE = 1
RENDERED_COLOR_DIP = 2
RENDERED_FADE_IN = 3
RENDERED_FADE_OUT = 4
# Project proxy modes
USE_ORIGINAL_MEDIA = 0
USE_PROXY_MEDIA = 1
CONVERTING_TO_USE_PROXY_MEDIA = 2
CONVERTING_TO_USE_ORIGINAL_MEDIA = 3
# Autosave directory relative path
AUTOSAVE_DIR = "autosave/"
AUDIO_LEVELS_DIR = "audiolevels/"
# Hidden media folders
THUMBNAILS_DIR = "thumbnails"
RENDERED_CLIPS_DIR = "rendered_clips"
# Luma bands
SHADOWS = 0
MIDTONES = 1
HIGHLIGHTS = 2
# Multi move edit ops
MULTI_NOOP = 0
MULTI_ADD_TRIM = 1
MULTI_TRIM_REMOVE = 2
MULTI_TRIM = 3
# Jack options
JACK_ON_START_UP_NO = 0
JACK_ON_START_UP_YES = 1
JACK_OUT_AUDIO = 0
JACK_OUT_SYNC = 0 # NOTE(review): same value as JACK_OUT_AUDIO above — if these are distinct options this looks like it should be 1; confirm intended
# Media load order options
LOAD_ABSOLUTE_FIRST = 0
LOAD_RELATIVE_FIRST = 1
LOAD_ABSOLUTE_ONLY = 2
# | Python |  (concatenation artifact marking a file boundary; commented out because bare text is not valid Python)
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
MLT framework profiles.
"""
import os
import mlt
import editorpersistance
import respaths
import utils
# Inside hidden user folder
USER_PROFILES_DIR = "user_profiles/"
DEFAULT_DEFAULT_PROFILE = "DV/DVD PAL"
# List of mlt profiles
_profile_list = []
_factory_profiles = []
_hidden_factory_profiles = []
_user_profiles = []
def load_profile_list():
    """
    Creates a list of MLT profile objects.
    Called at app start.
    """
    # NOTE: a previous revision also declared _proxy_profiles global here,
    # but the name was never assigned anywhere; the dead declaration was removed.
    global _profile_list, _factory_profiles, _hidden_factory_profiles, _user_profiles
    user_profiles_dir = utils.get_hidden_user_dir_path() + USER_PROFILES_DIR
    _user_profiles = _load_profiles_list(user_profiles_dir)
    _load_factory_profiles()
    _profile_list = _factory_profiles + _user_profiles
    # Keep all lists alphabetized by profile description, see _sort_profiles().
    _profile_list.sort(_sort_profiles)
    _factory_profiles.sort(_sort_profiles)
    _hidden_factory_profiles.sort(_sort_profiles)
    _user_profiles.sort(_sort_profiles)
def _load_profiles_list(dir_path):
    """Load all MLT profile files in dir_path.

    Returns a list of [description, mlt.Profile] pairs; each profile also
    gets its source path attached as .file_path.
    """
    profiles = []
    for fname in os.listdir(dir_path):
        path = dir_path + fname
        profile = mlt.Profile(path)
        profile.file_path = path
        profiles.append([profile.description(), profile])
    return profiles
def _load_factory_profiles():
    """Load shipped profiles and split them into visible and user-hidden lists."""
    global _factory_profiles, _hidden_factory_profiles
    all_factory_profiles = _load_profiles_list(respaths.PROFILE_PATH)
    shown = []
    hidden = []
    for profile in all_factory_profiles:
        # profile is a [description, mlt.Profile] pair; prefs hide by description.
        if profile[0] in editorpersistance.prefs.hidden_profile_names:
            hidden.append(profile)
        else:
            shown.append(profile)
    _factory_profiles = shown
    _hidden_factory_profiles = hidden
def get_profiles():
    # All profiles (factory + user), sorted by description.
    return _profile_list
def get_factory_profiles():
    # Shipped profiles the user has not hidden.
    return _factory_profiles
def get_hidden_profiles():
    # Shipped profiles the user has hidden in preferences.
    return _hidden_factory_profiles
def get_user_profiles():
    # Profiles loaded from the hidden user profiles directory.
    return _user_profiles
def get_profile(profile_name):
    """Return the mlt.Profile whose description matches profile_name.

    Returns None implicitly when no profile matches.
    """
    for desc, profile in _profile_list:
        if profile.description() == profile_name:
            return profile
def get_profile_for_index(index):
    """Return the mlt.Profile at index in the sorted profile list."""
    return _profile_list[index][1]
def get_profile_name_for_index(index):
    """Return the profile description at index in the sorted profile list."""
    return _profile_list[index][0]
def get_default_profile():
    # Resolve the default profile via its (self-healing) index lookup.
    return get_profile_for_index(get_default_profile_index())
def get_default_profile_index():
"""
We're making sure here that something is returned as default profile even if user may have removed some profiles.
"""
def_profile_index = get_index_for_name(editorpersistance.prefs.default_profile_name)
if def_profile_index == -1:
print "default profile from prefs nor found"
def_profile_index = get_index_for_name(DEFAULT_DEFAULT_PROFILE)
def_profile_name = DEFAULT_DEFAULT_PROFILE
if def_profile_index == -1:
def_profile_index = 0
def_profile_name, profile = _profile_list[def_profile_index]
print "DEFAULT_DEFAULT_PROFILE deleted returning first profile"
editorpersistance.prefs.default_profile_name = def_profile_name
editorpersistance.save()
return def_profile_index
def get_index_for_name(lookup_profile_name):
    """Return index of the profile with the given description, or -1.

    Returns the first match only, so duplicate descriptions shadow later ones.
    """
    for index, profile in enumerate(_profile_list):
        if profile[0] == lookup_profile_name:
            return index
    return -1
def _sort_profiles(a, b):
a_desc, a_profile = a
b_desc, b_profile = b
if a_desc.lower() < b_desc.lower():
return -1
elif a_desc.lower() > b_desc.lower():
return 1
else:
return 0
# | Python |  (concatenation artifact marking a file boundary; commented out because bare text is not valid Python)
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module holds references to GUI widgets.
"""
import pygtk
pygtk.require('2.0');
import gtk
# All references below are None until capture_references() fills them in
# at application startup.
# Editor window
editor_window = None
# Menu
editmenu = None
# Project data lists
media_list_view = None
bin_list_view = None
sequence_list_view = None
effect_stack_list_view = None
middle_notebook = None # This is now the only notebook, update name sometime
project_info_vbox = None
effect_select_list_view = None
effect_select_combo_box = None
render_out_folder = None
# Media tab
media_view_filter_selector = None
proxy_button = None
# Monitor
pos_bar = None
tc = None
# Timeline
tline_display = None
tline_scale = None
tline_canvas = None
tline_scroll = None
tline_info = None
tline_column = None
tline_left_corner = None
big_tc = None
# indexes match editmode values in editorstate.py
notebook_buttons = None
play_b = None
clip_editor_b = None
sequence_editor_b = None
# Theme colors
note_bg_color = None
fg_color = None
fg_color_tuple = None
bg_color_tuple = None
selected_bg_color = None
def capture_references(new_editor_window):
    """
    Create shorter names for some of the frequently used GUI objects.
    Called once at startup after the editor window has been built.
    """
    # NOTE(review): tline_v_scroll is declared global below but is never
    # assigned in this function or this module — confirm whether it is dead.
    global editor_window, media_list_view, bin_list_view, sequence_list_view, pos_bar, \
    tc, tline_display, tline_scale, tline_canvas, tline_scroll, tline_v_scroll, tline_info, \
    tline_column, play_b, clip_editor_b, sequence_editor_b, note_bg_color, fg_color, fg_color_tuple, bg_color_tuple, selected_bg_color, \
    effect_select_list_view, effect_select_combo_box, project_info_vbox, middle_notebook, big_tc, editmenu, notebook_buttons, tline_left_corner
    editor_window = new_editor_window
    media_list_view = editor_window.media_list_view
    bin_list_view = editor_window.bin_list_view
    sequence_list_view = editor_window.sequence_list_view
    middle_notebook = editor_window.notebook
    effect_select_list_view = editor_window.effect_select_list_view
    effect_select_combo_box = editor_window.effect_select_combo_box
    pos_bar = editor_window.pos_bar
    tc = editor_window.tc
    tline_display = editor_window.tline_display
    tline_scale = editor_window.tline_scale
    tline_canvas = editor_window.tline_canvas
    tline_scroll = editor_window.tline_scroller
    tline_info = editor_window.tline_info
    tline_column = editor_window.tline_column
    tline_left_corner = editor_window.left_corner
    clip_editor_b = editor_window.clip_editor_b
    sequence_editor_b = editor_window.sequence_editor_b
    big_tc = editor_window.big_TC
    editmenu = editor_window.uimanager.get_widget('/MenuBar/EditMenu')
    # Pick theme colors off a realized widget so custom drawing matches the GTK theme.
    style = editor_window.edit_buttons_row.get_style()
    note_bg_color = style.bg[gtk.STATE_NORMAL]
    fg_color = style.fg[gtk.STATE_NORMAL]
    selected_bg_color = style.bg[gtk.STATE_SELECTED]
    # Get cairo color tuple from gtk.gdk.Color
    # (gtk color channels are 16-bit, hence the 65535 divisor)
    raw_r, raw_g, raw_b = hex_to_rgb(fg_color.to_string())
    fg_color_tuple = (float(raw_r)/65535.0, float(raw_g)/65535.0, float(raw_b)/65535)
    raw_r, raw_g, raw_b = hex_to_rgb(note_bg_color.to_string())
    bg_color_tuple = (float(raw_r)/65535.0, float(raw_g)/65535.0, float(raw_b)/65535)
def hex_to_rgb(value):
    """Convert a hex color string into a tuple of per-channel ints.

    Accepts a leading '#'. Works for any channel width that divides the
    string evenly in three, e.g. '#rrggbb' (8-bit) or gtk's 16-bit
    '#rrrrggggbbbb' form produced by gtk.gdk.Color.to_string().
    """
    value = value.lstrip('#')
    lv = len(value)
    # Use explicit floor division: plain '/' here was Python-2-only and
    # would produce float slice indices under Python 3.
    step = lv // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, lv, step))
def enable_save():
    # Make File->Save selectable; called when the project gains unsaved changes.
    editor_window.uimanager.get_widget("/MenuBar/FileMenu/Save").set_sensitive(True)
# | Python |  (concatenation artifact marking a file boundary; commented out because bare text is not valid Python)
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles button presses from monitor control buttons row.
"""
import editorstate
from editorstate import PLAYER
from editorstate import current_sequence
from editorstate import timeline_visible
from editorstate import EDIT_MODE
from editorstate import current_is_move_mode
from editorstate import MONITOR_MEDIA_FILE
import gui
import movemodes
import trimmodes
import updater
FF_REW_SPEED = 3.0  # playback speed used for fast forward / rewind buttons
# Available JKL shuttle speeds, negative is backwards playback.
JKL_SPEEDS = [-32.0, -16.0, -8.0, -1.0, 0.0, 1.0, 3.0, 5.0, 8.0]
#JKL_SPEEDS = [-32.0, -16.0, -8.0, -1.0, -0.2, 0.0, 0.2, 1.0, 3.0, 5.0, 8.0]
JKL_STOPPED_INDEX = 4  # index of speed 0.0 in JKL_SPEEDS
# ---------------------------------------- playback
# Some events have different meanings depending on edit mode and
# are handled in either movemodes.py or trimmodes.py modules depending
# on edit mode.
def play_pressed():
    """Route the play button press to the handler for the current edit mode."""
    if current_is_move_mode():
        movemodes.play_pressed()
        return
    mode = EDIT_MODE()
    if mode == editorstate.ONE_ROLL_TRIM:
        trimmodes.oneroll_play_pressed()
    elif mode == editorstate.TWO_ROLL_TRIM:
        trimmodes.tworoll_play_pressed()
    elif mode == editorstate.SLIDE_TRIM:
        trimmodes.slide_play_pressed()
    elif mode in (editorstate.ONE_ROLL_TRIM_NO_EDIT,
                  editorstate.TWO_ROLL_TRIM_NO_EDIT,
                  editorstate.SLIDE_TRIM_NO_EDIT):
        # No-edit trim modes behave like normal playback.
        movemodes.play_pressed()
def stop_pressed():
    """Route the stop button press to the handler for the current edit mode."""
    if current_is_move_mode():
        movemodes.stop_pressed()
        return
    mode = EDIT_MODE()
    if mode == editorstate.ONE_ROLL_TRIM:
        trimmodes.oneroll_stop_pressed()
    elif mode == editorstate.TWO_ROLL_TRIM:
        trimmodes.tworoll_stop_pressed()
    elif mode == editorstate.SLIDE_TRIM:
        trimmodes.slide_stop_pressed()
    elif mode in (editorstate.ONE_ROLL_TRIM_NO_EDIT,
                  editorstate.TWO_ROLL_TRIM_NO_EDIT,
                  editorstate.SLIDE_TRIM_NO_EDIT):
        # No-edit trim modes behave like normal playback.
        movemodes.stop_pressed()
def next_pressed():
    """Route the next-frame button press to the current edit mode handler."""
    if current_is_move_mode():
        movemodes.next_pressed()
        return
    mode = EDIT_MODE()
    if mode == editorstate.ONE_ROLL_TRIM:
        trimmodes.oneroll_next_pressed()
    elif mode == editorstate.TWO_ROLL_TRIM:
        trimmodes.tworoll_next_pressed()
    elif mode == editorstate.SLIDE_TRIM:
        trimmodes.slide_next_pressed()
def prev_pressed():
    """Route the previous-frame button press to the current edit mode handler."""
    if current_is_move_mode():
        movemodes.prev_pressed()
        return
    mode = EDIT_MODE()
    if mode == editorstate.ONE_ROLL_TRIM:
        trimmodes.oneroll_prev_pressed()
    elif mode == editorstate.TWO_ROLL_TRIM:
        trimmodes.tworoll_prev_pressed()
    elif mode == editorstate.SLIDE_TRIM:
        trimmodes.slide_prev_pressed()
def j_pressed():
    """Step JKL shuttle playback one notch backwards."""
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
    index = _get_jkl_speed_index()
    if index > JKL_STOPPED_INDEX - 1:
        # Stopped or playing forward: jump straight to slowest backwards speed.
        index = JKL_STOPPED_INDEX - 1
    else:
        # Already going backwards: speed up one notch, clamped to fastest.
        index = max(index - 1, 0)
    PLAYER().start_variable_speed_playback(JKL_SPEEDS[index])
def k_pressed():
    # K stops JKL shuttle playback.
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
    PLAYER().stop_playback()
def l_pressed():
    """Step JKL shuttle playback one notch forward."""
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
    index = _get_jkl_speed_index()
    if index < JKL_STOPPED_INDEX + 1:
        # Stopped or playing backwards: jump straight to slowest forward speed.
        index = JKL_STOPPED_INDEX + 1
    else:
        # Already going forward: speed up one notch, clamped to fastest.
        index = min(index + 1, len(JKL_SPEEDS) - 1)
    PLAYER().start_variable_speed_playback(JKL_SPEEDS[index])
def _get_jkl_speed_index():
    """Map the current producer speed to its index in JKL_SPEEDS."""
    speed = PLAYER().producer.get_speed()
    # Anything faster backwards than the fastest listed speed maps to index 0.
    if speed < -8.0:
        return 0
    for index in range(len(JKL_SPEEDS) - 1):
        if speed <= JKL_SPEEDS[index]:
            return index
    # Faster than all but the last listed speed: clamp to the last index.
    return len(JKL_SPEEDS) - 1
# -------------------------------------- marks
def mark_in_pressed():
    """Set mark in at current frame on timeline or monitor clip,
    clearing an existing mark out that would fall before it.
    """
    mark_in = PLAYER().producer.frame()
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
        mark_out_old = PLAYER().producer.mark_out
        PLAYER().producer.mark_in = mark_in
    else:
        # Monitor is showing a clip, marks live on the clip instead.
        mark_out_old = current_sequence().monitor_clip.mark_out
        current_sequence().monitor_clip.mark_in = mark_in
    # Clear illegal old mark out (-1 means no mark set)
    if mark_out_old != -1:
        if mark_out_old < mark_in:
            if timeline_visible():
                PLAYER().producer.mark_out = -1
            else:
                current_sequence().monitor_clip.mark_out = -1
    _do_marks_update()
    updater.display_marks_tc()
def mark_out_pressed():
    """Set mark out at current frame on timeline or monitor clip,
    clearing an existing mark in that would fall after it.
    """
    mark_out = PLAYER().producer.frame()
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
        mark_in_old = PLAYER().producer.mark_in
        PLAYER().producer.mark_out = mark_out
    else:
        # Monitor is showing a clip, marks live on the clip instead.
        mark_in_old = current_sequence().monitor_clip.mark_in
        current_sequence().monitor_clip.mark_out = mark_out
    # Clear illegal old mark in
    if mark_in_old > mark_out:
        if timeline_visible():
            PLAYER().producer.mark_in = -1
        else:
            current_sequence().monitor_clip.mark_in = -1
    _do_marks_update()
    updater.display_marks_tc()
def marks_clear_pressed():
    """Clear mark in and mark out on the timeline or the monitor clip."""
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
        producer = PLAYER().producer
    else:
        producer = current_sequence().monitor_clip
    # -1 means no mark set.
    producer.mark_in = -1
    producer.mark_out = -1
    _do_marks_update()
    updater.display_marks_tc()
def to_mark_in_pressed():
    """Seek to mark in on the timeline or monitor clip; no-op if not set."""
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
    mark_in = PLAYER().producer.mark_in
    if not timeline_visible():
        # Monitor is showing a clip, use its mark instead.
        mark_in = current_sequence().monitor_clip.mark_in
    if mark_in == -1:  # -1 means no mark set
        return
    PLAYER().seek_frame(mark_in)
def to_mark_out_pressed():
    """Seek to mark out on the timeline or monitor clip; no-op if not set."""
    if timeline_visible():
        trimmodes.set_no_edit_trim_mode()
    mark_out = PLAYER().producer.mark_out
    if not timeline_visible():
        # Monitor is showing a clip, use its mark instead.
        mark_out = current_sequence().monitor_clip.mark_out
    if mark_out == -1:  # -1 means no mark set
        return
    PLAYER().seek_frame(mark_out)
def _do_marks_update():
    """Copy marks to the monitored media file and redraw affected widgets."""
    if timeline_visible():
        producer = PLAYER().producer
    else:
        producer = current_sequence().monitor_clip
    # Mirror marks onto the media file object so media list thumbnails show them.
    MONITOR_MEDIA_FILE().mark_in = producer.mark_in
    MONITOR_MEDIA_FILE().mark_out = producer.mark_out
    gui.media_list_view.widget.queue_draw()
    gui.pos_bar.update_display_from_producer(producer)
    gui.tline_scale.widget.queue_draw()
# ------------------------------------------------------------ clip arrow seeks
def up_arrow_seek_on_monitor_clip():
    """Seek forward to the next of: mark in, mark out, last frame."""
    frame = PLAYER().producer.frame()
    media_file = MONITOR_MEDIA_FILE()
    if frame < media_file.mark_in:
        PLAYER().seek_frame(media_file.mark_in)
    elif frame < media_file.mark_out:
        PLAYER().seek_frame(media_file.mark_out)
    else:
        PLAYER().seek_frame(PLAYER().producer.get_length() - 1)
def down_arrow_seek_on_monitor_clip():
    """Seek backwards to the previous of: mark out, mark in, first frame."""
    frame = PLAYER().producer.frame()
    media_file = MONITOR_MEDIA_FILE()
    # -1 means the mark is not set and must be skipped.
    if frame > media_file.mark_out and media_file.mark_out != -1:
        PLAYER().seek_frame(media_file.mark_out)
    elif frame > media_file.mark_in and media_file.mark_in != -1:
        PLAYER().seek_frame(media_file.mark_in)
    else:
        PLAYER().seek_frame(0)
def set_monitor_playback_interpolation(new_interpolation):
    # Set monitor rescale quality.
    PLAYER().consumer.set("rescale", str(new_interpolation)) # MLT options "nearest", "bilinear", "bicubic", "hyper" hardcoded into menu items
# | Python |  (concatenation artifact marking a file boundary; commented out because bare text is not valid Python)
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains objects that wrap mlt.Transition objects used to mix video betweeen
two tracks.
"""
import copy
import mlt
import os
import xml.dom.minidom
import appconsts
import mltrefhold
import patternproducer
import propertyparse
import respaths
# Attr and node names in compositors.xml
NAME = appconsts.NAME
ARGS = appconsts.ARGS
PROPERTY = appconsts.PROPERTY
EXTRA_EDITOR = appconsts.EXTRA_EDITOR
MLT_SERVICE = appconsts.MLT_SERVICE
COMPOSITOR = "compositortransition"
# Property types.
PROP_INT = appconsts.PROP_INT
PROP_FLOAT = appconsts.PROP_FLOAT
PROP_EXPRESSION = appconsts.PROP_EXPRESSION
# Renderered transitions
RENDERED_DISSOLVE = appconsts.RENDERED_DISSOLVE
RENDERED_WIPE = appconsts.RENDERED_WIPE
RENDERED_COLOR_DIP = appconsts.RENDERED_COLOR_DIP
RENDERED_FADE_IN = appconsts.RENDERED_FADE_IN
RENDERED_FADE_OUT = appconsts.RENDERED_FADE_OUT
rendered_transitions = None # list is set here at init_module() because otherwise translations can't be done (module load issue)
# Info objects used to create mlt.Transitions for CompositorObject objects.
# dict name : MLTCompositorInfo
mlt_compositor_transition_infos = {}
# Name -> type dict, used at creation when type is known, but name data has been left behind
name_for_type = {}
# Transitions not found in the system
not_found_transitions = []
wipe_lumas = None # User displayed name -> resource image
compositors = None
blenders = None
def init_module():
    # translations and module load order make us do this in method instead of at module load
    # NOTE(review): single_track_render_type_names is declared global here but
    # never assigned in this function — confirm whether it is dead.
    global wipe_lumas, compositors, blenders, name_for_type, rendered_transitions, single_track_render_type_names
    # Translated wipe name -> luma resource image file name
    wipe_lumas = { \
        _("Vertical From Center"):"bi-linear_x.pgm",
        _("Vertical Top to Bottom"):"wipe_top_to_bottom.svg",
        _("Vertical Bottom to Top"):"wipe_bottom_to_top.svg",
        _("Horizontal From Center"):"bi-linear_y.pgm",
        _("Horizontal Left to Right"):"wipe_left_to_right.svg",
        _("Horizontal Right to Left"):"wipe_right_to_left.svg",
        _("Clock Left To Right"):"clock_left_to_right.pgm",
        _("Clock Right to Left"):"clock_right_to_left.pgm",
        _("Clock Symmetric"):"symmetric_clock.pgm",
        _("Stripes Horizontal"):"blinds_in_to_out.pgm",
        _("Stripes Horizontal Big"):"blinds_in_to_out_big.pgm",
        _("Stripes Horizontal Moving"):"blinds_sliding.png",
        _("Stripes Vertical"):"vertical_blinds_in_to_out.pgm",
        _("Stripes Vertical Big"):"vertical_blinds_in_to_out_big.pgm",
        _("Burst"):"burst.pgm",
        _("Circle From In"):"circle_in_to_out.svg",
        _("Circle From Out"):"circle_out_to_in.svg",
        _("Cloud"):"cloud.pgm",
        _("Hatched 1"):"hatched_1.png",
        _("Hatched 2"):"hatched_2.png",
        _("Hourglass"):"hourglass_1.png",
        _("Puddles"):"mountains.png",
        _("Rings"):"radial-bars.pgm",
        _("Rectangle From In"):"rectangle_in_to_out.pgm",
        _("Rectangle From Out"):"rectangle_out_to_in.pgm",
        _("Rectangle Bars"):"square2-bars.pgm",
        _("Sand"):"sand.svg",
        _("Sphere"):"sphere.png",
        _("Spiral Abstract"):"spiral_abstract_1.png",
        _("Spiral"):"spiral.pgm",
        _("Spiral Galaxy"):"spiral2.pgm",
        _("Spiral Big"):"spiral_big.pgm",
        _("Spiral Medium"):"spiral_medium.pgm",
        _("Spots"):"spots.png",
        _("Star"):"star_2.png",
        _("Arch"):"fractal_1.png",
        _("Patches"):"fractal_4.png",
        _("Free Stripes"):"fractal_5.png",
        _("Free Curves"):"fractal_7.png",
        _("Diagonal 1"):"wipe_diagonal_1.png",
        _("Diagonal 2"):"wipe_diagonal_2.png",
        _("Diagonal 3"):"wipe_diagonal_3.png",
        _("Diagonal 4"):"wipe_diagonal_4.png",
        _("Checkerboard"):"checkerboard_small.pgm"}
    # name -> mlt_compositor_transition_infos key dict.
    unsorted_compositors = [ (_("Affine"),"##affine"),
                             (_("Dissolve"),"##opacity_kf"),
                             (_("Picture in Picture"),"##pict_in_pict"),
                             (_("Region"), "##region"),
                             (_("Affine Blend"), "##affineblend"),
                             (_("Blend"), "##blend"),
                             (_("Wipe Clip Length"),"##wipe")]
    # Sort alphabetically by the translated display name.
    compositors = sorted(unsorted_compositors, key=lambda comp: comp[0])
    # name -> mlt_compositor_transition_infos key dict.
    blenders = [(_("Add"),"##add"),
                (_("Burn"),"##burn"),
                (_("Color only"),"##color_only"),
                (_("Darken"),"##darken"),
                (_("Difference"),"##difference"),
                (_("Divide"),"##divide"),
                (_("Dodge"),"##dodge"),
                (_("Grain extract"),"##grain_extract"),
                (_("Grain merge"),"##grain_merge"),
                (_("Hardlight"),"##hardlight"),
                (_("Hue"),"##hue"),
                (_("Lighten"),"##lighten"),
                (_("Multiply"),"##multiply"),
                (_("Overlay"),"##overlay"),
                (_("Saturation"),"##saturation"),
                (_("Screen"),"##screen"),
                (_("Softlight"),"##softlight"),
                (_("Subtract"),"##subtract"),
                (_("Value"),"##value")]
    # Build reverse lookup: type id string -> translated display name.
    for comp in compositors:
        name, comp_type = comp
        name_for_type[comp_type] = name
    for blend in blenders:
        name, comp_type = blend
        name_for_type[comp_type] = name
    # change this, tuples are not needed, we only need a list of translated names
    rendered_transitions = [ (_("Dissolve"), RENDERED_DISSOLVE),
                             (_("Wipe"), RENDERED_WIPE),
                             (_("Color Dip"), RENDERED_COLOR_DIP),
                             (_("Fade In"), RENDERED_FADE_IN),
                             (_("Fade Out"), RENDERED_FADE_OUT)]
# ------------------------------------------ compositors
class CompositorTransitionInfo:
    """
    Constructor input is a XML dom node object. Converts XML data to another form
    used to create CompositorTransition objects.
    """
    def __init__(self, compositor_node):
        # mlt service id, e.g. "composite", "affine", "luma"
        self.mlt_service_id = compositor_node.getAttribute(MLT_SERVICE)
        # Raw XML kept for serialization/debugging.
        self.xml = compositor_node.toxml()
        self.name = compositor_node.getElementsByTagName(NAME).item(0).firstChild.nodeValue
        # Properties saved as name-value-type tuplets
        p_node_list = compositor_node.getElementsByTagName(PROPERTY)
        self.properties = propertyparse.node_list_to_properties_array(p_node_list)
        # Property args saved in propertyname -> propertyargs_string dict
        self.property_args = propertyparse.node_list_to_args_dict(p_node_list)
        # Extra editors that handle properties that have been set "no_editor"
        e_node_list = compositor_node.getElementsByTagName(EXTRA_EDITOR)
        self.extra_editors = propertyparse.node_list_to_extraeditors_array(e_node_list)
class CompositorTransition:
    """
    These objects are part of sequence.Sequence and describe a video transition
    between two tracks.
    They wrap mlt.Transition objects that do the actual mixing.
    """
    def __init__(self, transition_info):
        # The wrapped mlt.Transition; created later in create_mlt_transition().
        self.mlt_transition = None
        self.info = transition_info
        # Editable properties, usually a subset of all properties of
        # mlt_serveice "composite", defined in compositors.xml
        self.properties = copy.deepcopy(transition_info.properties)
        self.a_track = -1 # to, destination
        self.b_track = -1 # from, source

    def create_mlt_transition(self, mlt_profile):
        """Create the wrapped mlt.Transition for the given profile and
        push default plus editable property values into it.
        """
        transition = mlt.Transition(mlt_profile,
                                    str(self.info.mlt_service_id))
        # Keep a reference alive so the MLT object is not garbage collected.
        mltrefhold.hold_ref(transition)
        self.mlt_transition = transition
        self.set_default_values()
        # PROP_EXPR values may have keywords that need to be replaced with
        # numerical values that depend on the profile we have. These need
        # to be replaced now that we have profile and we are ready to connect this.
        propertyparse.replace_value_keywords(self.properties, mlt_profile)
        self.update_editable_mlt_properties()

    def set_default_values(self):
        # Dispatch on mlt service; all services other than the named ones
        # are blend-type transitions.
        if self.info.mlt_service_id == "composite":
            self._set_composite_service_default_values()
        elif self.info.mlt_service_id == "affine":
            self._set_affine_service_default_values()
        elif self.info.mlt_service_id == "luma":
            self._set_luma_service_default_values()
        elif self.info.mlt_service_id == "region":
            self._set_region_service_default_values()
        else:
            self._set_blend_service_default_values()

    def _set_composite_service_default_values(self):
        # Defaults for the mlt "composite" service.
        self.mlt_transition.set("automatic",1)
        self.mlt_transition.set("aligned", 1)
        self.mlt_transition.set("deinterlace",0)
        self.mlt_transition.set("distort",0)
        self.mlt_transition.set("fill",1)
        self.mlt_transition.set("operator","over")
        self.mlt_transition.set("luma_invert",0)
        self.mlt_transition.set("progressive",1)
        self.mlt_transition.set("softness",0)

    def _set_affine_service_default_values(self):
        # Defaults for the mlt "affine" service.
        self.mlt_transition.set("distort",0)
        self.mlt_transition.set("automatic",1)
        self.mlt_transition.set("keyed",1)

    def _set_luma_service_default_values(self):
        # Defaults for the mlt "luma" service.
        self.mlt_transition.set("automatic",1)
        self.mlt_transition.set("invert",0)
        self.mlt_transition.set("reverse",0)
        self.mlt_transition.set("softness",0)

    def _set_region_service_default_values(self):
        # Defaults for the mlt "region" service.
        self.mlt_transition.set("automatic",1)
        self.mlt_transition.set("aligned",1)
        self.mlt_transition.set("deinterlace",0)
        self.mlt_transition.set("distort",0)
        self.mlt_transition.set("fill",1)
        self.mlt_transition.set("operator","over")
        self.mlt_transition.set("luma_invert",0)
        self.mlt_transition.set("progressive",1)
        self.mlt_transition.set("softness",0)

    def _set_blend_service_default_values(self):
        # Defaults shared by all blend-type services.
        self.mlt_transition.set("automatic",1)

    def set_tracks(self, a_track, b_track):
        """Set both destination (a) and source (b) tracks on the transition."""
        self.a_track = a_track
        self.b_track = b_track
        self.mlt_transition.set("a_track", str(a_track))
        self.mlt_transition.set("b_track", str(b_track))

    def set_target_track(self, a_track, force_track):
        """Set destination track; force_track makes compositing ignore
        track visibility state.
        """
        self.a_track = a_track
        self.mlt_transition.set("a_track", str(a_track))
        if force_track == True:
            fval = 1
        else:
            fval = 0
        self.mlt_transition.set("force_track",str(fval))

    def update_editable_mlt_properties(self):
        # Push current editable property values into the mlt.Transition.
        for prop in self.properties:
            name, value, prop_type = prop
            self.mlt_transition.set(str(name), str(value)) # new const strings are created from values
class CompositorObject:
    """
    These objects are saved with projects. They are used to create,
    update and hold references to mlt.Transition
    objects that define a composite between two tracks.

    mlt.Transition (self.transition) needs its in and out and visibility to be updated
    for every single edit action ( see edit.py _insert_clip() and
    _remove_clip() )
    """
    def __init__(self, transition_info):
        self.transition = CompositorTransition(transition_info)
        self.clip_in = -1 # ducktyping for clip for property editors
        self.clip_out = -1 # ducktyping for clip for property editors
        self.planted = False  # True when added to the sequence's compositor stack
        self.compositor_index = None
        self.name = None # ducktyping for clip for property editors
        self.selected = False
        self.origin_clip_id = None
        self.destroy_id = os.urandom(16) # HACK, HACK, HACK - find a way to remove this stuff
                                         # Objects are recreated often in Sequence.restack_compositors()
                                         # and cannot be destroyed in undo/redo with object identity.
                                         # This is cloned in clone_properties

    def get_length(self):
        # ducktyping for clip for property editors
        return self.clip_out - self.clip_in + 1 # +1 out inclusive

    def move(self, delta):
        """Shift the compositor by delta frames, keeping its length."""
        self.clip_in = self.clip_in + delta
        self.clip_out = self.clip_out + delta
        self.transition.mlt_transition.set("in", str(self.clip_in))
        self.transition.mlt_transition.set("out", str(self.clip_out))

    def set_in_and_out(self, in_frame, out_frame):
        """Set compositor range on both this object and the mlt.Transition."""
        self.clip_in = in_frame
        self.clip_out = out_frame
        self.transition.mlt_transition.set("in", str(in_frame))
        self.transition.mlt_transition.set("out", str(out_frame))

    def create_mlt_objects(self, mlt_profile):
        # Delegates mlt.Transition creation to the wrapped CompositorTransition.
        self.transition.create_mlt_transition(mlt_profile)

    def clone_properties(self, source_compositor):
        """Copy identity and editable property values from another compositor
        (used when compositors are recreated in restacking / undo-redo).
        """
        self.destroy_id = source_compositor.destroy_id
        self.origin_clip_id = source_compositor.origin_clip_id
        self.transition.properties = copy.deepcopy(source_compositor.transition.properties)
        self.transition.update_editable_mlt_properties()
# -------------------------------------------------- compositor interface methods
def load_compositors_xml(transitions):
    """
    Load the compositors XML document, create CompositorTransitionInfo
    objects and put them in module dict mlt_compositor_transition_infos
    with names as keys.

    transitions: collection of available MLT transition service ids;
    compositors whose service is missing are skipped and collected into
    module list not_found_transitions instead. An empty 'transitions'
    collection disables the availability check.
    """
    compositors_doc = xml.dom.minidom.parse(respaths.COMPOSITORS_XML_DOC)
    print "Loading transitions..."
    compositor_nodes = compositors_doc.getElementsByTagName(COMPOSITOR)
    for c_node in compositor_nodes:
        compositor_info = CompositorTransitionInfo(c_node)
        # Skip compositors whose MLT service is not installed on this system.
        if (not compositor_info.mlt_service_id in transitions) and len(transitions) > 0:
            print "MLT transition " + compositor_info.mlt_service_id + " not found."
            global not_found_transitions
            not_found_transitions.append(compositor_info)
            continue
        mlt_compositor_transition_infos[compositor_info.name] = compositor_info
def get_wipe_resource_path_for_sorted_keys_index(sorted_keys_index):
    """
    Return the luma resource path for the given index into the sorted keys
    of module dict wipe_lumas.

    This exists to avoid sending a list of sorted keys around or having to
    use global variables.
    """
    # sorted() instead of keys()/sort(): builds the sorted list in one step
    # and does not rely on dict.keys() returning a sortable list (which
    # breaks on Python 3 where it is a view).
    keys = sorted(wipe_lumas.keys())
    return get_wipe_resource_path(keys[sorted_keys_index])
def get_wipe_resource_path(key):
    """Return the full path of the luma image file registered for key in wipe_lumas."""
    return respaths.WIPE_RESOURCES_PATH + wipe_lumas[key]
def create_compositor(compositor_type):
    """
    Create a CompositorObject for a type id string like "##add" or
    "##affineblend"; in compositors.xml this is the name element,
    e.g. <name>##affine</name>.
    """
    info = mlt_compositor_transition_infos[compositor_type]
    new_compositor = CompositorObject(info)
    new_compositor.compositor_index = -1  # not used since SAVEFILE = 3
    new_compositor.name = name_for_type[compositor_type]
    new_compositor.type_id = compositor_type
    return new_compositor
# ------------------------------------------------------ rendered transitions
# These are tractor objects used to create rendered transitions.
def get_rendered_transition_tractor(current_sequence,
                                    orig_from,
                                    orig_to,
                                    action_from_out,
                                    action_from_in,
                                    action_to_out,
                                    action_to_in,
                                    transition_type_selection_index,
                                    wipe_luma_sorted_keys_index,
                                    gdk_color_str):
    """
    Build a two-track mlt.Tractor used to render a transition
    (dissolve / wipe / color dip / fade in / fade out) between clones of
    the given clips.

    orig_from, orig_to: the timeline clips the transition is created between
                        (orig_to is unused for fades).
    action_*_in/out:    frame ranges taken from the two clips.
    transition_type_selection_index: index into module list rendered_transitions.
    wipe_luma_sorted_keys_index: luma selection for wipes (see wipe_lumas).
    gdk_color_str:      color used for color dip / fade clips.
    """
    name, transition_type = rendered_transitions[transition_type_selection_index]
    # New from clip
    if orig_from.media_type != appconsts.PATTERN_PRODUCER:
        from_clip = current_sequence.create_file_producer_clip(orig_from.path) # File producer
    else:
        from_clip = current_sequence.create_pattern_producer(orig_from.create_data) # pattern producer
    current_sequence.clone_clip_and_filters(orig_from, from_clip)
    # New to clip
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades do not use to_clip
        if orig_to.media_type != appconsts.PATTERN_PRODUCER:
            to_clip = current_sequence.create_file_producer_clip(orig_to.path) # File producer
        else:
            to_clip = current_sequence.create_pattern_producer(orig_to.create_data) # pattern producer
        current_sequence.clone_clip_and_filters(orig_to, to_clip)
    # Create tractor and tracks
    tractor = mlt.Tractor()
    multitrack = tractor.multitrack()
    track0 = mlt.Playlist()
    track1 = mlt.Playlist()
    multitrack.connect(track0, 0)
    multitrack.connect(track1, 1)
    # We'll set in and out points for images and pattern producers
    # (they have no intrinsic length).
    if not(transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT): # fades do not use to_clip or some other data used here
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_from_out - action_from_in
            from_clip.clip_in = 0
            from_clip.clip_out = length
        if to_clip.media_type == appconsts.IMAGE or to_clip.media_type == appconsts.PATTERN_PRODUCER:
            length = action_to_out - action_to_in
            to_clip.clip_in = 0
            to_clip.clip_out = length
    else:
        # Fades only need the from clip range.
        length = action_from_out
        if from_clip.media_type == appconsts.IMAGE or from_clip.media_type == appconsts.PATTERN_PRODUCER:
            from_clip.clip_in = 0
            from_clip.clip_out = length
    # Add clips to tracks and create keyframe string to control mixing.
    if transition_type == RENDERED_DISSOLVE or transition_type == RENDERED_WIPE:
        # Add clips. Images and pattern producers always fill the full track.
        if from_clip.media_type != appconsts.IMAGE and from_clip.media_type != appconsts.PATTERN_PRODUCER:
            track0.insert(from_clip, 0, action_from_in, action_from_out)
        else:
            track0.insert(from_clip, 0, 0, action_from_out - action_from_in)
        if to_clip.media_type != appconsts.IMAGE and to_clip.media_type != appconsts.PATTERN_PRODUCER:
            track1.insert(to_clip, 0, action_to_in, action_to_out)
        else:
            track1.insert(to_clip, 0, 0, action_to_out - action_to_in)
        # Opacity 0 -> 100 over the whole length.
        kf_str = "0=0/0:100%x100%:0.0;"+ str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif transition_type == RENDERED_COLOR_DIP:
        length = action_from_out - action_from_in
        first_clip_length = length / 2
        second_clip_length = length - first_clip_length
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        track1.insert(from_clip, 0, action_from_in, action_from_in + first_clip_length)
        track1.insert(to_clip, 1, action_to_out - second_clip_length, action_to_out)
        # Opacity 100 -> 0 at midpoint (showing the color) -> 100.
        kf_str = "0=0/0:100%x100%:100.0;"+ str(first_clip_length) + "=0/0:100%x100%:0.0;" + str(tractor.get_length() - 1) + "=0/0:100%x100%:100.0"
    elif (transition_type == RENDERED_FADE_IN or transition_type == RENDERED_FADE_OUT):
        color_clip = patternproducer.create_color_producer(current_sequence.profile, gdk_color_str)
        track0.insert(color_clip, 0, 0, length)
        if transition_type == RENDERED_FADE_IN:
            track1.insert(from_clip, 0, orig_from.clip_in, orig_from.clip_in + length)
            kf_str = "0=0/0:100%x100%:0.0;"+ str(length) + "=0/0:100%x100%:100.0"
        else: # transition_type == RENDERED_FADE_OUT
            track1.insert(from_clip, 0, orig_from.clip_out - length, orig_from.clip_out)
            kf_str = "0=0/0:100%x100%:100.0;"+ str(length) + "=0/0:100%x100%:0.0"
    # Create transition
    transition = mlt.Transition(current_sequence.profile, "region")
    mltrefhold.hold_ref(transition)
    transition.set("composite.geometry", str(kf_str)) # controls mix over time
    transition.set("composite.automatic",1)
    transition.set("composite.aligned", 0)
    transition.set("composite.deinterlace",0)
    transition.set("composite.distort",0)
    transition.set("composite.fill",1)
    transition.set("composite.operator","over")
    transition.set("composite.luma_invert",0)
    transition.set("composite.progressive",1)
    transition.set("composite.softness",0)
    transition.set("in", 0)
    transition.set("out", tractor.get_length() - 1)
    transition.set("a_track", 0)
    transition.set("b_track", 1)
    # Setting a luma resource file turns a dissolve into a wipe.
    if transition_type == RENDERED_WIPE:
        wipe_resource_path = get_wipe_resource_path_for_sorted_keys_index(wipe_luma_sorted_keys_index)
        transition.set("composite.luma", str(wipe_resource_path))
    # Add transition
    field = tractor.field()
    field.plant_transition(transition, 0,1)
    return tractor
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module for saving and loading projects.
Main functionality of the module is to replace unpickleable
SwigPyObject MLT objects with pickleable python objects for save,
and then create MLT objects from pickled objects when project is loaded.
"""
import pygtk
pygtk.require('2.0');
import gtk
import copy
import fnmatch
import os
import pickle
import time
import appconsts
import editorstate
import editorpersistance
import mltprofiles
import mltfilters
import mlttransitions
import miscdataobjects
import propertyparse
import resync
# Unpickleable attributes (SwigPyObject MLT references etc.) per object type.
# These are removed at save and recreated at load.
PROJECT_REMOVE = ['profile','c_seq']
SEQUENCE_REMOVE = ['profile','field','multitrack','tractor','monitor_clip','vectorscope','audiowave','rgbparade','outputfilter','watermark_filter']
PLAY_LIST_REMOVE = ['this','sequence','get_name','gain_filter','pan_filter']
CLIP_REMOVE = ['this','clip_length']
TRANSITION_REMOVE = ['this']
FILTER_REMOVE = ['mlt_filter','mlt_filters']
MEDIA_FILE_REMOVE = ['icon']

# Used to flag a not found relative path
NOT_FOUND = "/not_found_not_found/not_found"

# Used to send messages to the GUI when loading a project
load_dialog = None

# These are used to recreate parenting (sync) relationships after load
all_clips = {}
sync_clips = []

# Used for converting to and from proxy media using projects
project_proxy_mode = -1
proxy_path_dict = None

# Flag for showing progress messages on GUI when loading
show_messages = True

# Path of file being loaded, global for convenience.
# Used to implement relative paths search on load.
_load_file_path = None

# Used to change media item and clip paths when saving backup snapshot.
# 'snapshot_paths != None' flags that snapsave is being done and paths need to be replaced
snapshot_paths = None
class FileProducerNotFoundError(Exception):
    """
    Raised when a file producer cannot be created for a saved media path.
    We're only catching this; on other errors we'll just crash on load.
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class ProjectProfileNotFoundError(Exception):
    """
    Raised when the saved project's MLT profile is not available on this system.
    We're only catching this; on other errors we'll just crash on load.
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
# -------------------------------------------------- LOAD MESSAGES
def _show_msg(msg, delay=0.0):
    """Show msg in the load dialog; takes the GTK lock since this runs in the loader thread."""
    if show_messages != True:
        return
    gtk.gdk.threads_enter()
    load_dialog.info.set_text(msg)
    time.sleep(delay)
    gtk.gdk.threads_leave()
# -------------------------------------------------- SAVE
def save_project(project, file_path):
"""
Creates pickleable project object
"""
print "Save project " + os.path.basename(file_path)
# Get shallow copy
s_proj = copy.copy(project)
# Set current sequence index
s_proj.c_seq_index = project.sequences.index(project.c_seq)
# Set project SAVEFILE_VERSION to current in case this is a resave of older file type.
# Older file type has been converted to newer file type on load.
s_proj.SAVEFILE_VERSION = appconsts.SAVEFILE_VERSION
# Init proxy convert data
global project_proxy_mode, proxy_path_dict
project_proxy_mode = s_proj.proxy_data.proxy_mode
proxy_path_dict = {}
# Replace media file objects with pickleable copys
media_files = {}
for k, v in s_proj.media_files.iteritems():
s_media_file = copy.copy(v)
remove_attrs(s_media_file, MEDIA_FILE_REMOVE)
# Convert media files between original and proxy files
if project_proxy_mode == appconsts.CONVERTING_TO_USE_PROXY_MEDIA:
if s_media_file.has_proxy_file:
proxy_path_dict[s_media_file.path] = s_media_file.second_file_path
s_media_file.set_as_proxy_media_file()
elif project_proxy_mode == appconsts.CONVERTING_TO_USE_ORIGINAL_MEDIA:
if s_media_file.is_proxy_file:
proxy_path_dict[s_media_file.path] = s_media_file.second_file_path
s_media_file.set_as_original_media_file()
# Change paths when doing snapshot save
if snapshot_paths != None:
if s_media_file.type != appconsts.PATTERN_PRODUCER:
s_media_file.path = snapshot_paths[s_media_file.path]
media_files[s_media_file.id] = s_media_file
s_proj.media_files = media_files
# Replace sequences with pickleable objects
sequences = []
for i in range(0, len(project.sequences)):
add_seq = project.sequences[i]
sequences.append(get_p_sequence(add_seq))
s_proj.sequences = sequences
# Remove unpickleable attributes
remove_attrs(s_proj, PROJECT_REMOVE)
# Write out file.
write_file = file(file_path, "wb")
pickle.dump(s_proj, write_file)
def get_p_sequence(sequence):
    """
    Create a pickleable copy of a sequence: tracks and compositors are
    replaced with pickleable versions and unpickleable attributes removed.
    """
    s_seq = copy.copy(sequence)
    # Replace tracks and compositors with pickleable objects
    s_seq.tracks = [get_p_playlist(track) for track in sequence.tracks]
    s_seq.compositors = get_p_compositors(sequence.compositors)
    # Remove unpickleable attributes
    remove_attrs(s_seq, SEQUENCE_REMOVE)
    return s_seq
def get_p_playlist(playlist):
    """
    Create a pickleable copy of an MLT Playlist track: clips replaced with
    pickleable versions, unpickleable attributes removed.
    """
    s_playlist = copy.copy(playlist)
    s_playlist.clips = [get_p_clip(clip) for clip in playlist.clips]
    remove_attrs(s_playlist, PLAY_LIST_REMOVE)
    return s_playlist
def get_p_clip(clip):
    """
    Create a pickleable copy of an MLT Producer (clip) object.

    Filters, mute filter and sync data are replaced with pickleable
    versions; unpickleable attributes and transient waveform data are
    dropped. Also rewrites the media path during proxy conversion and
    snapshot saves.
    """
    s_clip = copy.copy(clip)

    # Set 'type' attribute for MLT object type.
    # This IS NOT USED anywhere anymore and should be removed.
    s_clip.type = 'Mlt__Producer'

    # Get replace filters
    filters = []
    try: # This fails for blank clips (no 'filters' attribute).
         # We'll just save them with an empty filters list.
        for i in range(0, len(clip.filters)):
            f = clip.filters[i]
            filters.append(get_p_filter(f))
    except:
        pass
    s_clip.filters = filters

    # Replace mute filter object with boolean to flag mute
    if s_clip.mute_filter != None:
        s_clip.mute_filter = True

    # Get replace sync data (master clip reference -> master clip id)
    if s_clip.sync_data != None:
        s_clip.sync_data = get_p_sync_data(s_clip.sync_data)

    # Remove unpickleable attributes
    remove_attrs(s_clip, CLIP_REMOVE)

    # Don't save waveform data.
    s_clip.waveform_data = None

    # Add pickleable filters (re-assigned; already set above)
    s_clip.filters = filters

    # Do proxy mode convert if needed
    if (project_proxy_mode == appconsts.CONVERTING_TO_USE_PROXY_MEDIA or
        project_proxy_mode == appconsts.CONVERTING_TO_USE_ORIGINAL_MEDIA):
        try: # This fails when it is supposed to fail: for clips that have
             # no proxy, and for pattern producers and blanks.
            s_clip.path = proxy_path_dict[s_clip.path]
        except:
            pass

    # Change paths when doing snapshot save
    try: # This fails for pattern producers and blanks (no valid path key)
        if snapshot_paths != None:
            s_clip.path = snapshot_paths[s_clip.path]
    except:
        pass

    return s_clip
def get_p_filter(f):
    """
    Create a pickleable copy of a filter object; records whether it was a
    multipart filter so the right type can be recreated on load.
    """
    s_filter = copy.copy(f)
    remove_attrs(s_filter, FILTER_REMOVE)
    s_filter.is_multi_filter = (f.info.multipart_filter != False)
    return s_filter
def get_p_compositors(compositors):
    """
    Return pickleable copies of compositors: the unpickleable
    mlt_transition reference is cleared on a copied transition object.
    """
    s_compositors = []
    for comp in compositors:
        s_comp = copy.copy(comp)
        s_comp.transition = copy.copy(comp.transition)
        s_comp.transition.mlt_transition = None  # SwigPyObject, not pickleable
        s_compositors.append(s_comp)
    return s_compositors
def get_p_sync_data(sync_data):
    """Return a copy of sync_data with the master clip reference replaced by its id."""
    s_sync_data = copy.copy(sync_data)
    # When saving relinked projects sync_data.master_clip is already an int
    # id and does not need to be replaced.
    if isinstance(sync_data.master_clip, int):
        return s_sync_data
    s_sync_data.master_clip = sync_data.master_clip.id
    return s_sync_data
def remove_attrs(obj, remove_attrs):
    """
    Remove the listed (unpickleable) attributes from obj.
    Attributes that do not exist on obj are silently ignored.
    """
    for attr in remove_attrs:
        try:
            delattr(obj, attr)
        # Narrowed from 'except Exception': only a missing attribute is
        # expected here; anything else should surface as a real error.
        except AttributeError:
            pass
# -------------------------------------------------- LOAD
def load_project(file_path, icons_and_thumnails=True, relinker_load=False):
_show_msg("Unpickling")
# Load project object
f = open(file_path)
project = pickle.load(f)
# Relinker only operates on pickleable python data
if relinker_load:
return project
global _load_file_path
_load_file_path = file_path
# editorstate.project needs to be available for sequence building
editorstate.project = project
if(not hasattr(project, "SAVEFILE_VERSION")):
project.SAVEFILE_VERSION = 1 # first save files did not have this
print "Loading " + project.name + ", SAVEFILE_VERSION:", project.SAVEFILE_VERSION
# Set MLT profile. NEEDS INFO USER ON MISSING PROFILE!!!!!
project.profile = mltprofiles.get_profile(project.profile_desc)
FIX_MISSING_PROJECT_ATTRS(project)
# Some profiles may not be available in system
# inform user on fix
if project.profile == None:
raise ProjectProfileNotFoundError(project.profile_desc)
# Add MLT objects to sequences.
global all_clips, sync_clips
for seq in project.sequences:
FIX_N_TO_3_SEQUENCE_COMPATIBILITY(seq)
_show_msg(_("Building sequence ") + seq.name)
all_clips = {}
sync_clips = []
seq.profile = project.profile
fill_sequence_mlt(seq, project.SAVEFILE_VERSION)
handle_seq_watermark(seq)
if not hasattr(seq, "seq_len"):
seq.update_edit_tracks_length()
all_clips = {}
sync_clips = []
for k, media_file in project.media_files.iteritems():
if project.SAVEFILE_VERSION < 4:
FIX_N_TO_4_MEDIA_FILE_COMPATIBILITY(media_file)
media_file.current_frame = 0 # this is always reset on load, value is not considered persistent
if media_file.type != appconsts.PATTERN_PRODUCER:
media_file.path = get_media_asset_path(media_file.path, _load_file_path)
# Add icons to media files
if icons_and_thumnails == True:
_show_msg(_("Loading icons"))
for k, media_file in project.media_files.iteritems():
media_file.create_icon()
project.c_seq = project.sequences[project.c_seq_index]
if icons_and_thumnails == True:
project.init_thumbnailer()
return project
def fill_sequence_mlt(seq, SAVEFILE_VERSION):
    """
    Replace a loaded sequence's python placeholder objects with MLT objects:
    tractor/field/multitrack, tracks with clips and filters, compositors,
    and finally sync relations between clips.
    """
    # Create tractor, field, multitrack
    seq.init_mlt_objects()

    # Grab and replace py tracks. Done this way to use the same create
    # method as when originally created.
    py_tracks = seq.tracks
    seq.tracks = []

    # editorstate.project.c_seq needs to be available for sequence building
    editorstate.project.c_seq = seq

    # Create and fill MLT tracks.
    for py_track in py_tracks:
        mlt_track = seq.add_track(py_track.type)
        fill_track_mlt(mlt_track, py_track)
        # Set audio gain and pan filter values
        if hasattr(mlt_track, "gain_filter"): # hidden track and black track do not have these
            mlt_track.gain_filter.set("gain", str(mlt_track.audio_gain))
        if mlt_track.audio_pan != appconsts.NO_PAN:
            seq.add_track_pan_filter(mlt_track, mlt_track.audio_pan) # only tracks with non-center pan values have pan filters

    # Create and connect compositors.
    mlt_compositors = []
    for py_compositor in seq.compositors:
        # Keeping backwards compability
        if SAVEFILE_VERSION < 3:
            FIX_N_TO_3_COMPOSITOR_COMPABILITY(py_compositor, SAVEFILE_VERSION)
        # Create new compositor object
        compositor = mlttransitions.create_compositor(py_compositor.type_id)
        compositor.create_mlt_objects(seq.profile)
        # Copy and set param values
        compositor.transition.properties = copy.deepcopy(py_compositor.transition.properties)
        _fix_wipe_relative_path(compositor)
        compositor.transition.update_editable_mlt_properties()
        compositor.transition.set_tracks(py_compositor.transition.a_track, py_compositor.transition.b_track)
        compositor.set_in_and_out(py_compositor.clip_in, py_compositor.clip_out)
        compositor.origin_clip_id = py_compositor.origin_clip_id
        mlt_compositors.append(compositor)
    seq.compositors = mlt_compositors
    seq.restack_compositors()

    # Connect sync relations (sync_clips was filled by fill_track_mlt)
    for clip_n_track in sync_clips:
        clip, track = clip_n_track
        try:
            master_clip = all_clips[clip.sync_data.master_clip] # master clip has been replaced with its id on save
            clip.sync_data.master_clip = master_clip # put back reference to master clip
            resync.clip_added_to_timeline(clip, track) # save data to enable sync states monitoring after edits
        except KeyError:
            clip.sync_data = None # master clip no longer on track V1
            resync.clip_removed_from_timeline(clip)

    # This sets the MLT properties that actually do the muting
    seq.set_tracks_mute_state()

    seq.length = None
def fill_track_mlt(mlt_track, py_track):
    """
    Replace py placeholder clips in a track (MLT Playlist) with MLT producer
    objects: file producers, pattern producers and blanks. Also records
    created clips into module dicts all_clips / sync_clips so sync
    relations can be reconnected afterwards.

    Raises FileProducerNotFoundError if a media file cannot be opened.
    """
    # Update mlt obj attr values to saved ones
    mlt_track.__dict__.update(py_track.__dict__)

    # Clear py clips from MLT object
    mlt_track.clips = []

    # Create clips
    sequence = mlt_track.sequence
    for i in range(0, len(py_track.clips)):
        clip = py_track.clips[i]
        mlt_clip = None
        append_created = True # blanks get appended at creation time, other clips don't

        # Add color attribute if not found (older save files lack it)
        if not hasattr(clip, "color"):
            clip.color = None

        # normal clip
        if (clip.is_blanck_clip == False and (clip.media_type != appconsts.PATTERN_PRODUCER)):
            orig_path = clip.path # Save the path for error message
            clip.path = get_media_asset_path(clip.path, _load_file_path)
            mlt_clip = sequence.create_file_producer_clip(clip.path)
            if mlt_clip == None:
                raise FileProducerNotFoundError(orig_path)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        # pattern producer
        elif (clip.is_blanck_clip == False and (clip.media_type == appconsts.PATTERN_PRODUCER)):
            mlt_clip = sequence.create_pattern_producer(clip.create_data)
            mlt_clip.__dict__.update(clip.__dict__)
            fill_filters_mlt(mlt_clip, sequence)
        # blank clip
        elif (clip.is_blanck_clip == True):
            length = clip.clip_out - clip.clip_in + 1
            mlt_clip = sequence.create_and_insert_blank(mlt_track, i, length)
            mlt_clip.__dict__.update(clip.__dict__)
            append_created = False
        else: # This is just for info, if this ever happens crash will happen.
            print "Could not recognize clip, dict:"
            print clip.__dict__

        mlt_clip.selected = False # This transient state gets saved and
                                  # we want everything unselected to begin with

        # Mute
        if clip.mute_filter != None:
            mute_filter = mltfilters.create_mute_volume_filter(sequence)
            mltfilters.do_clip_mute(mlt_clip, mute_filter)

        # Add to track in MLT if hasn't already been appended (blank clip has)
        if append_created == True:
            append_clip(mlt_track, mlt_clip, clip.clip_in, clip.clip_out)

        # Save references to recreate sync relations after all clips loaded
        global all_clips, sync_clips
        all_clips[mlt_clip.id] = mlt_clip
        if mlt_clip.sync_data != None:
            sync_clips.append((mlt_clip, mlt_track))
def fill_filters_mlt(mlt_clip, sequence):
    """
    Creates new FilterObject objects and creates and attaches mlt.Filter
    objects for each saved py filter on the clip.
    """
    filters = []
    for py_filter in mlt_clip.filters:
        if py_filter.is_multi_filter == False:
            # "affine" filters saved with SAVEFILE_VERSION 1 need x,y -> x/y fix
            if py_filter.info.mlt_service_id == "affine":
                FIX_1_TO_N_BACKWARDS_FILTER_COMPABILITY(py_filter)
            filter_object = mltfilters.FilterObject(py_filter.info)
            filter_object.__dict__.update(py_filter.__dict__)
            filter_object.create_mlt_filter(sequence.profile)
            mlt_clip.attach(filter_object.mlt_filter)
        else:
            filter_object = mltfilters.MultipartFilterObject(py_filter.info)
            filter_object.__dict__.update(py_filter.__dict__)
            filter_object.create_mlt_filters(sequence.profile, mlt_clip)
            filter_object.attach_all_mlt_filters(mlt_clip)

        # Filters saved as inactive need their MLT disabled flag set
        if filter_object.active == False:
            filter_object.update_mlt_disabled_value()

        filters.append(filter_object)

    mlt_clip.filters = filters
#------------------------------------------------------------ track building
# THIS IS COPYPASTED FROM edit.py TO NOT IMPORT IT.
def append_clip(track, clip, clip_in, clip_out):
    """
    Append clip at the end of track with the given in/out frames.
    Affects MLT c-struct and python obj values.
    """
    clip.clip_in = clip_in
    clip.clip_out = clip_out
    track.clips.append(clip)               # python side
    track.append(clip, clip_in, clip_out)  # mlt side
    resync.clip_added_to_timeline(clip, track)
# --------------------------------------------------------- watermarks
def handle_seq_watermark(seq):
    """
    Recreate watermark state for a loaded sequence; save files from before
    watermarks existed get the attributes added with no watermark set.
    """
    if not hasattr(seq, "watermark_file_path"):
        seq.watermark_filter = None
        seq.watermark_file_path = None
        return
    if seq.watermark_file_path != None:
        seq.add_watermark(seq.watermark_file_path)
    else:
        seq.watermark_filter = None
# --------------------------------------------------------- relative paths
def get_media_asset_path(path, load_file_path):
    """
    Resolve a saved media path using the user's configured load order:
    absolute-first, relative-first, or absolute only.
    A failed relative search returns NOT_FOUND from get_relative_path().
    """
    order = editorpersistance.prefs.media_load_order
    if order == appconsts.LOAD_ABSOLUTE_FIRST:
        # Use absolute path if it exists, fall back to relative search.
        if os.path.isfile(path):
            return path
        return get_relative_path(load_file_path, path)
    elif order == appconsts.LOAD_RELATIVE_FIRST:
        # Try relative search first, fall back to the absolute path.
        relative = get_relative_path(load_file_path, path)
        if relative == NOT_FOUND:
            return path
        return relative
    else:
        # Only look in existing absolute path
        return path
def get_relative_path(project_file_path, asset_path):
    """
    Search the project file's folder tree for a file with the asset's file
    name. Returns the first match found, or NOT_FOUND if there is none.
    """
    asset_folder, asset_file_name = os.path.split(asset_path)
    _show_msg("Relative file search for " + asset_file_name + "...", delay=0.0)

    project_folder, project_file_name = os.path.split(project_file_path)
    matches = []
    for root, dirnames, filenames in os.walk(project_folder):
        for hit in fnmatch.filter(filenames, asset_file_name):
            matches.append(os.path.join(root, hit))

    if len(matches) == 0:
        return NOT_FOUND # no relative path found
    # More than one match: some error handling may be needed? First one wins.
    return matches[0]
# ------------------------------------------------------- backwards compability
def FIX_N_TO_3_COMPOSITOR_COMPABILITY(compositor, SAVEFILE_VERSION):
    """Apply all compositor fixes needed for save files older than version 3."""
    if SAVEFILE_VERSION == 1:
        FIX_1_TO_2_BACKWARDS_COMPOSITOR_COMPABILITY(compositor)
    # Version 1 and 2 both need the index -> type_id conversion.
    FIX_2_TO_N_BACKWARDS_COMPOSITOR_COMPABILITY(compositor)
def FIX_1_TO_2_BACKWARDS_COMPOSITOR_COMPABILITY(compositor):
    """
    Fix SAVEFILE_VERSION 1 -> N compatibility issue: compositor geometry
    property values used ',' where '/' is now expected (x,y -> x/y).
    """
    compositor.transition.properties = [
        (name, value.replace(",", "/"), prop_type)
        for (name, value, prop_type) in compositor.transition.properties]
def FIX_2_TO_N_BACKWARDS_COMPOSITOR_COMPABILITY(compositor):
    """Derive the type_id string from the old numeric compositor index (save file versions 1 and 2)."""
    compositor.type_id = compositors_index_to_type_id[compositor.compositor_index]
def FIX_1_TO_N_BACKWARDS_FILTER_COMPABILITY(py_filter):
    """
    Fix SAVEFILE_VERSION 1 -> N compatibility issue with x,y -> x/y values.
    This is only called on "affine" filters.
    """
    py_filter.properties = [
        (name, value.replace(",", "/"), prop_type)
        for (name, value, prop_type) in py_filter.properties]
def FIX_N_TO_3_SEQUENCE_COMPATIBILITY(seq):
    """Add master audio attributes missing from older save files."""
    if hasattr(seq, "master_audio_pan"):
        return
    seq.master_audio_pan = appconsts.NO_PAN
    seq.master_audio_gain = 1.0
def FIX_N_TO_4_MEDIA_FILE_COMPATIBILITY(media_file):
    """Add proxy media attributes missing from save files older than version 4."""
    media_file.has_proxy_file = False
    media_file.is_proxy_file = False
    media_file.second_file_path = None
def FIX_MISSING_PROJECT_ATTRS(project):
    """Add project attributes introduced after the project was saved."""
    if not hasattr(project, "proxy_data"):
        project.proxy_data = miscdataobjects.ProjectProxyEditingData()
    # These were all added later as empty lists.
    for attr_name in ("media_log", "events", "media_log_groups"):
        if not hasattr(project, attr_name):
            setattr(project, attr_name, [])
def _fix_wipe_relative_path(compositor):
    """
    Wipe and region compositors may reference a user luma file; re-resolve
    its path relative to the loaded project file.
    """
    if compositor.type_id == "##wipe":
        _set_wipe_res_path(compositor, "resource")
    if compositor.type_id == "##region":
        _set_wipe_res_path(compositor, "composite.luma")
def _set_wipe_res_path(compositor, res_property):
    """Replace res_property's value with a path resolved against the loaded project file."""
    old_path = propertyparse.get_property_value(compositor.transition.properties, res_property)
    resolved_path = get_media_asset_path(old_path, _load_file_path)
    propertyparse.set_property_value(compositor.transition.properties, res_property, resolved_path)
# List is used to convert SAVEFILE_VERSIONs 1 and 2 to SAVEFILE_VERSIONs 3 -> n
# by getting the type_id string for the old numeric compositor index
# (see FIX_2_TO_N_BACKWARDS_COMPOSITOR_COMPABILITY). Order must not change.
compositors_index_to_type_id = ["##affine","##opacity_kf","##pict_in_pict", "##region","##wipe", "##add",
                                "##burn", "##color_only", "##darken", "##difference", "##divide", "##dodge",
                                "##grain_extract", "##grain_merge", "##hardlight", "##hue", "##lighten",
                                "##multiply", "##overlay", "##saturation", "##screen", "##softlight",
                                "##subtract", "##value"]
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module is used to create pattern producer media objects for bins and
corresponding mlt.Producers for timeline.
"""
import copy
import pygtk
pygtk.require('2.0');
import gtk
import mlt
import appconsts
import dialogutils
import guiutils
from editorstate import PROJECT
import gui
import mltrefhold
import respaths
import utils
# Pattern producer types; subclasses of AbstractBinClip set one of these as
# their patter_producer_type (used by the deprecated old-style dispatch).
UNDEFINED = 0
COLOR_CLIP = 1
NOISE_CLIP = 2
EBUBARS_CLIP = 3
ISING_CLIP = 4
COLOR_PULSE_CLIP = 5
# ---------------------------------------------------- create callbacks
def create_color_clip():
    """Open the color clip dialog; the callback adds the media object on accept."""
    _color_clip_dialog(_create_color_clip_callback)
def _create_color_clip_callback(dialog, response_id, widgets):
    """Dialog response handler: add a new color clip media object on accept."""
    if response_id == gtk.RESPONSE_ACCEPT:
        name_entry, color_button = widgets
        clip_name = name_entry.get_text()
        rgb_str = color_button.get_color().to_string()
        new_object = BinColorClip(PROJECT().next_media_file_id, clip_name, rgb_str)
        PROJECT().add_pattern_producer_media_object(new_object)
        _update_gui_for_pattern_producer_media_object_add()
    dialog.destroy()
def create_noise_clip():
    """Add a new noise pattern producer media object to the project."""
    new_object = BinNoiseClip(PROJECT().next_media_file_id, _("Noise"))
    PROJECT().add_pattern_producer_media_object(new_object)
    _update_gui_for_pattern_producer_media_object_add()
def create_bars_clip():
    """Add a new EBU bars pattern producer media object to the project."""
    new_object = BinColorBarsClip(PROJECT().next_media_file_id, _("EBU Bars"))
    PROJECT().add_pattern_producer_media_object(new_object)
    _update_gui_for_pattern_producer_media_object_add()
def create_icing_clip():
    """Open the Ising model clip dialog; the callback adds the media object on accept."""
    _ising_clip_dialog(_create_ising_clip_callback)
def _create_ising_clip_callback(dialog, response_id, widgets):
    """Dialog response handler: add a new Ising model clip on accept."""
    if response_id == gtk.RESPONSE_ACCEPT:
        new_object = BinIsingClip(PROJECT().next_media_file_id, _("Ising"))
        # widgets is (temperature, boundary growth, spontaneous growth) sliders;
        # slider values are percentages, properties take 0.0 - 1.0.
        slider_values = [s.get_adjustment().get_value() / 100.0 for s in widgets]
        new_object.set_property_values(*slider_values)
        PROJECT().add_pattern_producer_media_object(new_object)
        _update_gui_for_pattern_producer_media_object_add()
    dialog.destroy()
def create_color_pulse_clip():
    """Open the color pulse clip dialog; the callback adds the media object on accept."""
    _color_pulse_clip_dialog(_create_color_pulse_clip_callback)
def _create_color_pulse_clip_callback(dialog, response_id, widgets):
    """Dialog response handler: add a new color pulse clip on accept."""
    if response_id == gtk.RESPONSE_ACCEPT:
        new_object = BinColorPulseClip(PROJECT().next_media_file_id, _("Color Pulse"))
        # widgets is (s1, s2, s3, s4, m1, m2) sliders;
        # slider values are percentages, properties take 0.0 - 1.0.
        slider_values = [s.get_adjustment().get_value() / 100.0 for s in widgets]
        new_object.set_property_values(*slider_values)
        PROJECT().add_pattern_producer_media_object(new_object)
        _update_gui_for_pattern_producer_media_object_add()
    dialog.destroy()
def _update_gui_for_pattern_producer_media_object_add():
    """Refresh the media and bin views after a pattern producer media object is added."""
    gui.media_list_view.fill_data_model()
    gui.bin_list_view.fill_data_model()
# ----------------------------------------------------
def create_pattern_producer(profile, bin_clip):
    """
    Create an mlt producer timeline clip for a bin pattern producer object.
    bin_clip is an instance of an AbstractBinClip extending class.
    """
    try:
        clip = bin_clip.create_mlt_producer(profile)
    # Narrowed from bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed; falls back to the deprecated dispatch for bin clips saved
    # before create_mlt_producer() existed (see module comment below).
    except Exception:
        clip = _create_patten_producer_old_style(profile, bin_clip)

    clip.path = ""
    clip.filters = []
    clip.name = bin_clip.name
    clip.media_type = appconsts.PATTERN_PRODUCER

    # Save creation data for cloning when editing or doing save/load
    clip.create_data = copy.copy(bin_clip)
    clip.create_data.icon = None # this is not pickleable, recreate when needed
    return clip
# --------------------------------------------------- DECPRECATED producer create methods
# --------------------------------------------------- REMOVE 2017
"""
We originally did producer creation using elifs and now using pickle() for save/load
requires keeping this around until atleast 2017 for backwards compatibility.
"""
def _create_patten_producer_old_style(profile, bin_clip):
    # Deprecated dispatch on pattern producer type, kept only so projects
    # saved by old versions still load.
    # NOTE(review): an unknown patter_producer_type leaves 'clip' unbound
    # and raises UnboundLocalError at the return.
    if bin_clip.patter_producer_type == COLOR_CLIP:
        clip = create_color_producer(profile, bin_clip.gdk_color_str)
    elif bin_clip.patter_producer_type == NOISE_CLIP:
        clip = _create_noise_producer(profile)
    elif bin_clip.patter_producer_type == EBUBARS_CLIP:
        clip = _create_ebubars_producer(profile)
    return clip
def create_color_producer(profile, gdk_color_str):
    # Create a single-color MLT producer; the gdk color string is stored
    # on the producer so the color can be edited later.
    mlt_color = utils.gdk_color_str_to_mlt_color_str(gdk_color_str)
    producer = mlt.Producer(profile, "colour", mlt_color)
    mltrefhold.hold_ref(producer) # keep a Python-side reference alive
    producer.gdk_color_str = gdk_color_str
    return producer
def _create_noise_producer(profile):
    # Deprecated: noise pattern producer from the frei0r "nois0r" generator.
    producer = mlt.Producer(profile, "frei0r.nois0r")
    mltrefhold.hold_ref(producer) # keep a Python-side reference alive
    return producer
def _create_ebubars_producer(profile):
    # Deprecated: EBU color bars producer from a bundled still image.
    producer = mlt.Producer(profile, respaths.PATTERN_PRODUCER_PATH + "ebubars.png")
    mltrefhold.hold_ref(producer) # keep a Python-side reference alive
    return producer
# --------------------------------------------------- END DEPRECATED producer create methods
# --------------------------------------------------- bin media objects
class AbstractBinClip: # not extends projectdata.MediaFile? too late, too late. Also better name would be AbstractBinPatternProducer
    """
    A pattern producer object present in Media Bin.

    Extending classes set self.patter_producer_type and override
    create_mlt_producer() and create_icon().
    """
    def __init__(self, id, name):
        self.id = id
        self.name = name
        self.length = 15000 # default producer length
        self.type = appconsts.PATTERN_PRODUCER
        self.icon = None
        self.patter_producer_type = UNDEFINED # extending sets value
        self.mark_in = -1
        self.mark_out = -1
        # Pattern producers are never proxied; these attributes exist to
        # match the media file object interface used elsewhere.
        self.has_proxy_file = False
        self.is_proxy_file = False
        self.second_file_path = None

        self.create_icon()

    def create_mlt_producer(self, profile):
        # Abstract; extending classes must override.
        print "create_mlt_producer not implemented"

    def create_icon(self):
        # Abstract; extending classes must override.
        print "patter producer create_icon() not implemented"
class BinColorClip(AbstractBinClip):
    """
    Color Clip that can be added to and edited in Sequence.
    """
    def __init__(self, id, name, gdk_color_str):
        # gdk_color_str must be set before the base __init__ because
        # AbstractBinClip.__init__ calls create_icon(), which reads it.
        self.gdk_color_str = gdk_color_str
        AbstractBinClip.__init__(self, id, name)
        self.patter_producer_type = COLOR_CLIP

    def create_mlt_producer(self, profile):
        mlt_color = utils.gdk_color_str_to_mlt_color_str(self.gdk_color_str)
        producer = mlt.Producer(profile, "colour", mlt_color)
        mltrefhold.hold_ref(producer) # keep a Python-side reference alive
        producer.gdk_color_str = self.gdk_color_str
        return producer

    def create_icon(self):
        # Solid-color thumbnail filled with the clip color.
        icon = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, appconsts.THUMB_WIDTH, appconsts.THUMB_HEIGHT)
        pixel = utils.gdk_color_str_to_int(self.gdk_color_str)
        icon.fill(pixel)
        self.icon = icon
class BinNoiseClip(AbstractBinClip):
    """Noise pattern producer using frei0r "nois0r" generator."""
    def __init__(self, id, name):
        AbstractBinClip.__init__(self, id, name)
        self.patter_producer_type = NOISE_CLIP

    def create_mlt_producer(self, profile):
        producer = mlt.Producer(profile, "frei0r.nois0r")
        mltrefhold.hold_ref(producer) # keep a Python-side reference alive
        return producer

    def create_icon(self):
        self.icon = gtk.gdk.pixbuf_new_from_file(respaths.PATTERN_PRODUCER_PATH + "noise_icon.png")
class BinColorBarsClip(AbstractBinClip):
    """EBU color bars pattern producer created from a bundled still image."""
    def __init__(self, id, name):
        AbstractBinClip.__init__(self, id, name)
        self.patter_producer_type = EBUBARS_CLIP

    def create_mlt_producer(self, profile):
        producer = mlt.Producer(profile, respaths.PATTERN_PRODUCER_PATH + "ebubars.png")
        mltrefhold.hold_ref(producer) # keep a Python-side reference alive
        return producer

    def create_icon(self):
        self.icon = gtk.gdk.pixbuf_new_from_file(respaths.PATTERN_PRODUCER_PATH + "bars_icon.png")
class BinIsingClip(AbstractBinClip):
    """Ising model pattern producer using frei0r "ising0r" generator."""
    def __init__(self, id, name):
        AbstractBinClip.__init__(self, id, name)
        self.patter_producer_type = ISING_CLIP

    def set_property_values(self, temp, bg, sg):
        # Values are 0.0 - 1.0, converted from dialog sliders (0 - 100)
        # before producer creation.
        self.temp = temp
        self.bg = bg
        self.sg = sg

    def create_mlt_producer(self, profile):
        producer = mlt.Producer(profile, "frei0r.ising0r")
        producer.set("Temperature", str(self.temp))
        producer.set("Border Growth", str(self.bg))
        producer.set("Spontaneous Growth", str(self.sg))
        mltrefhold.hold_ref(producer) # keep a Python-side reference alive
        return producer

    def create_icon(self):
        self.icon = gtk.gdk.pixbuf_new_from_file(respaths.PATTERN_PRODUCER_PATH + "ising_icon.png")
class BinColorPulseClip(AbstractBinClip):
    """Color pulse pattern producer using frei0r "plasma" generator."""
    def __init__(self, id, name):
        AbstractBinClip.__init__(self, id, name)
        self.patter_producer_type = COLOR_PULSE_CLIP

    def set_property_values(self, s1, s2, s3, s4, m1, m2):
        # Speeds (s1..s4) and moves (m1, m2) are 0.0 - 1.0, converted
        # from dialog sliders (0 - 100) before producer creation.
        self.s1 = s1
        self.s2 = s2
        self.s3 = s3
        self.s4 = s4
        self.m1 = m1
        self.m2 = m2

    def create_mlt_producer(self, profile):
        producer = mlt.Producer(profile, "frei0r.plasma")
        producer.set("1_speed", str(self.s1))
        producer.set("2_speed", str(self.s2))
        producer.set("3_speed", str(self.s3))
        producer.set("4_speed", str(self.s4))
        producer.set("1_move", str(self.m1))
        producer.set("2_move", str(self.m2))
        mltrefhold.hold_ref(producer) # keep a Python-side reference alive
        return producer

    def create_icon(self):
        self.icon = gtk.gdk.pixbuf_new_from_file(respaths.PATTERN_PRODUCER_PATH + "color_pulse_icon.png")
# ----------------------------------------------------- dialogs
def _color_clip_dialog(callback):
    """
    Build and show the "Create Color Clip" dialog.

    callback(dialog, response_id, (name_entry, color_button)) is connected
    to the dialog's 'response' signal.
    """
    dialog = gtk.Dialog(_("Create Color Clip"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("Create").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    name_entry = gtk.Entry()
    name_entry.set_text(_("Color Clip"))

    color_button = gtk.ColorButton()
    cb_hbox = gtk.HBox(False, 0)
    cb_hbox.pack_start(color_button, False, False, 4)
    cb_hbox.pack_start(gtk.Label(), True, True, 0)

    row1 = guiutils.get_two_column_box(gtk.Label(_("Clip Name:")), name_entry, 200)
    row2 = guiutils.get_two_column_box(gtk.Label(_("Select Color:")), cb_hbox, 200)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    selection_widgets = (name_entry, color_button)

    dialog.connect('response', callback, selection_widgets)
    dialog.vbox.pack_start(align, True, True, 0)
    dialogutils.default_behaviour(dialog)
    dialog.show_all()
def _ising_clip_dialog(callback):
    """
    Build and show the "Create Ising Clip" dialog with three 0 - 100 sliders.

    callback(dialog, response_id, (n_slider, bg_slider, sg_slider)) is
    connected to the dialog's 'response' signal.
    """
    dialog = gtk.Dialog(_("Create Ising Clip"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("Create").encode('utf-8'), gtk.RESPONSE_ACCEPT))

    n_box, n_slider = guiutils.get_non_property_slider_row(0, 100, 1)
    bg_box, bg_slider = guiutils.get_non_property_slider_row(0, 100, 1)
    sg_box, sg_slider = guiutils.get_non_property_slider_row(0, 100, 1)

    row1 = guiutils.get_two_column_box(gtk.Label(_("Noise temperature:")), n_box, 200)
    row2 = guiutils.get_two_column_box(gtk.Label(_("Border growth:")), bg_box, 200)
    # NOTE(review): "Spontanious" is misspelled ("Spontaneous"); fixing it
    # changes a translatable msgid, so translation catalogs must be updated too.
    row3 = guiutils.get_two_column_box(gtk.Label(_("Spontanious growth:")), sg_box, 200)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(row3, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)
    vbox.set_size_request(450, 150)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    selection_widgets = (n_slider, bg_slider, sg_slider)

    dialog.connect('response', callback, selection_widgets)
    dialog.vbox.pack_start(align, True, True, 0)
    dialogutils.default_behaviour(dialog)
    dialog.show_all()
def _color_pulse_clip_dialog(callback):
    """
    Build and show the "Create Color Pulse Clip" dialog with six 0 - 100 sliders.

    callback(dialog, response_id, (s1, s2, s3, s4, m1, m2)) is connected
    to the dialog's 'response' signal.
    """
    dialog = gtk.Dialog(_("Create Color Pulse Clip"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("Create").encode('utf-8'), gtk.RESPONSE_ACCEPT))

    s1_box, s1_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)
    s2_box, s2_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)
    s3_box, s3_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)
    s4_box, s4_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)
    m1_box, m1_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)
    m2_box, m2_slider = guiutils.get_non_property_slider_row(0, 100, 1, 100)

    row1 = guiutils.get_two_column_box(gtk.Label(_("Speed 1:")), s1_box, 200)
    row2 = guiutils.get_two_column_box(gtk.Label(_("Speed 2:")), s2_box, 200)
    row3 = guiutils.get_two_column_box(gtk.Label(_("Speed 3:")), s3_box, 200)
    row4 = guiutils.get_two_column_box(gtk.Label(_("Speed 4:")), s4_box, 200)
    row5 = guiutils.get_two_column_box(gtk.Label(_("Move 1:")), m1_box, 200)
    row6 = guiutils.get_two_column_box(gtk.Label(_("Move 2:")), m2_box, 200)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(row3, False, False, 0)
    vbox.pack_start(row4, False, False, 0)
    vbox.pack_start(row5, False, False, 0)
    vbox.pack_start(row6, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)
    vbox.set_size_request(450, 220)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    selection_widgets = (s1_slider, s2_slider, s3_slider, s4_slider, m1_slider, m2_slider)

    dialog.connect('response', callback, selection_widgets)
    dialog.vbox.pack_start(align, True, True, 0)
    dialogutils.default_behaviour(dialog)
    dialog.show_all()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module manages undo and redo stacks and executes edit actions from them
on user requests.
"""
import editorstate
set_post_undo_redo_edit_mode = None # This is set at startup to avoid circular imports
repaint_tline = None # This is set at startup to avoid circular imports

# Max stack size
MAX_UNDOS = 35

# EditActions are placed in this stack after their do_edit()
# method has been called
undo_stack = []

# Index is the stack pointer that tracks done undos and redos.
# The value of index is index of next undo + 1
# The value of index is index of next redo or == stack size if
# no redos.
index = 0

# Some menu items are set active/deactive based on undo stack state
save_item = None
undo_item = None
redo_item = None
def set_post_undo_redo_callback(undo_redo_callback):
    # Called at startup; stored instead of imported to avoid circular imports.
    global set_post_undo_redo_edit_mode
    set_post_undo_redo_edit_mode = undo_redo_callback
def set_menu_items(uimanager):
    # Look up the menu items whose sensitivity tracks undo stack state.
    global save_item, undo_item, redo_item
    save_item = uimanager.get_widget("/MenuBar/FileMenu/Save")
    undo_item = uimanager.get_widget("/MenuBar/EditMenu/Undo")
    redo_item = uimanager.get_widget("/MenuBar/EditMenu/Redo")
def register_edit(undo_edit):
    """
    Adds a performed EditAction into undo stack
    """
    global index

    # New edit action clears all redos(== undos after index)
    if index != len(undo_stack) and (len(undo_stack) != 0):
        del undo_stack[index:]

    # Keep stack in size, if too big remove undo at 0
    if len(undo_stack) > MAX_UNDOS:
        del undo_stack[0]
        index = index - 1 # oldest edit dropped, pointer moves down one

    # Add to stack and grow index
    undo_stack.append(undo_edit);
    index = index + 1

    save_item.set_sensitive(True) # Disabled at load and save, first edit enables
    undo_item.set_sensitive(True)
    redo_item.set_sensitive(False) # new edit invalidated any redos
def do_undo_and_repaint(widget=None, data=None):
    # Menu/key handler: undo one edit and redraw the timeline.
    do_undo()
    repaint_tline()
def do_redo_and_repaint(widget=None, data=None):
    # Menu/key handler: redo one edit and redraw the timeline.
    do_redo()
    repaint_tline()
def do_undo():
    """
    Undo the edit below the stack pointer and move the pointer down.
    No-op when there is nothing to undo.
    """
    global index
    if index == 0:
        return

    # Empty stack, no undos
    if len(undo_stack) == 0:
        undo_item.set_sensitive(False)
        redo_item.set_sensitive(False)
        return

    # After undo we may change edit mode
    _set_post_edit_mode()

    # Move stack pointer down and do undo
    index = index - 1
    undo_edit = undo_stack[index]
    undo_edit.undo()

    if index == 0:
        undo_item.set_sensitive(False)

    # An undo was just done, so a redo is now available.
    redo_item.set_sensitive(True)
def do_redo():
    """
    Redo the edit at the stack pointer and move the pointer up.
    No-op when there is nothing to redo.
    """
    global index

    # If we are at the top of the stack, can't do redo
    if index == len(undo_stack):
        redo_item.set_sensitive(False)
        return

    # Empty stack, no redos
    if len(undo_stack) == 0:
        redo_item.set_sensitive(False)
        return

    # After redo we may change edit mode
    _set_post_edit_mode()

    # Do redo and move stack pointer up
    redo_edit = undo_stack[index]
    redo_edit.redo()
    index = index + 1

    if index == len(undo_stack):
        redo_item.set_sensitive(False)

    # A redo was just done, so an undo is now available.
    undo_item.set_sensitive(True)
def _set_post_edit_mode():
    # Trim modes can't survive an undo/redo; switch edit mode via the
    # startup-injected callback unless already in default INSERT_MOVE.
    if editorstate.edit_mode != editorstate.INSERT_MOVE:
        set_post_undo_redo_edit_mode()
def undo_redo_stress_test():
times = 10
delay = 0.100
for r in range(0, times):
while undo.index > 0:
print "undo:", undo.index
do_undo()
time.sleep(delay)
while undo.index < len(undo.undo_stack):
print "redo:", undo.index
do_redo()
time.sleep(delay)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Modules provides functions that:
- parses strings to property tuples or argument dicts
- build value strings from property tuples.
"""
import appconsts
from editorstate import current_sequence
import respaths
# Property type aliases from appconsts.
PROP_INT = appconsts.PROP_INT
PROP_FLOAT = appconsts.PROP_FLOAT
PROP_EXPRESSION = appconsts.PROP_EXPRESSION
NAME = appconsts.NAME
ARGS = appconsts.ARGS

# Keywords replaced with real values when property expressions are activated.
SCREENSIZE = "SCREENSIZE" # replace with "WIDTHxHEIGHT" of profile screensize in pix
WIPE_PATH = "WIPE_PATH" # path to folder containing wipe resource images
SCREENSIZE_WIDTH = "SCREENSIZE_WIDTH" # replace with width of profile screensize in pix
SCREENSIZE_HEIGHT = "SCREENSIZE_HEIGHT" # replace with height of profile screensize in pix
# ------------------------------------------- parse funcs
def node_list_to_properties_array(node_list):
    """
    Returns list of property tuples of type (name, value, type)
    """
    props = []
    for node in node_list:
        value = node.firstChild.nodeValue
        props.append((node.getAttribute(NAME), value, _property_type(value)))
    return props
def node_list_to_non_mlt_properties_array(node_list):
    """
    Returns list of property tuples of type (name, value, type).

    Identical to node_list_to_properties_array(); kept as a separate
    function for call-site clarity.
    """
    props = []
    for node in node_list:
        value = node.firstChild.nodeValue
        props.append((node.getAttribute(NAME), value, _property_type(value)))
    return props
def node_list_to_args_dict(node_list):
    """
    Returns dict of type property_name -> property_args_string
    """
    return dict((node.getAttribute(NAME), node.getAttribute(ARGS))
                for node in node_list)
def node_list_to_extraeditors_array(node_list):
    """Returns list of extra editor names read from nodes' NAME attributes."""
    return [node.getAttribute(NAME) for node in node_list]
def args_string_to_args_dict(args_str):
    """
    Returns key->value dict of property args parsed from a
    space-separated "key=value key=value ..." string.
    """
    pairs = [token.split("=") for token in args_str.split(" ")]
    return dict((parts[0], parts[1]) for parts in pairs)
def replace_value_keywords(properties, profile):
    """
    Property value expressions may have keywords in default values that
    need to be replaced with other expressions when containing
    objects first become active. Mutates 'properties' in place.
    """
    screensize_expr = str(profile.width()) + "x" + str(profile.height())
    for i, (name, value, prop_type) in enumerate(properties):
        if prop_type != PROP_EXPRESSION:
            continue
        value = value.replace(SCREENSIZE, screensize_expr)
        value = value.replace(WIPE_PATH, respaths.WIPE_RESOURCES_PATH)
        properties[i] = (name, value, prop_type)
def get_args_num_value(val_str):
    """
    Returns numerical value for expression in property args:
    an int if possible, then a float, then a known screensize keyword,
    otherwise None.
    """
    try:
        return int(val_str)
    except:
        pass

    try:
        return float(val_str)
    except:
        pass

    # Not a plain number; try known keyword expressions
    if val_str == SCREENSIZE_WIDTH:
        return current_sequence().profile.width()
    if val_str == SCREENSIZE_HEIGHT:
        return current_sequence().profile.height()
    return None
# ------------------------------------------ kf editor values strings to kfs funcs
def single_value_keyframes_string_to_kf_array(keyframes_str, out_to_in_func):
    """
    Parse a "frame=value;frame=value;..." expression into a list of
    (frame, value) tuples, converting values with out_to_in_func.
    """
    # Expressions sometimes carry surrounding quotes; drop them first.
    tokens = keyframes_str.strip('"').split(";")

    keyframes = []
    for token in tokens:
        parts = token.split("=")
        keyframes.append((int(parts[0]), out_to_in_func(float(parts[1]))))
    return keyframes
def geom_keyframes_value_string_to_opacity_kf_array(keyframes_str, out_to_in_func):
    """
    Parse a "composite:geometry" value string into (frame, opacity) tuples.
    Tokens look like "frame=x/y:WxH:opacity"; only the opacity field is used.
    """
    kf_list = []
    # Expressions sometimes carry surrounding quotes; drop them first.
    for token in keyframes_str.strip('"').split(";"):
        sides = token.split("=")
        fields = sides[1].split(':')
        kf_list.append((int(sides[0]), out_to_in_func(float(fields[2]))))
    return kf_list
def geom_keyframes_value_string_to_geom_kf_array(keyframes_str, out_to_in_func):
    """
    Parse a "composite:geometry" value string into
    (frame, [x, y, width, height], opacity) tuples.
    Tokens look like "frame=x/y:WxH:opacity".
    """
    kf_list = []
    # Expressions sometimes carry surrounding quotes; drop them first.
    for token in keyframes_str.strip('"').split(';'):
        sides = token.split('=')
        fields = sides[1].split(':')
        pos = fields[0].split('/')
        size = fields[1].split('x')
        rect = [int(pos[0]), int(pos[1]), int(size[0]), int(size[1])] # x, y, width, height
        kf_list.append((int(sides[0]), rect, out_to_in_func(float(fields[2]))))
    return kf_list
def rotating_geom_keyframes_value_string_to_geom_kf_array(keyframes_str, out_to_in_func):
    """
    Parse an extra editor value string into
    (frame, [x, y, x_scale, y_scale, rotation], opacity) keyframe tuples.

    Values come from "frei0r.cairoaffineblend", which stores everything in
    range 0 - 1 (all frei0r plugins require 0 - 1 values); they are
    converted to editor pixel / percent values here.
    (out_to_in_func is unused; kept for call-signature parity with the
    other keyframe parsers.)
    """
    screen_width = current_sequence().profile.width()
    screen_height = current_sequence().profile.height()

    kf_list = []
    # Expressions sometimes carry surrounding quotes; drop them first.
    for token in keyframes_str.strip('"').split(';'):
        sides = token.split('=')
        vals = sides[1].split(':')
        frame = int(sides[0])
        x = _get_pixel_pos_from_frei0r_cairo_pos(float(vals[0]), screen_width)
        y = _get_pixel_pos_from_frei0r_cairo_pos(float(vals[1]), screen_height)
        x_scale = _get_scale_from_frei0r_cairo_scale(float(vals[2]))
        y_scale = _get_scale_from_frei0r_cairo_scale(float(vals[3]))
        rotation = float(vals[4]) * 360
        opacity = float(vals[5]) * 100
        kf_list.append((frame, [x, y, x_scale, y_scale, rotation], float(opacity)))
    return kf_list
def _get_pixel_pos_from_frei0r_cairo_pos(value, screen_dim):
# convert positions from range used by frei0r cairo plugins to pixel values
return -2.0 * screen_dim + value * 5.0 * screen_dim
def _get_scale_from_frei0r_cairo_scale(scale):
return scale * 5.0
def get_frei0r_cairo_scale(scale):
    """Convert an editor scale value back to frei0r cairo 0 - 1 range."""
    return scale / 5.0
def get_frei0r_cairo_position(pos, screen_dim):
    """
    Convert an editor pixel position back to the 0 - 1 range used by
    frei0r cairo plugins (inverse of _get_pixel_pos_from_frei0r_cairo_pos).
    """
    return (pos + screen_dim * 2.0) / (screen_dim * 5.0)
#------------------------------------------------------ util funcs
def _property_type(value_str):
    """
    Gets property type from value string by trying to interpret it
    as int or float; if both fail it is considered an expression.
    """
    # Narrowed from bare 'except:' clauses: only conversion failures
    # (ValueError for non-numeric strings, TypeError for non-strings)
    # should fall through to the next interpretation.
    try:
        int(value_str)
        return PROP_INT
    except (ValueError, TypeError):
        try:
            float(value_str)
            return PROP_FLOAT
        except (ValueError, TypeError):
            return PROP_EXPRESSION
def set_property_value(properties, prop_name, prop_value):
    """
    Replace the value of every property tuple named prop_name, in place.
    No-op if the name is not present.
    """
    for i, (name, value, prop_type) in enumerate(properties):
        if name == prop_name:
            properties[i] = (name, prop_value, prop_type)
def get_property_value(properties, prop_name):
    """
    Return the value of the first property tuple named prop_name,
    or None if not present.
    """
    for name, value, prop_type in properties:
        if name == prop_name:
            return value
    return None
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Handles or passes on mouse edit events from timeline.
Handles edit mode setting.
"""
import pygtk
pygtk.require('2.0');
import gtk
import os
import time
import appconsts
import clipeffectseditor
import compositeeditor
import compositormodes
import dialogutils
import edit
import editorstate
from editorstate import current_sequence
from editorstate import PLAYER
from editorstate import timeline_visible
from editorstate import EDIT_MODE
import editorpersistance
import gui
import guicomponents
import medialog
import movemodes
import multimovemode
import syncsplitevent
import tlinewidgets
import trimmodes
import undo
import updater
import utils
# module state
mouse_disabled = False # Used to ignore drag and release events when press doesn't start an action that can handle those events.
repeat_event = None
parent_selection_data = None # Held here until user presses tline again
# functions are monkeypatched in at app.py
display_clip_menu_pop_up = None
compositor_menu_item_activated = None
# ----------------------------- module funcs
def do_clip_insert(track, new_clip, tline_pos):
    """
    Insert a single clip on track at timeline position as an undoable
    edit action; refuses audio clips on video tracks.
    """
    index = _get_insert_index(track, tline_pos)

    # Can't put audio media on video track
    if ((new_clip.media_type == appconsts.AUDIO)
        and (track.type == appconsts.VIDEO)):
        _display_no_audio_on_video_msg(track)
        return

    movemodes.clear_selected_clips()

    # Do edit
    data = {"track":track,
            "clip":new_clip,
            "index":index,
            "clip_in":new_clip.mark_in,
            "clip_out":new_clip.mark_out}
    action = edit.insert_action(data)
    action.do_edit()

    updater.display_tline_cut_frame(track, index)
def do_multiple_clip_insert(track, clips, tline_pos):
    """
    Insert a list of clips on track at timeline position as a single
    undoable edit action; aborts if any clip is audio-on-video-track.
    """
    index = _get_insert_index(track, tline_pos)

    # Can't put audio media on video track
    for new_clip in clips:
        if ((new_clip.media_type == appconsts.AUDIO)
            and (track.type == appconsts.VIDEO)):
            _display_no_audio_on_video_msg(track)
            return

    movemodes.clear_selected_clips()

    # Do edit
    data = {"track":track,
            "clips":clips,
            "index":index}
    action = edit.insert_multiple_action(data)
    action.do_edit()

    updater.display_tline_cut_frame(track, index)
def _get_insert_index(track, tline_pos):
    """
    Return the clip index at which a clip inserted at timeline frame
    tline_pos should be placed on track.
    """
    cut_frame = current_sequence().get_closest_cut_frame(track.id, tline_pos)
    index = current_sequence().get_clip_index(track, cut_frame)

    if index == -1:
        # Fix for case when inserting on empty track, which causes exception in
        # editorstate.current_sequence().get_clip_index(...) which returns -1
        index = track.count()
    elif ((cut_frame == -1) and (index == 0)
        and (tline_pos > 0) and (tline_pos >= track.get_length())):
        # Fix for case in which we get -1 for cut_frame because
        # tline_pos after last frame of the sequence, and
        # then get 0 for index which places clip in beginning, but we
        # want it appended in the end of sequence.
        index = track.count()
    return index
def _display_no_audio_on_video_msg(track):
    # Warn the user that audio-only media can't go on a video track.
    dialogutils.warning_message(_("Can't put an audio clip on a video track."),
                                _("Track ")+ utils.get_track_name(track, current_sequence()) + _(" is a video track and can't display audio only material."),
                                gui.editor_window.window)
# ------------------------------------- edit mode setting
def set_default_edit_mode(disable_mouse=False):
    """
    This is used as global 'go to start position' exit door from
    situations where for example user is in trim and exits it
    without specifying which edit mode to go to.

    NOTE: As this uses 'programmed click', this method does nothing if insert mode button
    is already down.
    """
    gui.editor_window.handle_insert_move_mode_button_press()
    gui.editor_window.set_mode_selector_to_mode()
    if disable_mouse:
        # Ignore rest of the current press-drag-release cycle.
        global mouse_disabled
        mouse_disabled = True
def set_clip_monitor_edit_mode():
    """
    Going to clip monitor exits active trimodes into non active trimmodes.
    """
    if EDIT_MODE() == editorstate.ONE_ROLL_TRIM:
        oneroll_trim_no_edit_init()
    elif EDIT_MODE() == editorstate.ONE_ROLL_TRIM_NO_EDIT:
        pass # already non-active
    elif EDIT_MODE() == editorstate.TWO_ROLL_TRIM:
        tworoll_trim_no_edit_init()
    elif EDIT_MODE() == editorstate.TWO_ROLL_TRIM_NO_EDIT:
        pass # already non-active
    else:
        # Not in a trim mode; go to default insert move mode.
        gui.editor_window.handle_insert_move_mode_button_press()
        gui.editor_window.set_mode_selector_to_mode()
def set_post_undo_redo_edit_mode():
    # Active trim edits can't survive undo/redo; drop into the matching
    # non-active trim mode. Injected into undo.py at startup.
    if EDIT_MODE() == editorstate.ONE_ROLL_TRIM:
        oneroll_trim_no_edit_init()
    if EDIT_MODE() == editorstate.TWO_ROLL_TRIM:
        tworoll_trim_no_edit_init()
def stop_looping():
    # Stop trim mode looping using trimmodes.py methods for it
    # Called when entering move modes.
    if PLAYER().looping():
        if EDIT_MODE() == editorstate.ONE_ROLL_TRIM:
            trimmodes.oneroll_stop_pressed()
        if EDIT_MODE() == editorstate.TWO_ROLL_TRIM:
            trimmodes.tworoll_stop_pressed()
# -------------------------------------------------------------- move modes
def insert_move_mode_pressed():
    """
    User selects insert move mode.
    """
    stop_looping()
    current_sequence().clear_hidden_track()

    editorstate.edit_mode = editorstate.INSERT_MOVE
    tlinewidgets.set_edit_mode(None, tlinewidgets.draw_insert_overlay)

    _set_move_mode()
def overwrite_move_mode_pressed():
    """
    User selects overwrite move mode.
    """
    stop_looping()
    current_sequence().clear_hidden_track()

    editorstate.edit_mode = editorstate.OVERWRITE_MOVE
    tlinewidgets.set_edit_mode(None, tlinewidgets.draw_overwrite_overlay)

    _set_move_mode()
def multi_mode_pressed():
    # User selects multi (spacer) move mode.
    stop_looping()
    current_sequence().clear_hidden_track()

    editorstate.edit_mode = editorstate.MULTI_MOVE
    tlinewidgets.set_edit_mode(None, tlinewidgets.draw_multi_overlay)

    updater.set_move_mode_gui()
    updater.repaint_tline()
def _set_move_mode():
    # Shared GUI update for insert/overwrite move modes.
    updater.set_move_mode_gui()
    updater.set_transition_render_edit_menu_items_sensitive(movemodes.selected_range_in, movemodes.selected_range_out)
    updater.repaint_tline()
# -------------------------------------------------------------- one roll trim
def oneroll_trim_no_edit_init():
    """
    This mode is entered and this method is called when:
    - user first selects trim tool
    - user does cut(X) action while in trim mode
    - user clicks empty and preference is to keep using trim tool (to not exit to INSERT_MOVE)
    """
    stop_looping()
    editorstate.edit_mode = editorstate.ONE_ROLL_TRIM_NO_EDIT
    gui.editor_window.set_cursor_to_mode()
    tlinewidgets.set_edit_mode(None, None) # No overlays are drawn in this edit mode
    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()
def oneroll_trim_no_edit_press(event, frame):
    """
    Mouse press while in ONE_ROLL_TRIM_NO_EDIT attempts to init edit and
    move to ONE_ROLL_TRIM mode.
    """
    success = oneroll_trim_mode_init(event.x, event.y)
    if success:
        # If not quick enter, disable edit until mouse released
        if not editorpersistance.prefs.quick_enter_trims:
            global mouse_disabled
            tlinewidgets.trim_mode_in_non_active_state = True
            mouse_disabled = True
        # If preference is quick enter, call mouse move handler immediately
        # to move edit point to where mouse is
        else:
            trimmodes.oneroll_trim_move(event.x, event.y, frame, None)
    else:
        # Press did not hit an editable clip edge.
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_default_edit_mode(True)
        else:
            editorstate.edit_mode = editorstate.ONE_ROLL_TRIM_NO_EDIT
def oneroll_trim_no_edit_move(x, y, frame, state):
    # Only presses are handled in ONE_ROLL_TRIM_NO_EDIT mode
    pass
def oneroll_trim_no_edit_release(x, y, frame, state):
    # Only presses are handled in ONE_ROLL_TRIM_NO_EDIT mode
    pass
def oneroll_trim_mode_init(x, y):
    """
    User enters ONE_ROLL_TRIM mode from ONE_ROLL_TRIM_NO_EDIT.
    Returns True on success, False if press didn't hit an editable track.
    """
    track = tlinewidgets.get_track(y)
    if track == None:
        return False

    if track_lock_check_and_user_info(track, oneroll_trim_mode_init, "one roll trim mode"):
        set_default_edit_mode()
        return False

    stop_looping()
    editorstate.edit_mode = editorstate.ONE_ROLL_TRIM

    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()

    # init mode
    press_frame = tlinewidgets.get_frame(x)
    trimmodes.set_exit_mode_func = set_default_edit_mode
    trimmodes.set_no_edit_mode_func = oneroll_trim_no_edit_init
    success = trimmodes.set_oneroll_mode(track, press_frame)
    return success
# --------------------------------------------------------- two roll trim
def tworoll_trim_no_edit_init():
    # Enter non-active two roll trim mode; see oneroll_trim_no_edit_init().
    stop_looping()
    editorstate.edit_mode = editorstate.TWO_ROLL_TRIM_NO_EDIT
    gui.editor_window.set_cursor_to_mode()
    tlinewidgets.set_edit_mode(None, None) # No overlays are drawn in this edit mode
    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()
def tworoll_trim_no_edit_press(event, frame):
    # Mouse press while in TWO_ROLL_TRIM_NO_EDIT attempts to init edit
    # and move to TWO_ROLL_TRIM mode.
    success = tworoll_trim_mode_init(event.x, event.y)
    if success:
        # If not quick enter, disable edit until mouse released
        if not editorpersistance.prefs.quick_enter_trims:
            global mouse_disabled
            tlinewidgets.trim_mode_in_non_active_state = True
            mouse_disabled = True
        else:
            trimmodes.tworoll_trim_move(event.x, event.y, frame, None)
    else:
        # Press did not hit an editable cut.
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_default_edit_mode(True)
        else:
            editorstate.edit_mode = editorstate.TWO_ROLL_TRIM_NO_EDIT
def tworoll_trim_no_edit_move(x, y, frame, state):
    # Only presses are handled in TWO_ROLL_TRIM_NO_EDIT mode
    pass
def tworoll_trim_no_edit_release(x, y, frame, state):
    # Only presses are handled in TWO_ROLL_TRIM_NO_EDIT mode
    pass
def tworoll_trim_mode_init(x, y):
    """
    User selects two roll mode.
    Returns True on success, False if press didn't hit an editable track.
    """
    track = tlinewidgets.get_track(y)
    if track == None:
        return False

    if track_lock_check_and_user_info(track, tworoll_trim_mode_init, "two roll trim mode",):
        set_default_edit_mode()
        return False

    stop_looping()
    editorstate.edit_mode = editorstate.TWO_ROLL_TRIM

    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()

    # init mode
    press_frame = tlinewidgets.get_frame(x)
    trimmodes.set_exit_mode_func = set_default_edit_mode
    trimmodes.set_no_edit_mode_func = tworoll_trim_no_edit_init
    success = trimmodes.set_tworoll_mode(track, press_frame)
    return success
# ----------------------------------------------------- slide trim
def slide_trim_no_edit_init():
    # Enter non-active slide trim mode; see oneroll_trim_no_edit_init().
    stop_looping()
    editorstate.edit_mode = editorstate.SLIDE_TRIM_NO_EDIT
    gui.editor_window.set_cursor_to_mode()
    tlinewidgets.set_edit_mode(None, None) # No overlays are drawn in this edit mode
    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()
def slide_trim_no_edit_press(event, frame):
    # Mouse press while in SLIDE_TRIM_NO_EDIT attempts to init edit
    # and move to SLIDE_TRIM mode.
    success = slide_trim_mode_init(event.x, event.y)
    if success:
        # If not quick enter, disable edit until mouse released
        if not editorpersistance.prefs.quick_enter_trims:
            global mouse_disabled
            tlinewidgets.trim_mode_in_non_active_state = True
            mouse_disabled = True
        else:
            trimmodes.edit_data["press_start"] = frame
            trimmodes.slide_trim_move(event.x, event.y, frame, None)
    else:
        # Press did not hit an editable clip.
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_default_edit_mode(True)
        else:
            editorstate.edit_mode = editorstate.SLIDE_TRIM_NO_EDIT
def slide_trim_no_edit_move(x, y, frame, state):
    # Only presses are handled in SLIDE_TRIM_NO_EDIT mode
    pass
def slide_trim_no_edit_release(x, y, frame, state):
    # Only presses are handled in SLIDE_TRIM_NO_EDIT mode
    pass
def slide_trim_mode_init(x, y):
    """
    User selects slide trim mode by pressing timeline at panel point (x, y).

    Returns True if slide trim editing was successfully initialized on the
    hit track, False otherwise.
    """
    track = tlinewidgets.get_track(y)
    if track == None:
        return False
    # Fix: was copy-pasted from tworoll_trim_mode_init and passed
    # tworoll_trim_mode_init / "two roll trim mode" here. The two extra
    # arguments are informational-only legacy parameters, but they should
    # name this mode.
    if track_lock_check_and_user_info(track, slide_trim_mode_init, "slide trim mode"):
        set_default_edit_mode()
        return False

    stop_looping()
    editorstate.edit_mode = editorstate.SLIDE_TRIM
    movemodes.clear_selected_clips() # Entering trim edit mode clears selection
    updater.set_trim_mode_gui()
    press_frame = tlinewidgets.get_frame(x)
    # trimmodes uses these callbacks to exit back to default edit mode or to
    # drop into the "no edit" variant of slide trim.
    trimmodes.set_exit_mode_func = set_default_edit_mode
    trimmodes.set_no_edit_mode_func = slide_trim_no_edit_init
    success = trimmodes.set_slide_mode(track, press_frame)
    return success
# ------------------------------------ timeline mouse events
def tline_canvas_mouse_pressed(event, frame):
    """
    Mouse event callback from timeline canvas widget.

    event is the gtk button press event, frame is the timeline frame that
    was hit. Depending on button, modifier keys and current editor state
    this seeks, shows pop-up menus, switches edit mode or dispatches the
    press to the current edit mode's press handler.
    """
    global mouse_disabled

    if PLAYER().looping():
        return
    elif PLAYER().is_playing():
        PLAYER().stop_playback()

    # Double click handled separately
    if event.type == gtk.gdk._2BUTTON_PRESS:
        return

    # Handle and exit parent clip selecting
    if EDIT_MODE() == editorstate.SELECT_PARENT_CLIP:
        syncsplitevent.select_sync_parent_mouse_pressed(event, frame)
        mouse_disabled = True
        # Set INSERT_MODE
        set_default_edit_mode()
        return

    # Hitting timeline in clip display mode displays timeline in
    # default mode.
    if not timeline_visible():
        updater.display_sequence_in_monitor()
        if (event.button == 1):
            # Now that we have correct edit mode we'll reenter
            # this method to get e.g. a select action
            tline_canvas_mouse_pressed(event, frame)
            return
        if (event.button == 3):
            # FIX: was 'mouse_disabled == True', a no-op comparison where an
            # assignment was intended.
            mouse_disabled = True
            # Right mouse + CTRL seeks frame
            if (event.state & gtk.gdk.CONTROL_MASK):
                PLAYER().seek_frame(frame)
            # Plain right mouse displays clip menu if we hit a clip,
            # otherwise seeks frame
            else:
                success = display_clip_menu_pop_up(event.y, event, frame)
                if not success:
                    PLAYER().seek_frame(frame)
        return

    # Check if compositor is hit and if so handle compositor editing
    if editorstate.current_is_move_mode() and timeline_visible():
        hit_compositor = tlinewidgets.compositor_hit(frame, event.y, current_sequence().compositors)
        if hit_compositor != None:
            movemodes.clear_selected_clips()
            if event.button == 1:
                compositormodes.set_compositor_mode(hit_compositor)
                mode_funcs = EDIT_MODE_FUNCS[editorstate.COMPOSITOR_EDIT]
                press_func = mode_funcs[TL_MOUSE_PRESS]
                press_func(event, frame)
            elif event.button == 3:
                # FIX: was 'mouse_disabled == True' (comparison, no effect).
                mouse_disabled = True
                compositormodes.set_compositor_selected(hit_compositor)
                guicomponents.display_compositor_popup_menu(event, hit_compositor,
                                                            compositor_menu_item_activated)
            elif event.button == 2:
                updater.zoom_project_length()
            return

    compositormodes.clear_compositor_selection()

    # Handle mouse button presses depending which button was pressed and
    # editor state.
    # RIGHT BUTTON: seek frame or display clip menu
    if (event.button == 3):
        if ((not editorstate.current_is_active_trim_mode()) and timeline_visible()):
            if not(event.state & gtk.gdk.CONTROL_MASK):
                success = display_clip_menu_pop_up(event.y, event, frame)
                if not success:
                    PLAYER().seek_frame(frame)
            else:
                PLAYER().seek_frame(frame)
        else:
            # For trim modes set <X>_NO_EDIT edit mode and seek frame
            trimmodes.set_no_edit_trim_mode()
            PLAYER().seek_frame(frame)
        return
    # LEFT BUTTON + CTRL: Select new trimmed clip in one roll trim mode
    elif (event.button == 1
          and (event.state & gtk.gdk.CONTROL_MASK)
          and EDIT_MODE() == editorstate.ONE_ROLL_TRIM):
        track = tlinewidgets.get_track(event.y)
        if track == None:
            if editorpersistance.prefs.empty_click_exits_trims == True:
                set_default_edit_mode(True)
            return
        success = trimmodes.set_oneroll_mode(track, frame)
        if (not success) and editorpersistance.prefs.empty_click_exits_trims == True:
            set_default_edit_mode(True)
            return
        gui.editor_window.set_cursor_to_mode()
        gui.editor_window.set_mode_selector_to_mode()
        if not editorpersistance.prefs.quick_enter_trims:
            mouse_disabled = True
        else:
            trimmodes.oneroll_trim_move(event.x, event.y, frame, None)
    # LEFT BUTTON + CTRL: Select new trimmed clip in two roll trim mode
    elif (event.button == 1
          and (event.state & gtk.gdk.CONTROL_MASK)
          and EDIT_MODE() == editorstate.TWO_ROLL_TRIM):
        track = tlinewidgets.get_track(event.y)
        if track == None:
            if editorpersistance.prefs.empty_click_exits_trims == True:
                set_default_edit_mode(True)
            return
        success = trimmodes.set_tworoll_mode(track, frame)
        if (not success) and editorpersistance.prefs.empty_click_exits_trims == True:
            set_default_edit_mode(True)
            return
        if not editorpersistance.prefs.quick_enter_trims:
            mouse_disabled = True
        else:
            trimmodes.tworoll_trim_move(event.x, event.y, frame, None)
    # LEFT BUTTON: Handle left mouse button edits by passing event to current edit mode
    # handler func
    elif event.button == 1:
        mode_funcs = EDIT_MODE_FUNCS[EDIT_MODE()]
        press_func = mode_funcs[TL_MOUSE_PRESS]
        press_func(event, frame)
    elif event.button == 2:
        updater.zoom_project_length()
def tline_canvas_mouse_moved(x, y, frame, button, state):
    """
    Mouse event callback from timeline canvas widget.

    x, y are panel coordinates, frame the timeline frame under the pointer,
    button the pressed mouse button and state the modifier key mask.
    """
    # Refuse mouse events for some editor states.
    if PLAYER().looping():
        return
    if mouse_disabled == True:
        return
    if not timeline_visible():
        return

    # Handle timeline position setting with right mouse button
    if button == 3:
        if not timeline_visible():
            return
        PLAYER().seek_frame(frame)
    # Handle left mouse button edits
    elif button == 1:
        # Dispatch to current edit mode's move handler.
        mode_funcs = EDIT_MODE_FUNCS[EDIT_MODE()]
        move_func = mode_funcs[TL_MOUSE_MOVE]
        move_func(x, y, frame, state)
def tline_canvas_mouse_released(x, y, frame, button, state):
    """
    Mouse event callback from timeline canvas widget.

    Re-enables mouse input if it was disabled by a mode change, otherwise
    dispatches the release to seek (right button) or the current edit mode's
    release handler (left button).
    """
    gui.editor_window.set_cursor_to_mode()

    global mouse_disabled
    if mouse_disabled == True:
        gui.editor_window.set_cursor_to_mode() # we only need this update when mode change (to active trim mode) disables mouse, so we'll only do this then
        tlinewidgets.trim_mode_in_non_active_state = False # we only need this update when mode change (to active trim mode) disables mouse, so we'll only do this then
        gui.tline_canvas.widget.queue_draw()
        mouse_disabled = False
        return

    if not timeline_visible():
        return

    if PLAYER().looping():
        PLAYER().stop_loop_playback(trimmodes.trim_looping_stopped)
        return

    # Handle timeline position setting with right mouse button
    if button == 3:
        #if not editorstate.current_is_move_mode():
        #    return
        if not timeline_visible():
            return
        PLAYER().seek_frame(frame)
    # Handle left mouse button edits
    elif button == 1:
        mode_funcs = EDIT_MODE_FUNCS[EDIT_MODE()]
        release_func = mode_funcs[TL_MOUSE_RELEASE]
        release_func(x, y, frame, state)
def tline_canvas_double_click(frame, x, y):
    # Double click on a compositor opens it in the compositor editor,
    # double click on a clip opens it in the effects editor.
    if PLAYER().looping():
        return
    elif PLAYER().is_playing():
        PLAYER().stop_playback()

    if not timeline_visible():
        updater.display_sequence_in_monitor()
        set_default_edit_mode()
        return

    hit_compositor = tlinewidgets.compositor_hit(frame, y, current_sequence().compositors)
    if hit_compositor != None:
        compositeeditor.set_compositor(hit_compositor)
        return

    track = tlinewidgets.get_track(y)
    if track == None:
        return
    clip_index = current_sequence().get_clip_index(track, frame)
    if clip_index == -1:
        return

    clip = track.clips[clip_index]
    data = (clip, track, None, x)
    updater.open_clip_in_effects_editor(data)
# -------------------------------------------------- DND release event callbacks
def tline_effect_drop(x, y):
    # DND release callback: an effect was dropped on the timeline at panel
    # point (x, y); add the dragged effect to the hit clip.
    clip, track, index = tlinewidgets.get_clip_track_and_index_for_pos(x, y)
    if clip == None:
        return
    if track == None:
        return
    # Drops are only accepted on tracks with ids 1 .. track count - 2.
    if track.id < 1 or track.id >= (len(current_sequence().tracks) - 1):
        return
    if track_lock_check_and_user_info(track):
        set_default_edit_mode()
        return

    if clip != clipeffectseditor.clip:
        clipeffectseditor.set_clip(clip, track, index)

    clipeffectseditor.add_currently_selected_effect() # drag start selects the dragged effect
def tline_media_drop(media_file, x, y, use_marks=False):
    # DND release callback: a media file was dropped on the timeline at
    # panel point (x, y). A new clip is created and inserted on the hit
    # track at the hit frame. With use_marks == True the media file's
    # mark in/out range is used, otherwise the full clip length.
    track = tlinewidgets.get_track(y)
    if track == None:
        return
    # Drops are only accepted on tracks with ids 1 .. track count - 2.
    if track.id < 1 or track.id >= (len(current_sequence().tracks) - 1):
        return
    if track_lock_check_and_user_info(track):
        set_default_edit_mode()
        return

    set_default_edit_mode()

    frame = tlinewidgets.get_frame(x)

    # Create new clip.
    if media_file.type != appconsts.PATTERN_PRODUCER:
        new_clip = current_sequence().create_file_producer_clip(media_file.path, media_file.name)
    else:
        new_clip = current_sequence().create_pattern_producer(media_file)

    # Set clip in and out
    if use_marks == False:
        new_clip.mark_in = 0
        new_clip.mark_out = new_clip.get_length() - 1 # - 1 because out is mark_out inclusive

        if media_file.type == appconsts.IMAGE_SEQUENCE:
            new_clip.mark_out = media_file.length
    else:
        new_clip.mark_in = media_file.mark_in
        new_clip.mark_out = media_file.mark_out
        # Unset marks (-1) default to the full clip range.
        if new_clip.mark_in == -1:
            new_clip.mark_in = 0
        if new_clip.mark_out == -1:
            new_clip.mark_out = new_clip.get_length() - 1 # - 1 because out is mark_out inclusive

            if media_file.type == appconsts.IMAGE_SEQUENCE:
                new_clip.mark_out = media_file.length

    # Graphics files get added with their default lengths
    f_name, ext = os.path.splitext(media_file.name)
    if utils.file_extension_is_graphics_file(ext) and media_file.type != appconsts.IMAGE_SEQUENCE: # image sequences are graphics files but have own length
        in_fr, out_fr, l = editorpersistance.get_graphics_default_in_out_length()
        new_clip.mark_in = in_fr
        new_clip.mark_out = out_fr

    do_clip_insert(track, new_clip, frame)
def tline_range_item_drop(rows, x, y):
    # DND release callback: media log range items (rows of the media log
    # view) dropped on timeline are inserted as clips at the hit frame.
    track = tlinewidgets.get_track(y)
    if track == None:
        return
    # Drops are only accepted on tracks with ids 1 .. track count - 2.
    if track.id < 1 or track.id >= (len(current_sequence().tracks) - 1):
        return
    if track_lock_check_and_user_info(track):
        set_default_edit_mode()
        return

    frame = tlinewidgets.get_frame(x)
    clips = medialog.get_clips_for_rows(rows)
    set_default_edit_mode()
    do_multiple_clip_insert(track, clips, frame)
# ------------------------------------ track locks handling
def track_lock_check_and_user_info(track, calling_function="this ain't used anymore", actionname="this ain't used anymore"):
    """
    Returns True and shows a warning dialog if track is locked, False otherwise.

    The calling_function and actionname parameters are legacy and ignored;
    they are kept only for call-site compatibility.
    """
    if track.edit_freedom != appconsts.LOCKED:
        return False

    # Locked track was hit: tell the user why the edit is refused.
    track_name = utils.get_track_name(track, current_sequence())
    primary_txt = _("Can't edit a locked track")
    secondary_txt = _("Track ") + track_name + _(" is locked. Unlock track to edit it.")
    dialogutils.warning_message(primary_txt, secondary_txt, gui.editor_window.window)
    return True
# ------------------------------------ function tables
# mouse event indexes into the per-mode handler function lists below
TL_MOUSE_PRESS = 0
TL_MOUSE_MOVE = 1
TL_MOUSE_RELEASE = 2

# mouse event handler function lists for modes; each list is indexed with
# TL_MOUSE_PRESS / TL_MOUSE_MOVE / TL_MOUSE_RELEASE
INSERT_MOVE_FUNCS = [movemodes.insert_move_press,
                     movemodes.insert_move_move,
                     movemodes.insert_move_release]
OVERWRITE_MOVE_FUNCS = [movemodes.overwrite_move_press,
                        movemodes.overwrite_move_move,
                        movemodes.overwrite_move_release]
ONE_ROLL_TRIM_FUNCS = [trimmodes.oneroll_trim_press,
                       trimmodes.oneroll_trim_move,
                       trimmodes.oneroll_trim_release]
ONE_ROLL_TRIM_NO_EDIT_FUNCS = [oneroll_trim_no_edit_press,
                               oneroll_trim_no_edit_move,
                               oneroll_trim_no_edit_release]
TWO_ROLL_TRIM_FUNCS = [trimmodes.tworoll_trim_press,
                       trimmodes.tworoll_trim_move,
                       trimmodes.tworoll_trim_release]
TWO_ROLL_TRIM_NO_EDIT_FUNCS = [tworoll_trim_no_edit_press,
                               tworoll_trim_no_edit_move,
                               tworoll_trim_no_edit_release]
COMPOSITOR_EDIT_FUNCS = [compositormodes.mouse_press,
                         compositormodes.mouse_move,
                         compositormodes.mouse_release]
SLIDE_TRIM_FUNCS = [trimmodes.slide_trim_press,
                    trimmodes.slide_trim_move,
                    trimmodes.slide_trim_release]
SLIDE_TRIM_NO_EDIT_FUNCS = [slide_trim_no_edit_press,
                            slide_trim_no_edit_move,
                            slide_trim_no_edit_release]
MULTI_MOVE_FUNCS = [multimovemode.mouse_press,
                    multimovemode.mouse_move,
                    multimovemode.mouse_release]

# (mode - mouse handler function list) table used by the
# tline_canvas_mouse_* callbacks above to dispatch events
EDIT_MODE_FUNCS = {editorstate.INSERT_MOVE:INSERT_MOVE_FUNCS,
                   editorstate.OVERWRITE_MOVE:OVERWRITE_MOVE_FUNCS,
                   editorstate.ONE_ROLL_TRIM:ONE_ROLL_TRIM_FUNCS,
                   editorstate.TWO_ROLL_TRIM:TWO_ROLL_TRIM_FUNCS,
                   editorstate.COMPOSITOR_EDIT:COMPOSITOR_EDIT_FUNCS,
                   editorstate.ONE_ROLL_TRIM_NO_EDIT:ONE_ROLL_TRIM_NO_EDIT_FUNCS,
                   editorstate.TWO_ROLL_TRIM_NO_EDIT:TWO_ROLL_TRIM_NO_EDIT_FUNCS,
                   editorstate.SLIDE_TRIM:SLIDE_TRIM_FUNCS,
                   editorstate.SLIDE_TRIM_NO_EDIT:SLIDE_TRIM_NO_EDIT_FUNCS,
                   editorstate.MULTI_MOVE:MULTI_MOVE_FUNCS}
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import vieweditorshape
import viewgeom
# Edit modes for editor layers
MOVE_MODE = 0
ROTATE_MODE = 1

# Edit types, used as kind of subtypes of modes if needed, e.g. MOVE_MODE can have MOVE_EDIT or HANDLE_EDIT
NO_EDIT = 0 # mouse hit meaningless
ROTATE_EDIT = 1
MOVE_EDIT = 2
HANDLE_EDIT = 4
class AbstactEditorLayer:
    """
    Base class for layers edited in ViewEditor.

    Handles hit testing and mouse position/rotation bookkeeping; subclasses
    must override mouse_pressed(), mouse_dragged(), mouse_released() and
    draw(). All mouse coordinates are in movie space, ViewEditor deals with
    panel space.

    (The misspelled class name is kept for compatibility with existing code.)
    """
    def __init__(self, view_editor):
        self.view_editor = view_editor
        self.edit_point_shape = None
        self.name = "unnamed layer"
        self.active = False
        self.visible = True
        self.last_pressed_edit_point = None
        self.mouse_start_point = None
        self.mouse_current_point = None
        self.mouse_rotation_last = None
        self.last_press_hit_point = None
        self.edit_mode = None # determines how mouse press is interpreted
        self.edit_type = None # is interpretation of purpose of mouse press,
                              # not always used if mouse press in edit_mode can only interpreted in one way
        self.mouse_released_listener = None

    # --------------------------------------------- state changes
    def frame_changed(self):
        pass # override to react to frame change

    def mode_changed(self):
        pass # override to react to mode change

    # --------------------------------------------- hit detection
    def hit(self, p):
        """
        Test hit AND save hit point or clear hit point if only area hit.
        """
        self.last_press_hit_point = self.edit_point_shape.get_edit_point(p)
        if self.last_press_hit_point != None:
            return True
        if self.edit_point_shape.point_in_area(p) == True:
            self.last_press_hit_point = None
            return True
        return False

    # ---------------------------------------------- mouse events
    # All mouse coords in movie space, ViewEditor deals with panel space
    def handle_mouse_press(self, p):
        self.mouse_start_point = p
        self.mouse_current_point = p
        self.mouse_rotation_last = 0.0
        self.mouse_pressed()

    def handle_mouse_drag(self, p):
        self.mouse_current_point = p
        self.mouse_dragged()

    def handle_mouse_release(self, p):
        self.mouse_current_point = p
        self.mouse_released()
        if self.mouse_released_listener != None:
            self.mouse_released_listener()

    def translate_points_for_mouse_move(self):
        # NOTE(review): this sets every edit point to the same position
        # (start point + mouse delta) instead of offsetting each point
        # individually — confirm against callers before relying on it.
        sx, sy = self.mouse_start_point
        dx, dy = self.get_mouse_delta()
        for p in self.edit_point_shape.edit_points:
            p.x = sx + dx
            p.y = sy + dy

    def get_current_mouse_rotation(self, anchor):
        return self.get_mouse_rotation_angle(anchor, self.mouse_start_point, self.mouse_current_point)

    def get_mouse_rotation_angle(self, anchor, mr_start, mr_end):
        # Returns a continuous rotation angle in degrees: of angle and
        # angle +- 360 the value closest to the previously returned one is
        # picked, so dragging past +-180 degrees does not jump.
        angle = viewgeom.get_angle_in_deg(mr_start, anchor, mr_end)
        clockw = viewgeom.points_clockwise(mr_start, anchor, mr_end)
        if not clockw:
            angle = -angle

        # Crossed angle for 180 -> 181... range
        crossed_angle = angle + 360.0

        # Crossed angle for -180 -> 181 ...range.
        if angle > 0:
            crossed_angle = -360.0 + angle

        # See if crossed angle closer to last angle.
        if abs(self.mouse_rotation_last - crossed_angle) < abs(self.mouse_rotation_last - angle):
            angle = crossed_angle

        # Set last to get good results next time.
        self.mouse_rotation_last = angle

        return angle

    def mouse_pressed(self):
        # FIX: original concatenated str + class object ("..." + self.__class__)
        # which raises TypeError in Python 2; use %-formatting instead so the
        # intended diagnostic is actually printed before exiting.
        print("AbstactEditorLayer.mouse_pressed not overridden in %s" % self.__class__)
        sys.exit(1)

    def mouse_dragged(self):
        print("AbstactEditorLayer.mouse_dragged not overridden in %s" % self.__class__)
        sys.exit(1)

    def mouse_released(self):
        print("AbstactEditorLayer.mouse_released not overridden in %s" % self.__class__)
        sys.exit(1)

    def get_mouse_delta(self):
        # (dx, dy) from press position to current position, in movie space.
        cx, cy = self.mouse_current_point
        sx, sy = self.mouse_start_point
        return (cx - sx, cy - sy)

    # -------------------------------------------- draw
    def draw(self, cr, write_out_layers, draw_overlays):
        print("AbstactEditorLayer.draw not overridden in %s" % self.__class__)
        sys.exit(1)
class SimpleRectEditLayer(AbstactEditorLayer):
    """
    An edit layer whose edit shape is a rectangle.

    In MOVE_MODE a press on a corner handle starts a resize (HANDLE_EDIT)
    and a press elsewhere in the rect starts a translation (MOVE_EDIT);
    in ROTATE_MODE drags rotate the rect around its mid point.
    """
    def __init__(self, view_editor):
        AbstactEditorLayer.__init__(self, view_editor)
        self.edit_point_shape = vieweditorshape.SimpleRectEditShape()
        self.update_rect = False # flag to reinit rect shape
        self.edit_mode = MOVE_MODE
        self.edit_point_shape.set_all_points_invisible()
        self.resizing_allowed = True
        self.ACTIVE_COLOR = (0.55,0.55,0.55,1)
        self.NOT_ACTIVE_COLOR = (0.2,0.2,0.2,1)

    def set_rect_pos(self, x, y):
        # Point 0 is always taken to determine the position of the shape.
        self.edit_point_shape.translate_points_to_pos(x, y, 0)

    def mouse_pressed(self):
        self.edit_point_shape.save_start_pos()
        if self.edit_mode != MOVE_MODE:
            # ROTATE_MODE: rotation happens around the shape mid point.
            self.roto_mid = self.edit_point_shape.get_mid_point()
            return
        if self.last_press_hit_point == None:
            self.edit_type = MOVE_EDIT
        else:
            # A corner handle was hit: prepare guide lines for resizing.
            self.last_press_hit_point.save_start_pos()
            self.edit_type = HANDLE_EDIT
            self.guide_1, self.guide_2 = self.edit_point_shape.get_handle_guides(self.last_press_hit_point)

    def _apply_mouse_edit(self):
        # Shared by drag and release: apply the current mouse delta as a
        # resize, translation or rotation depending on mode and edit type.
        mouse_delta = self.get_mouse_delta()
        if self.edit_mode == MOVE_MODE:
            if self.edit_type == HANDLE_EDIT:
                self._update_corner_edit(mouse_delta)
            else:
                self.edit_point_shape.translate_from_move_start(mouse_delta)
        else: # ROTATE_MODE
            rotation = self.get_current_mouse_rotation(self.roto_mid)
            self.edit_point_shape.rotate_from_move_start(self.roto_mid, rotation)

    def mouse_dragged(self):
        self._apply_mouse_edit()

    def mouse_released(self):
        self._apply_mouse_edit()
        self.mouse_rotation_last = 0.0

    def _update_corner_edit(self, delta):
        if self.resizing_allowed == False:
            return

        self.last_press_hit_point.translate_from_move_start(delta)
        hit_pos = self.last_press_hit_point.get_pos()

        # Project the dragged corner onto both guide lines, clamping each
        # guide to zero length so the rect cannot invert.
        for guide in (self.guide_1, self.guide_2):
            guide.set_end_point_to_normal_projection(hit_pos)
            if guide.get_length() < 0:
                guide.set_zero_length()

        self.edit_point_shape.edit_points[self.guide_1.point_index].set_pos(self.guide_1.end_point)
        self.edit_point_shape.edit_points[self.guide_2.point_index].set_pos(self.guide_2.end_point)

    def draw(self, cr, write_out_layers, draw_overlays):
        if write_out_layers:
            return # this layer is not drawn when writing out layers
        if not draw_overlays:
            return

        color = self.ACTIVE_COLOR if self.active else self.NOT_ACTIVE_COLOR
        cr.set_source_rgba(*color)
        self.edit_point_shape.draw_line_shape(cr, self.view_editor)
        self.edit_point_shape.draw_points(cr, self.view_editor)
class TextEditLayer(SimpleRectEditLayer):
    """
    Edit layer for a text item: the rect shape follows the text layout's
    pixel size and resizing from corner handles is disabled.
    """
    def __init__(self, view_editor, text_layout):
        # text_layout is titler.PangoLayout
        SimpleRectEditLayer.__init__(self, view_editor)
        self.text_layout = text_layout
        self.edit_mode = MOVE_MODE
        self.edit_point_shape.line_type = vieweditorshape.LINE_DASH
        self.resizing_allowed = False

    def draw(self, cr, write_out_layers, draw_overlays):
        # Text position comes from edit point 0, rotation from the first edge
        # of the rect shape; scales convert movie space to panel space.
        x, y = self.edit_point_shape.get_panel_point(0, self.view_editor)
        rotation = self.edit_point_shape.get_first_two_points_rotation_angle()
        xscale = self.view_editor.scale * self.view_editor.aspect_ratio
        yscale = self.view_editor.scale
        self.text_layout.draw_layout(cr, x, y, rotation, xscale, yscale)
        if self.update_rect:
            # Text size in layout has changed for added text or attribute change.
            # rect size needs to be updated for new size of layout
            # Size of layout is always updated in self.text_layout.draw_layout(....)
            w, h = self.text_layout.pixel_size
            self.edit_point_shape.update_rect_size(w, h)
            self.update_rect = False
        SimpleRectEditLayer.draw(self, cr, write_out_layers, draw_overlays)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import numpy as np
import cairoarea
import cairo
import respaths
MIN_PAD = 20 # minimum padding in pixels around the scaled image in the edit area
GUIDES_COLOR = (0.5, 0.5, 0.5, 1.0) # RGBA color for image border and safe area guide lines
class ViewEditor(gtk.Frame):
    """
    Widget for editing a stack of graphics layers on top of the current
    frame image.

    Converts between panel (widget pixel) coordinates and movie (image)
    coordinates, forwards mouse events to the hit layer and draws the
    background frame, the layers and guide lines.
    """
    def __init__(self, profile, scroll_width, scroll_height):
        gtk.Frame.__init__(self)
        self.scale = 1.0
        self.draw_overlays = True
        self.draw_safe_area = True
        self.has_safe_area = True
        self.profile_w = profile.width()
        self.profile_h = profile.height()
        # Pixel aspect ratio from project profile; x coordinates are
        # stretched by this when converting to panel space.
        self.aspect_ratio = float(profile.sample_aspect_num()) / profile.sample_aspect_den()
        self.scaled_screen_width = self.profile_w * self.aspect_ratio # scale is gonna be 1.0 here
        self.scaled_screen_height = self.profile_h
        # Panel coordinates of the image top left corner.
        self.origo = (MIN_PAD, MIN_PAD)

        self.bg_buf = None # numpy buffer of current frame RGB data, see set_screen_rgb_data()
        self.write_out_layers = False # flag: next _draw() writes layers to a png, see write_layers_to_png()
        self.write_file_path = None

        self.edit_area = cairoarea.CairoDrawableArea(int(self.scaled_screen_width + MIN_PAD * 2), self.profile_h + MIN_PAD * 2, self._draw)
        self.edit_area.press_func = self._press_event
        self.edit_area.motion_notify_func = self._motion_notify_event
        self.edit_area.release_func = self._release_event

        self.scroll_window = gtk.ScrolledWindow()
        self.scroll_window.add_with_viewport(self.edit_area)
        self.scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.scroll_window.show_all()
        self.scroll_window.set_size_request(scroll_width, scroll_height) # +2 to not show scrollbars
        self.add(self.scroll_window)

        self.edit_layers = []
        self.active_layer = None
        self.edit_target_layer = None # layer that receives drag/release events after a press hit it
        self.change_active_layer_for_hit = True
        self.active_layer_changed_listener = None # interface: listener(new_active_index)
        # note: vieweditor calls activate_layer( ) when non-active layer hit
        # here so listener needs only to change its active layer, not call activate_layer( ) here

        self.set_scale_and_update(1.0)

    def write_layers_to_png(self, save_path):
        # The actual write happens in _draw() on the next expose.
        self.write_out_layers = True
        self.write_file_path = save_path
        self.edit_area.queue_draw()

    def activate_layer(self, layer_index):
        # Make the layer at layer_index the single active layer.
        if self.active_layer != None:
            self.active_layer.active = False
        self.active_layer = self.edit_layers[layer_index]
        self.active_layer.active = True

    def clear_layers(self):
        self.edit_layers = []
        self.active_layer = None
        self.edit_target_layer = None

    def add_layer(self, layer):
        self.edit_layers.append(layer)

    def set_scale_and_update(self, new_scale):
        # Set zoom scale and recompute edit area size and image origin.
        self.scale = new_scale
        self.set_scaled_screen_size()
        self.set_edit_area_size_and_origo()

    def set_scaled_screen_size(self):
        self.scaled_screen_width = self.scale * self.profile_w * self.aspect_ratio
        self.scaled_screen_height = self.scale * self.profile_h

    def set_edit_area_size_and_origo(self):
        x, y, scroll_w, scroll_h = self.scroll_window.get_allocation()

        # If scaled screen smaller then scroll window size center it and set origo
        if ((self.scaled_screen_width < scroll_w) and (self.scaled_screen_height < scroll_h)):
            origo_x = (scroll_w - self.scaled_screen_width) / 2
            origo_y = (scroll_h - self.scaled_screen_height ) / 2
            self.origo = (int(origo_x), int(origo_y))
            self.edit_area.set_size_request(self.profile_w + MIN_PAD * 2,
                                            self.profile_h + MIN_PAD * 2)
        else:
            # Image is larger than scroll window in at least one dimension;
            # pad oversized dimensions and center the others.
            if self.scaled_screen_width > scroll_w:
                new_w = self.scaled_screen_width + MIN_PAD * 2
                origo_x = MIN_PAD
            else:
                new_w = scroll_w
                origo_x = (scroll_w - self.scaled_screen_width) / 2

            if self.scaled_screen_height > scroll_h:
                new_h = self.scaled_screen_height + MIN_PAD * 2
                origo_y = MIN_PAD
            else:
                new_h = scroll_h
                origo_y = (scroll_h - self.scaled_screen_height) / 2

            self.origo = (int(origo_x), int(origo_y))
            self.edit_area.set_size_request(int(new_w), int(new_h))

    # ----------------------------------------------------- mouse events
    def _press_event(self, event):
        """
        Mouse press callback
        """
        self.edit_target_layer = None
        p = self.panel_coord_to_movie_coord((event.x, event.y))
        if self.active_layer.hit(p):
            # Active layer hit: it becomes the target for drag/release.
            self.edit_area.queue_draw()
            self.edit_target_layer = self.active_layer
            self.edit_target_layer.handle_mouse_press(p)
        else:
            if not self.change_active_layer_for_hit:
                return
            # Try other layers in order; first hit becomes active and target.
            for i in range(len(self.edit_layers)):
                layer = self.edit_layers[i]
                if layer.hit(p):
                    self.active_layer_changed_listener(i)
                    self.activate_layer(i)
                    self.edit_area.queue_draw()
                    self.edit_target_layer = self.active_layer
                    self.edit_target_layer.handle_mouse_press(p)

    def _motion_notify_event(self, x, y, state):
        """
        Mouse drag callback
        """
        p = self.panel_coord_to_movie_coord((x, y))
        if self.edit_target_layer != None:
            self.edit_target_layer.handle_mouse_drag(p)
            self.edit_area.queue_draw()

    def _release_event(self, event):
        """
        Mouse release
        """
        p = self.panel_coord_to_movie_coord((event.x, event.y))
        if self.edit_target_layer != None:
            self.edit_target_layer.handle_mouse_release(p)
            self.edit_area.queue_draw()
        self.edit_target_layer = None

    # -------------------------------------------- coord conversions
    def panel_coord_to_movie_coord(self, panel_point):
        # Inverse of movie_coord_to_panel_coord: remove origin offset,
        # then undo scale and pixel aspect ratio.
        panel_x, panel_y = panel_point
        origo_x, origo_y = self.origo

        panel_o_x = panel_x - origo_x
        panel_o_y = panel_y - origo_y

        movie_x = (1.0 / (self.scale * self.aspect_ratio)) * panel_o_x
        movie_y = (1.0 / self.scale) * panel_o_y

        return (movie_x, movie_y)

    def movie_coord_to_panel_coord(self, movie_point):
        movie_x, movie_y = movie_point
        origo_x, origo_y = self.origo

        panel_x = movie_x * self.scale * self.aspect_ratio + origo_x
        panel_y = movie_y * self.scale + origo_y

        return (panel_x, panel_y)

    # --------------------------------------------------- drawing
    def set_screen_rgb_data(self, screen_rgb_data):
        # MLT provides images with R and B switched from what Cairo expects;
        # swap the channels with numpy and keep a modifiable copy for Cairo.
        # NOTE(review): np.fromstring is deprecated in modern numpy;
        # np.frombuffer is the replacement.
        buf = np.fromstring(screen_rgb_data, dtype=np.uint8)
        buf.shape = (self.profile_h + 1, self.profile_w, 4) # +1 in h, seemeed to need it
        out = np.copy(buf)
        r = np.index_exp[:, :, 0]
        b = np.index_exp[:, :, 2]
        out[r] = buf[b]
        out[b] = buf[r]
        self.bg_buf = out

    def _draw(self, event, cr, allocation):
        # Draw background frame image if present.
        if self.bg_buf != None:
            # MLT Provides images in which R <-> B are swiched from what Cairo wants them,
            # so use numpy to switch them and to create a modifiable buffer for Cairo

            # Create cairo surface
            stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_RGB24, self.profile_w)
            surface = cairo.ImageSurface.create_for_data(self.bg_buf, cairo.FORMAT_RGB24, self.profile_w, self.profile_h, stride)

            # Display it
            ox, oy = self.origo
            cr.save()
            cr.translate(ox, oy)
            cr.scale(self.scale * self.aspect_ratio, self.scale)
            cr.set_source_surface(surface, 0, 0)
            cr.paint()
            cr.restore()

        if self.write_out_layers == True:
            # We need to go to 1.0 scale, 0,0 origo draw for out the file
            current_scale = self.scale
            self.scale = 1.0
            self.origo = (0.0, 0.0)
            img_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.profile_w, self.profile_h)
            cr = cairo.Context(img_surface)

        # Draw all visible layers (to screen, or to the png surface above).
        for editorlayer in self.edit_layers:
            if editorlayer.visible:
                editorlayer.draw(cr, self.write_out_layers, self.draw_overlays)

        if self.write_out_layers == True:
            img_surface.write_to_png(self.write_file_path)
            self.write_file_path = None # to make sure user components set this every time
            self.write_out_layers = False
            self.set_scale_and_update(current_scale) # return to user set scale

        self._draw_guidelines(cr)

    def _draw_guidelines(self, cr):
        # Draw image border rectangle; +0.5 aligns 1 px lines to pixel grid.
        ox, oy = self.origo
        ox += 0.5
        oy += 0.5
        w = self.scaled_screen_width + ox
        h = self.scaled_screen_height + oy
        cr.move_to(ox, oy)
        cr.line_to(w, oy)
        cr.line_to(w, h)
        cr.line_to(ox, h)
        cr.close_path()
        cr.set_line_width(1.0)
        cr.set_source_rgba(*GUIDES_COLOR)
        cr.stroke()

        # Draw "safe" area, this is not based on any real specification
        if self.draw_safe_area == True and self.has_safe_area == True:
            dimensions_safe_mult = 0.9
            xin = ((w - ox) - ((w - ox) * dimensions_safe_mult)) / 2.0
            yin = ((h - oy) - ((h - oy) * dimensions_safe_mult)) / 2.0
            cr.move_to(ox + xin, oy + yin)
            cr.line_to(w - xin, oy + yin)
            cr.line_to(w - xin, h - yin)
            cr.line_to(ox + xin, h - yin)
            cr.close_path()
            cr.stroke()
class ScaleSelector(gtk.VBox):
    """
    Combo box widget for selecting the view scale (zoom) percentage.
    """
    def __init__(self, listener):
        gtk.VBox.__init__(self)
        self.listener = listener # listener needs to implement scale_changed(scale) interface
        # Available zoom factors; index 2 (50%) is the initial selection.
        self.scales = [0.25, 0.33, 0.5, 0.75, 1.0, 1.5, 2.0, 4.0]

        combo = gtk.combo_box_new_text()
        for scale in self.scales:
            scale_str = str(int(100 * scale)) + "%"
            combo.append_text(scale_str)

        combo.set_active(2)
        combo.connect("changed",
                      lambda w,e: self._scale_changed(w.get_active()),
                      None)
        self.add(combo)
        self.combo = combo

    def get_current_scale(self):
        return self.scales[self.combo.get_active()]

    def _scale_changed(self, scale_index):
        # Notify listener with the actual scale factor, not the index.
        self.listener.scale_changed(self.scales[scale_index])
class GuidesViewToggle(gtk.ToggleButton):
    """
    Toggle button that shows/hides overlays and the safe area guides
    in a ViewEditor.
    """
    def __init__(self, view_editor):
        gtk.ToggleButton.__init__(self)
        icon = gtk.image_new_from_file(respaths.IMAGE_PATH + "guides_view_switch.png")
        self.set_image(icon)
        self.view_editor = view_editor
        self.set_active(True)
        self.connect("clicked", lambda w:self._clicked())

    def _clicked(self):
        # Both overlays and safe area follow the single toggle state.
        self.view_editor.draw_overlays = self.get_active()
        self.view_editor.draw_safe_area = self.get_active()
        self.view_editor.edit_area.queue_draw()
| Python |
#
# This file marks this directory as a Python package.
#
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import viewgeom
# Edit point display types
MOVE_HANDLE = 0
ROTATE_HANDLE = 1
CONTROL_POINT = 2
INVISIBLE_POINT = 3

# handle size
EDIT_POINT_SIDE_HALF = 4

# line types
LINE_NORMAL = 0
LINE_DASH = 1

class EditPoint:
    """
    A point that user can move on the screen to edit image data.

    Keeps both a current position (x, y) and a saved start position used as
    the reference for translate_from_move_start() during drags.
    """
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        self.rotation = 0.0
        self.is_hittable = True
        self.start_x = x
        self.start_y = y
        self.display_type = MOVE_HANDLE # default value, can changed for different shapes and edit modes

    def set_pos(self, p):
        self.x, self.y = p

    def get_pos(self):
        return (self.x, self.y)

    def get_start_pos(self):
        return (self.start_x, self.start_y)

    def save_start_pos(self):
        # Snapshot current position as the reference for the next move edit.
        self.start_x = self.x
        self.start_y = self.y

    def translate_from_move_start(self, delta):
        # Position = saved start position + delta; repeated calls with
        # growing deltas don't accumulate error.
        dx, dy = delta
        self.x = self.start_x + dx
        self.y = self.start_y + dy

    def translate(self, delta):
        # Relative move from the current position.
        dx, dy = delta
        self.x = self.x + dx
        self.y = self.y + dy

    def hit(self, test_p, view_scale=1.0):
        """
        Returns True if test_p is inside the point's handle square.

        The square's side is scaled by 1 / view_scale so the hit area stays
        constant-sized on screen regardless of zoom.
        """
        if not self.is_hittable:
            return False

        test_x, test_y = test_p
        half_side = EDIT_POINT_SIDE_HALF * (1.0 / view_scale)
        if abs(test_x - self.x) > half_side:
            return False
        if abs(test_y - self.y) > half_side:
            return False
        return True

    def draw(self, cr, view_editor):
        # Invisible points take part in hit tests but are never drawn.
        if self.display_type == INVISIBLE_POINT:
            return

        panel_x, panel_y = view_editor.movie_coord_to_panel_coord((self.x, self.y))
        cr.rectangle(panel_x - 4, panel_y - 4, 8, 8)
        cr.fill()
class EditPointShape:
    """
    A shape that user can move, rotate or scale on the screen to edit
    image data. Concrete shapes populate self.edit_points.
    """
    def __init__(self):
        self.edit_points = []
        self.line_width = 2.0
        self.line_type = LINE_DASH

    def save_start_pos(self):
        """Save move start positions for all edit points."""
        for ep in self.edit_points:
            ep.save_start_pos()

    def translate_points_to_pos(self, px, py, anchor_point_index):
        """Translate all points so the anchor point lands on (px, py)."""
        anchor = self.edit_points[anchor_point_index]
        dx = px - anchor.x
        dy = py - anchor.y
        for ep in self.edit_points:
            ep.translate((dx, dy))

    def translate_from_move_start(self, delta):
        """Translate all points by delta from their move start positions."""
        for ep in self.edit_points:
            ep.translate_from_move_start(delta)

    def rotate_from_move_start(self, anchor, angle):
        """Rotate all points around anchor by angle from move start positions."""
        for ep in self.edit_points:
            rotated_pos = viewgeom.rotate_point_around_point(angle,
                                                             ep.get_start_pos(),
                                                             anchor)
            ep.set_pos(rotated_pos)

    def point_in_area(self, p):
        """
        Default hit test is to see if point is inside convex with points in order 0 - n.
        Override for different hit test.
        """
        points = self.editpoints_as_tuples_list()
        return viewgeom.point_in_convex_polygon(p, points, 0)

    def get_edit_point(self, p, view_scale=1.0):
        """Return the first edit point hit by p, or None."""
        for ep in self.edit_points:
            if ep.hit(p, view_scale) == True:
                return ep
        return None

    def editpoints_as_tuples_list(self):
        """Return edit point positions as a list of (x, y) tuples."""
        points = []
        for ep in self.edit_points:
            points.append((ep.x, ep.y))
        return points

    def get_bounding_box(self, p):
        """
        Return axis-aligned bounding box of the edit points as
        (x, y, width, height), or None if there are no points.

        NOTE: parameter p is unused; kept for call compatibility.
        """
        if len(self.edit_points) == 0:
            return None
        x_low = 1000000000
        x_high = -100000000
        y_low = 1000000000
        y_high = -100000000
        # BUGFIX: the original looped "for p in self.edit_points" and then
        # unpacked "px, py = p", but EditPoint is not iterable, so this
        # raised TypeError (and shadowed the parameter p). Read .x / .y.
        for ep in self.edit_points:
            if ep.x < x_low:
                x_low = ep.x
            if ep.x > x_high:
                x_high = ep.x
            if ep.y < y_low:
                y_low = ep.y
            if ep.y > y_high:
                y_high = ep.y
        return (x_low, y_low, x_high - x_low, y_high - y_low)

    def draw_points(self, cr, view_editor):
        """Draw all edit point handles."""
        for ep in self.edit_points:
            ep.draw(cr, view_editor)

    def draw_line_shape(self, cr, view_editor):
        """Stroke the closed outline through all edit points."""
        self._set_line(cr)
        x, y = view_editor.movie_coord_to_panel_coord((self.edit_points[0].x, self.edit_points[0].y))
        cr.move_to(x, y)
        for i in range(1, len(self.edit_points)):
            ep = self.edit_points[i]
            x, y = view_editor.movie_coord_to_panel_coord((ep.x, ep.y))
            cr.line_to(x, y)
        cr.close_path()
        cr.stroke()
        cr.set_dash([]) # turn dashing off

    def _set_line(self, cr):
        # Set stroke style; dashed outline when line_type is LINE_DASH.
        if self.line_type == LINE_DASH:
            dashes = [6.0, 6.0, 6.0, 6.0] # ink, skip, ink, skip
            offset = 0
            cr.set_dash(dashes, offset)
        cr.set_line_width(self.line_width)

    def get_panel_point(self, point_index, view_editor):
        """Return edit point position converted to panel coordinates."""
        ep = self.edit_points[point_index]
        return view_editor.movie_coord_to_panel_coord((ep.x, ep.y))

    def get_first_two_points_rotation_angle(self):
        """
        Return the rotation angle in radians [0, 2*pi) from the positive
        x axis at point 0 towards point 1.
        """
        anchor = (self.edit_points[0].x, self.edit_points[0].y)
        p1 = (self.edit_points[0].x + 10, self.edit_points[0].y)
        p2 = (self.edit_points[1].x, self.edit_points[1].y)
        if self.edit_points[0].y < self.edit_points[1].y:
            return viewgeom.get_angle_in_rad(p1, anchor, p2)
        else:
            return 2 * math.pi - viewgeom.get_angle_in_rad(p1, anchor, p2)

    def set_all_points_invisible(self):
        """Set all edit points to the undrawn display type."""
        for ep in self.edit_points:
            ep.display_type = INVISIBLE_POINT
class SimpleRectEditShape(EditPointShape):
    """
    An axis-aligned rectangle edit shape with four corner points.
    """
    def __init__(self):
        EditPointShape.__init__(self)
        # Placeholder rect; user should set the real rect immediately with set_rect().
        self.rect = (0, 0, 100, 100)
        self.rotation = 0.0
        x, y, w, h = self.rect
        # Edit point 0 determines the position of the shape.
        for cx, cy in ((x, y), (x + w, y), (x + w, y + h), (x, y + h)):
            corner = EditPoint(cx, cy)
            corner.display_type = MOVE_HANDLE
            self.edit_points.append(corner)

    def set_rect(self, rect):
        """Set rect as (x, y, w, h) and move corner points to match."""
        self.rect = rect
        self.reset_points()

    def update_rect_size(self, w, h):
        """Resize rect keeping edit point 0 (shape position) fixed."""
        # Edit point 0 determines the position of the shape.
        self.rect = (self.edit_points[0].x, self.edit_points[0].y, w, h)
        self._layout_corners()

    def reset_points(self):
        """Move all corner points to match self.rect."""
        self._layout_corners()

    def _layout_corners(self):
        # Place the four corner points in order: top-left, top-right,
        # bottom-right, bottom-left of self.rect.
        x, y, w, h = self.rect
        corners = ((x, y), (x + w, y), (x + w, y + h), (x, y + h))
        for ep, (cx, cy) in zip(self.edit_points, corners):
            ep.x = cx
            ep.y = cy

    def get_mid_point(self):
        """Return the rect center as the intersection of its two diagonals."""
        diag1 = viewgeom.get_line_for_points(self.edit_points[0].get_pos(),
                                             self.edit_points[2].get_pos())
        diag2 = viewgeom.get_line_for_points(self.edit_points[1].get_pos(),
                                             self.edit_points[3].get_pos())
        return diag1.get_intersection_point(diag2)

    def get_handle_guides(self, hit_point):
        """
        Return two guide vectors running from the corner opposite to
        hit_point towards its two neighboring corners. Each guide carries
        the index of its target corner in .point_index.
        """
        opp_index = (self.edit_points.index(hit_point) + 2) % 4
        opp_handle = self.edit_points[opp_index]
        prev_index = (opp_index - 1) % 4
        next_index = (opp_index + 1) % 4
        guide_1 = viewgeom.get_vec_for_points(opp_handle.get_pos(),
                                              self.edit_points[prev_index].get_pos())
        guide_2 = viewgeom.get_vec_for_points(opp_handle.get_pos(),
                                              self.edit_points[next_index].get_pos())
        guide_1.point_index = prev_index
        guide_2.point_index = next_index
        return (guide_1, guide_2)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import math
# Winding direction constants returned by get_points_direction().
CLOCKWISE = 1
COUNTER_CLOCKWISE = 2
def point_in_convex_polygon(test_point, points, first_point_index):
    """
    Return True if test_point is inside the convex polygon given by points.

    points is a list of (x, y) corner tuples in order (either winding).
    first_point_index selects which corner the reference winding direction
    is taken from; indices wrap around so any valid index works.
    """
    # Polygon has to have > 2 points to contain anything.
    if len(points) < 3:
        return False
    # Get winding direction at the first corner. BUGFIX: wrap the indices
    # with modulo; the original indexed first_point_index + 1 / + 2 directly
    # and raised IndexError for indices near the end of the list.
    point_count = len(points)
    direction = get_points_direction(points[first_point_index % point_count],
                                     points[(first_point_index + 1) % point_count],
                                     points[(first_point_index + 2) % point_count])
    # For a point inside the polygon, the direction of
    # (corner, next corner, test point) is the same for every edge.
    for i in range(0, len(points) - 1):
        if get_points_direction(points[i], points[i + 1], test_point) != direction:
            return False
    # Closing edge from last corner back to the first.
    if get_points_direction(points[-1], points[0], test_point) != direction:
        return False
    return True
def get_points_direction(p1, p2, p3):
    """Return CLOCKWISE or COUNTER_CLOCKWISE for the turn p1 -> p2 -> p3."""
    return CLOCKWISE if points_clockwise(p1, p2, p3) else COUNTER_CLOCKWISE
def points_clockwise(p1, p2, p3):
    """
    Return True if the cross product of edges (p1 - p2) and (p3 - p2) is
    non-negative, i.e. the points turn clockwise (or are collinear).
    """
    p1x, p1y = p1
    p2x, p2y = p2
    p3x, p3y = p3
    cross = (p1x - p2x) * (p3y - p2y) - (p1y - p2y) * (p3x - p2x)
    return cross >= 0
def rotate_point_around_point(rotation_angle, p, anchor):
    """Rotate point p by rotation_angle degrees around anchor point."""
    px, py = p
    ax, ay = anchor
    rel_x, rel_y = rotate_point_around_origo(rotation_angle, (px - ax, py - ay))
    return (rel_x + ax, rel_y + ay)
def rotate_point_around_origo(rotation_angle, p):
    """Rotate point p by rotation_angle degrees around the origin."""
    angle_rad = math.radians(rotation_angle)
    cos_a = math.cos(angle_rad)
    sin_a = math.sin(angle_rad)
    px, py = p
    return (px * cos_a - py * sin_a, px * sin_a + py * cos_a)
def get_angle_in_deg(p1, corner, p2):
    """Return the angle at corner between points p1 and p2, in degrees."""
    return math.degrees(get_angle_in_rad(p1, corner, p2))
def get_angle_in_rad(p1, corner, p2):
    """
    Return the angle at corner between points p1 and p2, in radians [0, pi].

    Returns 0.0 for degenerate input where either side has zero length.
    """
    side1 = distance(p1, corner)
    side2 = distance(p2, corner)
    if side1 == 0.0 or side2 == 0.0:
        # this gets fed zero-length sides
        return 0.0
    opposite_side = distance(p1, p2)
    # Law of cosines. BUGFIX: clamp to [-1.0, 1.0] because floating point
    # rounding can push the value just outside the acos() domain for
    # (near-)collinear points, raising "ValueError: math domain error".
    angle_cos = ((side1 * side1) + (side2 * side2) - (opposite_side * opposite_side)) / (2 * side1 * side2)
    angle_cos = max(-1.0, min(1.0, angle_cos))
    return math.acos(angle_cos)
def distance(p1, p2):
    """Return the Euclidean distance between points p1 and p2."""
    x1, y1 = p1
    x2, y2 = p2
    return math.hypot(x2 - x1, y2 - y1)
def get_line_for_points(p1, p2):
    """Build a Line through points p1 and p2."""
    params = _get_line_params_for_points(p1, p2)
    return Line(*params)
def get_vec_for_points(p1, p2):
    """Build a Vec from p1 to p2. Returns None for coincident points."""
    if p1 == p2:
        return None
    params = _get_line_params_for_points(p1, p2)
    return Vec(params[0], params[1], params[2], params[3], p1, p2)
def _get_line_params_for_points(p1, p2):
x1, y1 = p1
x2, y2 = p2
if (x1 == x2):
is_vertical = True;
x_icept = x1;
m = None
b = None
else:
is_vertical = False
# slope
m = (y2-y1) / (x2-x1)
# get y intercept b
b = y1 - (m * x1)
x_icept = None
return (m, b, is_vertical, x_icept)
class Line:
    """
    A 2D line in slope-intercept form y = mx + b.

    Vertical lines have is_vertical == True and are described by x_icept
    instead of m and b (which are None for them).
    """
    def __init__(self, m, b, is_vertical, x_icept):
        self.m = m
        self.b = b
        self.is_vertical = is_vertical
        self.x_icept = x_icept

    def get_normal_projection_point(self, p):
        """
        Return the point on this line that also lies on the perpendicular
        line through p (i.e. the closest point on this line to p).
        """
        px, py = p
        if self.is_vertical:
            # Perpendicular of a vertical line is horizontal.
            return (self.x_icept, py)
        if self.m == 0:
            # Perpendicular of a horizontal line is vertical.
            return (px, self.b)
        # General case: intersect with the perpendicular through p.
        perp_m = -1.0 / self.m
        perp_b = py - perp_m * px
        ix = (perp_b - self.b) / (self.m - perp_m)
        return (ix, ix * self.m + self.b)

    def get_intersection_point(self, i_line):
        """
        Return the intersection point with i_line, or None if the lines
        are parallel (including two vertical lines).
        """
        if self.is_vertical and i_line.is_vertical:
            return None
        if (not self.is_vertical) and (not i_line.is_vertical) and (self.m == i_line.m):
            return None
        # Exactly one of the lines is vertical.
        if self.is_vertical:
            return get_isp_for_vert_and_non_vert(self, i_line)
        if i_line.is_vertical:
            return get_isp_for_vert_and_non_vert(i_line, self)
        # Both lines are non-vertical.
        ix = (i_line.b - self.b) / (self.m - i_line.m)
        return (ix, ix * self.m + self.b)
class Vec(Line):
    """
    A directed segment (mathematical vector) lying on a Line.
    """
    def __init__(self, m, b, is_vertical, x_icept, start_point, end_point):
        Line.__init__(self, m, b, is_vertical, x_icept)
        # Start point and end point being on the line is guaranteed by the
        # builder function get_vec_for_points(), so don't use this
        # constructor directly or set start or end points directly;
        # only use Vec.set_end_point_to_normal_projection() to set end point.
        self.start_point = start_point
        self.end_point = end_point
        self.direction = self.get_direction()
        self.orig_direction = self.direction

    def set_end_point_to_normal_projection(self, p):
        """Move end point to the normal projection of p onto this line."""
        self.end_point = self.get_normal_projection_point(p)

    def get_direction(self):
        """
        Return 1 or -1 for direction and 0 if length is zero and direction
        is undetermined.
        """
        sx, sy = self.start_point
        ex, ey = self.end_point
        # BUGFIX: guard zero length. The original divided by abs(sy - ey) /
        # abs(sx - ex) unconditionally and raised ZeroDivisionError for a
        # zero-length vec, contradicting the documented contract above;
        # set_end_point_to_normal_projection() can produce that state.
        if self.is_vertical:
            if sy == ey:
                return 0
            return (sy - ey) / abs(sy - ey)
        else:
            if sx == ex:
                return 0
            return (sx - ex) / abs(sx - ex)

    def get_length(self):
        """
        Return length: positive if direction is the same as original,
        negative if reversed and zero if length is 0.
        """
        if self.is_zero_length():
            return 0
        current_direction = self.get_direction() / self.orig_direction
        d = distance(self.start_point, self.end_point)
        return current_direction * d

    def get_multiplied_vec(self, multiplier):
        """Return a new Vec scaled by multiplier from the same start point."""
        start_x, start_y = self.start_point
        end_x, end_y = self.end_point
        # abs(d) * abs(d) / d preserves the sign of d; guard zero division.
        if (end_x - start_x) == 0:
            x_dist = 0
        else:
            x_dist = abs(end_x - start_x) * abs(end_x - start_x) / (end_x - start_x)
        if (end_y - start_y) == 0:
            y_dist = 0
        else:
            y_dist = abs(end_y - start_y) * abs(end_y - start_y) / (end_y - start_y)
        xm_dist = x_dist * multiplier
        ym_dist = y_dist * multiplier
        new_end_x = start_x + xm_dist
        new_end_y = start_y + ym_dist
        return get_vec_for_points(self.start_point, (new_end_x, new_end_y))

    def is_zero_length(self):
        """Return True if start and end points coincide."""
        return self.start_point == self.end_point

    def set_zero_length(self):
        """Collapse the vec onto its start point."""
        self.end_point = self.start_point
def get_isp_for_vert_and_non_vert(vertical, non_vertical):
    """Return the intersection point of a vertical and a non-vertical line."""
    x = vertical.x_icept
    return (x, non_vertical.m * x + non_vertical.b)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles user edit events for trim, roll and slip trim modes.
"""
import appconsts
import dialogutils
import edit
import editorpersistance
import editorstate
from editorstate import current_sequence
from editorstate import PLAYER
import gui
import tlinewidgets
import updater
# Default value for pre- and post-roll in trim preview loop playback, in frames.
DEFAULT_LOOP_HALF_LENGTH = 25

# Pre- and post-roll in loop playback
loop_half_length = DEFAULT_LOOP_HALF_LENGTH

# Data/state for ongoing edit.
edit_data = None

# Flag for disabling mouse events while a freshly inited trim waits for release.
mouse_disabled = False

# Flag for temporary blank needed for one roll trim editing track's last clip's out
last_from_trimmed = False

# Function that sets edit mode when exiting with click on empty
set_exit_mode_func = None

# Function that sets <X>_NO_EDIT mode that displays trim cursor but no edit is under way.
#
# This is used e.g. when user clicks empty and preference is to stay in trim mode,
# so active edit is exited to <X>_NO_EDIT mode.
#
# This function is set when trim modes are entered to be the "edit init func" for the entered trim mode.
set_no_edit_mode_func = None
# ------------------------------------ module functions
def _get_trim_edit(track, frame):
    """
    Return (cut_frame, edit_to_side) describing a trim edit for a frame on
    a track. cut_frame is -1 if no cut was found.
    """
    # Trying to trim from a frame after the last clip inits a from-side
    # trim at the frame where the last clip ends.
    track_length = track.get_length()
    if frame >= track_length and track_length > 1:
        return (track_length, False)
    # Find the closest cut frame for the trim.
    cut_frame = current_sequence().get_closest_cut_frame(track.id, frame)
    if cut_frame == -1:
        return (-1, None)
    # Pressing at or after the cut edits the to-side clip.
    return (cut_frame, frame >= cut_frame)
def _get_trim_limits(cut_frame, from_clip, to_clip):
"""
NOTE: trim_limits frames here are TIMELINE frames, not CLIP frames
Returns relevant clip boundaries when doing trim edits.
- clip handles on both sides of cut
- clip ends on both sides of cut
"""
# This too complex now that roll is handled separately, could be reworked
trim_limits = {}
if from_clip == None:
trim_limits["from_start"] = -1
trim_limits["from_end"] = -1
trim_limits["both_start"] = -1
else:
trim_limits["from_start"] = cut_frame - from_clip.clip_out
from_length = from_clip.get_length()
trim_limits["from_end"] = cut_frame - from_clip.clip_out + from_length - 1
trim_limits["both_start"] = cut_frame - (from_clip.clip_out - from_clip.clip_in)
if to_clip == None:
trim_limits["to_start"] = -1
trim_limits["to_end"] = -1
trim_limits["both_end"] = -1
else:
trim_limits["to_start"] = cut_frame - to_clip.clip_in
to_length = to_clip.get_length()
trim_limits["to_end"] = cut_frame - to_clip.clip_in + to_length
trim_limits["both_end"] = cut_frame + (to_clip.clip_out - to_clip.clip_in)
return trim_limits
def _get_roll_limits(cut_frame, from_clip, to_clip):
# Trim_limits frames here are TIMELINE frames, not CLIP frames
trim_limits = {}
trim_limits["from_start"] = cut_frame - (from_clip.clip_out - from_clip.clip_in)
from_length = from_clip.get_length()
trim_limits["from_end"] = cut_frame - from_clip.clip_out + from_length - 2 # -1 incl, -1 leave one frame, == -2
if from_clip.is_blanck_clip:
trim_limits["from_end"] = 10000000
trim_limits["to_start"] = cut_frame - to_clip.clip_in
to_length = to_clip.get_length()
trim_limits["to_end"] = cut_frame + (to_clip.clip_out - to_clip.clip_in) #- to_clip.clip_in + to_length - 1 # - 1, leave one frame
if to_clip.is_blanck_clip:
trim_limits["to_start"] = 0
if trim_limits["from_start"] > trim_limits["to_start"]:
trim_limits["both_start"] = trim_limits["from_start"]
else:
trim_limits["both_start"] = trim_limits["to_start"]
if trim_limits["to_end"] < trim_limits["from_end"]:
trim_limits["both_end"] = trim_limits["to_end"]
else:
trim_limits["both_end"] = trim_limits["from_end"]
return trim_limits
def _set_edit_data(track, edit_frame, is_one_roll_trim):
    """
    Sets edit mode data used by both trim modes.

    Fills module global edit_data with track, clip index, cut frame, trim
    limits and the clips on both sides of the cut. Also sets module global
    last_from_trimmed as a side effect.
    """
    # Find index of to-clip of edit
    index = current_sequence().get_clip_index(track, edit_frame)
    to_clip = track.clips[index]
    if index > 0:
        from_clip = track.clips[index -1]
    else:
        from_clip = None
    # Trimming last clip on track can only be edited from side
    # but code so far produces to_clip == last clip, from_clip == None,
    # fix this by setting new values for from_clip and to_clip.
    #
    # we're also getting wrong index from mlt as edit frame == track.get_length()
    if edit_frame == track.get_length():
        # NOTE: this global declaration covers the whole function scope, so
        # the assignment in the else branch below also writes the module global.
        global last_from_trimmed
        index = current_sequence().get_clip_index(track, edit_frame - 1)
        last_from_trimmed = True
        from_clip = to_clip
        to_clip = None
    else:
        last_from_trimmed = False
    # Get trim limits
    if is_one_roll_trim:
        trim_limits = _get_trim_limits(edit_frame, from_clip, to_clip)
    else:
        trim_limits = _get_roll_limits(edit_frame, from_clip, to_clip)
    global edit_data
    edit_data = {"track":track.id,
                 "track_object":track,
                 "index":index,
                 "edit_frame":edit_frame,
                 "selected_frame":edit_frame,
                 "trim_limits":trim_limits,
                 "from_clip":from_clip,
                 "to_clip":to_clip}
def _pressed_on_edited_track(y):
    """Return True if panel y coordinate hits the track being edited."""
    pressed_track = tlinewidgets.get_track(y)
    if pressed_track is None:
        return False
    return pressed_track.id == edit_data["track"]
def _trimmed_clip_is_blank():
    """Return True if the clip on the edited side of the cut is a blank clip."""
    if edit_data["to_side_being_edited"]:
        clip = edit_data["to_clip"]
    else:
        clip = edit_data["from_clip"]
    return bool(clip.is_blanck_clip)
def trim_looping_stopped():
    # Reinit the currently active trim mode after preview loop playback stops.
    if editorstate.edit_mode == editorstate.ONE_ROLL_TRIM:
        set_oneroll_mode(edit_data["track_object"],
                         edit_data["edit_frame"],
                         edit_data["to_side_being_edited"])
    if editorstate.edit_mode == editorstate.TWO_ROLL_TRIM:
        set_tworoll_mode(edit_data["track_object"],
                         edit_data["edit_frame"])
    if editorstate.edit_mode == editorstate.SLIDE_TRIM:
        set_slide_mode(edit_data["track_object"],
                       edit_data["reinit_frame"])

def update_cursor_to_mode():
    # Make the timeline cursor match the current edit mode.
    gui.editor_window.set_cursor_to_mode()

def set_no_edit_trim_mode():
    # If any trim mode is active, drop into its <X>_NO_EDIT variant
    # (trim cursor shown, but no edit under way).
    if editorstate.edit_mode == editorstate.ONE_ROLL_TRIM or \
       editorstate.edit_mode == editorstate.TWO_ROLL_TRIM or \
       editorstate.edit_mode == editorstate.SLIDE_TRIM:
        set_no_edit_mode_func()
# ------------------------------------- ONE ROLL TRIM EVENTS
def set_oneroll_mode(track, current_frame=-1, editing_to_clip=None):
    """
    Sets one roll mode.

    track: track object to trim on
    current_frame: timeline frame to init trim at; -1 means use playhead
    editing_to_clip: forced edit side (True/False) when reiniting after
                     edit/undo/redo; None lets _get_trim_edit() decide
    Returns True on success, False if trim could not be inited here.
    """
    if track == None:
        return False
    # Only user-editable tracks between the bottom and hidden tracks are legal.
    if track.id < 1 or (track.id >= len(current_sequence().tracks) - 1):
        return False
    if current_frame == -1: # from button, ctrl + mouse calls with frame
        current_frame = PLAYER().producer.frame() + 1 # +1 because cut frame selects previous clip
    if current_frame >= track.get_length():
        return False
    edit_frame, to_side_being_edited = _get_trim_edit(track, current_frame)
    if edit_frame == -1:
        return False
    # hack fix for last clip out trim. If frame pointer not at very end of clip
    # the other functions for getting trim frame given +1 too much
    if edit_frame > track.get_length():
        edit_frame = track.get_length()
    if editing_to_clip != None: # This is set when mode reset after edit or after undo or redo
        # _get_trim_edit() might give different (wrong) clip being edited
        # because cut is now at a different place.
        to_side_being_edited = editing_to_clip
    _set_edit_data(track, edit_frame, True)
    global edit_data
    # Set side being edited to default to-side
    edit_data["to_side_being_edited"] = to_side_being_edited
    current_sequence().clear_hidden_track()
    # Can't trim a blank clip. Blank clips are special in MLT and can't be
    # made to do things that are needed in trim.
    if _trimmed_clip_is_blank():
        set_exit_mode_func()
        primary_txt = _("Cant ONE ROLL TRIM blank clips.")
        secondary_txt = _("You can use MOVE OVERWRITE or TWO ROLL TRIM edits instead\nto get the desired change.")
        dialogutils.info_message(primary_txt, secondary_txt, gui.editor_window.window)
        return False
    # Give timeline widget needed data
    tlinewidgets.set_edit_mode(edit_data,
                               tlinewidgets.draw_one_roll_overlay)
    # Set clip as special producer on hidden track and display current frame
    # from it.
    trim_limits = edit_data["trim_limits"]
    if edit_data["to_side_being_edited"]:
        clip = edit_data["to_clip"]
        clip_start = trim_limits["to_start"]
    else:
        clip = edit_data["from_clip"]
        clip_start = trim_limits["from_start"]
    # Display trim clip
    if clip.media_type != appconsts.PATTERN_PRODUCER:
        current_sequence().display_trim_clip(clip.path, clip_start) # file producer
    else:
        current_sequence().display_trim_clip(None, clip_start, clip.create_data) # pattern producer
    PLAYER().seek_frame(edit_frame)
    return True
def oneroll_trim_press(event, frame):
    """
    User presses mouse when in one roll mode.
    """
    # A press outside the edited track or outside the legal trim range
    # attempts to reinit the edit at the press position.
    # CLEANUP: the two reinit branches were byte-identical duplicated code;
    # extracted into _attempt_reinit_oneroll(), mirroring
    # _attempt_reinit_tworoll() used by the two roll mode.
    if not _pressed_on_edited_track(event.y):
        _attempt_reinit_oneroll(event, frame)
        return
    if not _pressed_on_one_roll_active_area(frame):
        _attempt_reinit_oneroll(event, frame)
        return
    # Get legal edit delta and set to edit mode data for overlay draw
    global edit_data
    frame = _legalize_one_roll_trim(frame, edit_data["trim_limits"])
    edit_data["selected_frame"] = frame
    PLAYER().seek_frame(frame)

def _attempt_reinit_oneroll(event, frame):
    # Try to init a new one roll trim at press position; exit trim mode or
    # go into no-edit trim mode on failure.
    global mouse_disabled
    track = tlinewidgets.get_track(event.y)
    success = set_oneroll_mode(track, frame)
    if not success:
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_exit_mode_func(True) # further mouse events are handled at editevent.py
        else:
            set_no_edit_mode_func() # further mouse events are handled at editevent.py
    else:
        if not editorpersistance.prefs.quick_enter_trims:
            # new trim inited, editing non-active until release
            tlinewidgets.trim_mode_in_non_active_state = True
            gui.tline_canvas.widget.queue_draw()
            gui.editor_window.set_tline_cursor(editorstate.ONE_ROLL_TRIM_NO_EDIT)
            mouse_disabled = True
        else:
            # new trim inited, active immediately
            oneroll_trim_move(event.x, event.y, frame, None)
            gui.tline_canvas.widget.queue_draw()
def oneroll_trim_move(x, y, frame, state):
    """
    User moves mouse when in one roll mode.

    Updates the previewed trim frame and seeks the player to it.
    """
    # Mouse events are disabled while a freshly inited trim waits for release.
    if mouse_disabled:
        return
    # Get legal edit frame for overlay display
    global edit_data
    frame = _legalize_one_roll_trim(frame, edit_data["trim_limits"])
    edit_data["selected_frame"] = frame
    PLAYER().seek_frame(frame)

def oneroll_trim_release(x, y, frame, state):
    """
    User releases mouse when in one roll mode. Performs the trim edit.
    """
    global mouse_disabled
    if mouse_disabled:
        mouse_disabled = False
        # we may have been in non active state because the clip being edited was changed
        gui.editor_window.set_cursor_to_mode()
        tlinewidgets.trim_mode_in_non_active_state = False
        gui.tline_canvas.widget.queue_draw()
        return
    _do_one_roll_trim_edit(frame)

def _do_one_roll_trim_edit(frame):
    """
    Do the one roll trim edit whose preview ends at the given frame.

    Builds and executes the correct edit action for the three cases below;
    trim mode is reinited by the edit action's undo-done callback.
    """
    # Get legal edit delta and set to edit mode data for overlay draw
    global edit_data
    frame = _legalize_one_roll_trim(frame, edit_data["trim_limits"])
    delta = frame - edit_data["edit_frame"]
    # case: editing from-side of last clip
    global last_from_trimmed
    if last_from_trimmed:
        data = {"track":edit_data["track_object"],
                "index":edit_data["index"],
                "clip":edit_data["from_clip"],
                "delta":delta,
                "undo_done_callback":clip_end_first_do_done,
                "first_do":True}
        action = edit.trim_last_clip_end_action(data)
        last_from_trimmed = False
        action.do_edit()
        # Edit is reinitialized in callback from edit action one_roll_trim_undo_done
    # case: editing to-side of cut
    elif edit_data["to_side_being_edited"]:
        data = {"track":edit_data["track_object"],
                "index":edit_data["index"],
                "clip":edit_data["to_clip"],
                "delta":delta,
                "undo_done_callback":one_roll_trim_undo_done,
                "first_do":True}
        action = edit.trim_start_action(data)
        action.do_edit()
        # Edit is reinitialized in callback from edit action one_roll_trim_undo_done
    # case: editing from-side of cut
    else:
        data = {"track":edit_data["track_object"],
                "index":edit_data["index"] - 1,
                "clip":edit_data["from_clip"],
                "delta":delta,
                "undo_done_callback":one_roll_trim_undo_done,
                "first_do":True}
        action = edit.trim_end_action(data)
        action.do_edit()
        # Edit is reinitialized in callback from edit action one_roll_trim_undo_done
def oneroll_play_pressed():
    # Start trim preview playback loop
    current_sequence().hide_hidden_clips()
    PLAYER().start_loop_playback(edit_data["edit_frame"], loop_half_length, edit_data["track_object"].get_length())

def oneroll_stop_pressed():
    # Stop trim preview playback loop
    PLAYER().stop_loop_playback(trim_looping_stopped)

def oneroll_prev_pressed():
    # Trim one frame back from the current cut frame.
    _do_one_roll_trim_edit(edit_data["edit_frame"] - 1)

def oneroll_next_pressed():
    # Trim one frame forward from the current cut frame.
    _do_one_roll_trim_edit(edit_data["edit_frame"] + 1)
def one_roll_trim_undo_done(track, index, is_to_side_edit):
    """
    WRONG NAME FOR FUNCTION

    Callback if initial edit done. Undo and redo do not cause this to be
    called. Reinits one roll trim mode on the correct side of the new cut.
    """
    # reinit edit mode to correct side
    frame = track.clip_start(index)
    success = set_oneroll_mode(track, frame, is_to_side_edit)
    if not success:
        set_no_edit_mode_func()

def clip_end_first_do_done(track):
    # Callback after a last-clip end trim: reinit from-side trim at track end.
    frame = track.get_length() - 1
    set_oneroll_mode(track, frame, False)
def _legalize_one_roll_trim(frame, trim_limits):
    """
    Keeps one roll trim selection in legal edit area.

    Clamps frame into the legal range for the edited side and, as a side
    effect, sets tlinewidgets.trim_status for overlay drawing.
    """
    # Case: editing to-clip
    if edit_data["to_side_being_edited"]:
        first = trim_limits["to_start"]
        last = trim_limits["both_end"]
    # Case: editing from-clip
    else:
        first = trim_limits["both_start"]
        last = trim_limits["from_end"]
    if frame <= first:
        frame = first
        tlinewidgets.trim_status = appconsts.ON_FIRST_FRAME
    elif frame >= last:
        frame = last
        tlinewidgets.trim_status = appconsts.ON_LAST_FRAME
    else:
        tlinewidgets.trim_status = appconsts.ON_BETWEEN_FRAME
    return frame
def _pressed_on_one_roll_active_area(frame):
    """Return True if frame lies in the active area of the ongoing one roll trim."""
    trim_limits = edit_data["trim_limits"]
    if edit_data["to_side_being_edited"]:
        # To-side: active area runs from the cut frame forward.
        return (trim_limits["to_start"] <= frame <= trim_limits["both_end"]
                and frame >= edit_data["edit_frame"])
    else:
        # From-side: active area runs from the first legal frame to the cut.
        return (trim_limits["both_start"] <= frame <= trim_limits["from_end"]
                and frame <= edit_data["edit_frame"])
#---------------------------------------- TWO ROLL TRIM EVENTS
def set_tworoll_mode(track, current_frame = -1):
    """
    Sets two roll mode.

    track: track object to trim on
    current_frame: timeline frame to init trim at; -1 means use playhead
    Returns True on success, False if roll trim could not be inited here.
    """
    if track == None:
        return False
    if current_frame == -1:
        current_frame = PLAYER().producer.frame() + 1 # +1 because cut frame selects previous clip
    if current_frame >= track.get_length():
        return False
    current_sequence().clear_hidden_track()
    edit_frame, to_side_being_edited = _get_trim_edit(track, current_frame)
    # Trying to two roll edit last clip's out frame inits one roll trim mode
    # via programmed click.
    if edit_frame >= track.get_length():
        return False
    try:
        _set_edit_data(track, edit_frame, False)
    except: # fails for last clip
        return False
    if edit_frame == 0:
        _tworoll_init_failed_window()
        return False
    global edit_data
    if edit_data["from_clip"] == None:
        _tworoll_init_failed_window()
        return False
    # Force edit side to be on non-blank side
    if to_side_being_edited and edit_data["to_clip"].is_blanck_clip:
        to_side_being_edited = False
    if ((to_side_being_edited == False)
        and edit_data["from_clip"].is_blanck_clip):
        to_side_being_edited = True
    edit_data["to_side_being_edited"] = to_side_being_edited
    # Find out if non edit side is blank
    non_edit_side_blank = False
    if to_side_being_edited and edit_data["from_clip"].is_blanck_clip:
        non_edit_side_blank = True
    if ((to_side_being_edited == False) and edit_data["to_clip"].is_blanck_clip):
        non_edit_side_blank = True
    edit_data["non_edit_side_blank"] = non_edit_side_blank
    # Give timeline widget needed data
    tlinewidgets.set_edit_mode(edit_data, tlinewidgets.draw_two_roll_overlay)
    # Set clip as producer on hidden track and display current frame
    # from it.
    trim_limits = edit_data["trim_limits"]
    if edit_data["to_side_being_edited"]:
        clip = edit_data["to_clip"]
        clip_start = trim_limits["to_start"]
    else:
        clip = edit_data["from_clip"]
        clip_start = trim_limits["from_start"]
    if clip.media_type != appconsts.PATTERN_PRODUCER:
        current_sequence().display_trim_clip(clip.path, clip_start) # File producer
    else:
        current_sequence().display_trim_clip(None, clip_start, clip.create_data) # pattern producer
    PLAYER().seek_frame(edit_frame)
    updater.repaint_tline()
    return True
def _tworoll_init_failed_window():
    # Info dialog shown when two roll trim cannot be inited at the attempted position.
    primary_txt = _("Initializing TWO ROLL TRIM failed")
    secondary_txt = _("You are attempting TWO ROLL TRIM at a position in the timeline\nwhere it can't be performed.")
    dialogutils.info_message(primary_txt, secondary_txt, gui.editor_window.window)

def tworoll_trim_press(event, frame):
    """
    User presses mouse when in two roll mode.
    """
    # A press outside the edited track or the active area attempts a reinit.
    if not _pressed_on_edited_track(event.y):
        _attempt_reinit_tworoll(event, frame)
        return
    if not _pressed_on_two_roll_active_area(frame):
        _attempt_reinit_tworoll(event, frame)
        return
    global edit_data
    frame = _legalize_two_roll_trim(frame, edit_data["trim_limits"])
    edit_data["selected_frame"] = frame
    PLAYER().seek_frame(frame)

def _attempt_reinit_tworoll(event, frame):
    # Try to init a new two roll trim at press position; exit trim mode or
    # go into no-edit trim mode on failure.
    track = tlinewidgets.get_track(event.y)
    success = set_tworoll_mode(track, frame)
    if not success:
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_exit_mode_func(True) # further mouse events are handled at editevent.py
        else:
            set_no_edit_mode_func() # further mouse events are handled at editevent.py
    else:
        if not editorpersistance.prefs.quick_enter_trims:
            # new trim inited, editing non-active until release
            global mouse_disabled
            tlinewidgets.trim_mode_in_non_active_state = True
            gui.tline_canvas.widget.queue_draw()
            gui.editor_window.set_tline_cursor(editorstate.TWO_ROLL_TRIM_NO_EDIT)
            mouse_disabled = True
        else:
            # new trim inited, active immediately
            tworoll_trim_move(event.x, event.y, frame, None)
            gui.tline_canvas.widget.queue_draw()

def tworoll_trim_move(x, y, frame, state):
    """
    User moves mouse when in two roll mode.
    """
    # Mouse events are disabled while a freshly inited trim waits for release.
    if mouse_disabled:
        return
    global edit_data
    frame = _legalize_two_roll_trim(frame, edit_data["trim_limits"])
    edit_data["selected_frame"] = frame
    PLAYER().seek_frame(frame)

def tworoll_trim_release(x, y, frame, state):
    """
    User releases mouse when in two roll mode. Performs the roll edit.
    """
    global mouse_disabled
    if mouse_disabled == True:
        # we may have been in non active state because the clip being edited was changed
        gui.editor_window.set_cursor_to_mode()
        tlinewidgets.trim_mode_in_non_active_state = False
        gui.tline_canvas.widget.queue_draw()
        mouse_disabled = False
        return
    global edit_data
    frame = _legalize_two_roll_trim(frame, edit_data["trim_limits"])
    edit_data["selected_frame"] = frame
    _do_two_roll_edit(frame)
def tworoll_play_pressed():
current_sequence().hide_hidden_clips()
PLAYER().start_loop_playback(edit_data["edit_frame"], loop_half_length, edit_data["track_object"].get_length())
def tworoll_stop_pressed():
    # Stops loop playback; trim_looping_stopped is called when playback has stopped.
    PLAYER().stop_loop_playback(trim_looping_stopped)
def tworoll_prev_pressed():
    # Step the two roll cut one frame towards the timeline start.
    cut_frame = _legalize_two_roll_trim(edit_data["edit_frame"] - 1, edit_data["trim_limits"])
    _do_two_roll_edit(cut_frame)
def tworoll_next_pressed():
    # Step the two roll cut one frame towards the timeline end.
    cut_frame = _legalize_two_roll_trim(edit_data["edit_frame"] + 1, edit_data["trim_limits"])
    _do_two_roll_edit(cut_frame)
def _do_two_roll_edit(new_cut_frame):
    """
    Creates and executes a two roll trim edit action at new_cut_frame.
    Called from drag-release and next / prev button presses.
    """
    # Only do the edit if both clips around the cut exist.
    if edit_data["from_clip"] == None or edit_data["to_clip"] == None:
        return
    delta = new_cut_frame - edit_data["edit_frame"]
    data = {"track":edit_data["track_object"],
            "index":edit_data["index"],
            "from_clip":edit_data["from_clip"],
            "to_clip":edit_data["to_clip"],
            "delta":delta,
            "edit_done_callback":two_rolledit_done,
            "cut_frame":edit_data["edit_frame"],
            "to_side_being_edited":edit_data["to_side_being_edited"],
            "non_edit_side_blank":edit_data["non_edit_side_blank"],
            "first_do":True}
    action = edit.tworoll_trim_action(data)
    edit.do_gui_update = True
    action.do_edit()
def two_rolledit_done(was_redo, cut_frame, delta, track, to_side_being_edited):
    """
    Sets two roll playback to correct place after edit or redo or undo.
    Callback from edit action.
    """
    # cut_frame is where the cut was before the original edit, so a redo
    # must add the trim delta to land on the new cut position.
    frame = cut_frame + delta if was_redo else cut_frame
    # The calculated frame always reinits on the "to" side; step one back
    # to reinit on the "from" side if the edit was done from that side.
    if to_side_being_edited != True:
        frame = frame - 1
    if frame < 0:
        frame = 0
    # Seek and reinit.
    PLAYER().seek_frame(frame)
    set_tworoll_mode(track)
def two_roll_audio_sync_edit_done(cut_frame, delta, track, to_side_being_edited):
    """
    Sets two roll playback to correct place after an audio sync edit.
    Callback from edit action.
    """
    frame = cut_frame + delta
    # The calculated frame always reinits on the "to" side; step one back
    # to reinit on the "from" side if the edit was done from that side.
    if to_side_being_edited != True:
        frame = frame - 1
    frame = max(frame, 0)
    # Seek and reinit.
    PLAYER().seek_frame(frame)
    set_tworoll_mode(track)
def _legalize_two_roll_trim(frame, trim_limits):
    """
    Clamps a two roll trim selection into the legal edit area, updating
    tlinewidgets.trim_status accordingly. Returns the clamped frame.
    """
    first = trim_limits["both_start"]
    last = trim_limits["both_end"]
    if frame <= first:
        tlinewidgets.trim_status = appconsts.ON_FIRST_FRAME
        return first
    if frame >= last:
        tlinewidgets.trim_status = appconsts.ON_LAST_FRAME
        return last
    tlinewidgets.trim_status = appconsts.ON_BETWEEN_FRAME
    return frame
def _pressed_on_two_roll_active_area(frame):
    # A press is on the active area when the frame lies inside the
    # legal range of the currently edited clip pair.
    first, last = _get_two_roll_first_and_last()
    return first <= frame <= last
def _get_two_roll_first_and_last():
    """
    Returns (first, last) legal frames for the current two roll edit.
    """
    track = edit_data["track_object"]
    index = edit_data["index"]
    # First legal frame is one past the start of the preceding clip.
    first = track.clip_start(index - 1) + 1
    # Last legal frame is the end frame of the clip at the cut.
    end_clip = track.clips[index]
    last = track.clip_start(index) + end_clip.clip_out - end_clip.clip_in
    return (first, last)
#---------------------------------------- SLIDE ROLL TRIM EVENTS
def set_slide_mode(track, current_frame):
    """
    Inits slide trim mode on given track at current_frame.

    Returns True on success, False when mode could not be inited
    (press past track end, blank clip, or no clip at frame) and
    None when track is None.
    """
    if track == None:
        return None
    if current_frame > track.get_length():
        return False

    current_sequence().clear_hidden_track()

    view_frame, start_frame_being_viewed = _get_trim_edit(track, current_frame)

    # _get_trim_edit() gives first frame belonging to next clip if press closer to end frame of clip
    if not start_frame_being_viewed:
        view_frame = view_frame - 1

    try:
        _set_slide_mode_edit_data(track, view_frame)
    except Exception: # bug fix: was bare 'except:' which also swallowed SystemExit/KeyboardInterrupt
        return False

    if edit_data["clip"].is_blanck_clip:
        return False

    clip = edit_data["clip"]
    clip_start = edit_data["trim_limits"]["clip_start"]
    edit_data["start_frame_being_viewed"] = start_frame_being_viewed
    # Frame shown as "current" in the timeline: clip start when viewing
    # the start frame, clip end frame otherwise.
    fake_current_frame = clip_start
    if not start_frame_being_viewed:
        fake_current_frame = clip_start + clip.clip_out - clip.clip_in

    # Give timeline widget needed data
    tlinewidgets.set_edit_mode(edit_data, tlinewidgets.draw_slide_overlay)
    tlinewidgets.fake_current_frame = fake_current_frame

    # Set clip as producer on hidden track and display current frame from it.
    clip = edit_data["clip"]
    clip_start = 0 # we'll calculate the offset from actual position of clip on timeline to display the frame displayed after sliding
    if clip.media_type != appconsts.PATTERN_PRODUCER:
        current_sequence().display_trim_clip(clip.path, clip_start) # File producer
    else:
        current_sequence().display_trim_clip(None, clip_start, clip.create_data) # pattern producer

    # Show the media frame at whichever clip end is being viewed.
    if start_frame_being_viewed:
        PLAYER().seek_frame(clip.clip_in)
    else:
        PLAYER().seek_frame(clip.clip_out)

    updater.repaint_tline()
    return True
def _set_slide_mode_edit_data(track, edit_frame):
    """
    Sets slide mode edit data into module-global edit_data.
    NOTE(review): this is slide-mode specific despite the old docstring
    claiming it served both trim modes.
    May raise when no clip is at edit_frame; caller treats that as failed init.
    """
    index = current_sequence().get_clip_index(track, edit_frame)
    clip = track.clips[index]
    trim_limits = {}
    # Media frames available before clip in / after clip out.
    trim_limits["start_handle"] = clip.clip_in
    trim_limits["end_handle"] = clip.get_length() - clip.clip_out
    trim_limits["clip_start"] = track.clip_start(index)
    trim_limits["media_length"] = clip.get_length()
    global edit_data
    edit_data = {"track":track.id, # tlinewidgets.py uses this to get draw y
                 "track_object":track,
                 "index":index,
                 "trim_limits":trim_limits,
                 "mouse_delta":0,
                 "clip":clip}
def _attempt_reinit_slide(event, frame):
    # Try to restart a slide trim on the track under the mouse press.
    pressed_track = tlinewidgets.get_track(event.y)
    if not set_slide_mode(pressed_track, frame):
        # Init failed: hand mouse event handling back to editevent.py.
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_exit_mode_func(True)
        else:
            set_no_edit_mode_func()
        return
    if editorpersistance.prefs.quick_enter_trims:
        # New trim inited, editing is active immediately.
        global edit_data
        edit_data["press_start"] = frame
        slide_trim_move(event.x, event.y, frame, None)
        gui.tline_canvas.widget.queue_draw()
    else:
        # New trim inited, editing stays non-active until mouse release.
        global mouse_disabled
        gui.tline_canvas.widget.queue_draw()
        gui.editor_window.set_tline_cursor(editorstate.SLIDE_TRIM_NO_EDIT)
        tlinewidgets.trim_mode_in_non_active_state = True
        mouse_disabled = True
def slide_trim_press(event, frame):
    # Mouse press callback in slide trim mode.
    global edit_data
    edit_data["press_start"] = frame # reference frame for mouse delta computation
    # Press on another track: try to reinit the trim there.
    if not _pressed_on_edited_track(event.y):
        _attempt_reinit_slide(event, frame)
        return
    # Press past track end: exit or go to no-edit mode per user prefs.
    if frame > tlinewidgets.get_track(event.y).get_length():
        if editorpersistance.prefs.empty_click_exits_trims == True:
            set_exit_mode_func(True) # further mouse events are handled at editevent.py
        else:
            set_no_edit_mode_func() # further mouse events are handled at editevent.py
        return
    # Press outside edited clip on same track: try to reinit there.
    if not _pressed_on_slide_active_area(frame):
        _attempt_reinit_slide(event, frame)
        return
    display_frame = _update_slide_trim_for_mouse_frame(frame)
    PLAYER().seek_frame(display_frame)
def slide_trim_move(x, y, frame, state):
    # Mouse drag callback in slide trim mode; seeks the slid media frame.
    if mouse_disabled:
        return
    PLAYER().seek_frame(_update_slide_trim_for_mouse_frame(frame))
def slide_trim_release(x, y, frame, state):
    """
    Mouse release callback in slide trim mode: seeks the display frame
    and performs the slide edit.
    """
    global mouse_disabled
    if mouse_disabled == True:
        # we may have been in non active state because the clip being edited was changed
        gui.editor_window.set_cursor_to_mode()
        tlinewidgets.trim_mode_in_non_active_state = False
        gui.tline_canvas.widget.queue_draw()
        mouse_disabled = False
        return

    # Bug fix: the update + seek pair was executed twice with the same frame
    # (and a needless 'global edit_data' in between); once is enough.
    display_frame = _update_slide_trim_for_mouse_frame(frame)
    PLAYER().seek_frame(display_frame)
    _do_slide_edit()
def _update_slide_trim_for_mouse_frame(frame):
    """
    Updates edit_data["mouse_delta"] for the mouse being at frame and
    returns the media frame to display on the hidden track.
    """
    global edit_data
    clip = edit_data["clip"]
    mouse_delta = edit_data["press_start"] - frame
    # Make sure slided clip area stays inside available media:
    # compute how far each clip end would overshoot the media with this
    # delta and correct the delta by the larger overshoot.
    fix_diff_in = _legalize_slide(clip.clip_in + mouse_delta, clip)
    fix_diff_out = _legalize_slide(clip.clip_out + mouse_delta, clip)
    if fix_diff_in == 0 and fix_diff_out != 0:
        fix_diff = fix_diff_out
    elif fix_diff_in != 0 and fix_diff_out == 0:
        fix_diff = fix_diff_in
    elif fix_diff_in != 0 and fix_diff_out != 0:
        # Both ends overshoot; apply the bigger correction.
        if abs(fix_diff_in) > abs(fix_diff_out):
            fix_diff = fix_diff_in
        else:
            fix_diff = fix_diff_out
    else:
        fix_diff = 0
    edit_data["mouse_delta"] = mouse_delta - fix_diff
    # Get display frame on hidden track: the slid in or out frame,
    # depending on which clip end is being viewed.
    if edit_data["start_frame_being_viewed"]:
        display_frame = clip.clip_in + mouse_delta - fix_diff
    else:
        display_frame = clip.clip_out + mouse_delta - fix_diff
    return display_frame
def _pressed_on_slide_active_area(frame):
    # True when the pressed frame is inside the edited clip's timeline range.
    trim_limits = edit_data["trim_limits"]
    clip = edit_data["clip"]
    clip_start = trim_limits["clip_start"]
    clip_end = clip_start + clip.clip_out - clip.clip_in
    return clip_start <= frame < clip_end
def _legalize_slide(media_frame, clip):
if media_frame < 0:
return media_frame
if media_frame >= clip.get_length():
return media_frame - clip.get_length() - 1 # -1 out inclusive.
return 0
def _do_slide_edit():
    """
    Creates and executes a slide trim edit action from current edit_data.
    Called from drag-release and next / prev button presses.
    """
    data = dict(track=edit_data["track_object"],
                index=edit_data["index"],
                clip=edit_data["clip"],
                delta=edit_data["mouse_delta"],
                first_do_callback=_slide_trim_first_do_callback,
                start_frame_being_viewed=edit_data["start_frame_being_viewed"],
                first_do=True)
    action = edit.slide_trim_action(data)
    edit.do_gui_update = True
    action.do_edit()
def _slide_trim_first_do_callback(track, clip, index, start_frame_being_viewed):
    # Reinit slide mode on the viewed end of the slid clip after first do.
    if start_frame_being_viewed:
        reinit_frame = track.clip_start(index) + 1 # +1 because cut frame selects previous clip
    else:
        reinit_frame = track.clip_start(index) + clip.clip_out - clip.clip_in - 1
    set_slide_mode(track, reinit_frame)
def slide_play_pressed():
    # Starts loop playback around the viewed end of the slid clip.
    current_sequence().hide_hidden_clips()
    clip = edit_data["clip"]
    clip_start = edit_data["trim_limits"]["clip_start"]
    if edit_data["start_frame_being_viewed"]:
        offset = 1 # +1 because cut frame selects previous clip
    else:
        offset = clip.clip_out - clip.clip_in - 1
    frame = clip_start + offset
    edit_data["reinit_frame"] = frame
    PLAYER().start_loop_playback(frame, loop_half_length, edit_data["track_object"].get_length())
def slide_stop_pressed():
    # Stops loop playback; trim_looping_stopped is called when playback has stopped.
    PLAYER().stop_loop_playback(trim_looping_stopped)
def slide_prev_pressed():
    # Slide the clip media one frame backwards.
    global edit_data
    edit_data["mouse_delta"] = -1
    _do_slide_edit()
def slide_next_pressed():
    # Slide the clip media one frame forwards.
    global edit_data
    edit_data["mouse_delta"] = 1
    _do_slide_edit()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import cairo
import pygtk
pygtk.require('2.0');
import gtk
import math
from cairoarea import CairoDrawableArea
import editorpersistance
import gui
import respaths
# Gradient stops are (offset, r, g, b, a) tuples for cairo linear gradients.
# Default "glass" gradient painted over the button row.
BUTTONS_GRAD_STOPS = [ (1, 1, 1, 1, 0.2),
                       (0.8, 1, 1, 1, 0),
                       (0.51, 1, 1, 1, 0),
                       (0.50, 1, 1, 1, 0.25),
                       (0, 1, 1, 1, 0.4)]
# Gradient used for the currently pressed button in glass style.
BUTTONS_PRESSED_GRAD_STOPS = [(1, 0.7, 0.7, 0.7, 1),
                              (0, 0.5, 0.5, 0.5, 1)]
# Gradient used to stroke the rounded outline around the button group.
LINE_GRAD_STOPS = [ (1, 0.66, 0.66, 0.66, 1),
                    (0.95, 0.7, 0.7, 0.7, 1),
                    (0.65, 0.3, 0.3, 0.3, 1),
                    (0, 0.64, 0.64, 0.64, 1)]
# Semi-transparent overlay painted over buttons that are not sensitive.
BUTTON_NOT_SENSITIVE_GRAD_STOPS = [(1, 0.9, 0.9, 0.9, 0.7),
                                   (0, 0.9, 0.9, 0.9, 0.7)]
# Button corner radius is button height divided by this.
CORNER_DIVIDER = 5
# Sizes for the monitor (player) button row, in pixels.
MB_BUTTONS_WIDTH = 317
MB_BUTTONS_HEIGHT = 30
MB_BUTTON_HEIGHT = 22
MB_BUTTON_WIDTH = 35
MB_BUTTON_Y = 4
MB_BUTTON_IMAGE_Y = 6
M_PI = math.pi
# Hit test result for a press/move that is not on any button.
NO_HIT = -1
# Focus groups are used to test if one widget in the group of buttons widgets has keyboard focus
DEFAULT_FOCUS_GROUP = "default_focus_group"
focus_groups = {DEFAULT_FOCUS_GROUP:[]}
class AbstractGlassButtons:
    """
    Base class for a custom-drawn row of "glass" buttons rendered with cairo
    on a CairoDrawableArea. Extending classes fill self.icons / image_x /
    image_y / sensitive and implement the mouse event handlers and _draw().
    """
    def __init__(self, button_width, button_height, button_y, widget_width, widget_height):
        # Create widget and connect listeners
        self.widget = CairoDrawableArea(widget_width,
                                        widget_height,
                                        self._draw)
        self.widget.press_func = self._press_event
        self.widget.motion_notify_func = self._motion_notify_event
        self.widget.release_func = self._release_event
        self.pressed_callback_funcs = None # set later
        self.released_callback_funcs = None # set later
        self.pressed_button = -1 # index of currently pressed button, -1 when none
        self.degrees = M_PI / 180.0 # degrees-to-radians factor for cairo arcs
        self.button_width = button_width
        self.button_height = button_height
        self.button_y = button_y
        self.button_x = 0 # set when first allocation known by extending class
        self.icons = [] # one pixbuf per button
        self.image_x = [] # per-button icon x offset inside the button
        self.image_y = [] # per-button icon y position
        self.sensitive = [] # per-button sensitivity flags
        if editorpersistance.prefs.buttons_style == editorpersistance.GLASS_STYLE:
            self.glass_style = True
        else:
            self.glass_style = False
        # Dark theme comes with flat buttons
        self.dark_theme = False
        if editorpersistance.prefs.dark_theme == True:
            self.glass_style = False
            self.dark_theme = True
        self.draw_button_gradients = True # set False at object creation site to kill all gradients

    def _set_button_draw_consts(self, x, y, width, height):
        # Precomputes rounded-rectangle drawing constants used by _round_rect_path().
        aspect = 1.0
        corner_radius = height / CORNER_DIVIDER
        radius = corner_radius / aspect
        self._draw_consts = (x, y, width, height, aspect, corner_radius, radius)

    def set_sensitive(self, value):
        # Sets the same sensitivity for every button.
        self.sensitive = []
        for i in self.icons:
            self.sensitive.append(value)

    def _round_rect_path(self, cr):
        # Builds a rounded-rectangle path (four corner arcs) from _draw_consts.
        x, y, width, height, aspect, corner_radius, radius = self._draw_consts
        degrees = self.degrees
        cr.new_sub_path()
        cr.arc (x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)
        cr.arc (x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees)
        cr.arc (x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees)
        cr.arc (x + radius, y + radius, radius, 180 * degrees, 270 * degrees)
        cr.close_path ()

    # --- Event/draw stubs: extending classes must override these. ---
    def _press_event(self, event):
        print "_press_event not impl"

    def _motion_notify_event(self, x, y, state):
        print "_motion_notify_event not impl"

    def _release_event(self, event):
        print "_release_event not impl"

    def _draw(self, event, cr, allocation):
        print "_draw not impl"

    def _get_hit_code(self, x, y):
        # Maps a widget coordinate to the index of the sensitive button
        # under it, or NO_HIT when none.
        button_x = self.button_x
        for i in range(0, len(self.icons)):
            if ((x >= button_x) and (x <= button_x + self.button_width)
                and (y >= self.button_y) and (y <= self.button_y + self.button_height)):
                if self.sensitive[i] == True:
                    return i
            button_x += self.button_width
        return NO_HIT

    def _draw_buttons(self, cr, w, h):
        """
        Paints the whole button row: background, button-group fill,
        pressed-button highlight, icons, insensitive overlays, optional
        glass gradient, outline and vertical separator lines.
        """
        # Width of buttons group
        buttons_width = self.button_width * len(self.icons)
        # Draw bg
        cr.set_source_rgb(*gui.bg_color_tuple)
        cr.rectangle(0, 0, w, h)
        cr.fill()
        # Line width for all strokes
        cr.set_line_width(1.0)
        # bg, +0.5 aligns strokes to pixel centers
        self._set_button_draw_consts(self.button_x + 0.5, self.button_y + 0.5, buttons_width, self.button_height + 1.0)
        self._round_rect_path(cr)
        r, g, b = gui.bg_color_tuple
        if self.draw_button_gradients:
            if self.glass_style == True:
                cr.set_source_rgb(0.75, 0.75, 0.75)
                cr.fill_preserve()
            else:
                # Flat style: subtle vertical gradient derived from theme bg color.
                grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
                if self.dark_theme == False:
                    grad.add_color_stop_rgba(1, r - 0.1, g - 0.1, b - 0.1, 1)
                    grad.add_color_stop_rgba(0, r + 0.1, g + 0.1, b + 0.1, 1)
                else:
                    grad.add_color_stop_rgba(1, r + 0.04, g + 0.04, b + 0.04, 1)
                    grad.add_color_stop_rgba(0, r + 0.07, g + 0.07, b + 0.07, 1)
                cr.set_source(grad)
                cr.fill_preserve()
        # Pressed button gradient
        if self.pressed_button > -1:
            if self.draw_button_gradients:
                grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
                if self.glass_style == True:
                    for stop in BUTTONS_PRESSED_GRAD_STOPS:
                        grad.add_color_stop_rgba(*stop)
                else:
                    grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
                    grad.add_color_stop_rgba(1, r - 0.3, g - 0.3, b - 0.3, 1)
                    grad.add_color_stop_rgba(0, r - 0.1, g - 0.1, b - 0.1, 1)
            else:
                grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
                grad.add_color_stop_rgba(1, r - 0.3, g - 0.3, b - 0.3, 1)
                grad.add_color_stop_rgba(0, r - 0.3, g - 0.3, b - 0.3, 1)
            # Clip to the rounded group shape, then fill only the pressed button rect.
            cr.save()
            cr.set_source(grad)
            cr.clip()
            cr.rectangle(self.button_x + self.pressed_button * self.button_width, self.button_y, self.button_width, self.button_height)
            cr.fill()
            cr.restore()
        # Icons and sensitive gradient
        grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
        for stop in BUTTON_NOT_SENSITIVE_GRAD_STOPS:
            grad.add_color_stop_rgba(*stop)
        x = self.button_x
        for i in range(0, len(self.icons)):
            icon = self.icons[i]
            cr.set_source_pixbuf(icon, x + self.image_x[i], self.image_y[i])
            cr.paint()
            if self.sensitive[i] == False:
                # Dim insensitive button with semi-transparent overlay.
                cr.save()
                self._round_rect_path(cr)
                cr.set_source(grad)
                cr.clip()
                cr.rectangle(x, self.button_y, self.button_width, self.button_height)
                cr.fill()
                cr.restore()
            x += self.button_width
        if self.glass_style == True and self.draw_button_gradients:
            # Glass gradient
            self._round_rect_path(cr)
            grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
            for stop in BUTTONS_GRAD_STOPS:
                grad.add_color_stop_rgba(*stop)
            cr.set_source(grad)
            cr.fill()
        else:
            pass
        if self.dark_theme != True:
            # Round line
            grad = cairo.LinearGradient (self.button_x, self.button_y, self.button_x, self.button_y + self.button_height)
            for stop in LINE_GRAD_STOPS:
                grad.add_color_stop_rgba(*stop)
            cr.set_source(grad)
            self._set_button_draw_consts(self.button_x + 0.5, self.button_y + 0.5, buttons_width, self.button_height)
            self._round_rect_path(cr)
            cr.stroke()
        if self.dark_theme == True:
            # Flat dark theme: separators drawn in plain bg color.
            cr.set_source_rgb(*gui.bg_color_tuple)
        # Vert lines
        x = self.button_x
        for i in range(0, len(self.icons)):
            # NOTE(review): 'i < len(self.icons)' is always true here;
            # effectively only 'i > 0' matters (no line before first button).
            if (i > 0) and (i < len(self.icons)):
                cr.move_to(x + 0.5, self.button_y)
                cr.line_to(x + 0.5, self.button_y + self.button_height)
                cr.stroke()
            x += self.button_width
class PlayerButtons(AbstractGlassButtons):
    """
    The monitor player's button row: prev/next frame, play, stop,
    mark in/out, clear marks and to-mark seeks. Callbacks fire on PRESS.
    """
    def __init__(self):
        AbstractGlassButtons.__init__(self, MB_BUTTON_WIDTH, MB_BUTTON_HEIGHT, MB_BUTTON_Y, MB_BUTTONS_WIDTH, MB_BUTTONS_HEIGHT)
        IMG_PATH = respaths.IMAGE_PATH
        play_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "play_2_s.png")
        stop_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "stop_s.png")
        next_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "next_frame_s.png")
        prev_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "prev_frame_s.png")
        mark_in_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "mark_in_s.png")
        mark_out_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "mark_out_s.png")
        marks_clear_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "marks_clear_s.png")
        to_mark_in_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "to_mark_in_s.png")
        to_mark_out_icon = gtk.gdk.pixbuf_new_from_file(IMG_PATH + "to_mark_out_s.png")
        # Button order here defines the callback index order too.
        self.icons = [prev_icon, next_icon, play_icon, stop_icon,
                      mark_in_icon, mark_out_icon,
                      marks_clear_icon, to_mark_in_icon, to_mark_out_icon]
        # Per-icon x offsets to center differently sized images.
        self.image_x = [8, 10, 13, 13, 6, 14, 5, 10, 9]
        for i in range(0, len(self.icons)):
            self.image_y.append(MB_BUTTON_IMAGE_Y)
        self.pressed_callback_funcs = None # set using set_callbacks()
        self.set_sensitive(True)
        focus_groups[DEFAULT_FOCUS_GROUP].append(self.widget)

    def set_trim_sensitive_pattern(self):
        # In trim modes only transport buttons are active, mark buttons are not.
        self.sensitive = [True, True, True, True, False, False, False, False, False]
        self.widget.queue_draw()

    def set_normal_sensitive_pattern(self):
        self.set_sensitive(True)
        self.widget.queue_draw()

    # ------------------------------------------------------------- mouse events
    def _press_event(self, event):
        """
        Mouse button callback: fires the pressed button's callback immediately.
        """
        self.pressed_button = self._get_hit_code(event.x, event.y)
        if self.pressed_button >= 0 and self.pressed_button < len(self.icons):
            callback_func = self.pressed_callback_funcs[self.pressed_button] # index is set to match at editorwindow.py where callback func list is created
            callback_func()
        self.widget.queue_draw()

    def _motion_notify_event(self, x, y, state):
        """
        Mouse move callback: dragging off the pressed button releases it visually.
        """
        button_under = self._get_hit_code(x, y)
        if self.pressed_button != button_under: # pressed button is released
            self.pressed_button = NO_HIT
            self.widget.queue_draw()

    def _release_event(self, event):
        """
        Mouse release callback: clears pressed state (callback already fired on press).
        """
        self.pressed_button = -1
        self.widget.queue_draw()

    def set_callbacks(self, pressed_callback_funcs):
        # List of callables, one per button, in the same order as self.icons.
        self.pressed_callback_funcs = pressed_callback_funcs

    # ---------------------------------------------------------------- painting
    def _draw(self, event, cr, allocation):
        x, y, w, h = allocation
        self.allocation = allocation
        # Center the button group horizontally in the widget.
        mid_x = w / 2
        buttons_width = self.button_width * len(self.icons)
        self.button_x = mid_x - (buttons_width / 2)
        self._draw_buttons(cr, w, h)
class GlassButtonsGroup(AbstractGlassButtons):
    """
    A row of glass buttons built with add_button(); each button fires its
    callback on mouse RELEASE (or when the mouse is dragged off it).
    """
    def __init__(self, button_width, button_height, button_y, image_x_default, image_y_default, focus_group=DEFAULT_FOCUS_GROUP):
        AbstractGlassButtons.__init__(self, button_width, button_height, button_y, button_width, button_height)
        self.released_callback_funcs = []
        self.image_x_default = image_x_default
        self.image_y_default = image_y_default
        focus_groups[focus_group].append(self.widget)

    def add_button(self, pix_buf, release_callback):
        # Appends a button with the given icon and release callback, growing the widget.
        self.icons.append(pix_buf)
        self.released_callback_funcs.append(release_callback)
        self.image_x.append(self.image_x_default)
        self.image_y.append(self.image_y_default)
        self.sensitive.append(True)
        self.widget.set_pref_size(len(self.icons) * self.button_width + 2, self.button_height + 2)

    def _draw(self, event, cr, allocation):
        x, y, w, h = allocation
        self.allocation = allocation
        self.button_x = 0
        self._draw_buttons(cr, w, h)

    def _press_event(self, event):
        self.pressed_button = self._get_hit_code(event.x, event.y)
        self.widget.queue_draw()

    def _motion_notify_event(self, x, y, state):
        button_under = self._get_hit_code(x, y)
        if self.pressed_button != button_under: # pressed button is released if mouse moves from over it
            # Bug fix: condition was 'self.pressed_button > 0', which skipped
            # firing the release callback for the first button (index 0) when
            # the mouse was dragged off it; use >= 0 to match _release_event.
            if self.pressed_button >= 0 and self.pressed_button < len(self.icons):
                release_func = self.released_callback_funcs[self.pressed_button]
                release_func()
            self.pressed_button = NO_HIT
            self.widget.queue_draw()

    def _release_event(self, event):
        if self.pressed_button >= 0 and self.pressed_button < len(self.icons):
            release_func = self.released_callback_funcs[self.pressed_button]
            release_func()
        self.pressed_button = -1
        self.widget.queue_draw()
class GlassButtonsToggleGroup(GlassButtonsGroup):
    """
    Glass buttons group where exactly one button stays pressed,
    radio-button style. State changes on press; drags and releases are ignored.
    """
    def set_pressed_button(self, pressed_button_index, fire_clicked_cb=False):
        self.pressed_button = pressed_button_index
        if fire_clicked_cb == True:
            self._fire_pressed_button()
        self.widget.queue_draw()

    def _fire_pressed_button(self):
        # Invoke the callback registered for the currently pressed button.
        self.released_callback_funcs[self.pressed_button]()

    def _press_event(self, event):
        hit = self._get_hit_code(event.x, event.y)
        if hit == NO_HIT:
            return
        if hit != self.pressed_button:
            self.pressed_button = hit
            self._fire_pressed_button()
            self.widget.queue_draw()

    def _motion_notify_event(self, x, y, state):
        # Toggle group ignores drags.
        pass

    def _release_event(self, event):
        # Toggle group ignores releases; state changed already on press.
        pass
def focus_group_has_focus(focus_group):
    """
    Returns True if any widget in the given focus group has keyboard focus.
    """
    return any(widget.is_focus() for widget in focus_groups[focus_group])
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This module handles the less central actions inited by user from menu.
"""
import pygtk
pygtk.require('2.0');
import gtk
import platform
import threading
import webbrowser
import time
import appconsts
import dialogs
import dialogutils
from editorstate import PROJECT
from editorstate import PLAYER
from editorstate import current_sequence
import editorstate
import gui
import jackaudio
import mltenv
import mltfilters
import mlttransitions
import projectdata
import patternproducer
import profilesmanager
import renderconsumer
import respaths
# Module-level reference to the profiles manager dialog, set by profiles_manager().
profile_manager_dialog = None
# ---------------------------------------------- recreate icons
class RecreateIconsThread(threading.Thread):
    """
    Background thread that regenerates thumbnail icons for project media
    files that currently use the fallback icon, showing a progress dialog.
    """
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # NEEDS FIXING FOR COMPACT PROJECTS
        # All GUI access from this worker thread is bracketed with
        # gtk.gdk.threads_enter() / threads_leave().
        gtk.gdk.threads_enter()
        recreate_progress_window = dialogs.recreate_icons_progress_dialog()
        time.sleep(0.1) # give the dialog time to show
        gtk.gdk.threads_leave()
        no_icon_path = respaths.IMAGE_PATH + projectdata.FALLBACK_THUMB
        loaded = 0
        for key in PROJECT().media_files.iterkeys():
            media_file = PROJECT().media_files[key]
            gtk.gdk.threads_enter()
            recreate_progress_window.info.set_text(media_file.name)
            gtk.gdk.threads_leave()
            # Only real file-based media gets a recreated thumbnail;
            # pattern producers and color clips keep their icons.
            if ((not isinstance(media_file, patternproducer.AbstractBinClip))
                and (not isinstance(media_file, projectdata.BinColorClip))):
                if media_file.icon_path == no_icon_path:
                    if media_file.type == appconsts.AUDIO:
                        icon_path = respaths.IMAGE_PATH + "audio_file.png"
                    else:
                        (icon_path, length) = projectdata.thumbnailer.write_image(media_file.path)
                    media_file.icon_path = icon_path
                    media_file.create_icon()
            loaded = loaded + 1
            gtk.gdk.threads_enter()
            loaded_frac = float(loaded) / float(len(PROJECT().media_files))
            recreate_progress_window.progress_bar.set_fraction(loaded_frac)
            time.sleep(0.01)
            gtk.gdk.threads_leave()
        # Update editor gui
        gtk.gdk.threads_enter()
        recreate_progress_window.destroy()
        time.sleep(0.3)
        gtk.gdk.threads_leave()
        gtk.gdk.threads_enter()
        gui.media_list_view.fill_data_model()
        gui.bin_list_view.fill_data_model()
        gui.enable_save()
        gtk.gdk.threads_leave()
def recreate_media_file_icons():
    # Launches icon recreation in a background thread.
    recreate_thread = RecreateIconsThread()
    recreate_thread.start()
def show_project_info():
    # Opens the project info dialog.
    dialogs.project_info_dialog(gui.editor_window.window, _show_project_info_callback)
def _show_project_info_callback(dialog, response_id):
    # Info dialog has no actions; just close it on any response.
    dialog.destroy()
# ------------------------------------------------------ help menu
def about():
    # Opens the About dialog.
    dialogs.about_dialog(gui.editor_window)
def environment():
    # Opens the runtime environment dialog; write_env_data saves the report.
    dialogs.environment_dialog(gui.editor_window, write_env_data)
# ----------------------------------------------------- environment data
def write_env_data():
    # Opens a file chooser for saving the environment report.
    dialogs.save_env_data_dialog(write_out_env_data_cb)
def write_out_env_data_cb(dialog, response_id):
    """
    Callback for the environment data save dialog.

    Writes a plain text report of the runtime environment (app/library
    versions, platform, MLT capabilities) to the selected file.
    The dialog is destroyed in all cases.
    """
    if response_id == gtk.RESPONSE_ACCEPT:
        filenames = dialog.get_filenames()
        file_path = filenames[0]

        # Build env data string list
        str_list = []
        str_list.append("FLOWBLADE RUNTIME ENVIRONMENT\n") # bug fix: was misspelled "ENVIROMNMENT"
        str_list.append("------------------------------\n")
        str_list.append("\n")
        str_list.append("APPLICATION AND LIBRARIES\n")
        str_list.append("-------------------------\n")
        str_list.append("Application version: " + editorstate.appversion + "\n")
        if editorstate.app_running_from == editorstate.RUNNING_FROM_INSTALLATION:
            run_type = "INSTALLATION"
        else:
            run_type = "DEVELOPER VERSION"
        str_list.append("Application running from: " + run_type + "\n")
        str_list.append("MLT version: " + str(editorstate.mlt_version) + "\n")
        try:
            major, minor, rev = editorstate.gtk_version
            gtk_ver = str(major) + "." + str(minor) + "." + str(rev)
        except Exception: # bug fix: was a bare 'except:'
            gtk_ver = str(editorstate.gtk_version)
        str_list.append("GTK VERSION: " + gtk_ver + "\n")
        str_list.append("SCREEN_HEIGHT: " + str(editorstate.SCREEN_HEIGHT) + "\n")
        str_list.append("\n")
        str_list.append("PLATFORM\n")
        str_list.append("--------\n")
        str_list.append(platform.platform())
        str_list.append("\n")
        str_list.append("\n")
        # Sorted listings of the detected MLT environment capabilities.
        str_list.append("FORMATS\n")
        str_list.append("-------\n")
        for f in sorted(mltenv.formats):
            str_list.append(f + "\n")
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("VIDEO_CODECS\n")
        str_list.append("------------\n")
        for vc in sorted(mltenv.vcodecs):
            str_list.append(vc + "\n")
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("AUDIO_CODECS\n")
        str_list.append("------------\n")
        for ac in sorted(mltenv.acodecs):
            str_list.append(ac + "\n")
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("MLT SERVICES\n")
        str_list.append("------------\n")
        for s in sorted(mltenv.services):
            str_list.append(s + "\n")
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("MLT TRANSITIONS\n")
        str_list.append("---------------\n")
        for t in sorted(mltenv.transitions):
            str_list.append(t + "\n")
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("ENCODING OPTIONS\n")
        str_list.append("----------------\n")
        enc_ops = renderconsumer.encoding_options + renderconsumer.not_supported_encoding_options
        for e_opt in enc_ops:
            if e_opt.supported:
                msg = e_opt.name + " AVAILABLE\n"
            else:
                msg = e_opt.name + " NOT AVAILABLE, " + e_opt.err_msg + " MISSING\n"
            str_list.append(msg)
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("MISSING FILTERS\n")
        str_list.append("---------------\n")
        for f in mltfilters.not_found_filters:
            msg = "mlt.Filter " + f.mlt_service_id + " FOR FILTER " + f.name + " NOT FOUND\n"
            str_list.append(msg)
        str_list.append("\n")
        str_list.append("\n")
        str_list.append("MISSING TRANSITIONS\n")
        str_list.append("---------------\n")
        for t in mlttransitions.not_found_transitions:
            msg = "mlt.Transition " + t.mlt_service_id + " FOR TRANSITION " + t.name + " NOT FOUND\n"
            str_list.append(msg)

        # Write out data; bug fix: the file is now closed even if write fails.
        env_text = ''.join(str_list)
        env_file = open(file_path, "w")
        try:
            env_file.write(env_text)
        finally:
            env_file.close()

    # Destroy once, in both accept and cancel cases.
    dialog.destroy()
def quick_reference():
try:
url = "file://" + respaths.HELP_DOC
print url
#webbrowser.open('http://code.google.com/p/flowblade/wiki/FlowbladeReference')
webbrowser.open(url)
except:
dialogutils.info_message(_("Help page not found!"), _("Unfortunately the webresource containing help information\nfor this application was not found."), None)
def profiles_manager():
    # Opens the profiles manager dialog and keeps a module-level reference to it.
    global profile_manager_dialog
    profile_manager_dialog = profilesmanager.profiles_manager_dialog()
def edit_watermark():
    # Opens the watermark editing dialog.
    dialogs.watermark_dialog(_watermark_add_callback, _watermark_remove_callback)
def _watermark_add_callback(button, widgets):
    # Opens a file chooser for selecting the watermark image file.
    dialogs.watermark_file_dialog(_watermark_file_select_callback, widgets)
def _watermark_file_select_callback(dialog, response_id, widgets):
    # Applies the selected watermark file to the current sequence and
    # updates the watermark dialog's widgets accordingly.
    add_button, remove_button, file_path_value_label = widgets
    if response_id == gtk.RESPONSE_ACCEPT:
        selected_path = dialog.get_filenames()[0]
        current_sequence().add_watermark(selected_path)
        add_button.set_sensitive(False)
        remove_button.set_sensitive(True)
        file_path_value_label.set_text(selected_path)
    dialog.destroy()
def _watermark_remove_callback(button, widgets):
    # Removes the watermark from the current sequence and resets dialog widgets.
    add_button, remove_button, file_path_value_label = widgets
    current_sequence().remove_watermark()
    add_button.set_sensitive(True)
    remove_button.set_sensitive(False)
    file_path_value_label.set_text("Not Set")
def jack_output_managing():
    # Opens the JACK audio manager dialog.
    dialog = jackaudio.JackAudioManagerDialog()
    #PLAYER().jack_output_on()
| Python |
#
# This file marks the directory as a Python package.
#
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains absolute paths to various resources.
"""
# Absolute resource paths. All values are None until set_paths() is called
# with the application root directory at startup.
ROOT_PATH = None               # Application root directory.
BLACK_IMAGE_PATH = None        # Black placeholder image file.
IMAGE_PATH = None              # Icon/image resources directory.
PROFILE_PATH = None            # MLT profile files directory.
PREFS_PATH = None              # Preferences directory.
WIPE_RESOURCES_PATH = None     # Wipe luma resource files directory.
FILTERS_XML_DOC = None         # Filters definition XML file.
COMPOSITORS_XML_DOC = None     # Compositors definition XML file.
REPLACEMENTS_XML_DOC = None    # Filter replacement rules XML file.
HELP_DOC = None                # HTML help document.
GPL_3_DOC = None               # GPL 3 license text file.
TRANSLATIONS_DOC = None        # Translations info document.
LOCALE_PATH = None             # Translation locale directory.
ROOT_PARENT = None             # Parent directory of the application root.
PATTERN_PRODUCER_PATH = None   # Pattern producer resources directory.
LAUNCH_DIR = None              # Launch scripts directory.
def set_paths(root_path):
    """
    Initialize all module level resource paths from the application root
    directory. Must be called once at application startup before any of
    the module globals are read.
    """
    global ROOT_PATH, IMAGE_PATH, THUMBNAIL_PATH, PROFILE_PATH,\
    BLACK_IMAGE_PATH, FILTERS_XML_DOC, COMPOSITORS_XML_DOC, \
    WIPE_RESOURCES_PATH, PREFS_PATH, HELP_DOC, LOCALE_PATH, \
    GPL_3_DOC, ROOT_PARENT, PATTERN_PRODUCER_PATH, TRANSLATIONS_DOC, \
    LAUNCH_DIR, REPLACEMENTS_XML_DOC #, PROXY_PROFILE_PATH
    ROOT_PATH = root_path
    IMAGE_PATH = root_path + "/res/img/"
    WIPE_RESOURCES_PATH = root_path + "/res/filters/wipes/"
    PROFILE_PATH = root_path + "/res/profiles/"
    BLACK_IMAGE_PATH = root_path + "/res/img/black.jpg"
    FILTERS_XML_DOC = root_path + "/res/filters/filters.xml"
    COMPOSITORS_XML_DOC = root_path + "/res/filters/compositors.xml"
    REPLACEMENTS_XML_DOC = root_path + "/res/filters/replace.xml"
    PREFS_PATH = root_path + "/res/prefs/"
    HELP_DOC = root_path + "/res/help/help.html"
    LOCALE_PATH = root_path + "/locale/"
    GPL_3_DOC = root_path + "/res/help/gpl3"
    TRANSLATIONS_DOC = root_path + "/res/help/translations"
    # Bug fix: str.strip("Flowblade") removes any run of the *characters*
    # F,l,o,w,b,a,d,e from both ends, not the substring, so e.g. a relative
    # root like "dev/Flowblade" was over-stripped to "ev/". Slice the
    # trailing directory name off instead.
    if root_path.endswith("Flowblade"):
        ROOT_PARENT = root_path[:-len("Flowblade")]
    else:
        ROOT_PARENT = root_path
    PATTERN_PRODUCER_PATH = root_path + "/res/patternproducer/"
    LAUNCH_DIR = root_path + "/launch/"
def apply_dark_theme():
    # The dark theme ships an alternative icon set; repoint IMAGE_PATH to it.
    # Must be called after set_paths() has filled in ROOT_PATH.
    global IMAGE_PATH
    IMAGE_PATH = ROOT_PATH + "/res/darktheme/"
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2014 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import math
import cairo
from cairoarea import CairoDrawableArea
import editorpersistance
from editorstate import PLAYER
import gui
import guiutils
import guicomponents
import glassbuttons
import lutfilter
import respaths
import viewgeom
# Color correction band identifiers.
SHADOW = 0
MID = 1
HI = 2
# Returned by band hit testing when no control point was hit.
NO_HIT = 99
# Selection cursor styles used by ColorBox.
SELECT_CIRCLE = 0
SELECT_LINE = 1
# Control point colors, RGB tuples in 0-1 range.
ACTIVE_RING_COLOR = (0.0, 0.0, 0.0)
DEACTIVE_RING_COLOR = (0.6, 0.6, 0.6)
ACTIVE_SHADOW_COLOR = (0.15, 0.15, 0.15)
ACTIVE_MID_COLOR = (0.5, 0.5, 0.5)
ACTIVE_HI_COLOR = (1.0, 1.0, 1.0)
DEACTIVE_SHADOW_COLOR = (0.6, 0.6, 0.6)
DEACTIVE_MID_COLOR = (0.7, 0.7, 0.7)
DEACTIVE_HI_COLOR = (0.85, 0.85, 0.85)
# Box editor background and grid line colors.
BOX_BG_COLOR = (0.8, 0.8, 0.8)
BOX_LINE_COLOR = (0.4, 0.4, 0.4)
# Curve colors for the curves editor (value curve and R/G/B channel curves).
CURVE_COLOR = (0, 0, 0)
R_CURVE_COLOR = (0.78, 0, 0)
G_CURVE_COLOR = (0, 0.75, 0)
B_CURVE_COLOR = (0, 0, 0.8)
# Gradient stops as (offset, r, g, b, a) tuples for
# cairo.Gradient.add_color_stop_rgba(). These build the hue spectrum.
RED_STOP = (0, 1, 0, 0, 1)
YELLOW_STOP = (1.0/6.0, 1, 1, 0, 1)
GREEN_STOP = (2.0/6.0, 0, 1, 0, 1)
CYAN_STOP = (3.0/6.0, 0, 1, 1, 1)
BLUE_STOP = (4.0/6.0, 0, 0, 1, 1)
MAGENTA_STOP = (5.0/6.0, 1, 0, 1, 1)
RED_STOP_END = (1, 1, 0, 0, 1)
# Grey overlay gradients for the saturation axis.
GREY_GRAD_1 = (1, 0.4, 0.4, 0.4, 1)
GREY_GRAD_2 = (0, 0.4, 0.4, 0.4, 0)
MID_GREY_GRAD_1 = (1, 0.3, 0.3, 0.3, 0)
MID_GREY_GRAD_2 = (0.5, 0.3, 0.3, 0.3, 1)
MID_GREY_GRAD_3 = (0, 0.3, 0.3, 0.3, 0)
# Gradients for the circular control point rendering.
CIRCLE_GRAD_1 = (1, 0.3, 0.3, 0.3, 1)
CIRCLE_GRAD_2 = (0, 0.8, 0.8, 0.8, 1)
FX_GRAD_1 = (0, 1.0, 1.0, 1.0, 0.4)
FX_GRAD_2 = (1, 0.3, 0.3, 0.3, 0.4)
def _draw_select_circle(cr, x, y, main_color, radius, small_radius, pad, x_off=0, y_off=0):
    """
    Draw a circular control point: a gradient outer ring, an inner circle
    filled with main_color under a glass-like gradient, and a crosshair.

    x, y is the top-left of the circle area before padding; pad offsets the
    circle center. x_off, y_off shift only the crosshair lines.
    NOTE(review): the gradients use unshifted x, y while the circles are
    drawn at (x + pad, y + pad) — presumably intentional, but verify.
    """
    degrees = math.pi / 180.0
    # Outer ring: vertical dark-to-light gradient disc of full radius.
    grad = cairo.LinearGradient (x, y, x, y + 2 * radius)
    grad.add_color_stop_rgba(*CIRCLE_GRAD_1)
    grad.add_color_stop_rgba(*CIRCLE_GRAD_2)
    cr.set_source(grad)
    cr.move_to(x + pad, y + pad)
    cr.arc (x + pad, y + pad, radius, 0.0 * degrees, 360.0 * degrees)
    cr.fill()
    # Inner circle in the caller-given color.
    cr.set_source_rgb(*main_color)
    cr.move_to(x + pad, y + pad)
    cr.arc (x + pad, y + pad, small_radius, 0.0 * degrees, 360.0 * degrees)
    cr.fill()
    # Translucent highlight gradient on top of the inner circle.
    grad = cairo.LinearGradient (x, y, x, y + 2 * radius)
    grad.add_color_stop_rgba(*FX_GRAD_1)
    grad.add_color_stop_rgba(*FX_GRAD_2)
    cr.set_source(grad)
    cr.move_to(x + pad, y + pad)
    cr.arc (x + pad, y + pad, small_radius, 0.0 * degrees, 360.0 * degrees)
    cr.fill()
    # Crosshair: dark lines offset -0.5px and light lines offset +0.5px
    # give an embossed look.
    x = x + x_off
    y = y + y_off
    cr.set_source_rgb(0.4,0.4,0.4)
    cr.set_line_width(1.0)
    cr.move_to(x + radius - 0.5, y)
    cr.line_to(x + radius - 0.5, y + 2 * radius)
    cr.stroke()
    cr.set_source_rgb(0.4,0.4,0.4)
    cr.set_line_width(1.0)
    cr.move_to(x, y + radius - 0.5)
    cr.line_to(x + 2 * radius, y + radius - 0.5)
    cr.stroke()
    cr.set_source_rgb(0.6,0.6,0.6)
    cr.move_to(x, y + radius + 0.5)
    cr.line_to(x + radius * 2.0, y + radius + 0.5)
    cr.stroke()
    cr.set_source_rgb(0.6,0.6,0.6)
    cr.move_to(x + radius + 0.5, y)
    cr.line_to(x + radius + 0.5, y + 2 * radius)
    cr.stroke()
def _draw_select_line(cr, x, y):
    """
    Draw a vertical line cursor centered on x; y is the bottom edge of the
    selection area (the line is drawn 19px upward from it, 22px tall).
    """
    height = 22
    y = y - 19
    # Light grey 4px-wide body.
    cr.set_source_rgb(0.7,0.7,0.7)
    cr.rectangle(x - 2.0, y, 4, height)
    cr.fill()
    # Dark left edge, light right edge for an embossed look.
    cr.set_source_rgb(0.3,0.3,0.3)
    cr.set_line_width(1.0)
    cr.move_to(x - 0.5, y)
    cr.line_to(x - 0.5, y + height)
    cr.stroke()
    cr.set_source_rgb(0.95,0.95,0.95)
    cr.move_to(x + 0.5, y)
    cr.line_to(x + 0.5, y + height)
    cr.stroke()
def _draw_cursor_indicator(cr, x, y, radius):
    """Draw a translucent ring highlighting the active edit cursor."""
    degrees = math.pi / 180.0
    pad = radius
    cr.set_source_rgba(0.9, 0.9, 0.9, 0.6)
    cr.set_line_width(3.0)
    cr.arc (x + pad, y + pad, radius, 0.0 * degrees, 360.0 * degrees)
    cr.stroke()
class ColorBox:
    """
    A 2D hue/saturation selection widget drawn with cairo.

    Hue maps to the x axis and saturation to the y axis; both are kept in
    normalized 0-1 range. edit_listener() is called after every edit.
    """
    def __init__(self, edit_listener, width=260, height=260):
        self.W = width
        self.H = height
        self.widget = CairoDrawableArea(self.W,
                                        self.H,
                                        self._draw)
        self.widget.press_func = self._press_event
        self.widget.motion_notify_func = self._motion_notify_event
        self.widget.release_func = self._release_event
        self.X_PAD = 12      # horizontal padding around the color area
        self.Y_PAD = 12      # vertical padding around the color area
        self.CIRCLE_HALF = 8 # half size of the circle selection cursor
        self.cursor_x = self.X_PAD
        self.cursor_y = self.H - self.Y_PAD
        self.edit_listener = edit_listener
        self.hue = 0.0
        self.saturation = 0.0
        self.draw_saturation_gradient = True
        self.selection_cursor = SELECT_CIRCLE

    def get_hue_saturation(self):
        """Return current (hue, saturation), both in 0-1 range."""
        return (self.hue, self.saturation)

    def _save_values(self):
        # Convert cursor pixel position inside the padded box to 0-1 values.
        # Saturation grows upward, so the y axis is flipped.
        self.hue = float((self.cursor_x - self.X_PAD)) / float((self.W - 2 * self.X_PAD))
        self.saturation = float(abs(self.cursor_y - self.H + self.Y_PAD)) / float((self.H - 2 * self.Y_PAD))

    def set_cursor(self, hue, saturation):
        """Position the edit cursor from normalized hue/saturation values."""
        self.cursor_x = self._x_for_hue(hue)
        self.cursor_y = self._y_for_saturation(saturation)
        self._save_values()

    def _x_for_hue(self, hue):
        return self.X_PAD + hue * (self.W - self.X_PAD * 2)

    def _y_for_saturation(self, saturation):
        return self.Y_PAD + (1.0 - saturation) * (self.H - self.Y_PAD *2)

    def _handle_cursor_edit(self, x, y):
        # Shared edit path for press/motion/release (previously triplicated):
        # clamp the point into the editable area, store values, notify, repaint.
        self.cursor_x, self.cursor_y = self._get_legal_point(x, y)
        self._save_values()
        self.edit_listener()
        self.widget.queue_draw()

    def _press_event(self, event):
        self._handle_cursor_edit(event.x, event.y)

    def _motion_notify_event(self, x, y, state):
        self._handle_cursor_edit(x, y)

    def _release_event(self, event):
        self._handle_cursor_edit(event.x, event.y)

    def _get_legal_point(self, x, y):
        # Clamp a widget coordinate into the padded editable area.
        if x < self.X_PAD:
            x = self.X_PAD
        elif x > self.W - self.X_PAD:
            x = self.W - self.X_PAD
        if y < self.Y_PAD:
            y = self.Y_PAD
        elif y > self.H - self.Y_PAD:
            y = self.H - self.Y_PAD
        return (x, y)

    def _draw(self, event, cr, allocation):
        """
        Callback for repaint from CairoDrawableArea.
        We get cairo context and allocation.
        """
        x, y, w, h = allocation
        # Draw bg
        cr.set_source_rgb(*(gui.bg_color_tuple))
        cr.rectangle(0, 0, w, h)
        cr.fill()
        x_in = self.X_PAD
        x_out = self.W - self.X_PAD
        y_in = self.Y_PAD
        y_out = self.H - self.Y_PAD
        # Horizontal hue spectrum gradient.
        grad = cairo.LinearGradient (x_in, 0, x_out, 0)
        grad.add_color_stop_rgba(*RED_STOP)
        grad.add_color_stop_rgba(*YELLOW_STOP)
        grad.add_color_stop_rgba(*GREEN_STOP)
        grad.add_color_stop_rgba(*CYAN_STOP)
        grad.add_color_stop_rgba(*MAGENTA_STOP)
        grad.add_color_stop_rgba(*RED_STOP_END)
        cr.set_source(grad)
        cr.rectangle(self.X_PAD, self.Y_PAD, x_out - x_in, y_out - y_in)
        cr.fill()
        # Vertical grey overlay that desaturates toward the bottom.
        if self.draw_saturation_gradient == True:
            grey_grad = cairo.LinearGradient (0, y_in, 0, y_out)
            grey_grad.add_color_stop_rgba(*GREY_GRAD_1)
            grey_grad.add_color_stop_rgba(*GREY_GRAD_2)
            cr.set_source(grey_grad)
            cr.rectangle(self.X_PAD, self.Y_PAD, x_out - x_in, y_out - y_in)
            cr.fill()
        # Selection cursor.
        if self.selection_cursor == SELECT_CIRCLE:
            _draw_select_circle(cr, self.cursor_x - self.CIRCLE_HALF, self.cursor_y - self.CIRCLE_HALF, (1, 1, 1), 8, 6, 8)
        else:
            _draw_select_line(cr, self.cursor_x, y_out)
class ThreeBandColorBox(ColorBox):
    """
    ColorBox with three independent hue/saturation control points, one per
    correction band (shadows, midtones, highlights).

    The active band's control point follows the edit cursor; pressing on
    another band's control point switches the active band first and reports
    the change through band_change_listerner(band).
    """
    def __init__(self, edit_listener, band_change_listerner, width=260, height=260):
        ColorBox.__init__(self, edit_listener, width, height)
        self.band = SHADOW
        # All three band control points start at the initial cursor position.
        self.shadow_x = self.cursor_x
        self.shadow_y = self.cursor_y
        self.mid_x = self.cursor_x
        self.mid_y = self.cursor_y
        self.hi_x = self.cursor_x
        self.hi_y = self.cursor_y
        self.band_change_listerner = band_change_listerner

    def set_cursors(self, s_h, s_s, m_h, m_s, h_h, h_s):
        """Position all three band control points from hue/saturation values."""
        self.shadow_x = self._x_for_hue(s_h)
        self.shadow_y = self._y_for_saturation(s_s)
        self.mid_x = self._x_for_hue(m_h)
        self.mid_y = self._y_for_saturation(m_s)
        self.hi_x = self._x_for_hue(h_h)
        self.hi_y = self._y_for_saturation(h_s)

    def _press_event(self, event):
        # If the press hit a non-active band's control point, switch bands
        # and notify the listener before applying the edit.
        self.cursor_x, self.cursor_y = self._get_legal_point(event.x, event.y)
        hit_value = self._check_band_hit(self.cursor_x, self.cursor_y)
        if hit_value != self.band and hit_value != NO_HIT:
            self.band = hit_value
            self.band_change_listerner(self.band)
        self._save_values()
        self.edit_listener()
        self.widget.queue_draw()

    # NOTE: _motion_notify_event and _release_event are inherited from
    # ColorBox; the byte-identical re-implementations that previously lived
    # here were redundant and have been removed.

    def _check_band_hit(self, x, y):
        """Return the band whose control point contains (x, y), or NO_HIT."""
        if self._control_point_hit(x, y, self.shadow_x, self.shadow_y):
            return SHADOW
        elif self._control_point_hit(x, y, self.mid_x, self.mid_y):
            return MID
        elif self._control_point_hit(x, y, self.hi_x, self.hi_y):
            return HI
        else:
            return NO_HIT

    def _control_point_hit(self, x, y, cx, cy):
        # Square hit box of CIRCLE_HALF pixels around the control point center.
        if x >= cx - self.CIRCLE_HALF and x <= cx + self.CIRCLE_HALF:
            if y >= cy - self.CIRCLE_HALF and y <= cy + self.CIRCLE_HALF:
                return True
        return False

    def _save_values(self):
        # Same hue/saturation conversion as the base class, plus moving the
        # active band's control point to the cursor.
        self.hue = float((self.cursor_x - self.X_PAD)) / float((self.W - 2 * self.X_PAD))
        self.saturation = float(abs(self.cursor_y - self.H + self.Y_PAD)) / float((self.H - 2 * self.Y_PAD))
        if self.band == SHADOW:
            self.shadow_x = self.cursor_x
            self.shadow_y = self.cursor_y
        elif self.band == MID:
            self.mid_x = self.cursor_x
            self.mid_y = self.cursor_y
        else:
            self.hi_x = self.cursor_x
            self.hi_y = self.cursor_y

    def _draw(self, event, cr, allocation):
        """
        Callback for repaint from CairoDrawableArea.
        We get cairo context and allocation.
        """
        x, y, w, h = allocation
        # Draw bg
        cr.set_source_rgb(*(gui.bg_color_tuple))
        cr.rectangle(0, 0, w, h)
        cr.fill()
        x_in = self.X_PAD
        x_out = self.W - self.X_PAD
        y_in = self.Y_PAD
        y_out = self.H - self.Y_PAD
        # Horizontal hue spectrum gradient.
        grad = cairo.LinearGradient (x_in, 0, x_out, 0)
        grad.add_color_stop_rgba(*RED_STOP)
        grad.add_color_stop_rgba(*YELLOW_STOP)
        grad.add_color_stop_rgba(*GREEN_STOP)
        grad.add_color_stop_rgba(*CYAN_STOP)
        grad.add_color_stop_rgba(*MAGENTA_STOP)
        grad.add_color_stop_rgba(*RED_STOP_END)
        cr.set_source(grad)
        cr.rectangle(self.X_PAD, self.Y_PAD, x_out - x_in, y_out - y_in)
        cr.fill()
        # Grey overlay strongest at the vertical middle (zero correction).
        grey_grad = cairo.LinearGradient (0, y_in, 0, y_out)
        grey_grad.add_color_stop_rgba(*MID_GREY_GRAD_1)
        grey_grad.add_color_stop_rgba(*MID_GREY_GRAD_2)
        grey_grad.add_color_stop_rgba(*MID_GREY_GRAD_3)
        cr.set_source(grey_grad)
        cr.rectangle(self.X_PAD, self.Y_PAD, x_out - x_in, y_out - y_in)
        cr.fill()
        # Middle line marking the no-correction saturation level.
        y_mid = self.Y_PAD + math.floor((y_out - y_in)/2.0) + 0.2
        cr.set_line_width(0.6)
        cr.set_source_rgb(0.7,0.7,0.7)
        cr.move_to(x_in, y_mid)
        cr.line_to(x_out, y_mid)
        cr.stroke()
        # Band control points and active cursor ring.
        _draw_select_circle(cr, self.shadow_x - self.CIRCLE_HALF, self.shadow_y - self.CIRCLE_HALF, ACTIVE_SHADOW_COLOR, 8, 7, 8)
        _draw_select_circle(cr, self.mid_x - self.CIRCLE_HALF, self.mid_y - self.CIRCLE_HALF, ACTIVE_MID_COLOR, 8, 7, 8)
        _draw_select_circle(cr, self.hi_x - self.CIRCLE_HALF, self.hi_y - self.CIRCLE_HALF, ACTIVE_HI_COLOR, 8, 7, 8)
        _draw_cursor_indicator(cr, self.cursor_x - 11, self.cursor_y - 11, 11)
class ColorBoxFilterEditor:
    """
    Editor GUI for a hue/saturation color filter: a ColorBox selector with
    hue/saturation readout labels below it. Edits are written back to the
    filter's MLT properties as RGB lookup values.
    """
    def __init__(self, editable_properties):
        # Saturation value from the box (0-1) is scaled down to this maximum.
        self.SAT_MAX = 0.5
        self.widget = gtk.VBox()
        # Pick the editable properties this editor writes by name.
        self.hue = filter(lambda ep: ep.name == "hue", editable_properties)[0]
        self.saturation = filter(lambda ep: ep.name == "saturation", editable_properties)[0]
        self.R = filter(lambda ep: ep.name == "R", editable_properties)[0]
        self.G = filter(lambda ep: ep.name == "G", editable_properties)[0]
        self.B = filter(lambda ep: ep.name == "B", editable_properties)[0]
        # Selection box, initialized from current property values.
        self.color_box = ColorBox(self.color_box_values_changed)
        self.color_box.set_cursor(self.hue.get_float_value(), self.saturation.get_float_value())
        # Box row with stretching labels on both sides to center the widget.
        box_row = gtk.HBox()
        box_row.pack_start(gtk.Label(), True, True, 0)
        box_row.pack_start(self.color_box.widget, False, False, 0)
        box_row.pack_start(gtk.Label(), True, True, 0)
        # Hue / saturation value readout row.
        self.h_label = gtk.Label()
        self.s_label = gtk.Label()
        info_box = gtk.HBox(True)
        info_box.pack_start(self.h_label, False, False, 0)
        info_box.pack_start(self.s_label, False, False, 0)
        info_box.set_size_request(65, 20)
        info_row = gtk.HBox()
        info_row.pack_start(gtk.Label(), True, True, 0)
        info_row.pack_start(info_box, False, False, 0)
        info_row.pack_start(gtk.Label(), True, True, 0)
        self.widget.pack_start(box_row, False, False, 0)
        self.widget.pack_start(info_row, False, False, 0)
        self.widget.pack_start(gtk.Label(), True, True, 0)
        self._display_values(self.hue.get_float_value(), self.saturation.get_float_value())
    def color_box_values_changed(self):
        # ColorBox edit callback: persist hue/saturation, update readout and
        # write the derived RGB lookup values to the filter.
        hue_val, sat_val = self.color_box.get_hue_saturation()
        self.hue.write_property_value(str(hue_val))
        self.saturation.write_property_value(str(sat_val))
        self._display_values(hue_val, sat_val)
        r, g, b = lutfilter.get_RGB_for_angle_saturation_and_value(hue_val * 360, sat_val * self.SAT_MAX, 0.5)
        # Single keyframe at frame 0.
        self.R.write_value("0=" + str(r))
        self.G.write_value("0=" + str(g))
        self.B.write_value("0=" + str(b))
    def _display_values(self, hue, saturation):
        # Show hue in degrees and saturation as a percentage.
        sat_str = str(int(saturation * 100)) + "%"
        hue_str = unicode(int(360 * hue)) + ColorGrader.DEGREE_CHAR + u' '
        self.h_label.set_text(hue_str)
        self.s_label.set_text(sat_str)
class ColorLGGFilterEditor:
    """
    Editor GUI for a Lift/Gamma/Gain color filter.

    Each of the three bands has a hue selector (a slim ColorBox with a line
    cursor) and a value slider. Edits convert the selected hue to RGB and
    write per-channel lift/gamma/gain values to the MLT properties; hue and
    value themselves are persisted in non-MLT properties.
    """
    def __init__(self, editable_properties):
        self.widget = gtk.VBox()
        # Get MLT properties
        self.lift_r = filter(lambda ep: ep.name == "lift_r", editable_properties)[0]
        self.lift_g = filter(lambda ep: ep.name == "lift_g", editable_properties)[0]
        self.lift_b = filter(lambda ep: ep.name == "lift_b", editable_properties)[0]
        self.gamma_r = filter(lambda ep: ep.name == "gamma_r", editable_properties)[0]
        self.gamma_g = filter(lambda ep: ep.name == "gamma_g", editable_properties)[0]
        self.gamma_b = filter(lambda ep: ep.name == "gamma_b", editable_properties)[0]
        self.gain_r = filter(lambda ep: ep.name == "gain_r", editable_properties)[0]
        self.gain_g = filter(lambda ep: ep.name == "gain_g", editable_properties)[0]
        self.gain_b = filter(lambda ep: ep.name == "gain_b", editable_properties)[0]
        # Get Non-MLT properties
        self.lift_hue = filter(lambda ep: ep.name == "lift_hue", editable_properties)[0]
        self.lift_value = filter(lambda ep: ep.name == "lift_value", editable_properties)[0]
        self.gamma_hue = filter(lambda ep: ep.name == "gamma_hue", editable_properties)[0]
        self.gamma_value = filter(lambda ep: ep.name == "gamma_value", editable_properties)[0]
        self.gain_hue = filter(lambda ep: ep.name == "gain_hue", editable_properties)[0]
        self.gain_value = filter(lambda ep: ep.name == "gain_value", editable_properties)[0]
        # Lift editor
        self.lift_hue_selector = self.get_hue_selector(self.lift_hue_edited)
        self.lift_hue_value_label = gtk.Label()
        self.lift_hue_row = self.get_hue_row(self.lift_hue_selector.widget, self.lift_hue_value_label)
        self.lift_adjustment = self.lift_value.get_input_range_adjustment()
        self.lift_adjustment.connect("value-changed", self.lift_value_changed)
        self.lift_slider_row = self.get_slider_row(self.lift_adjustment)
        self.update_lift_display(self.lift_hue.get_float_value(), self.lift_value.get_current_in_value())
        # Gamma editor
        self.gamma_hue_selector = self.get_hue_selector(self.gamma_hue_edited)
        self.gamma_hue_value_label = gtk.Label()
        self.gamma_hue_row = self.get_hue_row(self.gamma_hue_selector.widget, self.gamma_hue_value_label)
        self.gamma_adjustment = self.gamma_value.get_input_range_adjustment()
        self.gamma_adjustment.connect("value-changed", self.gamma_value_changed)
        self.gamma_slider_row = self.get_slider_row(self.gamma_adjustment)
        self.update_gamma_display(self.gamma_hue.get_float_value(), self.gamma_value.get_current_in_value())
        # Gain editor
        self.gain_hue_selector = self.get_hue_selector(self.gain_hue_edited)
        self.gain_hue_value_label = gtk.Label()
        self.gain_hue_row = self.get_hue_row(self.gain_hue_selector.widget, self.gain_hue_value_label)
        self.gain_adjustment = self.gain_value.get_input_range_adjustment()
        self.gain_adjustment.connect("value-changed", self.gain_value_changed)
        self.gain_slider_row = self.get_slider_row(self.gain_adjustment)
        self.update_gain_display(self.gain_hue.get_float_value(), self.gain_value.get_current_in_value())
        # Pack
        self.widget.pack_start(self.get_name_row("Lift"), True, True, 0)
        self.widget.pack_start(self.lift_hue_row, True, True, 0)
        self.widget.pack_start(self.lift_slider_row, True, True, 0)
        self.widget.pack_start(guicomponents.EditorSeparator().widget, True, True, 0)
        self.widget.pack_start(self.get_name_row("Gamma"), True, True, 0)
        self.widget.pack_start(self.gamma_hue_row , True, True, 0)
        self.widget.pack_start(self.gamma_slider_row , True, True, 0)
        self.widget.pack_start(guicomponents.EditorSeparator().widget, True, True, 0)
        self.widget.pack_start(self.get_name_row("Gain"), True, True, 0)
        self.widget.pack_start(self.gain_hue_row , True, True, 0)
        self.widget.pack_start(self.gain_slider_row , True, True, 0)
        self.widget.pack_start(gtk.Label(), True, True, 0)
    # ---------------------------------------------- gui building
    def get_hue_selector(self, callback):
        # A slim ColorBox showing only the hue spectrum with a line cursor.
        color_box = ColorBox(callback, width=290, height=40)
        color_box.draw_saturation_gradient = False
        color_box.selection_cursor = SELECT_LINE
        return color_box
    def get_name_row(self, name):
        # Left-aligned band name label row.
        name_label = gtk.Label(name + ":")
        hbox = gtk.HBox(False, 4)
        hbox.pack_start(name_label, False, False, 4)
        hbox.pack_start(gtk.Label(), True, True, 0)
        return hbox
    def get_hue_row(self, color_box, value_label):
        # Hue selector with its degree readout label on the right.
        hbox = gtk.HBox(False, 4)
        hbox.pack_start(color_box, False, False, 0)
        hbox.pack_start(value_label, False, False, 4)
        hbox.pack_start(gtk.Label(), False, False, 0)
        return hbox
    def get_slider_row(self, adjustment):#, name):
        # Slider and spin button sharing the same adjustment.
        hslider = gtk.HScale()
        hslider.set_adjustment(adjustment)
        hslider.set_draw_value(False)
        spin = gtk.SpinButton()
        spin.set_numeric(True)
        spin.set_adjustment(adjustment)
        hslider.set_digits(0)
        spin.set_digits(0)
        hbox = gtk.HBox(False, 4)
        #hbox.pack_start(name_label, False, False, 4)
        hbox.pack_start(hslider, True, True, 0)
        hbox.pack_start(spin, False, False, 4)
        return hbox
    # --------------------------------------- gui updating
    def update_lift_display(self, hue, val):
        # Sync lift hue selector, readout label and value slider.
        self.lift_hue_selector.set_cursor(hue, 0.0)
        self.set_hue_label_value(hue, self.lift_hue_value_label)
        self.lift_adjustment.set_value(val)
    def update_gamma_display(self, hue, val):
        # Sync gamma hue selector, readout label and value slider.
        self.gamma_hue_selector.set_cursor(hue, 0.0)
        self.set_hue_label_value(hue, self.gamma_hue_value_label)
        self.gamma_adjustment.set_value(val)
    def update_gain_display(self, hue, val):
        # Sync gain hue selector, readout label and value slider.
        self.gain_hue_selector.set_cursor(hue, 0.0)
        self.set_hue_label_value(hue, self.gain_hue_value_label)
        self.gain_adjustment.set_value(val)
    def set_hue_label_value(self, hue, label):
        # Show normalized hue as degrees.
        hue_str = unicode(int(360 * hue)) + ColorGrader.DEGREE_CHAR + u' '
        label.set_text(hue_str)
    # ------------------------------ color box listeners
    def lift_hue_edited(self):
        hue, sat = self.lift_hue_selector.get_hue_saturation()
        self.set_hue_label_value(hue, self.lift_hue_value_label)
        self.update_lift_property_values()
    def gamma_hue_edited(self):
        hue, sat = self.gamma_hue_selector.get_hue_saturation()
        self.set_hue_label_value(hue, self.gamma_hue_value_label)
        self.update_gamma_property_values()
    def gain_hue_edited(self):
        hue, sat = self.gain_hue_selector.get_hue_saturation()
        self.set_hue_label_value(hue, self.gain_hue_value_label)
        self.update_gain_property_values()
    # ----------------------------------- slider listeners
    def lift_value_changed(self, adjustment):
        self.update_lift_property_values()
    def gamma_value_changed(self, adjustment):
        self.update_gamma_property_values()
    def gain_value_changed(self, adjustment):
        self.update_gain_property_values()
    # -------------------------------------- value writers
    def update_lift_property_values(self):
        # Lift: RGB scaled directly by the slider value (-1..1 after /100).
        hue, sat = self.lift_hue_selector.get_hue_saturation()
        r, g, b = lutfilter.get_RGB_for_angle(hue * 360)
        value = self.lift_adjustment.get_value() / 100.0
        r = r * value
        g = g * value
        b = b * value
        self.lift_hue.write_number_value(hue)
        self.lift_value.write_number_value(value)
        self.lift_r.write_value(r)
        self.lift_g.write_value(g)
        self.lift_b.write_value(b)
    def update_gamma_property_values(self):
        # Gamma: RGB interpolated around neutral 1.0 by the out value.
        hue, sat = self.gamma_hue_selector.get_hue_saturation()
        r, g, b = lutfilter.get_RGB_for_angle(hue * 360)
        value = self.gamma_value.get_out_value(self.gamma_adjustment.get_value())
        r = 1.0 + r * (value - 1.0)
        g = 1.0 + g * (value - 1.0)
        b = 1.0 + b * (value - 1.0)
        self.gamma_hue.write_number_value(hue)
        self.gamma_value.write_number_value(value)
        self.gamma_r.write_value(r)
        self.gamma_g.write_value(g)
        self.gamma_b.write_value(b)
    def update_gain_property_values(self):
        # Gain: same neutral-1.0 interpolation as gamma.
        hue, sat = self.gain_hue_selector.get_hue_saturation()
        r, g, b = lutfilter.get_RGB_for_angle(hue * 360)
        value = self.gain_value.get_out_value(self.gain_adjustment.get_value())
        r = 1.0 + r * (value - 1.0)
        g = 1.0 + g * (value - 1.0)
        b = 1.0 + b * (value - 1.0)
        self.gain_hue.write_number_value(hue)
        self.gain_value.write_number_value(value)
        self.gain_r.write_value(r)
        self.gain_g.write_value(g)
        self.gain_b.write_value(b)
class BoxEditor:
    """
    Base class for square editors that map between a normalized 0-1 value
    space and widget pixel coordinates.

    Subclasses call get_box_val_point()/get_box_panel_point() to convert
    between the two spaces and draw_box() to paint the background grid.
    """
    def __init__(self, pix_size):
        self.value_size = 1.0  # Box editor works in 0-1 normalized space
        self.pix_size = pix_size
        self.pix_per_val = self.value_size / pix_size  # value units per pixel
        # Half pixel offsets for crisp 1px line rendering.
        self.off_x = 0.5
        self.off_y = 0.5

    def get_box_val_point(self, x, y):
        """Convert panel pixel coordinates to values, clamped to 0-1 range."""
        # calculate value; y axis is flipped, higher values are up
        px = (x - self.off_x) * self.pix_per_val
        py = (self.pix_size - (y - self.off_y)) * self.pix_per_val
        # force range
        if px < 0:
            px = 0.0
        if py < 0:
            py = 0.0
        if px >= self.value_size:
            px = self.value_size
        if py >= self.value_size:
            py = self.value_size
        return px, py

    def get_box_panel_point(self, x, y, max_value):
        """Convert a value pair (scaled by max_value) to panel pixel coordinates."""
        px = x/max_value * self.pix_size + self.off_x
        py = self.off_y + self.pix_size - (y/max_value * self.pix_size) # higher values are up
        return (px, py)

    def draw_box(self, cr, allocation):
        """Draw the editor background and an 8x8 value grid."""
        x, y, w, h = allocation
        # Draw bg
        cr.set_source_rgb(*(gui.bg_color_tuple))
        cr.rectangle(0, 0, w, h)
        cr.fill()
        if editorpersistance.prefs.dark_theme == False:
            cr.set_source_rgb(*BOX_BG_COLOR )
            cr.rectangle(0, 0, self.pix_size + 1, self.pix_size + 1)
            cr.fill()
        # value lines
        cr.set_source_rgb(*BOX_LINE_COLOR)
        step = self.pix_size / 8
        cr.set_line_width(1.0)
        for i in range(0, 9):
            # Bug fix: use the same 0.5 x offset at both end points so the
            # vertical grid lines are straight (the end point's offset was
            # previously missing, drawing them slanted by half a pixel).
            cr.move_to(0.5 + step * i, 0.5)
            cr.line_to(0.5 + step * i, self.pix_size + 0.5)
            cr.stroke()
        for i in range(0, 9):
            cr.move_to(0.5, step * i + 0.5)
            cr.line_to(self.pix_size + 0.5, step * i + 0.5)
            cr.stroke()
class CatmullRomFilterEditor:
    """
    Editor GUI for a Catmull-Rom curves color filter: a curves box editor
    plus toggle buttons to pick the edited channel (RGB value / R / G / B)
    and buttons applying preset curve shapes.
    """
    # Channel identifiers; these also match the channel button indexes.
    RGB = 0
    R = 1
    G = 2
    B = 3
    def __init__(self, editable_properties):
        self.widget = gtk.VBox()
        # These properties hold the values that are written out to MLT to do the filtering
        self.cr_filter = lutfilter.CatmullRomFilter(editable_properties)
        default_curve = self.cr_filter.value_cr_curve
        self.current_edit_curve = CatmullRomFilterEditor.RGB
        # This is used to edit points of currently active curve
        self.curve_editor = CurvesBoxEditor(256.0, default_curve, self)
        # This is used to change currently active curve
        self.channel_buttons = glassbuttons.GlassButtonsToggleGroup(32, 19, 2, 2, 5)
        self.channel_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "rgb_channel.png"), self.channel_changed)
        self.channel_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "red_channel.png"), self.channel_changed)
        self.channel_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "green_channel.png"), self.channel_changed)
        self.channel_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "blue_channel.png"), self.channel_changed)
        self.channel_buttons.widget.set_pref_size(132, 28)
        self.channel_buttons.set_pressed_button(0)
        # Preset curve buttons: linear, add gamma, remove gamma.
        self.curve_buttons = glassbuttons.GlassButtonsGroup(32, 19, 2, 2, 5)
        self.curve_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "linear_curve.png"), self.do_curve_reset_pressed)
        self.curve_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "curve_s.png"), self.do_curve_reset_pressed)
        self.curve_buttons.add_button(gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "curve_flipped_s.png"), self.do_curve_reset_pressed)
        self.curve_buttons.widget.set_pref_size(97, 28)
        button_hbox = gtk.HBox()
        button_hbox.pack_start(self.channel_buttons.widget, False, False, 0)
        button_hbox.pack_start(guiutils.get_pad_label(4, 4), False, False, 0)
        button_hbox.pack_start(self.curve_buttons.widget, False, False, 0)
        buttons_row = guiutils.get_in_centering_alignment(button_hbox)
        # Center the curves editor horizontally with stretching labels.
        box_row = gtk.HBox()
        box_row.pack_start(gtk.Label(), True, True, 0)
        box_row.pack_start(self.curve_editor.widget, False, False, 0)
        box_row.pack_start(gtk.Label(), True, True, 0)
        self.widget.pack_start(gtk.Label(), True, True, 0)
        self.widget.pack_start(box_row, False, False, 0)
        self.widget.pack_start(guiutils.get_pad_label(12, 8), False, False, 0)
        self.widget.pack_start(buttons_row, False, False, 0)
        self.widget.pack_start(gtk.Label(), True, True, 0)
    def channel_changed(self):
        # Channel toggle button pressed.
        channel = self.channel_buttons.pressed_button # indexes match
        self.update_editors_to_channel(channel)
    def update_editors_to_channel(self, channel):
        # Switch the curves editor to show the selected channel's curve.
        # Channel values and button indexes match
        if channel == CatmullRomFilterEditor.RGB:
            self.current_edit_curve = CatmullRomFilterEditor.RGB
            self.curve_editor.set_curve(self.cr_filter.value_cr_curve, CURVE_COLOR)
        elif channel == CatmullRomFilterEditor.R:
            self.current_edit_curve = CatmullRomFilterEditor.R
            self.curve_editor.set_curve(self.cr_filter.r_cr_curve, R_CURVE_COLOR)
        elif channel == CatmullRomFilterEditor.G:
            self.current_edit_curve = CatmullRomFilterEditor.G
            self.curve_editor.set_curve(self.cr_filter.g_cr_curve, G_CURVE_COLOR)
        else:
            self.current_edit_curve = CatmullRomFilterEditor.B
            self.curve_editor.set_curve(self.cr_filter.b_cr_curve, B_CURVE_COLOR)
    def do_curve_reset_pressed(self):
        # Apply a preset point set to the active channel's curve.
        button_index = self.curve_buttons.pressed_button
        channel = self.current_edit_curve
        if button_index == 0: # Linear
            new_points_str = "0/0;255/255"
        elif button_index == 1: # Default add gamma
            new_points_str = "0/0;64/48;192/208;255/255"
        elif button_index == 2: # Default remove gamma
            new_points_str = "0/0;64/80;192/176;255/255"
        if channel == CatmullRomFilterEditor.RGB:
            self.cr_filter.value_cr_curve.set_points_from_str(new_points_str)
        elif channel == CatmullRomFilterEditor.R:
            self.cr_filter.r_cr_curve.set_points_from_str(new_points_str)
        elif channel== CatmullRomFilterEditor.G:
            self.cr_filter.g_cr_curve.set_points_from_str(new_points_str)
        else:
            self.cr_filter.b_cr_curve.set_points_from_str(new_points_str)
        self.write_points_to_current_curve(new_points_str)
        self.update_editors_to_channel(channel)
    def curve_edit_done(self):
        # CurvesBoxEditor edit-listener callback: persist the edited points.
        points_str = self.curve_editor.curve.get_points_string()
        self.write_points_to_current_curve(points_str)
    def write_points_to_current_curve(self, points_str):
        # Write the point string to the active channel's property and
        # regenerate the MLT lookup tables.
        if self.current_edit_curve == CatmullRomFilterEditor.RGB:
            self.cr_filter.value_points_prop.write_property_value(points_str)
        elif self.current_edit_curve == CatmullRomFilterEditor.R:
            self.cr_filter.r_points_prop.write_property_value(points_str)
        elif self.current_edit_curve == CatmullRomFilterEditor.G:
            self.cr_filter.g_points_prop.write_property_value(points_str)
        else: # CatmullRomFilterEditor.B
            self.cr_filter.b_points_prop.write_property_value(points_str)
        self.cr_filter.update_table_property_values()
class CurvesBoxEditor(BoxEditor):
    """
    Box editor for a single Catmull-Rom curve in 0-255 value space.

    Dragging sets curve points; nearby points are removed as the drag
    passes over them. edit_listener.curve_edit_done() is called on release.
    """
    def __init__(self, pix_size, curve, edit_listener):
        BoxEditor.__init__(self, pix_size)
        self.curve = curve # lutfilter.CRCurve
        global BOX_LINE_COLOR, CURVE_COLOR
        self.curve_color = CURVE_COLOR
        self.edit_listener = edit_listener # Needs to implement "curve_edit_done()"
        self.widget = CairoDrawableArea(self.pix_size + 2,
                                        self.pix_size + 2,
                                        self._draw)
        self.widget.press_func = self._press_event
        self.widget.motion_notify_func = self._motion_notify_event
        self.widget.release_func = self._release_event
        self.last_point = None
        self.edit_on = False
        # NOTE: mutates the module level colors for all curve editors when
        # the dark theme is active.
        if editorpersistance.prefs.dark_theme == True:
            BOX_LINE_COLOR = (0.8, 0.8, 0.8)
            CURVE_COLOR = (0.8, 0.8, 0.8)
            self.curve_color = CURVE_COLOR
    def set_curve(self, curve, curve_color):
        """Switch the edited curve and its draw color, then repaint."""
        self.curve = curve
        self.curve_color = curve_color
        self.widget.queue_draw()
    def _press_event(self, event):
        # Start an edit: clear points near the press and set a new one.
        vx, vy = BoxEditor.get_box_val_point(self, event.x, event.y)
        p = lutfilter.CurvePoint(int(round(vx * 255)), int(round(vy * 255)))
        self.last_point = p
        self.edit_on = True
        self.curve.remove_range(self.last_point.x - 3, self.last_point.x + 3 )
        self.curve.set_curve_point(p)
        self.widget.queue_draw()
    def _motion_notify_event(self, x, y, state):
        # Drag: remove points swept over since the last event, add new point.
        if self.edit_on == False:
            return
        vx, vy = BoxEditor.get_box_val_point(self, x, y)
        p = lutfilter.CurvePoint(int(round(vx * 255)), int(round(vy * 255)))
        self.curve.remove_range(self.last_point.x, p.x)
        self.curve.set_curve_point(p)
        self.last_point = p
        self.widget.queue_draw()
    def _release_event(self, event):
        # End the edit and notify the listener so values get persisted.
        if self.edit_on == False:
            return
        vx, vy = BoxEditor.get_box_val_point(self, event.x, event.y)
        p = lutfilter.CurvePoint(int(round(vx * 255)),int(round(vy * 255)))
        self.curve.remove_range(self.last_point.x, p.x)
        self.curve.set_curve_point(p)
        self.edit_on = False
        self.edit_listener.curve_edit_done()
        self.widget.queue_draw()
    def _draw(self, event, cr, allocation):
        # bg box
        BoxEditor.draw_box(self, cr, allocation)
        x, y, w, h = allocation
        # curve
        cr.set_source_rgb(*self.curve_color)# seg.setColor( CURVE_COLOR );
        cr.set_line_width(1.5)
        cp = self.curve.get_curve(True) #we get 256 values
        px, py = BoxEditor.get_box_panel_point(self, 0, cp[0], 255)
        cr.move_to(px, py)
        for i in range(1, len(cp)): #int i = 0; i < cp.length - 1; i++ )
            px, py = BoxEditor.get_box_panel_point(self, i, cp[i], 255.0)
            cr.line_to(px, py)
        cr.stroke()
        # Clip so edit point circles do not spill over the box border.
        cr.rectangle(1, 1, w - 3, h - 3)
        cr.clip()
        # edit points
        for p in self.curve.points:
            px, py = BoxEditor.get_box_panel_point(self, p.x, p.y, 255.0)
            _draw_select_circle(cr, px, py, (1,1,1), 4, 2, 0, -4, -4)
class ColorGrader:
    """
    Three-band (shadow / midtone / highlight) color correction editor.

    Wraps a lutfilter.ColorGradeFilter and builds the gtk GUI used to edit
    per-band hue and saturation with a ThreeBandColorBox.
    """
    DEGREE_CHAR = u'\u00B0'  # degree sign used in the hue readout labels

    def __init__(self, editable_properties):
        # Initial active band
        self.band = SHADOW

        # HUE and SAT are both saved in range (0,1)
        # HUE and SAT are both handled in editor using range (0,1)
        # Saved and editor ranges are the same.
        # ColorGradeBandCorrection objects handle ranges differently
        # - saturation values 0-1 converted to range (-1, 1)
        # - saturation value 0.5 is converted to 0 and means no correction applied
        # - converted range(-1, 0) means negative correction applied
        # - negative correction is interpreted as positive correction of complimentary color

        # Editable properties (py2 filter() returns a list; [0] picks the match)
        self.shadow_hue = filter(lambda ep: ep.name == "shadow_hue", editable_properties)[0]
        self.shadow_saturation = filter(lambda ep: ep.name == "shadow_saturation", editable_properties)[0]
        self.mid_hue = filter(lambda ep: ep.name == "mid_hue", editable_properties)[0]
        self.mid_saturation = filter(lambda ep: ep.name == "mid_saturation", editable_properties)[0]
        self.hi_hue = filter(lambda ep: ep.name == "hi_hue", editable_properties)[0]
        self.hi_saturation = filter(lambda ep: ep.name == "hi_saturation", editable_properties)[0]

        # Create filter and init values
        self.filt = lutfilter.ColorGradeFilter(editable_properties)
        self.filt.shadow_band.set_hue_and_saturation(self.shadow_hue.get_float_value(),
                                                     self.shadow_saturation.get_float_value())
        self.filt.mid_band.set_hue_and_saturation(self.mid_hue.get_float_value(),
                                                  self.mid_saturation.get_float_value())
        self.filt.hi_band.set_hue_and_saturation(self.hi_hue.get_float_value(),
                                                 self.hi_saturation.get_float_value())
        self.filt.update_all_corrections()
        self.filt.update_rgb_lookups()
        self.filt.write_out_tables()

        # Create GUI
        self.color_box = ThreeBandColorBox(self.color_box_values_changed, self.band_changed, 340, 200)
        self.color_box.set_cursor(self.shadow_hue.get_float_value(), self.shadow_saturation.get_float_value())
        self.color_box.set_cursors(self.shadow_hue.get_float_value(), self.shadow_saturation.get_float_value(),
                                   self.mid_hue.get_float_value(), self.mid_saturation.get_float_value(),
                                   self.hi_hue.get_float_value(), self.hi_saturation.get_float_value())

        # Color box centered in its own row
        box_row = gtk.HBox()
        box_row.pack_start(gtk.Label(), True, True, 0)
        box_row.pack_start(self.color_box.widget, False, False, 0)
        box_row.pack_start(gtk.Label(), True, True, 0)

        # Per-band hue/saturation readout: shadow
        shadow_icon = gtk.image_new_from_file(respaths.IMAGE_PATH + "shadow.png")
        self.sh_label = gtk.Label()
        self.ss_label = gtk.Label()
        shadow_box = gtk.HBox()
        shadow_box.pack_start(shadow_icon, False, False, 0)
        shadow_box.pack_start(guiutils.pad_label(3,5), False, False, 0)
        shadow_box.pack_start(self.sh_label, False, False, 0)
        shadow_box.pack_start(self.ss_label, False, False, 0)
        shadow_box.set_size_request(95, 20)

        # Readout: midtones
        midtone_icon = gtk.image_new_from_file(respaths.IMAGE_PATH + "midtones.png")
        self.mh_label = gtk.Label()
        self.ms_label = gtk.Label()
        midtone_box = gtk.HBox()
        midtone_box.pack_start(midtone_icon, False, False, 0)
        midtone_box.pack_start(guiutils.pad_label(3,5), False, False, 0)
        midtone_box.pack_start(self.mh_label, False, False, 0)
        midtone_box.pack_start(self.ms_label, False, False, 0)
        midtone_box.set_size_request(95, 20)

        # Readout: highlights
        highligh_icon = gtk.image_new_from_file(respaths.IMAGE_PATH + "highlights.png")
        self.hh_label = gtk.Label()
        self.hs_label = gtk.Label()
        highlight_box = gtk.HBox()
        highlight_box.pack_start(highligh_icon, False, False, 0)
        highlight_box.pack_start(guiutils.pad_label(3,5), False, False, 0)
        highlight_box.pack_start(self.hh_label, False, False, 0)
        highlight_box.pack_start(self.hs_label, False, False, 0)
        highlight_box.set_size_request(95, 20)

        # Initialize readout labels from saved values
        self._display_values(SHADOW, self.shadow_hue.get_float_value(), self.shadow_saturation.get_float_value())
        self._display_values(MID, self.mid_hue.get_float_value(), self.mid_saturation.get_float_value())
        self._display_values(HI, self.hi_hue.get_float_value(), self.hi_saturation.get_float_value())

        info_row = gtk.HBox()
        info_row.pack_start(gtk.Label(), True, True, 0)
        info_row.pack_start(shadow_box, False, False, 0)
        info_row.pack_start(midtone_box, False, False, 0)
        info_row.pack_start(highlight_box, False, False, 0)
        info_row.pack_start(gtk.Label(), True, True, 0)

        self.widget = gtk.VBox()
        self.widget.pack_start(box_row, False, False, 0)
        self.widget.pack_start(info_row, False, False, 0)
        self.widget.pack_start(gtk.Label(), True, True, 0)

    def band_changed(self, band):
        # Callback from ThreeBandColorBox: switch the band being edited.
        self.band = band

    def color_box_values_changed(self):
        """Write edited hue/saturation of active band to properties and filter."""
        hue, sat = self.color_box.get_hue_saturation()
        if self.band == SHADOW:
            self.shadow_hue.write_number_value(hue)
            self.shadow_saturation.write_number_value(sat)
            self.filt.shadow_band.set_hue_and_saturation(hue, sat)
            self.filt.shadow_band.update_correction()
        elif self.band == MID:
            self.mid_hue.write_number_value(hue)
            self.mid_saturation.write_number_value(sat)
            self.filt.mid_band.set_hue_and_saturation(hue, sat)
            self.filt.mid_band.update_correction()
        else:
            self.hi_hue.write_number_value(hue)
            self.hi_saturation.write_number_value(sat)
            self.filt.hi_band.set_hue_and_saturation(hue, sat)
            self.filt.hi_band.update_correction()

        self._display_values(self.band, hue, sat)
        self.filt.update_rgb_lookups()
        self.filt.write_out_tables()

    def _display_values(self, band, hue, saturation):
        # Saturation 0.5 means "no correction": show it as a signed percentage.
        sat_str = str(int(((saturation - 0.5) * 2.0) * 100)) + "%"
        # Hue shown in degrees (0-360).
        hue_str = unicode(int(360 * hue)) + ColorGrader.DEGREE_CHAR + u' '
        if band == SHADOW:
            self.sh_label.set_text(hue_str)
            self.ss_label.set_text(sat_str)
        elif band == MID:
            self.mh_label.set_text(hue_str)
            self.ms_label.set_text(sat_str)
        else:
            self.hh_label.set_text(hue_str)
            self.hs_label.set_text(sat_str)

    """
    # NON_ MLT PROPERTY SLIDER DEMO CODE
    def hue_changed(self, ep, value):
        ep.write_property_value(str(value))
        self.update_properties()

    def saturation_changed(self, ep, value):
        ep.write_property_value(str(value))
        self.update_properties()

    def value_changed(self, ep, value):
        ep.write_property_value(str(value))
        self.update_properties()
    """
class AbstractColorWheel:
def __init__(self, edit_listener):
self.widget = CairoDrawableArea(260,
260,
self._draw)
self.widget.press_func = self._press_event
self.widget.motion_notify_func = self._motion_notify_event
self.widget.release_func = self._release_event
self.X_PAD = 3
self.Y_PAD = 3
self.CENTER_X = 130
self.CENTER_Y = 130
self.MAX_DIST = 123
self.twelwe_p = (self.CENTER_X , self.CENTER_Y - self.MAX_DIST)
self.CIRCLE_HALF = 6
self.cursor_x = self.CENTER_X
self.cursor_y = self.CENTER_Y
self.WHEEL_IMG = gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "color_wheel.png")
self.edit_listener = edit_listener
self.angle = 0.0
self.distance = 0.0
def _press_event(self, event):
"""
Mouse button callback
"""
self.cursor_x, self.cursor_y = self._get_legal_point(event.x, event.y)
self._save_point()
self.widget.queue_draw()
def _motion_notify_event(self, x, y, state):
"""
Mouse move callback
"""
self.cursor_x, self.cursor_y = self._get_legal_point(x, y)
self._save_point()
self.widget.queue_draw()
def _release_event(self, event):
self.cursor_x, self.cursor_y = self._get_legal_point(event.x, event.y)
self._save_point()
self.edit_listener()
self.widget.queue_draw()
def _get_legal_point(self, x, y):
vec = viewgeom.get_vec_for_points((self.CENTER_X, self.CENTER_Y), (x, y))
dist = vec.get_length()
if dist < self.MAX_DIST:
return (x, y)
new_vec = vec.get_multiplied_vec(self.MAX_DIST / dist )
return new_vec.end_point
def get_angle(self, p):
angle = viewgeom.get_angle_in_deg(self.twelwe_p, (self.CENTER_X, self.CENTER_Y), p)
clockwise = viewgeom.points_clockwise(self.twelwe_p, (self.CENTER_X, self.CENTER_Y), p)
if clockwise:
angle = 360.0 - angle;
# Color circle starts from 11 o'clock
angle = angle - 30.0
if angle < 0.0:
angle = angle + 360.0
return angle
def get_distance(self, p):
vec = viewgeom.get_vec_for_points((self.CENTER_X, self.CENTER_Y), p)
dist = vec.get_length()
return dist/self.MAX_DIST
def _save_point(self):
print "_save_point not implemented"
pass
def get_angle_and_distance(self):
if self.band == SHADOW:
x = self.shadow_x
y = self.shadow_y
elif self.band == MID:
x = self.mid_x
y = self.mid_y
else:
x = self.hi_x
y = self.hi_y
p = (x, y)
angle = self._get_angle(p)
distance = self._get_distance(p)
return (angle, distance)
def _draw(self, event, cr, allocation):
"""
Callback for repaint from CairoDrawableArea.
We get cairo context and allocation.
"""
x, y, w, h = allocation
# Draw bg
cr.set_source_rgb(*(gui.bg_color_tuple))
cr.rectangle(0, 0, w, h)
cr.fill()
cr.set_source_pixbuf(self.WHEEL_IMG, self.X_PAD, self.Y_PAD)
cr.paint()
class SimpleColorWheel(AbstractColorWheel):
    """Color wheel that keeps a single saved selection point."""

    def __init__(self, edit_listener):
        AbstractColorWheel.__init__(self, edit_listener)
        # Saved selection starts where the cursor starts (wheel center).
        self.value_x = self.cursor_x
        self.value_y = self.cursor_y

    def _save_point(self):
        # Remember current cursor position as the selected value.
        self.value_x, self.value_y = self.cursor_x, self.cursor_y

    def get_angle_and_distance(self):
        """Return (angle_deg, normalized_distance) for the saved point."""
        saved = (self.value_x, self.value_y)
        return (self.get_angle(saved), self.get_distance(saved))

    def _draw(self, event, cr, allocation):
        """
        Callback for repaint from CairoDrawableArea.
        We get cairo context and allocation.
        """
        # Background and wheel image from base class, then the selection ring.
        AbstractColorWheel._draw(self, event, cr, allocation)
        _draw_select_circle(cr, self.cursor_x - self.CIRCLE_HALF,
                            self.cursor_y - self.CIRCLE_HALF,
                            (1,1,1), ACTIVE_RING_COLOR)
class SMHColorWheel(AbstractColorWheel):
    """Color wheel keeping a separate selection point per band (S/M/H)."""

    def __init__(self, edit_listener):
        AbstractColorWheel.__init__(self, edit_listener)
        self.band = SHADOW
        # All three band points start at the cursor start position (center).
        self.shadow_x = self.cursor_x
        self.shadow_y = self.cursor_y
        self.mid_x = self.cursor_x
        self.mid_y = self.cursor_y
        self.hi_x = self.cursor_x
        self.hi_y = self.cursor_y

    def _band_point(self):
        # Saved (x, y) point for the currently active band.
        if self.band == SHADOW:
            return (self.shadow_x, self.shadow_y)
        elif self.band == MID:
            return (self.mid_x, self.mid_y)
        return (self.hi_x, self.hi_y)

    def set_band(self, band):
        """Switch active band and move the cursor onto its saved point."""
        self.band = band
        self.cursor_x, self.cursor_y = self._band_point()

    def _save_point(self):
        # Store current cursor position as the active band's saved point.
        if self.band == SHADOW:
            self.shadow_x, self.shadow_y = self.cursor_x, self.cursor_y
        elif self.band == MID:
            self.mid_x, self.mid_y = self.cursor_x, self.cursor_y
        else:
            self.hi_x, self.hi_y = self.cursor_x, self.cursor_y

    def get_angle_and_distance(self):
        """Return (angle_deg, normalized_distance) for active band's point."""
        p = self._band_point()
        return (self.get_angle(p), self.get_distance(p))

    def _draw(self, event, cr, allocation):
        """
        Callback for repaint from CairoDrawableArea.
        We get cairo context and allocation.
        """
        AbstractColorWheel._draw(self, event, cr, allocation)
        # Ring circle is tinted with the active band's color.
        if self.band == SHADOW:
            band_color = ACTIVE_SHADOW_COLOR
        elif self.band == MID:
            band_color = ACTIVE_MID_COLOR
        else:
            band_color = ACTIVE_HI_COLOR
        _draw_select_circle(cr, self.cursor_x - self.CIRCLE_HALF,
                            self.cursor_y - self.CIRCLE_HALF,
                            band_color, ACTIVE_RING_COLOR)
class ColorBandSelector:
    """Row of three circles for picking the active band (shadow/mid/high)."""

    def __init__(self):
        self.band = SHADOW

        self.widget = CairoDrawableArea(42,
                                        18,
                                        self._draw)
        self.widget.press_func = self._press_event

        # X offsets of the three band circles.
        self.SHADOW_X = 0
        self.MID_X = 15
        self.HI_X = 30

        self.band_change_listener = None # monkey patched in at creation site

    def _press_event(self, event):
        # Report which band circle was hit, if any.
        hit_areas = ((self.SHADOW_X, SHADOW), (self.MID_X, MID), (self.HI_X, HI))
        for band_x, band in hit_areas:
            if self._circle_hit(band_x, event.x, event.y):
                self.band_change_listener(band)
                return

    def _circle_hit(self, band_x, x, y):
        # 12x12 px hit box anchored at (band_x, 0).
        return (band_x <= x < band_x + 12) and (0 < y < 12)

    def _draw(self, event, cr, allocation):
        """
        Callback for repaint from CairoDrawableArea.
        We get cairo context and allocation.
        """
        x, y, w, h = allocation

        # Draw bg
        cr.set_source_rgb(*(gui.bg_color_tuple))
        cr.rectangle(0, 0, w, h)
        cr.fill()

        ring_color = (0.0, 0.0, 0.0)
        _draw_select_circle(cr, self.SHADOW_X, 0, (0.1, 0.1, 0.1), ring_color)
        _draw_select_circle(cr, self.MID_X, 0, (0.5, 0.5, 0.5), ring_color)
        _draw_select_circle(cr, self.HI_X, 0, (1.0, 1.0, 1.0), ring_color)

        self._draw_active_indicator(cr)

    def _draw_active_indicator(self, cr):
        # Small black bar under the active band's circle.
        y = 14.5
        HALF = 4.5
        HEIGHT = 2

        if self.band == SHADOW:
            x = self.SHADOW_X + 1.5
        elif self.band == MID:
            x = self.MID_X + 1.5
        else:
            x = self.HI_X + 1.5

        cr.set_source_rgb(0, 0, 0)
        cr.move_to(x, y)
        cr.line_to(x + 2 * HALF, y)
        cr.line_to(x + 2 * HALF, y + HEIGHT)
        cr.line_to(x, y + HEIGHT)
        cr.close_path()
        cr.fill()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles clip effects editing logic and gui
"""
import pygtk
pygtk.require('2.0');
import gtk
import dnd
import edit
from editorstate import PROJECT
import gui
import guicomponents
import guiutils
import mltfilters
import propertyedit
import propertyeditorbuilder
import respaths
import translations
import updater
import utils
# Container object for the editor's gui widgets; attributes are filled in
# by create_widgets() and by monkeypatching at panel creation time.
widgets = utils.EmptyClass()

clip = None # Clip being edited
track = None # Track of the clip being edited
clip_index = None # Index of clip being edited
block_changed_update = False # Used to block unwanted callback update from "changed", hack and a broken one, look to fix

# This is updated when filter panel is displayed and cleared when removed.
# Used to update kfeditors with external tline frame position changes
keyframe_editor_widgets = []
def get_clip_effects_editor_panel(group_combo_box, effects_list_view):
    """
    Build and return the clip effects editor panel (a gtk.VBox).

    Use components created at clipeffectseditor.py.
    group_combo_box and effects_list_view are created by the caller and are
    stored into the module 'widgets' container here.
    """
    create_widgets()

    # Add / Delete buttons plus "toggle all" button row.
    ad_buttons_box = gtk.HBox(True,1)
    ad_buttons_box.pack_start(widgets.add_effect_b)
    ad_buttons_box.pack_start(widgets.del_effect_b)

    stack_buttons_box = gtk.HBox(False,1)
    stack_buttons_box.pack_start(ad_buttons_box, True, True, 0)
    stack_buttons_box.pack_start(widgets.toggle_all, False, False, 0)

    effect_stack = widgets.effect_stack_view

    # Fill group selection combo with filter group names.
    for group in mltfilters.groups:
        group_name, filters_array = group
        group_combo_box.append_text(group_name)
    group_combo_box.set_active(0)

    # Same callback function works for filter select window too
    group_combo_box.connect("changed",
                            lambda w,e: _group_selection_changed(w,effects_list_view),
                            None)
    widgets.group_combo = group_combo_box
    widgets.effect_list_view = effects_list_view
    set_enabled(False)

    # Clip info row with quit-editing button.
    exit_button_vbox = gtk.VBox(False, 2)
    exit_button_vbox.pack_start(widgets.exit_button, False, False, 0)
    exit_button_vbox.pack_start(gtk.Label(), True, True, 0)

    info_row = gtk.HBox(False, 2)
    info_row.pack_start(widgets.clip_info, False, False, 0)
    info_row.pack_start(exit_button_vbox, True, True, 0)

    combo_row = gtk.HBox(False, 2)
    combo_row.pack_start(group_combo_box, True, True, 0)
    combo_row.pack_start(guiutils.get_pad_label(8, 2), False, False, 0)

    # Show first group's filters with first filter selected.
    group_name, filters_array = mltfilters.groups[0]
    effects_list_view.fill_data_model(filters_array)
    effects_list_view.treeview.get_selection().select_path("0")

    effects_vbox = gtk.VBox(False, 2)
    effects_vbox.pack_start(info_row, False, False, 0)
    effects_vbox.pack_start(guiutils.get_pad_label(2, 2), False, False, 0)
    effects_vbox.pack_start(stack_buttons_box, False, False, 0)
    effects_vbox.pack_start(effect_stack, True, True, 0)
    effects_vbox.pack_start(combo_row, False, False, 0)
    effects_vbox.pack_start(effects_list_view, True, True, 0)

    widgets.group_combo.set_tooltip_text(_("Select Filter Group"))
    widgets.effect_list_view.set_tooltip_text(_("Current group Filters"))

    return effects_vbox
def _group_selection_changed(group_combo, filters_list_view):
    """Show filters of the newly selected group, first filter selected."""
    selected_group = group_combo.get_active()
    group_name, filters_array = mltfilters.groups[selected_group]
    filters_list_view.fill_data_model(filters_array)
    filters_list_view.treeview.get_selection().select_path("0")
def set_clip(new_clip, new_track, new_index):
    """
    Sets clip being edited and inits gui.

    new_clip  - clip whose filter stack is displayed
    new_track - timeline track the clip is on
    new_index - index of the clip in the track
    """
    global clip, track, clip_index
    clip = new_clip
    track = new_track
    clip_index = new_index

    widgets.clip_info.display_clip_info(clip, track, clip_index)
    set_enabled(True)
    update_stack_view()
    effect_selection_changed() # This may get called twice
    gui.middle_notebook.set_current_page(2) # 2 == index of clipeditor page in notebook
def clip_removed_during_edit(removed_clip):
    """
    Called from edit.py after a clip is removed from timeline during edit
    so that we cannot edit effects on clip that is no longer on timeline.
    """
    # Only clear the editor if the removed clip is the one being edited.
    if clip == removed_clip:
        clear_clip()
def effect_select_row_double_clicked(treeview, tree_path, col):
    # Double-clicking a filter in the filter list adds it to the current clip.
    add_currently_selected_effect()
def filter_stack_button_press(treeview, event):
    """
    Mouse press on the filter stack treeview: right-click opens the
    per-filter popup menu. Returns True when the event was consumed.
    """
    path_pos_tuple = treeview.get_path_at_pos(int(event.x), int(event.y))
    if path_pos_tuple == None:
        row = -1 # Empty row was clicked
    else:
        # Select the clicked row before showing the menu.
        path, column, x, y = path_pos_tuple
        selection = treeview.get_selection()
        selection.unselect_all()
        selection.select_path(path)
        (model, rows) = selection.get_selected_rows()
        row = max(rows[0])
    if row == -1:
        return False
    if event.button == 3:
        # Right mouse button: show the filter stack popup menu.
        guicomponents.display_filter_stack_popup_menu(row, treeview, _filter_stack_menu_item_selected, event)
        return True
    return False
def _filter_stack_menu_item_selected(widget, data):
    """Dispatch a filter stack popup menu selection to its handler."""
    item_id, row, treeview = data
    if item_id == "toggle":
        # Toggle filter active state
        toggle_filter_active(row)
    elif item_id == "reset":
        reset_filter_values()
def _quit_editing_clip_clicked(): # this is a button callback
    # Exit button: stop editing the current clip's effects.
    clear_clip()
def clear_clip():
    """
    Removes clip from effects editing gui.
    """
    global clip
    clip = None
    _set_no_clip_info()
    clear_effects_edit_panel()
    # With clip == None these reset the views to their empty/disabled state.
    update_stack_view()
    set_enabled(False)
def _set_no_clip_info():
    # Show the "no clip being edited" state in the clip info panel.
    widgets.clip_info.set_no_clip_info()
def create_widgets():
    """
    Widgets for editing clip effects properties.

    Fills the module-level 'widgets' container; called once when the
    effects editor panel is built.
    """
    widgets.clip_info = guicomponents.ClipInfoPanel()

    # Button for quitting clip edit.
    widgets.exit_button = gtk.Button()
    icon = gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
    widgets.exit_button.set_image(icon)
    widgets.exit_button.connect("clicked", lambda w: _quit_editing_clip_clicked())
    widgets.exit_button.set_tooltip_text(_("Quit editing Clip in editor"))

    # Filter stack view with selection-changed and active-toggle callbacks.
    widgets.effect_stack_view = guicomponents.FilterSwitchListView(lambda ts: effect_selection_changed(), toggle_filter_active)
    dnd.connect_stack_treeview(widgets.effect_stack_view)
    gui.effect_stack_list_view = widgets.effect_stack_view

    # Frame that holds whichever property editor panel is currently shown.
    widgets.value_edit_box = gtk.VBox()
    widgets.value_edit_frame = gtk.Frame()
    widgets.value_edit_frame.set_shadow_type(gtk.SHADOW_NONE)
    widgets.value_edit_frame.add(widgets.value_edit_box)

    widgets.add_effect_b = gtk.Button(_("Add"))
    widgets.del_effect_b = gtk.Button(_("Delete"))
    widgets.toggle_all = gtk.Button()
    widgets.toggle_all.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "filters_all_toggle.png"))

    widgets.add_effect_b.connect("clicked", lambda w,e: add_effect_pressed(), None)
    widgets.del_effect_b.connect("clicked", lambda w,e: delete_effect_pressed(), None)
    widgets.toggle_all.connect("clicked", lambda w: toggle_all_pressed())

    # These are created elsewhere and then monkeypatched here
    widgets.group_combo = None
    widgets.effect_list_view = None

    widgets.clip_info.set_tooltip_text(_("Clip being edited"))
    widgets.effect_stack_view.set_tooltip_text(_("Clip Filter Stack"))
    widgets.add_effect_b.set_tooltip_text(_("Add Filter to Clip Filter Stack"))
    widgets.del_effect_b.set_tooltip_text(_("Delete Filter from Clip Filter Stack"))
    widgets.toggle_all.set_tooltip_text(_("Toggle all Filters On/Off"))
def set_enabled(value):
    """Set sensitivity of all clip effects editor widgets."""
    widgets.clip_info.set_enabled(value)
    widgets.effect_stack_view.treeview.set_sensitive(value)
    for button in (widgets.add_effect_b,
                   widgets.del_effect_b,
                   widgets.exit_button,
                   widgets.toggle_all):
        button.set_sensitive(value)
def update_stack_view():
    """Display current clip's filter stack in the filter stack view."""
    if clip is not None:
        # One info object per filter, in stack order.
        filter_infos = [f.info for f in clip.filters]
        widgets.effect_stack_view.fill_data_model(filter_infos, clip.filters)
    else:
        # No clip being edited: show an empty stack.
        widgets.effect_stack_view.fill_data_model([], [])

    widgets.effect_stack_view.treeview.queue_draw()
def update_stack_view_changed_blocked():
    """Update the stack view while suppressing the "changed" callback."""
    global block_changed_update
    block_changed_update = True
    try:
        update_stack_view()
    finally:
        # Always release the block, even if the update raises; otherwise all
        # further "changed" callbacks would be silently ignored.
        block_changed_update = False
def add_currently_selected_effect():
    """Add the filter currently selected in the filter list to current clip."""
    # Check we have clip
    if clip == None:
        return

    filter_info = get_selected_filter_info()
    action = get_filter_add_action(filter_info, clip)
    action.do_edit() # gui update in callback from EditAction object.
    updater.repaint_tline()
    # FIX: removed dead trailing re-fetch of filter_info; the value was
    # never used after the edit was done.
def get_filter_add_action(filter_info, target_clip):
    """
    Return the EditAction that adds filter described by filter_info to
    target_clip; multipart filters get their own action type.
    """
    # Both action types take the same data dict; build it once.
    data = {"clip":target_clip,
            "filter_info":filter_info,
            "filter_edit_done_func":filter_edit_done}
    if filter_info.multipart_filter == False:
        action = edit.add_filter_action(data)
    else:
        action = edit.add_multipart_filter_action(data)
    return action
def get_selected_filter_info():
    """Return FilterInfo for the filter selected in the filter list view."""
    # Get current selection on effects treeview - that's a vertical list.
    treeselection = gui.effect_select_list_view.treeview.get_selection()
    (model, rows) = treeselection.get_selected_rows()
    row = rows[0]
    row_index = max(row)

    # Look up the filter in the currently active filter group.
    group_name, filters_array = mltfilters.groups[gui.effect_select_combo_box.get_active()]
    return filters_array[row_index]
def add_effect_pressed():
    # "Add" button callback.
    add_currently_selected_effect()
def delete_effect_pressed():
    """Delete the filter currently selected in the filter stack view."""
    if len(clip.filters) == 0:
        return

    # Block updates until we have set selected row
    global edit_effect_update_blocked
    edit_effect_update_blocked = True

    treeselection = widgets.effect_stack_view.treeview.get_selection()
    (model, rows) = treeselection.get_selected_rows()
    try:
        row = rows[0]
    except IndexError:  # was bare except; only an empty selection list can fail here
        # This fails when there are filters but no rows are selected.
        edit_effect_update_blocked = False  # don't leave updates blocked on early exit
        return
    row_index = max(row)

    data = {"clip":clip,
            "index":row_index,
            "filter_edit_done_func":filter_edit_done}
    action = edit.remove_filter_action(data)
    action.do_edit()

    updater.repaint_tline()

    # Set last filter selected and display in editor
    edit_effect_update_blocked = False
    if len(clip.filters) == 0:
        return
    path = str(len(clip.filters) - 1)
    # Causes edit_effect_selected() called as it is the "change" listener
    widgets.effect_stack_view.treeview.get_selection().select_path(path)
def toggle_all_pressed():
    """Flip active state of every filter in the current clip's stack."""
    # Iterate the filters directly instead of by index.
    for filter_object in clip.filters:
        filter_object.active = (filter_object.active == False)
        filter_object.update_mlt_disabled_value()

    update_stack_view()
def reset_filter_values():
    """Reset the selected filter's properties to their default values."""
    treeselection = widgets.effect_stack_view.treeview.get_selection()
    (model, rows) = treeselection.get_selected_rows()
    row = rows[0]
    row_index = max(row)

    clip.filters[row_index].reset_values(PROJECT().profile, clip)
    # Rebuild the property editors for the now-reset filter.
    effect_selection_changed()
def toggle_filter_active(row, update_stack_view=True):
    """
    Flip active state of the filter at 'row' in the current clip's stack.

    NOTE(review): parameter 'update_stack_view' shadows the module function
    of the same name; kept as-is since callers may pass it as a keyword.
    """
    filter_object = clip.filters[row]
    filter_object.active = (filter_object.active == False)
    filter_object.update_mlt_disabled_value()
    if update_stack_view == True:
        update_stack_view_changed_blocked()
def effect_selection_changed():
    """
    Display property editors for the filter selected in the stack view.
    Called as the stack treeview's "changed" listener and after edits.
    """
    global keyframe_editor_widgets

    # Check we have clip
    if clip == None:
        keyframe_editor_widgets = []
        return

    # Check we actually have filters so we can display one.
    # If not, clear previous filters from view.
    if len(clip.filters) == 0:
        vbox = gtk.VBox(False, 0)
        vbox.pack_start(gtk.Label(), False, False, 0)
        widgets.value_edit_frame.remove(widgets.value_edit_box)
        widgets.value_edit_frame.add(vbox)
        vbox.show_all()
        widgets.value_edit_box = vbox
        keyframe_editor_widgets = []
        return

    # "changed" gets called twice when adding filter and selecting last
    # so we use this to do this only once
    if block_changed_update == True:
        return

    keyframe_editor_widgets = []

    # Get selected row which is also index of filter in clip.filters
    treeselection = widgets.effect_stack_view.treeview.get_selection()
    (model, rows) = treeselection.get_selected_rows()
    # If we don't get legal selection select first filter
    try:
        row = rows[0]
        filter_index = max(row)
    except:
        filter_index = 0
    filter_object = clip.filters[filter_index]

    # Create EditableProperty wrappers for properties
    editable_properties = propertyedit.get_filter_editable_properties(
                                                               clip,
                                                               filter_object,
                                                               filter_index,
                                                               track,
                                                               clip_index)

    # Get editors and set them displayed
    vbox = gtk.VBox(False, 0)
    # Use translated filter name when one is available.
    try:
        filter_name = translations.filter_names[filter_object.info.name]
    except KeyError:
        filter_name = filter_object.info.name

    filter_name_label = gtk.Label( "<b>" + filter_name + "</b>")
    filter_name_label.set_use_markup(True)
    vbox.pack_start(filter_name_label, False, False, 0)
    vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)

    if len(editable_properties) > 0:
        # Create editor row for each editable property
        for ep in editable_properties:
            editor_row = propertyeditorbuilder.get_editor_row(ep)
            if editor_row == None:
                continue

            # Set keyframe editor widget to be updated for frame changes if such is created
            try:
                editor_type = ep.args[propertyeditorbuilder.EDITOR]
            except KeyError:
                editor_type = propertyeditorbuilder.SLIDER # this is the default value
            if ((editor_type == propertyeditorbuilder.KEYFRAME_EDITOR)
                or (editor_type == propertyeditorbuilder.KEYFRAME_EDITOR_RELEASE)
                or (editor_type == propertyeditorbuilder.KEYFRAME_EDITOR_CLIP)):
                keyframe_editor_widgets.append(editor_row)

            vbox.pack_start(editor_row, False, False, 0)
            if not hasattr(editor_row, "no_separator"):
                vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)

        # Create NonMltEditableProperty wrappers for properties
        non_mlteditable_properties = propertyedit.get_non_mlt_editable_properties( clip,
                                                                                  filter_object,
                                                                                  filter_index)

        # Extra editors. Editable properties may have already been created
        # with "editor=no_editor" and now extra editors may be created to edit those
        # Non mlt properties are added as these are only needed with extraeditors
        editable_properties.extend(non_mlteditable_properties)
        editor_rows = propertyeditorbuilder.get_filter_extra_editor_rows(filter_object, editable_properties)
        for editor_row in editor_rows:
            vbox.pack_start(editor_row, False, False, 0)
            if not hasattr(editor_row, "no_separator"):
                vbox.pack_start(guicomponents.EditorSeparator().widget, False, False, 0)
        vbox.pack_start(gtk.Label(), True, True, 0)
    else:
        vbox.pack_start(gtk.Label(_("No editable parameters")), True, True, 0)
    vbox.show_all()

    # Swap the built editors panel into the value edit frame.
    scroll_window = gtk.ScrolledWindow()
    scroll_window.add_with_viewport(vbox)
    scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll_window.show_all()

    widgets.value_edit_frame.remove(widgets.value_edit_box)
    widgets.value_edit_frame.add(scroll_window)
    widgets.value_edit_box = scroll_window
def clear_effects_edit_panel():
    """Replace current property editors panel with an empty label."""
    placeholder = gtk.Label()
    widgets.value_edit_frame.remove(widgets.value_edit_box)
    widgets.value_edit_frame.add(placeholder)
    widgets.value_edit_box = placeholder
def filter_edit_done(edited_clip, index=-1):
    """
    EditAction object calls this after edits and undos and redos.

    edited_clip - clip the edit touched
    index       - filter row to select afterwards, -1 when stack is empty
    """
    if edited_clip != clip: # This gets called by all undos/redos, we only want to update if clip being edited here is affected
        return

    # Refresh the stack view without triggering the "changed" callback.
    global block_changed_update
    block_changed_update = True
    update_stack_view()
    block_changed_update = False

    # Select row in effect stack view and so display corresponding effect editor panel.
    if not(index < 0):
        widgets.effect_stack_view.treeview.get_selection().select_path(str(index))
    else: # no effects after edit, clear effect editor panel
        clear_effects_edit_panel()
def display_kfeditors_tline_frame(frame):
    """Show timeline frame position in all displayed keyframe editors."""
    for editor in keyframe_editor_widgets:
        editor.display_tline_frame(frame)
def update_kfeditors_positions():
    """Tell all displayed keyframe editors that clip position changed."""
    for editor in keyframe_editor_widgets:
        editor.update_clip_pos()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2013 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import dialogs
import dialogutils
import editorpersistance
import gui
import guiutils
import mltprofiles
# Preferences dialog notebook size and two-column row layout.
PREFERENCES_WIDTH = 730
PREFERENCES_HEIGHT = 300
PREFERENCES_LEFT = 410  # width of left (label) column in preference rows

select_thumbnail_dir_callback = None # app.py sets at start up
select_render_clips_dir_callback = None # app.py sets at start up
def preferences_dialog():
    """Build and show the modal Editor Preferences dialog."""
    #global select_thumbnail_dir_callback, select_render_clips_dir_callback
    #select_thumbnail_dir_callback = select_thumbnail_cb
    #select_render_clips_dir_callback = select_render_clips_cb

    dialog = gtk.Dialog(_("Editor Preferences"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))

    # One (panel, widgets-for-saving) pair per notebook page.
    gen_opts_panel, gen_opts_widgets = _general_options_panel(_thumbs_select_clicked, _renders_select_clicked)
    edit_prefs_panel, edit_prefs_widgets = _edit_prefs_panel()
    view_pres_panel, view_pref_widgets = _view_prefs_panel()

    notebook = gtk.Notebook()
    notebook.set_size_request(PREFERENCES_WIDTH, PREFERENCES_HEIGHT)
    notebook.append_page(gen_opts_panel, gtk.Label(_("General")))
    notebook.append_page(edit_prefs_panel, gtk.Label(_("Editing")))
    notebook.append_page(view_pres_panel, gtk.Label(_("View")))

    # Widget tuples are passed to the response callback for saving on OK.
    dialog.connect('response', _preferences_dialog_callback, (gen_opts_widgets, edit_prefs_widgets, view_pref_widgets))
    dialog.vbox.pack_start(notebook, True, True, 0)
    dialogutils.default_behaviour(dialog)
    dialog.show_all()
def _thumbs_select_clicked(widget):
    # "Select Folder" button for the thumbnails directory.
    dialogs.select_thumbnail_dir(select_thumbnail_dir_callback, gui.editor_window.window, editorpersistance.prefs.thumbnail_folder, False)
def _renders_select_clicked(widget):
    # "Select Folder" button for the rendered clips directory.
    dialogs.select_rendred_clips_dir(select_render_clips_dir_callback, gui.editor_window.window, editorpersistance.prefs.render_folder)
def _preferences_dialog_callback(dialog, response_id, all_widgets):
    """On OK: save preferences and tell user a restart may be needed."""
    if response_id != gtk.RESPONSE_ACCEPT:
        # Cancelled or closed: just tear the dialog down.
        dialog.destroy()
        return

    editorpersistance.update_prefs_from_widgets(all_widgets)
    editorpersistance.save()
    dialog.destroy()
    primary_txt = _("Restart required for some setting changes to take effect.")
    secondary_txt = _("If requested change is not in effect, restart application.")
    dialogutils.info_message(primary_txt, secondary_txt, gui.editor_window.window)
def _general_options_panel(folder_select_clicked_cb, render_folder_select_clicked_cb):
    """Build the 'General' preferences tab.

    Returns (panel_widget, widgets_tuple); the widgets tuple is later passed
    to editorpersistance.update_prefs_from_widgets() for persisting.
    """
    prefs = editorpersistance.prefs

    # Widgets
    open_in_last_opened_check = gtk.CheckButton()
    open_in_last_opened_check.set_active(prefs.open_in_last_opended_media_dir)

    open_in_last_rendered_check = gtk.CheckButton()
    open_in_last_rendered_check.set_active(prefs.remember_last_render_dir)

    default_profile_combo = gtk.combo_box_new_text()
    profiles = mltprofiles.get_profiles()
    for profile in profiles:
        default_profile_combo.append_text(profile[0])
    default_profile_combo.set_active( mltprofiles.get_default_profile_index())

    spin_adj = gtk.Adjustment(prefs.undos_max, editorpersistance.UNDO_STACK_MIN, editorpersistance.UNDO_STACK_MAX, 1)
    undo_max_spin = gtk.SpinButton(spin_adj)
    undo_max_spin.set_numeric(True)

    folder_select = gtk.Button(_("Select Folder")) # thumbnails
    folder_select.connect("clicked" , folder_select_clicked_cb)

    render_folder_select = gtk.Button(_("Select Folder"))
    render_folder_select.connect("clicked" , render_folder_select_clicked_cb)

    autosave_combo = gtk.combo_box_new_text()
    for i in range(0, len(editorpersistance.prefs.AUTO_SAVE_OPTS)):
        time, desc = editorpersistance.prefs.AUTO_SAVE_OPTS[i]
        autosave_combo.append_text(desc)
    autosave_combo.set_active(prefs.auto_save_delay_value_index)

    # NOTE(review): option texts below are not wrapped in _() like other
    # UI strings -- presumably an oversight, confirm before changing.
    load_order_combo = gtk.combo_box_new_text()
    load_order_combo.append_text("Absolute paths first, relative second")
    load_order_combo.append_text("Relative paths first, absolute second")
    load_order_combo.append_text("Absolute paths only")
    load_order_combo.set_active(prefs.media_load_order)

    # Layout
    row1 = guiutils.get_two_column_box(gtk.Label(_("Default Profile:")), default_profile_combo, PREFERENCES_LEFT)
    row2 = guiutils.get_checkbox_row_box(open_in_last_opened_check, gtk.Label(_("Remember last media directory")))
    row3 = guiutils.get_two_column_box(gtk.Label(_("Undo stack size:")), undo_max_spin, PREFERENCES_LEFT)
    row4 = guiutils.get_two_column_box(gtk.Label(_("Thumbnail folder:")), folder_select, PREFERENCES_LEFT)
    row5 = guiutils.get_checkbox_row_box(open_in_last_rendered_check, gtk.Label(_("Remember last render directory")))
    row6 = guiutils.get_two_column_box(gtk.Label(_("Autosave for crash recovery every:")), autosave_combo, PREFERENCES_LEFT)
    row8 = guiutils.get_two_column_box(gtk.Label(_("Rendered Clips folder:")), render_folder_select, PREFERENCES_LEFT)
    row9 = guiutils.get_two_column_box(gtk.Label(_("Media look-up order on load:")), load_order_combo, PREFERENCES_LEFT)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row6, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(row5, False, False, 0)
    vbox.pack_start(row3, False, False, 0)
    vbox.pack_start(row4, False, False, 0)
    vbox.pack_start(row8, False, False, 0)
    vbox.pack_start(row9, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    # NOTE(review): autosave_combo is not in the returned widgets tuple, so
    # its value is presumably read/persisted elsewhere -- confirm.
    return align, (default_profile_combo, open_in_last_opened_check, open_in_last_rendered_check, undo_max_spin, load_order_combo)
def _edit_prefs_panel():
    """Build the 'Editing' preferences tab.

    Returns (panel_widget, widgets_tuple) for later persisting.
    """
    prefs = editorpersistance.prefs

    # Widgets
    auto_play_in_clip_monitor = gtk.CheckButton()
    auto_play_in_clip_monitor.set_active(prefs.auto_play_in_clip_monitor)

    auto_center_on_stop = gtk.CheckButton()
    auto_center_on_stop.set_active(prefs.auto_center_on_play_stop)

    # Default length (in frames) for graphics clips added to the timeline.
    spin_adj = gtk.Adjustment(prefs.default_grfx_length, 1, 15000, 1)
    gfx_length_spin = gtk.SpinButton(spin_adj)
    gfx_length_spin.set_numeric(True)

    trim_exit_on_empty = gtk.CheckButton()
    trim_exit_on_empty.set_active(prefs.empty_click_exits_trims)

    quick_enter_trim = gtk.CheckButton()
    quick_enter_trim.set_active(prefs.quick_enter_trims)

    remember_clip_frame = gtk.CheckButton()
    remember_clip_frame.set_active(prefs.remember_monitor_clip_frame)

    # Layout
    row1 = guiutils.get_checkbox_row_box(auto_play_in_clip_monitor, gtk.Label(_("Autoplay new Clips in Clip Monitor")))
    row2 = guiutils.get_checkbox_row_box(auto_center_on_stop, gtk.Label(_("Center Current Frame on Playback Stop")))
    row4 = guiutils.get_two_column_box(gtk.Label(_("Graphics default length:")), gfx_length_spin, PREFERENCES_LEFT)
    row5 = guiutils.get_checkbox_row_box(trim_exit_on_empty, gtk.Label(_("Trim Modes exit on empty click")))
    row6 = guiutils.get_checkbox_row_box(quick_enter_trim, gtk.Label(_("Quick enter Trim Modes")))
    row7 = guiutils.get_checkbox_row_box(remember_clip_frame, gtk.Label(_("Remember Monitor Clip Frame")))

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row5, False, False, 0)
    vbox.pack_start(row6, False, False, 0)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(row4, False, False, 0)
    vbox.pack_start(row7, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    return align, (auto_play_in_clip_monitor, auto_center_on_stop, gfx_length_spin, trim_exit_on_empty, quick_enter_trim, remember_clip_frame)
def _view_prefs_panel():
    """Build the 'View' preferences tab.

    Returns (panel_widget, widgets_tuple) for later persisting.
    """
    prefs = editorpersistance.prefs

    # Widgets
    display_splash_check = gtk.CheckButton()
    display_splash_check.set_active(prefs.display_splash_screen)

    buttons_combo = gtk.combo_box_new_text()
    buttons_combo.append_text(_("Glass"))
    buttons_combo.append_text(_("Simple"))
    if prefs.buttons_style == editorpersistance.GLASS_STYLE:
        buttons_combo.set_active(0)
    else:
        buttons_combo.set_active(1)

    # Index 0 = light theme, 1 = dark theme.
    dark_combo = gtk.combo_box_new_text()
    dark_combo.append_text(_("Light Theme"))
    dark_combo.append_text(_("Dark Theme"))
    if prefs.dark_theme == True:
        dark_combo.set_active(1)
    else:
        dark_combo.set_active(0)

    # Layout
    row1 = guiutils.get_checkbox_row_box(display_splash_check, gtk.Label(_("Display splash screen")))
    row2 = guiutils.get_two_column_box(gtk.Label(_("Buttons style:")), buttons_combo, PREFERENCES_LEFT)
    row3 = guiutils.get_two_column_box(gtk.Label(_("Icons and color optimized for:")), dark_combo, PREFERENCES_LEFT)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(row3, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)

    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(12, 0, 12, 12)
    align.add(vbox)

    return align, (display_splash_check, buttons_combo, dark_combo)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import glib
import os, sys
from xml.dom import minidom
from decimal import Decimal,getcontext,ROUND_DOWN
from math import modf, floor
import mlt
import time
import md5
import re
import shutil
import dialogs
import dialogutils
from editorstate import PLAYER
from editorstate import PROJECT
from editorstate import current_sequence
import gui
import guiutils
import renderconsumer
import utils
EDL_TYPE_AVID_CMX3600 = 0  # Only EDL flavor currently produced

# Audio handling options for EDL export
AUDIO_FROM_VIDEO = 0        # write AA/V events: audio follows the video clip
AUDIO_FROM_AUDIO_TRACK = 1  # write audio events from a selected audio track
NO_AUDIO = 2

# Reel name generation strategies (see MLTXMLToEDLParse.get_reel_name)
REEL_NAME_3_NUMBER = 0
REEL_NAME_8_NUMBER = 1
REEL_NAME_FILE_NAME_START = 2

CLIP_OUT_IS_LAST_FRAME = -999  # Sentinel: clip extends to the end of the track

_xml_render_player = None  # Active XMLRenderPlayer, kept referenced while rendering
_screenshot_img = None     # gtk.Image preview shown in the frame export dialog

# Image type display names and their matching file extensions (same index)
_img_types = ["png", "bmp", "targa","tiff"]
_img_extensions = ["png", "bmp", "tga","tif"]
####---------------MLT--------------####
def MELT_XML_export():
    # Ask the user for a save path; rendering starts in the dialog callback.
    dialogs.export_xml_dialog(_export_melt_xml_dialog_callback, PROJECT().name)
def _export_melt_xml_dialog_callback(dialog, response_id):
    """Start rendering the current sequence to an MLT XML file on accept.

    The player renders asynchronously; _xml_render_done clears the module
    reference when it finishes.
    """
    if response_id == gtk.RESPONSE_ACCEPT:
        filenames = dialog.get_filenames()
        save_path = filenames[0]

        # Bugfix: this used to declare 'global _xml_render_monitor' (a name
        # that does not exist), which made the assignment below create a
        # function local instead of setting the module-level reference that
        # _xml_render_done() later clears.
        global _xml_render_player
        _xml_render_player = renderconsumer.XMLRenderPlayer(save_path,
                                                            _xml_render_done,
                                                            None)
        _xml_render_player.start()

        dialog.destroy()
    else:
        dialog.destroy()
def _xml_render_done(data):
    # Render complete: drop the module-level reference so the player can be
    # garbage collected.
    global _xml_render_player
    _xml_render_player = None
####---------------EDL--------------####
def EDL_export():
    # Ask for EDL export options; rendering starts in the dialog callback.
    dialogs.export_edl_dialog(_export_edl_dialog_callback, gui.editor_window.window, PROJECT().name)
def _export_edl_dialog_callback(dialog, response_id, data):
    """On accept, render a temp MLT XML of the sequence for EDL conversion.

    data: (file_name, out_folder, track_select_combo, cascade_check,
           op_combo, audio_track_select_combo) widgets from the export dialog.
    The EDL itself is written in _edl_xml_render_done once the XML exists.
    """
    if response_id == gtk.RESPONSE_YES:
        file_name, out_folder, track_select_combo, cascade_check, op_combo, audio_track_select_combo = data
        edl_path = out_folder.get_filename()+ "/" + file_name.get_text() + ".edl"

        # Bugfix: this used to declare 'global _xml_render_monitor' (a name
        # that does not exist), so the assignment below only created a
        # function local and never set the module-level player reference.
        global _xml_render_player
        _xml_render_player = renderconsumer.XMLRenderPlayer(get_edl_temp_xml_path(),
                                                            _edl_xml_render_done,
                                                            (edl_path, track_select_combo, cascade_check, op_combo, audio_track_select_combo))
        _xml_render_player.start()

        dialog.destroy()
    else:
        dialog.destroy()
def _edl_xml_render_done(data):
    """XML render finished: parse the temp XML into an EDL and write it out.

    data: (edl_path, track_select_combo, cascade_check, op_combo,
           audio_track_select_combo) packed in _export_edl_dialog_callback.
    """
    edl_path, track_select_combo, cascade_check, op_combo, audio_track_select_combo = data

    # Combo indexes are offsets from the first video track / first audio track.
    video_track = current_sequence().first_video_index + track_select_combo.get_active()
    audio_track = 1 + audio_track_select_combo.get_active()

    # Render done: release the module-level player reference.
    global _xml_render_player
    _xml_render_player = None

    mlt_parse = MLTXMLToEDLParse(get_edl_temp_xml_path(), edl_path)
    edl_contents = mlt_parse.create_edl(video_track,
                                        cascade_check.get_active(),
                                        op_combo.get_active(),
                                        audio_track)

    # Use a context manager so the file handle is closed even if write fails.
    with open(edl_path, 'w') as f:
        f.write(edl_contents)
def get_edl_temp_xml_path():
    # Scratch XML file in the hidden user dir, used as EDL conversion input.
    return utils.get_hidden_user_dir_path() + "edl_temp_xml.xml"
class MLTXMLToEDLParse:
    """Converts an MLT XML render of a sequence into CMX 3600 EDL text.

    The XML (written by renderconsumer.XMLRenderPlayer) is read with minidom.
    Playlists are flattened into per-frame arrays of event ids, optionally
    cascaded top-down across video tracks, and emitted as EDL event lines.
    """
    def __init__(self, xmlfile, title):
        # xmlfile: path of the MLT XML file to parse.
        # title: path-like string; its basename becomes the EDL TITLE: line.
        self.xmldoc = minidom.parse(xmlfile)
        self.title = title

        self.reel_name_type = REEL_NAME_FILE_NAME_START
        self.from_clip_comment = False
        self.use_drop_frames = False
        self.blender_fix = False

    def get_project_profile(self):
        """Return the first <profile> element's attributes as a dict."""
        profile_dict = {}
        profile = self.xmldoc.getElementsByTagName("profile")
        key_list = profile.item(0).attributes.keys()
        for a in key_list:
            profile_dict[a] = profile.item(0).attributes[a].value
        return profile_dict

    def get_tracks(self):
        """Return the producer id of every <track> element as a tuple."""
        tracks = []
        t = self.xmldoc.getElementsByTagName("track")
        for track in t:
            tracks.append(track.attributes["producer"].value)
        return tuple(tracks)

    def get_playlists(self):
        """Return a tuple of playlist dicts.

        Each dict has "pid" and "events"; every entry/blank event gets a
        globally unique integer "eid" assigned in document order.
        """
        playlist_list = []
        playlists = self.xmldoc.getElementsByTagName("playlist")
        eid = 0
        for p in playlists:
            event_list = []
            pl_dict = {}
            pl_dict["pid"] = p.attributes["id"].value

            event_nodes = p.childNodes
            events = []  # NOTE(review): unused local, kept for byte-compat
            for i in range(0, event_nodes.length):
                # Get edit event
                event = event_nodes.item(i)

                # Create event dict and give it id
                ev_dict = {}
                ev_dict["eid"] = eid
                eid = eid + 1

                # Set event data
                if event.localName == "entry":# or event.localName == "blank":
                    ev_dict["type"] = event.localName
                    ev_dict["producer"] = event.attributes["producer"].value
                    ev_dict["inTime"] = event.attributes["in"].value
                    ev_dict["outTime"] = event.attributes["out"].value
                    event_list.append(ev_dict)
                elif event.localName == "blank":
                    ev_dict["type"] = event.localName
                    ev_dict["length"] = event.attributes["length"].value
                    event_list.append(ev_dict)

            pl_dict["events"] = event_list
            playlist_list.append(pl_dict)

        return tuple(playlist_list)

    def get_events_dict(self, playlists, source_links):
        """Map eid -> event dict, turning pattern producer events into blanks."""
        events_dict = {}
        for play_list in playlists:
            for event in play_list["events"]:
                # Replace pattern producer events with blanks
                try:
                    producer = event["producer"]
                    resource = source_links[producer]
                    if resource == "<producer>" or resource[0:1] == "#": # This is what MLT puts as resource for pattern producers or color clips
                        event["type"] = "blank"
                        event["length"] = int(event["outTime"]) - int(event["inTime"]) + 1
                except:
                    # blanks have no "producer" key; leave them unchanged
                    pass
                # Add events to event dict
                eid = event["eid"]
                events_dict[eid] = event
        return events_dict

    def get_producers(self):
        """Return a tuple of producer dicts: id, in/out times and properties."""
        producer_list = []
        producers = self.xmldoc.getElementsByTagName("producer")
        for p in producers:
            p_dict = {}
            p_dict["pid"] = p.attributes["id"].value
            p_dict["inTime"] = p.attributes["in"].value
            p_dict["outTime"] = p.attributes["out"].value
            properties = p.getElementsByTagName("property")
            for props in properties:
                # Dots in MLT property names are not valid dict-key chars here
                p_dict[props.attributes["name"].value.replace(".","_")] = props.firstChild.data
            producer_list.append(p_dict)
        return tuple(producer_list)

    def link_references(self):
        """Return (source_links, reel_names).

        source_links: producer id -> resource path.
        reel_names: resource path -> unique 8-char uppercase reel name.
        """
        source_links = {}
        for i in self.get_producers():
            src_pid = i["pid"]
            source_links[src_pid] = i["resource"]

        reel_names = {}
        resources = []
        reel_count = 1
        for pid, resource in source_links.iteritems():
            # Only create reel name once for each unique resource
            if resource in resources:
                continue
            else:
                resources.append(resource)

            # Get 8 char uppercase alphanumeric reelname.
            reel_name = self.get_reel_name(resource, reel_count)

            # If we happen to get same reel name for two different resources we need to
            # create different reel names for them
            if reel_name in reel_names.values():
                reel_name = reel_name[0:4] + "{0:04d}".format(reel_count)

            reel_names[resource] = reel_name
            reel_count = reel_count + 1

        return (source_links, reel_names)

    def get_reel_name(self, resource, reel_count):
        """Build a reel name for a resource per self.reel_name_type."""
        if self.reel_name_type == REEL_NAME_3_NUMBER:
            return "{0:03d}".format(reel_count)
        elif self.reel_name_type == REEL_NAME_8_NUMBER:
            return "{0:08d}".format(reel_count)
        else:
            # Derive from the file name: non-alphanumerics become 'X',
            # then pad/truncate to exactly 8 uppercase chars.
            file_name = resource.split("/")[-1]
            file_name_no_ext = file_name.split(".")[0]
            file_name_no_ext = re.sub('[^0-9a-zA-Z]+', 'X', file_name_no_ext)
            file_name_len = len(file_name_no_ext)
            if file_name_len >= 8:
                reel_name = file_name_no_ext[0:8]
            else:
                reel_name = file_name_no_ext + "XXXXXXXX"[0:8 - file_name_len]

            return reel_name.upper()

    def create_edl(self, track_index, cascade, audio_op, audio_track_index):
        """Build and return the complete EDL text.

        track_index: playlist index of the video track to export.
        cascade: if True, combine all video tracks top-down instead.
        audio_op: AUDIO_FROM_VIDEO / AUDIO_FROM_AUDIO_TRACK / NO_AUDIO.
        audio_track_index: playlist index used when audio_op selects a track.
        """
        str_list = []

        title = self.title.split("/")[-1]
        title = title.split(".")[0].upper()
        str_list.append("TITLE: " + title + "\n")

        source_links, reel_names = self.link_references()
        playlists = self.get_playlists()
        event_dict = self.get_events_dict(playlists, source_links)

        edl_event_count = 1 # incr. event index

        # Write video events
        if not cascade:
            playlist = playlists[track_index]
            track_frames = self.get_track_frame_array(playlist)
        else:
            track_frames = self.cascade_playlists(playlists, event_dict)

        if audio_op == AUDIO_FROM_VIDEO:
            src_channel = "AA/V"
        else:
            src_channel = "V"

        if len(track_frames) != 0:
            edl_event_count = self.write_track_events(str_list,
                                                      track_frames, src_channel,
                                                      source_links,
                                                      reel_names, event_dict,
                                                      edl_event_count)

        # Write audio events
        if audio_op == AUDIO_FROM_AUDIO_TRACK:
            src_channel = "AA"
            playlist = playlists[audio_track_index]
            track_frames = self.get_track_frame_array(playlist)
            self.write_track_events(str_list, track_frames, src_channel,
                                    source_links, reel_names, event_dict,
                                    edl_event_count)

        print ''.join(str_list).strip("\n")
        return ''.join(str_list).strip("\n")

    def write_track_events(self, str_list, track_frames, src_channel,
                           source_links, reel_names, event_dict,
                           edl_event_count):
        """Walk a per-frame event array and append one EDL line per clip run.

        Returns the next free EDL event number.
        """
        prog_in = 0
        prog_out = 0
        running = True
        while running:
            current_clip = track_frames[prog_in]
            event = event_dict[current_clip]
            prog_out = self.get_last_clip_frame(track_frames, prog_in)
            if prog_out == CLIP_OUT_IS_LAST_FRAME:
                running = False
                prog_out = len(track_frames)

            if event["type"] == "entry":
                # Get media producer atrrs
                producer = event["producer"]
                resource = source_links[producer]
                reel_name = reel_names[resource]
                src_in = int(event["inTime"]) # source clip IN time
                src_out = int(event["outTime"]) # source clip OUT time
                src_out = src_out + 1 # EDL out is exclusive, MLT out is inclusive
                if self.blender_fix:
                    src_in = src_in + 1
                    src_out = src_in + 1

                self.write_producer_edl_event_CMX3600(str_list, resource,
                                                      edl_event_count, reel_name, src_channel,
                                                      src_in, src_out, prog_in, prog_out)
                prog_in = prog_out
            elif event["type"] == "blank":
                # Blanks come from the special "BL" reel.
                reel_name = "BL"
                src_in = 0
                src_out = int(event["length"])
                prog_out = prog_in + int(event["length"])
                resource = None
                self.write_producer_edl_event_CMX3600(str_list, resource,
                                                      edl_event_count, reel_name, src_channel,
                                                      src_in, src_out, prog_in, prog_out)
                prog_in = prog_out
            else:
                print "event type error at create_edl"
                break

            edl_event_count = edl_event_count + 1

        return edl_event_count

    def get_last_clip_frame(self, frames, first):
        """Return index just past the run of equal eids starting at 'first',
        or CLIP_OUT_IS_LAST_FRAME when the run reaches the end of the array."""
        val = frames[first]
        last = first + 1
        try:
            while frames[last] == val:
                last = last + 1
            return last
        except:
            # IndexError: ran off the end of the track
            return CLIP_OUT_IS_LAST_FRAME

    def write_producer_edl_event_CMX3600(self, str_list, resource, edl_event, reel_name,
                                         src_channel, src_in, src_out, prog_in, prog_out):
        """Append one CMX 3600 event line (and optional FROM CLIP comment)."""
        src_transition = "C"  # cut; transitions are not exported
        if self.from_clip_comment == True and resource != None:
            str_list.append("* FROM CLIP NAME: " + resource.split("/")[-1] + "\n")
        str_list.append("{0:03d}".format(edl_event))
        str_list.append("  ")
        str_list.append(reel_name)
        str_list.append("  ")
        str_list.append(src_channel)
        str_list.append("  ")
        str_list.append(src_transition)
        str_list.append("        ")
        str_list.append(" ")
        str_list.append(self.frames_to_tc(src_in))
        str_list.append(" ")
        str_list.append(self.frames_to_tc(src_out))
        str_list.append(" ")
        str_list.append(self.frames_to_tc(prog_in))
        str_list.append(" ")
        str_list.append(self.frames_to_tc(prog_out))
        str_list.append("\n")

    def cascade_playlists(self, playlists, event_dict):
        """Combine all video tracks top-down into one per-frame event array."""
        tracks_count = len(current_sequence().tracks) - current_sequence().first_video_index - 1

        # Handle 1 and 2 video tracks cases
        if tracks_count == 1:
            return self.get_track_frame_array(playlists[len(current_sequence().tracks) - 2])

        if tracks_count == 2:
            top_track_frames = self.get_track_frame_array(playlists[len(current_sequence().tracks) - 2])
            bottom_track_frames = self.get_track_frame_array(playlists[len(current_sequence().tracks) - 3])
            return self.combine_two_tracks(top_track_frames, bottom_track_frames, event_dict)

        # 3+ tracks: fold downwards, topmost non-blank frame wins.
        top_track_frames = self.get_track_frame_array(playlists[len(current_sequence().tracks) - 2])
        for i in range(len(current_sequence().tracks) - 3, current_sequence().first_video_index - 1, -1):
            bottom_track_frames = self.get_track_frame_array(playlists[i])
            top_track_frames = self.combine_two_tracks(top_track_frames, bottom_track_frames, event_dict)

        return top_track_frames

    def combine_two_tracks(self, t_frames, b_frames, event_dict):
        """Merge two per-frame arrays; top track wins except on its blanks."""
        if len(t_frames) == 0 and len(b_frames) == 0:
            return []
        if len(t_frames) == 0:
            return b_frames
        if len(b_frames) == 0:
            return t_frames

        combined_frames = []

        # Pad the shorter array with None so both have equal length.
        if len(b_frames) > len(t_frames):
            length = len(b_frames)
            t_frames = self.ljust(t_frames, len(b_frames), None)
        elif len(b_frames) < len(t_frames):
            length = len(t_frames)
            b_frames = self.ljust(b_frames, len(t_frames), None)
        else:
            length = len(t_frames)

        for i in range(0, length):
            frame = t_frames[i]
            if frame != None:
                t_event = event_dict[frame]
            else:
                t_event = None
            frame = b_frames[i]
            if frame != None:
                b_event = event_dict[frame]
            else:
                b_event = None

            if t_event != None and t_event["type"] != "blank":
                combined_frames.append(t_frames[i])
            elif b_event != None:
                combined_frames.append(b_frames[i])
            else:
                combined_frames.append(None)

        return combined_frames

    def get_track_frame_array(self, track):
        """Expand a playlist dict to a list with one event eid per frame."""
        frames = []
        for event in track["events"]:
            if event["type"] == "entry":
                count = int(event["outTime"]) - int(event["inTime"]) + 1
                self.append_frames(frames, count, event["eid"])
            elif event["type"] == "blank":
                count = int(event["length"])
                self.append_frames(frames, count, event["eid"])
        return frames

    def append_frames(self, frames, count, value):
        # Append 'value' to 'frames' 'count' times (in place).
        for i in range(0, count):
            frames.append(value)

    def ljust(self, lst, n, fillvalue=''):
        # Pad list to length n with fillvalue (no-op when already >= n).
        return lst + [fillvalue] * (n - len(lst))

    def frames_to_tc(self, frame):
        """Frame number -> timecode string, drop-frame if configured."""
        if self.use_drop_frames == True:
            return self.frames_to_DF(frame)
        else:
            return utils.get_tc_string(frame)

    def frames_to_DF(self, framenumber):
        """
        Convert a frame number to a drop-frame timecode string (HH:MM:SS;FF).

        This method adapted from C++ code called "timecode" by Jason Wood.
        begin: Wed Dec 17 2003
        copyright: (C) 2003 by Jason Wood
        email: jasonwood@blueyonder.co.uk
        Framerate should be 29.97, 59.94, or 23.976, otherwise the calculations will be off.
        """
        projectMeta = self.get_project_profile()
        framerate = float(projectMeta["frame_rate_num"]) / float(projectMeta["frame_rate_den"])

        # Number of frames to drop on the minute marks is the nearest integer to 6% of the framerate
        dropFrames = round(framerate * 0.066666)
        # Number of frames in an hour
        framesPerHour = round(framerate * 60 * 60)
        # Number of frames in a day - timecode rolls over after 24 hours
        framesPerDay = framesPerHour * 24
        # Number of frames per ten minutes
        framesPer10Minutes = round(framerate * 60 * 10)
        # Number of frames per minute is the round of the framerate * 60 minus the number of dropped frames
        framesPerMinute = (round(framerate) * 60) - dropFrames

        if (framenumber < 0): # For negative time, add 24 hours.
            framenumber = framesPerDay + framenumber

        # If framenumber is greater than 24 hrs, next operation will rollover clock
        # % is the modulus operator, which returns a remainder. a % b = the remainder of a/b
        framenumber = framenumber % framesPerDay

        d = floor(framenumber / framesPer10Minutes)
        m = framenumber % framesPer10Minutes

        if (m > 1):
            framenumber=framenumber + (dropFrames * 9 * d) + dropFrames * floor((m-dropFrames) / framesPerMinute)
        else:
            framenumber = framenumber + dropFrames * 9 * d;

        frRound = round(framerate);
        frames = framenumber % frRound;
        seconds = floor(framenumber / frRound) % 60;
        minutes = floor(floor(framenumber / frRound) / 60) % 60;
        hours = floor(floor(floor(framenumber / frRound) / 60) / 60);

        tc = "%d:%02d:%02d;%02d" % (hours, minutes, seconds, frames)
        return tc
####---------------Screenshot--------------####
def screenshot_export():
    """Render the current timeline frame to png and open the export dialog."""
    length = current_sequence().tractor.get_length()
    if length < 2:
        # NOTE(review): these strings are not wrapped in _() like the rest
        # of the UI text -- presumably an oversight, confirm before fixing.
        dialogutils.info_message("Sequence is too short", "Sequence needs to be at least 2 frames long to allow frame export.", None)
        return

    frame = PLAYER().current_frame()

    # Can't get last frame to render easily, so just force range.
    if frame > length - 2:
        frame = length - 2

    render_screen_shot(frame, get_displayed_image_render_path(), "png")
    export_screenshot_dialog(_export_screenshot_dialog_callback, frame,
                             gui.editor_window.window, PROJECT().name)
    PLAYER().seek_frame(frame)
def _export_screenshot_dialog_callback(dialog, response_id, data):
    """Export dialog response: re-render the frame in the chosen image type
    and copy it to the chosen destination, then clean up."""
    file_name, out_folder, file_type_combo, frame = data
    if response_id == gtk.RESPONSE_YES:
        # Image type and file extension lists share indexes.
        vcodec = _img_types[file_type_combo.get_active()]
        ext = _img_extensions[file_type_combo.get_active()]
        render_path = utils.get_hidden_screenshot_dir_path() + "screenshot_%01d." + ext
        rendered_file_path = utils.get_hidden_screenshot_dir_path() + "screenshot_1." + ext
        out_file_path = out_folder.get_filename()+ "/" + file_name.get_text() + "." + ext
        dialog.destroy()

        # Render to the hidden dir first, then copy to the user's location.
        render_screen_shot(frame, render_path, vcodec)
        shutil.copyfile(rendered_file_path, out_file_path)
    else:
        dialog.destroy()

    # Always remove temp renders and restore the player position.
    purge_screenshots()
    PLAYER().seek_frame(frame)
def get_displayed_image_render_path():
    # Render pattern path; avformat fills in the frame number for %01d.
    return utils.get_hidden_screenshot_dir_path() + "screenshot_%01d.png"
def get_displayed_image_path():
    # Concrete path of the single frame produced by render_screen_shot().
    return utils.get_hidden_screenshot_dir_path() + "screenshot_1.png"
def _screenshot_frame_changed(adjustment):
    # Frame slider moved: refresh the preview image.
    # NOTE(review): _update_displayed_image is not defined in this part of
    # the module -- confirm it exists elsewhere in the file.
    _update_displayed_image(int(adjustment.get_value()))
def render_screen_shot(frame, render_path, vcodec):
    """Render a single sequence frame to an image file with an avformat
    consumer. Blocks (polling) until the render thread starts and stops."""
    producer = current_sequence().tractor
    consumer = mlt.Consumer(PROJECT().profile, "avformat", str(render_path))
    consumer.set("real_time", -1)
    consumer.set("rescale", "bicubic")
    consumer.set("vcodec", str(vcodec))

    renderer = renderconsumer.FileRenderPlayer(None, producer, consumer, frame, frame + 1)
    renderer.wait_for_producer_end_stop = False
    renderer.consumer_pos_stop_add = 2 # Hack, see FileRenderPlayer

    renderer.start()

    # Busy-wait on the render thread; a one-frame render is quick so the
    # 50 ms polling stall is acceptable here.
    while renderer.has_started_running == False:
        time.sleep(0.05)
    while renderer.stopped == False:
        time.sleep(0.05)
def export_screenshot_dialog(callback, frame, parent_window, project_name):
    """Build and show the 'Export Frame Image' dialog.

    callback(dialog, response_id, (file_name, out_folder, file_type_combo,
    frame)) is connected to the dialog response.
    """
    cancel_str = _("Cancel").encode('utf-8')
    ok_str = _("Export Image").encode('utf-8')
    dialog = gtk.Dialog(_("Export Frame Image"),
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (cancel_str, gtk.RESPONSE_CANCEL,
                         ok_str, gtk.RESPONSE_YES))

    global _screenshot_img
    _screenshot_img = guiutils.get_gtk_image_from_file(get_displayed_image_path(), 300)
    frame_frame = guiutils.get_named_frame_with_vbox(None, [_screenshot_img])

    INPUT_LABELS_WITDH = 320

    # Bugfix: str.strip(".flb") removes any of the characters '.', 'f', 'l',
    # 'b' from BOTH ends of the name (e.g. "flowblade.flb" -> "owblade"),
    # it does not remove a suffix. Strip only the actual ".flb" extension.
    if project_name.endswith(".flb"):
        project_name = project_name[:-len(".flb")]
    file_name = gtk.Entry()
    file_name.set_text(project_name)

    # Extension label follows the selected image type (see _file_type_changed).
    extension_label = gtk.Label(".png")
    extension_label.set_size_request(35, 20)

    name_pack = gtk.HBox(False, 4)
    name_pack.pack_start(file_name, True, True, 0)
    name_pack.pack_start(extension_label, False, False, 0)

    name_row = guiutils.get_two_column_box(gtk.Label(_("Export file name:")), name_pack, INPUT_LABELS_WITDH)

    out_folder = gtk.FileChooserButton(_("Select target folder"))
    out_folder.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
    out_folder.set_current_folder(os.path.expanduser("~") + "/")

    folder_row = guiutils.get_two_column_box(gtk.Label(_("Export folder:")), out_folder, INPUT_LABELS_WITDH)

    file_type_combo = gtk.combo_box_new_text()
    for img in _img_types:
        file_type_combo.append_text(img)
    file_type_combo.set_active(0)
    file_type_combo.connect("changed", _file_type_changed, extension_label)
    file_type_row = guiutils.get_two_column_box(gtk.Label(_("Image type:")), file_type_combo, INPUT_LABELS_WITDH)

    file_frame = guiutils.get_named_frame_with_vbox(None, [file_type_row, name_row, folder_row])

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(frame_frame, False, False, 0)
    vbox.pack_start(guiutils.pad_label(12, 12), False, False, 0)
    vbox.pack_start(file_frame, False, False, 0)

    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(12, 12, 12, 12)
    alignment.add(vbox)

    dialog.vbox.pack_start(alignment, True, True, 0)
    dialogutils.default_behaviour(dialog)
    dialog.connect('response', callback, (file_name, out_folder, file_type_combo, frame))
    dialog.show_all()
def _file_type_changed(combo, label):
    # Keep the shown file extension in sync with the selected image type.
    label.set_text("." + _img_extensions[combo.get_active()])
def purge_screenshots():
    """Delete every file in the hidden screenshots working directory."""
    folder = utils.get_hidden_screenshot_dir_path()
    for entry in os.listdir(folder):
        os.remove(os.path.join(folder, entry))
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2013 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import datetime
import pango
import appconsts
import dialogs
import dialogutils
import dnd
import edit
import gui
import guicomponents
import guiutils
import editorstate
from editorstate import PROJECT
import monitorevent
import respaths
import updater
import utils
widgets = utils.EmptyClass()  # Namespace object holding media log GUI widget references
do_multiple_clip_insert_func = None # Monkeypatched here from app.py at startup (avoids a circular import)
class MediaLogEvent:
    """One media log entry: a marked in/out range on a media file."""

    def __init__(self, event_type, mark_in, mark_out, name, path):
        self.event_type = event_type
        self.timestamp = datetime.datetime.now()
        self.mark_in = mark_in
        self.mark_out = mark_out
        self.name = name
        self.path = path
        self.comment = ""
        self.starred = False

    def get_event_name(self):
        """Return a short display name for the event type."""
        type_names = {appconsts.MEDIA_LOG_INSERT: "Insert",
                      appconsts.MEDIA_LOG_MARKS_SET: "Marks"}
        return type_names.get(self.event_type)

    def get_mark_in_str(self):
        """Mark in point as a timecode string."""
        return utils.get_tc_string(self.mark_in)

    def get_mark_out_str(self):
        """Mark out point as a timecode string."""
        return utils.get_tc_string(self.mark_out)

    def get_date_str(self):
        """Creation time, e.g. '5 January, 2014 - 09:30' (no leading zero day)."""
        return self.timestamp.strftime('%d %B, %Y - %H:%M').lstrip('0')
# ----------------------------------------------------------- dnd drop
def clips_drop(clips):
    """Log a marks-set event for every video/audio clip dropped on the log."""
    loggable = (appconsts.VIDEO, appconsts.AUDIO)
    for clip in clips:
        if clip.media_type not in loggable:
            continue
        marks_event = MediaLogEvent(appconsts.MEDIA_LOG_MARKS_SET,
                                    clip.clip_in,
                                    clip.clip_out,
                                    clip.name,
                                    clip.path)
        editorstate.PROJECT().media_log.append(marks_event)

    _update_list_view()
# ----------------------------------------------------------- gui events
def media_log_filtering_changed():
    # A filter control changed: refill the list model with filtered events.
    widgets.media_log_view.fill_data_model()
def media_log_star_button_pressed():
    """Mark every currently selected media log row as starred."""
    selected_rows = widgets.media_log_view.get_selected_rows_list()
    events = get_current_filtered_events()
    for row in selected_rows:
        # Rows are 1-tuple tree paths; max() unwraps the single index.
        events[max(row)].starred = True
    widgets.media_log_view.fill_data_model()
def media_log_no_star_button_pressed():
    """Clear the starred flag on every currently selected media log row."""
    selected_rows = widgets.media_log_view.get_selected_rows_list()
    events = get_current_filtered_events()
    for row in selected_rows:
        # Rows are 1-tuple tree paths; max() unwraps the single index.
        events[max(row)].starred = False
    widgets.media_log_view.fill_data_model()
def log_range_clicked():
    """Log the monitor media file's current mark in/out range as an event."""
    media_file = editorstate.MONITOR_MEDIA_FILE()
    if media_file is None:
        return
    if media_file.type == appconsts.PATTERN_PRODUCER:
        # Pattern producers cannot be logged. INFOWINDOW ???
        return
    if media_file.mark_in == -1 or media_file.mark_out == -1:
        # Need a complete range to log.
        return

    marks_event = MediaLogEvent(appconsts.MEDIA_LOG_MARKS_SET,
                                media_file.mark_in,
                                media_file.mark_out,
                                media_file.name,
                                media_file.path)
    editorstate.PROJECT().media_log.append(marks_event)
    _update_list_view()
def _update_list_view():
    """Refill the log list, show its notebook page, select the newest row
    and scroll to the bottom so the just-added event is visible."""
    widgets.media_log_view.fill_data_model()
    # Read scroll upper bound after refill, apply it last so the view ends
    # scrolled to the bottom.
    max_val = widgets.media_log_view.treeview.get_vadjustment().get_upper()
    gui.middle_notebook.set_current_page(1)
    widgets.media_log_view.treeview.get_selection().select_path(str(len(get_current_filtered_events())-1))
    widgets.media_log_view.treeview.get_vadjustment().set_value(max_val)
def log_item_name_edited(cell, path, new_text, user_data):
    """Cell-edit handler: store the edited comment on the matching event."""
    if new_text == "":
        # Ignore empty edits.
        return

    edited_index = int(path)
    get_current_filtered_events()[edited_index].comment = new_text
    widgets.media_log_view.fill_data_model()
def delete_selected():
    """Delete all selected rows' events from the project media log."""
    selected_rows = widgets.media_log_view.get_selected_rows_list()
    events = get_current_filtered_events()
    # Rows are 1-tuple tree paths; max() unwraps the single index.
    doomed = [events[max(row)] for row in selected_rows]
    PROJECT().delete_media_log_events(doomed)
    widgets.media_log_view.fill_data_model()
def display_item(row):
    """Open the logged range in the monitor and jump to its mark in point."""
    event_item = get_current_filtered_events()[row]
    media_file = PROJECT().get_media_file_for_path(event_item.path)
    # Restore the logged range onto the media file before displaying it.
    media_file.mark_in = event_item.mark_in
    media_file.mark_out = event_item.mark_out
    updater.set_and_display_monitor_media_file(media_file)
    monitorevent.to_mark_in_pressed()
def log_list_view_button_press(treeview, event):
    """Show the row context menu on right-click; let other clicks through."""
    hit = treeview.get_path_at_pos(int(event.x), int(event.y))
    if hit is None or event.button != 3:
        return False

    path = hit[0]
    selection = treeview.get_selection()
    selection.unselect_all()
    selection.select_path(path)

    # Path is a 1-tuple; max() unwraps the row index.
    row = int(max(path))
    guicomponents.display_media_log_event_popup_menu(row, treeview, _log_event_menu_item_selected, event)
    return True
def _log_event_menu_item_selected(widget, data):
    """Dispatch a media log row context menu action.

    data: (item_id, row, treeview) as packed by the popup menu code.
    """
    item_id, row, treeview = data

    if item_id == "delete":
        delete_selected()
        return

    if item_id == "display":
        display_item(row)
        return

    if item_id == "toggle":
        events = get_current_filtered_events()
        events[row].starred = not events[row].starred
        widgets.media_log_view.fill_data_model()
def get_current_filtered_events():
    """Return log events matching the current group/star filter widgets."""
    # Group combo index 0 is "all"; project group indexes start at 0, hence -1.
    log_events = PROJECT().get_filtered_media_log_events(widgets.group_view_select.get_active() - 1,
                                                         widgets.star_check.get_active(),
                                                         widgets.star_not_active_check.get_active())
    return log_events
def append_log_events():
    """
    Append clips for all currently displayed log events to the first
    active track as one undoable edit action.
    """
    clips = [get_log_event_clip(le) for le in get_current_filtered_events()]
    track = editorstate.current_sequence().get_first_active_track() # audio tracks??!!??
    action = edit.append_media_log_action({"track":track, "clips":clips})
    action.do_edit()
def insert_selected_log_events():
    """
    Insert clips for the selected log events on the first active track
    at the current timeline frame.
    """
    treeselection = widgets.media_log_view.treeview.get_selection()
    model, rows = treeselection.get_selected_rows()

    log_events = get_current_filtered_events()
    # rows are single-value tree path tuples.
    clips = [get_log_event_clip(log_events[row_tuple[0]]) for row_tuple in rows]

    track = editorstate.current_sequence().get_first_active_track()
    do_multiple_clip_insert_func(track, clips, editorstate.current_tline_frame())
def get_log_event_clip(log_event):
    """
    Create a new clip for a media log event with clip in/out taken from
    the event's marked range.
    """
    # Currently guaranteed not to be a pattern producer.
    new_clip = editorstate.current_sequence().create_file_producer_clip(log_event.path)

    # Set clip in and out points
    new_clip.clip_in = log_event.mark_in
    new_clip.clip_out = log_event.mark_out

    # Name the clip from the comment when the user asked for that and a
    # non-empty comment exists, otherwise use the event name.
    if widgets.use_comments_check.get_active() and len(log_event.comment) != 0:
        new_clip.name = log_event.comment
    else:
        new_clip.name = log_event.name
    return new_clip
def get_clips_for_rows(rows):
    """
    Return new clips for the log events at the given tree path rows.
    """
    log_events = get_current_filtered_events()
    # rows are single-value tree path tuples; row[0] is the event index.
    return [get_log_event_clip(log_events[row[0]]) for row in rows]
def display_log_clip_double_click_listener(treeview, path, view_column):
    """
    Double-clicking a row displays that log event in the monitor,
    reusing the popup menu "display" action.
    """
    _log_event_menu_item_selected(treeview, ("display", int(path[0]), treeview))
def _group_action_pressed(widget, event):
    """
    Build and pop up the group actions menu for the Range Log panel.

    Offers creating, renaming and deleting groups, plus moving the
    selected log items into an existing group via a submenu.
    """
    actions_menu = gtk.Menu()
    actions_menu.add(guiutils.get_menu_item(_("New Group..."), _actions_callback, "new"))
    actions_menu.add(guiutils.get_menu_item(_("New Group From Selected..."), _actions_callback, "newfromselected"))
    guiutils.add_separetor(actions_menu)
    # Rename only makes sense for user created groups, not the "All Items" view.
    item = guiutils.get_menu_item(_("Rename Current Group..."), _actions_callback, "rename")
    _unsensitive_for_all_view(item)
    actions_menu.add(item)
    guiutils.add_separetor(actions_menu)
    # Move submenu: one entry per user group; callback data is the group
    # index as a string, which _actions_callback parses with int().
    move_menu_item = gtk.MenuItem(_("Move Selected Items To Group").encode('utf-8'))
    move_menu = gtk.Menu()
    if len(PROJECT().media_log_groups) == 0:
        move_menu.add(guiutils.get_menu_item(_("No Groups").encode('utf-8'), _actions_callback, "dummy", False))
    else:
        index = 0
        for group in PROJECT().media_log_groups:
            name, items = group
            move_menu.add(guiutils.get_menu_item(name, _actions_callback, str(index)))
            index = index + 1
    move_menu_item.set_submenu(move_menu)
    actions_menu.add(move_menu_item)
    move_menu_item.show()
    guiutils.add_separetor(actions_menu)
    item = guiutils.get_menu_item(_("Delete Current Group"), _actions_callback, "delete")
    _unsensitive_for_all_view(item)
    actions_menu.add(item)
    #item = guiutils.get_menu_item(_("Delete Current Group and Items"), _actions_callback, "deletewithitems")
    #_unsensitive_for_all_view(item)
    #actions_menu.add(item)
    actions_menu.popup(None, None, None, event.button, event.time)
def _unsensitive_for_all_view(item):
    """
    Disable a menu item while the "All Items" pseudo group is viewed.
    Group editing actions only apply to user created groups.
    """
    # Combo index 0 is the "All Items" view.
    if widgets.group_view_select.get_active() == 0:
        item.set_sensitive(False)
def _actions_callback(widget, data):
    """
    Handle a selection from the group actions menu.

    data is one of the fixed action ids ("new", "newfromselected",
    "delete", "deletewithitems", "rename") or the stringified index of
    the group to move the currently selected items into.
    """
    if data == "newfromselected":
        next_index = len(PROJECT().media_log_groups)
        dialogs.new_media_log_group_name_dialog(_new_group_name_callback, next_index, True)
    elif data == "new":
        next_index = len(PROJECT().media_log_groups)
        dialogs.new_media_log_group_name_dialog(_new_group_name_callback, next_index, False)
    elif data == "delete":
        # Combo index 0 is the "All Items" view, hence the -1.
        current_group_index = widgets.group_view_select.get_active() - 1
        if current_group_index < 0:
            return
        PROJECT().media_log_groups.pop(current_group_index)
        _create_group_select()
        widgets.group_view_select.set_active(0)
    elif data == "deletewithitems":
        current_group_index = widgets.group_view_select.get_active() - 1
        if current_group_index < 0:
            return
        name, items = PROJECT().media_log_groups[current_group_index]
        primary_txt = _("Delete Group and Items?")
        secondary_txt = _("Are you sure you want to delete group ") + name + _(" and ") + str(len(items)) + _(" items it contains?\n") + \
                        _("This operation cannot be undone.")
        dialogutils.warning_confirmation(_delete_with_items_dialog_callback,
                                         primary_txt, secondary_txt, gui.editor_window.window, None, True)
    elif data == "rename":
        current_group_index = widgets.group_view_select.get_active() - 1
        name, items = PROJECT().media_log_groups[current_group_index]
        dialogs.group_rename_dialog(_rename_callback, name)
    else:
        # Not a fixed action id: interpret data as a move target group index.
        try:
            to_group_index = int(data)
        except:
            return
        current_group_index = widgets.group_view_select.get_active() - 1
        if to_group_index == current_group_index:
            return
        # Get items to move
        selected = widgets.media_log_view.get_selected_rows_list()
        log_events = get_current_filtered_events()
        move_items = []
        for row in selected:
            index = max(row) # these are tuples, max to extract only value
            move_items.append(log_events[index])
        # Move items and update
        PROJECT().remove_from_group(current_group_index, move_items)
        PROJECT().add_to_group(to_group_index, move_items)
        widgets.group_view_select.set_active(to_group_index + 1) # 0 index items is "All" items group not a user created group
def _delete_with_items_dialog_callback(dialog, response_id):
    """
    Confirmation dialog callback: delete the currently viewed group and
    all the log events it contains, then show the "All Items" view.
    """
    dialog.destroy()
    if response_id != gtk.RESPONSE_ACCEPT:
        return

    group_index = widgets.group_view_select.get_active() - 1
    name, items = PROJECT().media_log_groups[group_index]
    PROJECT().delete_media_log_events(items)
    PROJECT().media_log_groups.pop(group_index)
    _create_group_select()
    widgets.group_view_select.set_active(0)
def _rename_callback(dialog, response_id, entry):
    """
    Rename dialog callback: replace the current group tuple with one
    carrying the new name and the same items.
    """
    new_name = entry.get_text()
    dialog.destroy()

    if response_id == gtk.RESPONSE_CANCEL:
        return
    if not new_name:
        return

    group_index = widgets.group_view_select.get_active() - 1
    old_name, items = PROJECT().media_log_groups[group_index]
    # Groups are (name, items) tuples; swap in place to keep ordering.
    PROJECT().media_log_groups.pop(group_index)
    PROJECT().media_log_groups.insert(group_index, (new_name, items))
    _create_group_select()
    widgets.group_view_select.set_active(group_index + 1)
def _viewed_group_changed(widget):
    """Group combo selection changed: refresh the log events list view."""
    update_media_log_view()
def _new_group_name_callback(dialog, response_id, data):
    """
    Dialog callback creating a new media log group.

    data is (name_entry, add_selected); when add_selected is True the
    currently selected log items are moved into the new group.
    """
    if response_id == gtk.RESPONSE_CANCEL:
        dialog.destroy()
        return
    # Get group name and create type
    name_entry, add_selected = data
    new_name = name_entry.get_text()
    dialog.destroy()
    if len(new_name) == 0:
        # Fall back to a generated name like "Group 3".
        new_name = _("Group ") + str(len(PROJECT().media_log_groups))
    # Add items to new group if requested
    items = []
    if add_selected:
        selected = widgets.media_log_view.get_selected_rows_list()
        log_events = get_current_filtered_events()
        for row in selected:
            index = max(row) # these are tuples, max to extract only value
            items.append(log_events[index])
        # Remove moved items from the currently viewed group; index 0 is
        # the "All Items" view which owns nothing.
        current_group_index = widgets.group_view_select.get_active() - 1
        if current_group_index >= 0:
            PROJECT().remove_from_group(current_group_index, items)
    # Update view
    PROJECT().add_media_log_group(new_name, items)
    _create_group_select()
    widgets.group_view_select.set_active(len(PROJECT().media_log_groups))
    update_media_log_view()
# ------------------------------------------------------------ gui
def get_media_log_list_view():
    """
    Create the log events list view and store it in the module-wide
    widgets object.
    """
    global widgets
    view = MediaLogListView()
    widgets.media_log_view = view
    return view
def update_media_log_view():
    """Refill the list view model and scroll to the end of the list."""
    widgets.media_log_view.fill_data_model()
    # Does not show last line, do we need timer?
    adj = widgets.media_log_view.treeview.get_vadjustment()
    adj.set_value(adj.get_upper())
class MediaLogListView(gtk.VBox):
    """
    Tree view listing the media log events of the current group / star
    filter. Model columns: star icon, comment (editable), file name,
    mark in, mark out, date.
    """
    def __init__(self):
        gtk.VBox.__init__(self)

        # Datamodel: icon, then five text columns (comment, name,
        # mark in, mark out, date)
        self.storemodel = gtk.ListStore(gtk.gdk.Pixbuf, str, str, str, str, str)

        # Scroll container
        self.scroll = gtk.ScrolledWindow()
        self.scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.scroll.set_shadow_type(gtk.SHADOW_ETCHED_IN)

        # View
        self.treeview = gtk.TreeView(self.storemodel)
        self.treeview.set_property("rules_hint", True)
        self.treeview.set_headers_visible(True)
        tree_sel = self.treeview.get_selection()
        tree_sel.set_mode(gtk.SELECTION_MULTIPLE)
        self.treeview.connect("button-press-event", log_list_view_button_press)
        self.treeview.connect("row-activated", display_log_clip_double_click_listener)

        # Column views
        self.icon_col_1 = gtk.TreeViewColumn("icon1")
        self.icon_col_1.set_title(_("Star"))
        # NOTE(review): text_col_1/text_rend_1 ("Event") are created but
        # never appended to the treeview below; they look unused.
        self.text_col_1 = gtk.TreeViewColumn("text1")
        self.text_col_1.set_title(_("Event"))
        self.text_col_2 = gtk.TreeViewColumn("text2")
        self.text_col_2.set_title(_("Comment"))
        self.text_col_3 = gtk.TreeViewColumn("text3")
        self.text_col_3.set_title(_("File Name"))
        self.text_col_4 = gtk.TreeViewColumn("text4")
        self.text_col_4.set_title(_("Mark In"))
        self.text_col_5 = gtk.TreeViewColumn("text5")
        self.text_col_5.set_title(_("Mark Out"))
        self.text_col_6 = gtk.TreeViewColumn("text6")
        self.text_col_6.set_title(_("Date"))

        # Cell renderers
        self.icon_rend_1 = gtk.CellRendererPixbuf()
        self.icon_rend_1.props.xpad = 6
        self.text_rend_1 = gtk.CellRendererText()
        self.text_rend_1.set_property("ellipsize", pango.ELLIPSIZE_END)
        # Comment cells are editable; edits are stored by log_item_name_edited.
        self.text_rend_2 = gtk.CellRendererText()
        self.text_rend_2.set_property("yalign", 0.0)
        self.text_rend_2.set_property("editable", True)
        self.text_rend_2.connect("edited", log_item_name_edited, (self.storemodel, 2))
        self.text_rend_3 = gtk.CellRendererText()
        self.text_rend_3.set_property("yalign", 0.0)
        self.text_rend_4 = gtk.CellRendererText()
        self.text_rend_4.set_property("yalign", 0.0)
        self.text_rend_5 = gtk.CellRendererText()
        self.text_rend_5.set_property("yalign", 0.0)
        self.text_rend_6 = gtk.CellRendererText()
        self.text_rend_6.set_property("yalign", 0.0)

        # Build column views: pack renderers and bind model columns
        self.icon_col_1.set_expand(False)
        self.icon_col_1.set_spacing(5)
        self.text_col_1.set_min_width(20)
        self.icon_col_1.pack_start(self.icon_rend_1)
        self.icon_col_1.add_attribute(self.icon_rend_1, 'pixbuf', 0)
        self.text_col_2.set_expand(True)
        self.text_col_2.set_sizing(gtk.TREE_VIEW_COLUMN_GROW_ONLY)
        self.text_col_2.set_min_width(150)
        self.text_col_2.pack_start(self.text_rend_2)
        self.text_col_2.add_attribute(self.text_rend_2, "text", 1)
        self.text_col_3.set_expand(True)
        self.text_col_3.pack_start(self.text_rend_3)
        self.text_col_3.add_attribute(self.text_rend_3, "text", 2)
        self.text_col_4.set_expand(True)
        self.text_col_4.pack_start(self.text_rend_4)
        self.text_col_4.add_attribute(self.text_rend_4, "text", 3)
        self.text_col_5.set_expand(True)
        self.text_col_5.pack_start(self.text_rend_5)
        self.text_col_5.add_attribute(self.text_rend_5, "text", 4)
        self.text_col_6.set_expand(True)
        self.text_col_6.pack_start(self.text_rend_6)
        self.text_col_6.add_attribute(self.text_rend_6, "text", 5)

        # Add column views to view
        self.treeview.append_column(self.icon_col_1)
        self.treeview.append_column(self.text_col_2)
        self.treeview.append_column(self.text_col_3)
        self.treeview.append_column(self.text_col_4)
        self.treeview.append_column(self.text_col_5)
        self.treeview.append_column(self.text_col_6)

        # Build widget graph and display
        self.scroll.add(self.treeview)
        self.pack_start(self.scroll)
        self.scroll.show_all()

    def fill_data_model(self):
        """Clear the model and refill it from the currently filtered events."""
        self.storemodel.clear()
        star_icon_path = respaths.IMAGE_PATH + "star.png"
        no_star_icon_path = respaths.IMAGE_PATH + "star_not_active.png"
        log_events = get_current_filtered_events()
        for log_event in log_events:
            if log_event.starred == True:
                icon = gtk.gdk.pixbuf_new_from_file(star_icon_path)
            else:
                icon = gtk.gdk.pixbuf_new_from_file(no_star_icon_path)
            row_data = [icon,
                        log_event.comment,
                        log_event.name,
                        log_event.get_mark_in_str(),
                        log_event.get_mark_out_str(),
                        log_event.get_date_str()]
            self.storemodel.append(row_data)
        self.scroll.queue_draw()

    def get_selected_rows_list(self):
        """Return selected rows as a list of tree path tuples."""
        model, rows = self.treeview.get_selection().get_selected_rows()
        return rows
def get_media_log_events_panel(events_list_view):
    """
    Build the full Range Log panel: filter row on top, the events list
    view in the middle and the edit buttons row at the bottom. Stores
    the created filter widgets in the module-wide widgets object.
    """
    global widgets
    actions_pixbuf = gtk.gdk.pixbuf_new_from_file(respaths.IMAGE_PATH + "media_log_action.png")
    group_actions_menu = guicomponents.PressLaunch(_group_action_pressed, actions_pixbuf, 38, 22)
    # Star filter check buttons; toggling refilters the displayed events.
    star_check = gtk.CheckButton()
    star_check.set_active(True)
    star_check.connect("clicked", lambda w:media_log_filtering_changed())
    widgets.star_check = star_check
    star_label = gtk.Image()
    star_label.set_from_file(respaths.IMAGE_PATH + "star.png")
    star_not_active_check = gtk.CheckButton()
    star_not_active_check.set_active(True)
    star_not_active_check.connect("clicked", lambda w:media_log_filtering_changed())
    widgets.star_not_active_check = star_not_active_check
    star_not_active_label = gtk.Image()
    star_not_active_label.set_from_file(respaths.IMAGE_PATH + "star_not_active.png")
    # Buttons setting the starred state of the selected ranges.
    star_button = gtk.Button()
    star_button.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "star.png"))
    star_button.connect("clicked", lambda w: media_log_star_button_pressed())
    no_star_button = gtk.Button()
    no_star_button.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "star_not_active.png"))
    no_star_button.connect("clicked", lambda w: media_log_no_star_button_pressed())
    # Groups combo lives in group_box so _create_group_select() can rebuild it.
    widgets.group_box = gtk.HBox()
    _create_group_select()
    widgets.group_view_select.set_active(0)
    # Top row: group actions, group select and star filtering.
    row1 = gtk.HBox()
    row1.pack_start(guiutils.get_pad_label(6, 12), False, True, 0)
    row1.pack_start(group_actions_menu.widget, False, True, 0)
    row1.pack_start(guiutils.get_pad_label(6, 12), False, True, 0)
    row1.pack_start(widgets.group_box, False, True, 0)
    row1.pack_start(guiutils.get_pad_label(6, 12), False, True, 0)
    row1.pack_start(star_check, False, True, 0)
    row1.pack_start(star_label, False, True, 0)
    row1.pack_start(guiutils.get_pad_label(6, 12), False, True, 0)
    row1.pack_start(star_not_active_check, False, True, 0)
    row1.pack_start(star_not_active_label, False, True, 0)
    row1.pack_start(guiutils.pad_label(12, 12), False, False, 0)
    row1.pack_start(star_button, False, True, 0)
    row1.pack_start(no_star_button, False, True, 0)
    row1.pack_start(gtk.Label(), True, True, 0)
    # Bottom row buttons: log range, delete, comments-as-names, insert, append.
    widgets.log_range = gtk.Button()
    widgets.log_range.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "log_range.png"))
    widgets.log_range.set_size_request(80, 30)
    widgets.log_range.connect("clicked", lambda w:log_range_clicked())
    delete_button = gtk.Button()
    delete_button.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "delete_log_range.png"))
    delete_button.set_size_request(80, 30)
    delete_button.connect("clicked", lambda w:delete_selected())
    use_comments_label = gtk.Label(_("Use Comments as Clip Names"))
    use_comments_check = gtk.CheckButton()
    use_comments_check.set_active(False)
    widgets.use_comments_check = use_comments_check
    insert_displayed = gtk.Button()
    insert_displayed.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "insert_media_log.png"))
    insert_displayed.set_size_request(80, 22)
    insert_displayed.connect("clicked", lambda w:insert_selected_log_events())
    append_displayed = gtk.Button()
    append_displayed.set_image(gtk.image_new_from_file(respaths.IMAGE_PATH + "append_media_log.png"))
    append_displayed.set_size_request(80, 22)
    append_displayed.connect("clicked", lambda w:append_log_events())
    row2 = gtk.HBox()
    row2.pack_start(widgets.log_range, False, True, 0)
    row2.pack_start(delete_button, False, True, 0)
    row2.pack_start(gtk.Label(), True, True, 0)
    row2.pack_start(use_comments_label, False, True, 0)
    row2.pack_start(use_comments_check, False, True, 0)
    row2.pack_start(gtk.Label(), True, True, 0)
    row2.pack_start(insert_displayed, False, True, 0)
    row2.pack_start(append_displayed, False, True, 0)
    panel = gtk.VBox()
    panel.pack_start(row1, False, True, 0)
    panel.pack_start(events_list_view, True, True, 0)
    panel.pack_start(row2, False, True, 0)
    panel.set_size_request(400, 200)
    # Tooltips
    star_check.set_tooltip_text(_("Display starred ranges"))
    star_not_active_check.set_tooltip_text(_("Display non-starred ranges"))
    star_button.set_tooltip_text(_("Set selected ranges starred"))
    no_star_button.set_tooltip_text(_("Set selected ranges non-starred"))
    widgets.log_range.set_tooltip_text(_("Log current marked range"))
    delete_button.set_tooltip_text(_("Delete selected ranges"))
    insert_displayed.set_tooltip_text(_("Insert selected ranges on Timeline"))
    append_displayed.set_tooltip_text(_("Append displayed ranges on Timeline"))
    # Enable drag'n'drop of ranges to timeline.
    dnd.connect_range_log(events_list_view.treeview)
    return panel
def _create_group_select():
    """
    (Re)build the groups combo box and place it in widgets.group_box.

    Index 0 is always the "All Items" pseudo group; user created groups
    follow in project order.
    """
    # Remove previous combo; fails harmlessly on the first call when
    # no combo exists yet.
    try:
        widgets.group_box.remove(widgets.group_view_select)
    except:
        pass
    group_view_select = gtk.combo_box_new_text() # filled later when current sequence known
    group_view_select.append_text(_("All Items"))
    for group_data in PROJECT().media_log_groups:
        name, items = group_data
        group_view_select.append_text(name)
    group_view_select.set_size_request(250, 30)
    group_view_select.connect('changed', _viewed_group_changed)
    group_view_select.set_tooltip_text(_("Select viewed Range Log Items Group"))
    widgets.group_view_select = group_view_select
    widgets.group_box.add(widgets.group_view_select)
    widgets.group_view_select.show()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains objects and methods needed to create render consumers.
"""
import pygtk
pygtk.require('2.0');
import gtk
import mlt
import time
import threading
import xml.dom.minidom
import os
import mltenv
import respaths
from editorstate import PLAYER
from editorstate import PROJECT
# File describing existing encoding and quality options
RENDER_ENCODING_FILE = "/res/render/renderencoding.xml"
# Node, attribute names used when parsing renderencoding.xml.
NAME = "name"
TYPE = "type"
ID = "id"
EXTENSION = "extension"
RESIZABLE = "resize"
ARGS = "args"
REPLACED_VALUES = "replvalues"
ADDED_ATTRIBUTES = "addargs"
BITRATE_OPTION = "boption"
QUALITY_GROUP = "qualityqroup"
ENCODING_OPTION = "encodingoption"
PROXY_ENCODING_OPTION = "proxyencodingoption"
QGROUP = "qgroup"
DEFAULT_INDEX = "defaultindex"
PROFILE = "profile"
QUALITY = "quality"
BITRATE = "bitrate"
AUDIO_DESCRIPTION = "audiodesc"
NON_USER = "nonuser"
# Replace strings and attribute values (keywords substituted with
# profile / quality specific values when building consumer args).
BITRATE_RPL = "%BITRATE%"
VARIABLE_VAL = "%VARIABLE%"
SCREEN_SIZE_RPL = "%SCREENSIZE%"
ASPECT_RPL = "%ASPECT%"
# Module state filled by load_render_profiles() at application start-up.
render_encoding_doc = None                  # parsed renderencoding.xml DOM
encoding_options = []                       # user-selectable EncodingOption objects
not_supported_encoding_options = []         # options missing codecs in this environment
quality_option_groups = {}                  # qgroup id -> list of QualityOption
quality_option_groups_default_index = {}    # qgroup id -> default index string
non_user_encodings = []                     # supported but not user-selectable
proxy_encodings = None                      # proxy EncodingOptions, None before load
# replace empty strings with None values
def _get_attribute(node, attr_name):
value = node.getAttribute(attr_name)
if value == "":
return None
return value
class QualityOption:
    """
    A render quality option for an EncodingOption.

    Values of mlt render consumer properties (usually bitrate) that equal
    key expressions are replaced with corresponding values.
    """
    def __init__(self, quality_node):
        self.name = _get_attribute(quality_node, NAME)

        # Replaced render arguments, format: "expr value;expr value;..."
        self.replaced_expressions = []
        self.replace_map = {}
        replaced_values_str = _get_attribute(quality_node, REPLACED_VALUES)
        if replaced_values_str != None:
            for token in replaced_values_str.split(";"):
                parts = token.split(" ")
                self.replaced_expressions.append(parts[0])
                self.replace_map[parts[0]] = parts[1]

        # Added render arguments, format: "key=value key=value ..."
        self.add_map = {}
        added_attrs_str = _get_attribute(quality_node, ADDED_ATTRIBUTES)
        if added_attrs_str != None:
            for token in added_attrs_str.split(" "):
                parts = token.split("=")
                self.add_map[parts[0]] = parts[1]
class EncodingOption:
    """
    An object that groups together vcodec, acodec, format and a quality
    options group. Used to set mlt render consumer properties.
    """
    def __init__(self, option_node):
        self.name = _get_attribute(option_node, NAME)
        self.type = _get_attribute(option_node, TYPE)
        self.resizable = (_get_attribute(option_node, RESIZABLE) == "True")
        self.extension = _get_attribute(option_node, EXTENSION)
        self.nonuser = _get_attribute(option_node, NON_USER)

        # Quality options group shared by several encodings.
        self.quality_qroup_id = _get_attribute(option_node, QGROUP)
        self.quality_options = quality_option_groups[self.quality_qroup_id]
        try:
            self.quality_default_index = int(quality_option_groups_default_index[self.quality_qroup_id])
        except KeyError:
            # No default defined for this quality group.
            self.quality_default_index = None

        self.audio_desc = _get_attribute(option_node, AUDIO_DESCRIPTION)

        # Render args string, e.g. "f=mp4 acodec=aac vcodec=libx264 ..."
        profile_node = option_node.getElementsByTagName(PROFILE).item(0)
        self.attr_string = _get_attribute(profile_node, ARGS)

        # Pick codec and container format out of the args string.
        self.acodec = None
        self.vcodec = None
        self.format = None
        for token in self.attr_string.split(" "):
            sides = token.split("=")
            if sides[0] == "acodec":
                self.acodec = sides[1]
            elif sides[0] == "vcodec":
                self.vcodec = sides[1]
            elif sides[0] == "f":
                self.format = sides[1]

        self.supported, self.err_msg = mltenv.render_profile_supported(self.format,
                                                                       self.vcodec,
                                                                       self.acodec)

    def get_args_vals_tuples_list(self, profile, quality_option=None):
        """
        Return the render args as a list of (key, value) string tuples,
        with keyword values replaced by profile / quality specific values.
        """
        args_tuples = []
        for token in self.attr_string.split(" "):
            sides = token.split("=")
            key = str(sides[0])
            val = str(sides[1])
            # Replace keyword values with values from the render profile.
            if val == SCREEN_SIZE_RPL:
                val = str(profile.width()) + "x" + str(profile.height())
            if val == ASPECT_RPL:
                val = "@" + str(profile.display_aspect_num()) + "/" + str(profile.display_aspect_den())
            # Replace keyword values from quality option values.
            if quality_option != None and val in quality_option.replaced_expressions:
                val = str(quality_option.replace_map[val])
            args_tuples.append((key, val))
        return args_tuples

    def get_audio_description(self):
        """Return a pango small-markup audio description for GUI display."""
        desc = "Not available" if self.audio_desc == None else self.audio_desc
        return "<small>" + desc + "</small>"
def load_render_profiles():
"""
Load render profiles from xml into DOM at start-up and build
object tree.
"""
print "Loading render profiles..."
file_path = respaths.ROOT_PATH + RENDER_ENCODING_FILE
global render_encoding_doc
render_encoding_doc = xml.dom.minidom.parse(file_path)
# Create quality option groups
global quality_option_groups
qgroup_nodes = render_encoding_doc.getElementsByTagName(QUALITY_GROUP)
for qgnode in qgroup_nodes:
quality_qroup = []
group_key = _get_attribute(qgnode, ID)
group_default_index = _get_attribute(qgnode, DEFAULT_INDEX)
if group_default_index != None:
quality_option_groups_default_index[group_key] = group_default_index
option_nodes = qgnode.getElementsByTagName(QUALITY)
for option_node in option_nodes:
q_option = QualityOption(option_node)
quality_qroup.append(q_option)
quality_option_groups[group_key] = quality_qroup
# Create encoding options
global encoding_options, not_supported_encoding_options, non_user_encodings
encoding_option_nodes = render_encoding_doc.getElementsByTagName(ENCODING_OPTION)
for eo_node in encoding_option_nodes:
encoding_option = EncodingOption(eo_node)
if encoding_option.supported:
if encoding_option.nonuser == None:
encoding_options.append(encoding_option)
else:
non_user_encodings.append(encoding_option)
else:
msg = "...NOT available, " + encoding_option.err_msg + " missing"
not_supported_encoding_options.append(encoding_option)
print encoding_option.name + msg
# Proxy encoding
proxy_encoding_nodes = render_encoding_doc.getElementsByTagName(PROXY_ENCODING_OPTION)
found_proxy_encodings = []
for proxy_node in proxy_encoding_nodes:
proxy_encoding_option = EncodingOption(proxy_node)
if proxy_encoding_option.supported:
msg = " ...available"
found_proxy_encodings.append(proxy_encoding_option)
else:
msg = " ...NOT available, " + encoding_option.err_msg + " missing"
print "Proxy encoding " + proxy_encoding_option.name + msg
global proxy_encodings
proxy_encodings = found_proxy_encodings
def get_render_consumer_for_encoding_and_quality(file_path, profile, enc_opt_index, quality_opt_index):
    """
    Build an mlt render consumer for the encoding and quality options
    selected by index.
    """
    args = get_args_vals_tuples_list_for_encoding_and_quality(profile, enc_opt_index, quality_opt_index)
    return get_mlt_render_consumer(file_path, profile, args)
def get_render_consumer_for_encoding(file_path, profile, encoding_option):
    """
    Build an mlt render consumer using the encoding option's args
    without any quality option applied.
    """
    return get_mlt_render_consumer(file_path, profile,
                                   encoding_option.get_args_vals_tuples_list(profile))
def get_render_consumer_for_text_buffer(file_path, profile, buf):
    """
    Build an mlt render consumer from user typed ffmpeg args in a text
    buffer. Returns (consumer, None) on success or (None, error_string)
    when the buffer fails to parse.
    """
    args_vals_list, error = get_ffmpeg_opts_args_vals_tuples_list(buf)
    if error != None:
        return (None, error)
    return (get_mlt_render_consumer(file_path, profile, args_vals_list), None)
def get_img_seq_render_consumer(file_path, profile, encoding_option):
#render_path = "%1/%2-%05d.%3" + file_path
args_vals_list = encoding_option.get_args_vals_tuples_list(profile)
vcodec = None
for arg_val in args_vals_list:
arg, val = arg_val
if arg == "vcodec":
vcodec = val
render_path = os.path.dirname(file_path) + "/" + os.path.basename(file_path).split(".")[0] + "_%05d." + encoding_option.extension
consumer = mlt.Consumer(profile, "avformat", str(render_path))
consumer.set("real_time", -1)
consumer.set("rescale", "bicubic")
consumer.set("vcodec", str(vcodec))
print "img seq render consumer created, path:" + str(render_path) #+ ", args: " + args_msg
return consumer
def get_mlt_render_consumer(file_path, profile, args_vals_list):
consumer = mlt.Consumer(profile, "avformat", str(file_path))
consumer.set("real_time", -1)
consumer.set("rescale", "bicubic")
args_msg = ""
for arg_val in args_vals_list:
k, v = arg_val
consumer.set(str(k), str(v))
args_msg = args_msg + str(k) + "="+ str(v) + ", "
args_msg = args_msg.strip(", ")
print "render consumer created, path:" + str(file_path) + ", args: " + args_msg
return consumer
def get_args_vals_tuples_list_for_encoding_and_quality(profile, enc_opt_index, quality_opt_index):
    """
    Return render consumer (key, value) tuples for the encoding at
    enc_opt_index combined with the quality at quality_opt_index.
    A negative quality index means no quality option is applied.
    """
    encoding_option = encoding_options[enc_opt_index]
    quality_option = encoding_option.quality_options[quality_opt_index] if quality_opt_index >= 0 else None

    args_vals_list = encoding_option.get_args_vals_tuples_list(profile, quality_option)

    # Append the quality option's additional key, value args.
    if quality_option != None:
        for k, v in quality_option.add_map.iteritems():
            args_vals_list.append((str(k), str(v)))

    return args_vals_list
def get_ffmpeg_opts_args_vals_tuples_list(buf):
    """
    Parse a gtk.TextBuffer of user written "key=value" lines.
    Returns (list_of_tuples, None) on success or (None, error_string)
    when a line fails to parse.
    """
    end = buf.get_end_iter()
    line_count = buf.get_line_count()
    arg_vals = []
    for i in range(0, line_count):
        line_start = buf.get_iter_at_line(i)
        # The last line has no following line iter; use the buffer end.
        line_end = end if i == line_count - 1 else buf.get_iter_at_line(i + 1)
        av_tuple, error = _parse_line(line_start, line_end, buf)
        if error != None:
            errs_str = _("Error on line ") + str(i + 1) + ": " + error + _("\nLine contents: ") \
                       + buf.get_text(line_start, line_end, include_hidden_chars=False)
            return (None, errs_str)
        # Empty lines return (None, None) and are skipped.
        if av_tuple != None:
            arg_vals.append(av_tuple)
    return (arg_vals, None)
def _parse_line(line_start, line_end, buf):
    """
    Parse one "key=value" line from the buffer.
    Returns ((key, value), None) on success, (None, None) for an empty
    line, or (None, error_string) on a parse failure.
    """
    line = buf.get_text(line_start, line_end, include_hidden_chars=False)
    if not line:
        return (None, None)
    if "=" not in line:
        return (None, _("No \'=\' found."))
    sides = line.split("=")
    if len(sides) != 2:
        return (None, _("Number of tokens on line is ")+ str(len(sides)) + _(", should be 2 (key, value)."))
    key = sides[0].strip()
    value = sides[1].strip()
    if not key:
        return (None, _("Arg name token is empty."))
    if not value:
        return (None, _("Arg value token is empty."))
    # Only plain ascii is accepted for ffmpeg args.
    try:
        key.decode('ascii')
    except UnicodeDecodeError:
        return (None, _("Non-ascii char in Arg name."))
    try:
        value.decode('ascii')
    except UnicodeDecodeError:
        return (None, _("Non-ascii char in Arg value."))
    if " " in key:
        return (None, _("Whitespace in Arg name."))
    if " " in value:
        return (None, _("Whitespace in Arg value."))
    return ((key, value), None)
class FileRenderPlayer(threading.Thread):
    """
    Thread that renders a producer frame range to a file with the given
    consumer and reports render progress.
    """
    def __init__(self, file_name, producer, consumer, start_frame, stop_frame):
        self.file_name = file_name
        self.producer = producer
        self.consumer = consumer
        self.start_frame = start_frame
        self.stop_frame = stop_frame
        self.stopped = False
        # When True, stopping waits for the producer to fully stop and the
        # consumer to flush; used when the out frame is the last frame.
        self.wait_for_producer_end_stop = True
        self.running = False
        self.has_started_running = False
        print "FileRenderPlayer started, start frame: " + str(self.start_frame) + ", stop frame: " + str(self.stop_frame)
        self.consumer_pos_stop_add = 1 # HACK!!! File renders work then this one, screenshot render requires this to be 2 to work
        threading.Thread.__init__(self)

    def run(self):
        """Render loop; runs until the stop frame is reached or shutdown() aborts."""
        self.running = True
        self.has_started_running = True
        self.connect_and_start()
        while self.running: # set false at shutdown() for abort
            if self.producer.frame() >= self.stop_frame:
                # This method of stopping makes sure that whole producer is rendered and written to disk
                # Used when producer out frame is last frame.
                if self.wait_for_producer_end_stop:
                    while self.producer.get_speed() > 0:
                        time.sleep(0.2)
                    while not self.consumer.is_stopped():
                        time.sleep(0.2)
                # This method of stopping stops producer
                # and waits for consumer to reach that frame.
                # Used when producer out frame is NOT last frame.
                else:
                    self.producer.set_speed(0)
                    last_frame = self.producer.frame()
                    # Consumer position lags producer position slightly.
                    while self.consumer.position() + self.consumer_pos_stop_add < last_frame:
                        time.sleep(0.2)
                self.consumer.stop()
                self.running = False
            time.sleep(0.1)
        print "FileRenderPlayer stopped, producer frame: " + str(self.producer.frame())
        self.stopped = True

    def shutdown(self):
        """Abort the render: stop consumer and producer and end the loop."""
        self.consumer.stop()
        self.producer.set_speed(0)
        self.running = False

    def connect_and_start(self):
        """Connect consumer to producer, seek to start frame and start playback."""
        self.consumer.connect(self.producer)
        self.producer.set_speed(0)
        self.producer.seek(self.start_frame)
        self.producer.set_speed(1)
        self.consumer.start()

    def get_render_fraction(self):
        """Return render progress as a float in the range 0.0 - 1.0."""
        render_length = self.stop_frame - self.start_frame + 1
        if (self.producer.get_length() - 1) < 1:
            # Degenerate zero/one frame producer; report complete.
            render_fraction = 1.0
        else:
            current_frame = self.producer.frame() - self.start_frame
            render_fraction = (float(current_frame)) / (float(render_length))
        if render_fraction > 1.0:
            render_fraction = 1.0
        return render_fraction
class XMLRenderPlayer(threading.Thread):
    """
    Thread that writes the current sequence out as MLT XML and then
    restarts normal monitor playback, calling back when done.
    """
    def __init__(self, file_name, callback, data):
        self.file_name = file_name
        self.render_done_callback = callback
        self.data = data
        # Playback frame at render start; NOTE(review): only seek(0) is done
        # after render here — presumably intended for position restore, confirm.
        self.current_playback_frame = 0
        threading.Thread.__init__(self)

    def run(self):
        print "Starting XML render"
        player = PLAYER()
        # Don't try anything if somehow this was started
        # while timeline rendering is running
        if player.is_rendering:
            print "Can't render XML when another render is already running!"
            return
        # Stop all playback before producer is disconnected
        self.current_playback_frame = player.producer.frame()
        player.ticker.stop_ticker()
        player.consumer.stop()
        player.producer.set_speed(0)
        player.producer.seek(0)
        # Wait until producer is at start
        while player.producer.frame() != 0:
            time.sleep(0.1)
        # Get render producer
        timeline_producer = PROJECT().c_seq.tractor
        # Get render consumer
        xml_consumer = mlt.Consumer(PROJECT().profile, "xml", str(self.file_name))
        # Connect and start rendering
        xml_consumer.connect(timeline_producer)
        xml_consumer.start()
        timeline_producer.set_speed(1)
        # Wait until done
        while xml_consumer.is_stopped() == False:
            print "In XML render wait loop..."
            time.sleep(0.1)
        print "XML render done"
        # Get app player going again
        player.connect_and_start()
        player.seek_frame(0)
        self.render_done_callback(self.data)
class ProgressWindowThread(threading.Thread):
    """
    Thread that polls a clip renderer and updates a progress bar dialog
    until rendering completes, then invokes the completion callback.
    """
    def __init__(self, dialog, progress_bar, clip_renderer, callback):
        self.dialog = dialog
        self.progress_bar = progress_bar
        self.clip_renderer = clip_renderer
        self.callback = callback
        threading.Thread.__init__(self)

    def run(self):
        self.running = True
        while self.running:
            render_fraction = self.clip_renderer.get_render_fraction()
            # GTK calls from a non-main thread must hold the gdk lock.
            gtk.gdk.threads_enter()
            self.progress_bar.set_fraction(render_fraction)
            pros = int(render_fraction * 100)
            self.progress_bar.set_text(str(pros) + "%")
            gtk.gdk.threads_leave()
            if self.clip_renderer.stopped == True:
                gtk.gdk.threads_enter()
                self.progress_bar.set_fraction(1.0)
                self.progress_bar.set_text("Render Complete!")
                self.callback(self.dialog, 0)
                gtk.gdk.threads_leave()
                self.running = False
            time.sleep(0.33)
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains class Sequence that is the multitrack media object being edited
by the application. A project has 1-n of these.
"""
import mlt
import os
import appconsts
import edit
import editorstate
import mltfilters
import mlttransitions
import mltrefhold
import patternproducer
import utils
# Media types for tracks or clips, re-exported from appconsts for module-local use
UNKNOWN = appconsts.UNKNOWN
VIDEO = appconsts.VIDEO
AUDIO = appconsts.AUDIO
IMAGE = appconsts.IMAGE
IMAGE_SEQUENCE = appconsts.IMAGE_SEQUENCE
RENDERED_VIDEO = appconsts.RENDERED_VIDEO
PATTERN_PRODUCER = appconsts.PATTERN_PRODUCER
FILE_DOES_NOT_EXIST = appconsts.FILE_DOES_NOT_EXIST
# Allowed editing operations on a track
FREE = appconsts.FREE # all edits allowed
SYNC_LOCKED = appconsts.SYNC_LOCKED # FEATURE NOT AVAILABLE TO USER CURRENTLY!
                                    # no insert, splice out or one roll trim.
                                    # Allowed edits do not change positions of later clips
LOCKED = appconsts.LOCKED # no edits allowed
# Display heights
TRACK_HEIGHT_NORMAL = appconsts.TRACK_HEIGHT_NORMAL # track height in canvas and column
TRACK_HEIGHT_SMALL = appconsts.TRACK_HEIGHT_SMALL # track height in canvas and column
# pan magic value indicating that no pan is applied
NO_PAN = appconsts.NO_PAN #-99
# MLT object type enumeration (not currently read back from appconsts)
MLT_PLAYLIST = 0
MLT_PRODUCER = 1
MLT_FILTER = 2
# Number of tracks available
# NOTE: These are set from other modules (and this one when cloning) when creating or loading projects
# and used in Sequence.__init__(...) when creating sequences.
# Weak design, tracks count should be provided via constructor at creation time.
AUDIO_TRACKS_COUNT = 4
VIDEO_TRACKS_COUNT = 5
def set_track_counts(project):
    """
    Set module level track count globals from the first sequence of project.
    These globals are used when creating new sequences, see Sequence and
    create_sequence_clone_with_different_track_count().
    """
    global AUDIO_TRACKS_COUNT, VIDEO_TRACKS_COUNT
    seq = project.sequences[0]
    AUDIO_TRACKS_COUNT = seq.first_video_index - 1
    # BUGFIX: was 'AUDIO_TRACKS_COUNT + 1', which is only correct when a
    # sequence happens to have exactly one more video than audio track.
    # Derive the count from the actual track list instead; per the layout in
    # create_default_tracks(): [black bg] + audio + video + [hidden track].
    VIDEO_TRACKS_COUNT = len(seq.tracks) - seq.first_video_index - 1
# Output modes. These correspond to option indexes in guicomponents.get_monitor_view_select_combo()
PROGRAM_OUT_MODE = 0
VECTORSCOPE_MODE = 1
RGB_PARADE_MODE = 2
# black clip
# Module-global 1-frame black producer used as the bg clip on track 0;
# created lazily in Sequence._create_black_track_clip().
black_track_clip = None
# DEAD CODE !??!
# Mute states as (video_on, audio_on) tuples
# Indexes correspond to "hide" property values 0 - 3
# for playlists in MLT
# USED FOR TRACKS, NOT CLIPS. Clips handled using values in appconsts.py
#MUTE_STATES = [(True, True), (False, True), (True, False), (False, False)]
# Track that all audio is mixed down to combine for output.
AUDIO_MIX_DOWN_TRACK = 0
class Sequence:
    """
    Multitrack MLT object being edited by the application.
    Wraps a mlt.Tractor holding tracks (mlt.Playlist objects), compositors
    and per-sequence audio gain/pan filters. A project has 1 - n of these.
    """
    def __init__(self, profile, name="sequence"):
        """
        profile: MLT profile used for all producers/filters of this sequence
        name: display name of the sequence
        """
        # Data members
        self.name = name # name of sequence
        self.next_id = 0 # id for next created clip
        self.profile = profile
        self.master_audio_gain = 1.0
        self.master_audio_pan = NO_PAN
        self.tracks = []
        self.compositors = []
        self.markers = [] # markers are tuples (name_str, frame_int)
        self.proxyclips = {}
        self.rendered_versions = {}
        self.watermark_filter = None
        self.watermark_file_path = None
        self.seq_len = 0 # used in trim crash hack, remove when fixed
        # MLT objects for a multitrack sequence
        self.init_mlt_objects()
# ----------------------------------- mlt init
    def init_mlt_objects(self):
        """
        Create tractor, field, multitrack and monitor-view scope filters.
        Called from __init__() for both new and loaded sequences.
        """
        # MLT objects for multitrack sequence
        self.tractor = mlt.Tractor()
        self.tractor.mark_in = -1
        self.tractor.mark_out = -1
        # Only create and add pan filter if actual pan is applied
        # This method gets called on load and we only want to add a filter then if pan is applied,
        # and not on initial creation.
        # audiomonitoring.py calls add_track_pan_filter() when pan turned on for initial creation
        if self.master_audio_pan != NO_PAN:
            self.add_track_pan_filter(self.tractor, self.master_audio_pan)
        # Create and add gain filter
        gain_filter = mlt.Filter(self.profile, "volume")
        mltrefhold.hold_ref(gain_filter)
        gain_filter.set("gain", str(self.master_audio_gain))
        self.tractor.attach(gain_filter)
        self.tractor.gain_filter = gain_filter
        self.field = self.tractor.field()
        self.multitrack = self.tractor.multitrack()
        # Monitor view scope filters, attached on demand in set_output_mode()
        self.vectorscope = mlt.Filter(self.profile, "frei0r.vectorscope")
        mltrefhold.hold_ref(self.vectorscope) # ?? is this just some anti-crash hack attempt that was not removed
        self.vectorscope.set("mix", "0.5")
        self.vectorscope.set("overlay sides", "0.0")
        self.rgbparade = mlt.Filter(self.profile, "frei0r.rgbparade")
        mltrefhold.hold_ref(self.rgbparade) # ?? is this just some anti-crash hack attempt that was not removed
        self.rgbparade.set("mix", "0.4")
        self.rgbparade.set("overlay sides", "0.0")
        self.outputfilter = None # currently attached scope filter, None == program out
# ---------------------------------------- tracks
    def create_default_tracks(self):
        """
        This is done when sequence first created, but when sequence is loaded
        tracks are added using add_track(...)
        TRACKS LAYOUT:
        index                                            track type
        -----                                            ----------
        0                                                black bg track
        1 - (self.first_video_index - 1)                 audio tracks
        self.first_video_index - (len(self.tracks) - 2)  video tracks
        (len(self.tracks) - 1)                           hidden track
        Tracks are never changed after creation, changing tracks count feature is
        achieved by creating a new sequence.
        """
        # Default tracks
        # black bg track
        self.add_track(VIDEO)
        # Audio tracks
        for i in range(0, AUDIO_TRACKS_COUNT):
            track = self.add_track(AUDIO)
            track.height = TRACK_HEIGHT_SMALL
        # Video tracks
        self.first_video_index = AUDIO_TRACKS_COUNT + 1 # index of first editable video track
        for i in range(0, VIDEO_TRACKS_COUNT):
            self.add_track(VIDEO) # editable
            if i > 0:
                track_index = i + self.first_video_index
                self.tracks[track_index].height = TRACK_HEIGHT_SMALL # only V1 is normal size after creation
                self.tracks[track_index].active = False # only V1 is active after creation
        # ---Hidden track--- #
        # Hidden video track for clip and trimming display.
        # Hidden track is a video track that is always the topmost track.
        # It is used when displaying monitor clip and
        # displaying the clip that is being trim edited. When trim is loop previewed
        # the hidden track is cleared so that the edit that is on the tracks
        # below can be viewed.
        self.add_track(VIDEO, True)
        self._create_black_track_clip()
        # Add black clip to black bg track
        self.tracks[0].clips.append(black_track_clip) # py
        self.tracks[0].append(black_track_clip, 0, 0) # mlt
    def _create_black_track_clip(self):
        # Create 1 fr long black bg clip and set in and out
        # Length is later stretched to sequence length in update_length().
        global black_track_clip # btw, why global?
        # This is not an actual bin clip so id can be -1, it is just used to create the producer
        pattern_producer_data = patternproducer.BinColorClip(-1, "black_bg", "#000000000000")
        black_track_clip = self.create_pattern_producer(pattern_producer_data)
        black_track_clip.clip_in = 0
        black_track_clip.clip_out = 0
    def add_track(self, track_type, is_hidden=False):
        """
        Creates a MLT playlist object, adds project
        data and adds to tracks list.
        track_type: VIDEO or AUDIO
        is_hidden: True only for the topmost monitor/trim display track
        Returns the new track (a mlt.Playlist with extra attributes).
        """
        new_track = mlt.Playlist()
        self._add_track_attributes(new_track, track_type)
        new_track.is_sync_track = False
        # Connect to MLT multitrack
        self.multitrack.connect(new_track, len(self.tracks))
        # Add to tracklist and set id to list index
        new_track.id = len(self.tracks)
        self.tracks.append(new_track)
        # Mix all audio to track 1 by combining them one after another
        # using an always active field transition.
        if ((new_track.id > AUDIO_MIX_DOWN_TRACK) # black bg or track1 it's self does not need to be mixed
            and (is_hidden == False)): # We actually do want hidden track to cover all audio below, which happens if it is not mixed.
            self._mix_audio_for_track(new_track)
        # Add method that returns track name
        new_track.get_name = lambda : utils.get_track_name(new_track, self)
        return new_track
    def _add_track_attributes(self, track, type):
        # Attach application data attributes to a bare mlt.Playlist.
        # NOTE: parameter 'type' shadows the builtin; kept for call compatibility.
        # Add data attr
        track.type = type
        track.sequence = self
        # Add state attr
        track.active = True
        # Set initial video and audio playback values
        if type == VIDEO:
            track.mute_state = 0 # video on, audio on as mlt "hide" value
        else:
            track.mute_state = 1 # video off, audio on as mlt "hide" value
        track.set("hide", track.mute_state)
        # This is kept in sync with mlt.Playlist inner data
        track.clips = []
        # Display height
        track.height = TRACK_HEIGHT_NORMAL
        if editorstate.SCREEN_HEIGHT < 863:# Fix for 786 screens
            track.height = TRACK_HEIGHT_SMALL
        # Audio gain and pan values, these are overwritten later with saved values when loading
        track.audio_gain = 1.0 # active range 0 - 1
        track.audio_pan = NO_PAN # active range 0-1, 0.5 is middle
        # Tracks may be FREE or LOCKED
        track.edit_freedom = FREE
    def _mix_audio_for_track(self, track):
        # Create and add transition to combine track audios
        transition = mlt.Transition(self.profile, "mix")
        mltrefhold.hold_ref(transition)
        transition.set("a_track", int(AUDIO_MIX_DOWN_TRACK))
        transition.set("b_track", track.id)
        transition.set("always_active", 1)
        transition.set("combine", 1)
        self.field.plant_transition(transition, int(AUDIO_MIX_DOWN_TRACK), track.id)
        # Create and add gain filter
        gain_filter = mlt.Filter(self.profile, "volume")
        mltrefhold.hold_ref(gain_filter)
        gain_filter.set("gain", str(track.audio_gain))
        track.attach(gain_filter)
        track.gain_filter = gain_filter
        # Add pan filter if this track is panorated
        # NOTE(review): filter is initialized to center (0.5) and track.audio_pan
        # overwritten — presumably the saved pan value is re-applied later on
        # load; confirm against project loading code.
        if track.audio_pan != NO_PAN:
            self.add_track_pan_filter(track, 0.5)
            track.audio_pan = 0.5
def minimize_tracks_height(self):
for i in range (1, len(self.tracks) - 1):# visible tracks
track = self.tracks[i]
track.height = TRACK_HEIGHT_SMALL
def maximize_tracks_height(self, allocation):
for i in range (1, len(self.tracks) - 1):# visible tracks
track = self.tracks[i]
track.height = TRACK_HEIGHT_NORMAL
self.resize_tracks_to_fit(allocation)
def maximize_video_tracks_height(self, allocation):
self.minimize_tracks_height()
for i in range (self.first_video_index, len(self.tracks) - 1):# visible tracks
track = self.tracks[i]
track.height = TRACK_HEIGHT_NORMAL
self.resize_tracks_to_fit(allocation)
def maximize_audio_tracks_height(self, allocation):
self.minimize_tracks_height()
for i in range (1, self.first_video_index):
track = self.tracks[i]
track.height = TRACK_HEIGHT_NORMAL
self.resize_tracks_to_fit(allocation)
def get_tracks_height(self):
h = 0
for i in range (1, len(self.tracks) - 1):# visible tracks
track = self.tracks[i]
h += track.height
return h
def set_track_gain(self, track, gain):
track.gain_filter.set("gain", str(gain))
track.audio_gain = gain
def set_master_gain(self, gain):
self.tractor.gain_filter.set("gain", str(gain))
self.master_audio_gain = gain
    def add_track_pan_filter(self, track, value):
        """
        Attach a "panner" filter with given start value (0 - 1) to track.
        This method is used for master too, and called with tractor then.
        """
        pan_filter = mlt.Filter(self.profile, "panner")
        mltrefhold.hold_ref(pan_filter)
        pan_filter.set("start", value)
        track.attach(pan_filter)
        track.pan_filter = pan_filter
def set_track_pan_value(self, track, value):
track.pan_filter.set("start", str(value))
track.audio_pan = value
def remove_track_pan_filter(self, track):
# This method is used for master too, and called with tractor then
track.detach(track.pan_filter)
track.pan_filter = None
track.audio_pan = NO_PAN
def set_master_pan_value(self, value):
self.tractor.pan_filter.set("start", str(value))
self.master_audio_pan = value
    def first_video_track(self):
        # Return the lowest editable video track (displayed to user as V1).
        return self.tracks[self.first_video_index]
def all_tracks_off(self):
for i in range (1, len(self.tracks) - 1):
track = self.tracks[i]
if track.active == True:
return False
return True
# -------------------------------------------------- clips
def create_file_producer_clip(self, path, new_clip_name=None):
"""
Creates MLT Producer and adds attributes to it, but does
not add it to track/playlist object.
"""
producer = mlt.Producer(self.profile, str(path)) # this runs 0.5s+ on some clips
mltrefhold.hold_ref(producer)
producer.path = path
producer.filters = []
(dir, file_name) = os.path.split(path)
(name, ext) = os.path.splitext(file_name)
producer.name = name
if new_clip_name != None:
producer.name = new_clip_name
producer.media_type = get_media_type(path)
if producer.media_type == FILE_DOES_NOT_EXIST:
print "file does not exist"
return None
self.add_clip_attr(producer)
return producer
    def create_slowmotion_producer(self, path, speed):
        """
        Creates MLT Producer and adds attributes to it, but does
        not add it to track/playlist object.
        speed: playback speed multiplier given to the framebuffer producer.
        Returns None if the file does not exist.
        """
        fr_path = "framebuffer:" + path + "?" + str(speed)
        producer = mlt.Producer(self.profile, None, str(fr_path)) # this runs 0.5s+ on some clips
        mltrefhold.hold_ref(producer)
        (folder, file_name) = os.path.split(path)
        (name, ext) = os.path.splitext(file_name)
        producer.name = name
        producer.path = path
        producer.speed = speed
        producer.media_type = get_media_type(path)
        if producer.media_type == FILE_DOES_NOT_EXIST:
            return None
        self.add_clip_attr(producer)
        return producer
    def create_pattern_producer(self, pattern_producer_data):
        """
        Create a producer clip for a pattern (color etc.) media object.
        pattern_producer_data is instance of patternproducer.AbstractBinClip
        """
        clip = patternproducer.create_pattern_producer(self.profile, pattern_producer_data)
        self.add_clip_attr(clip)
        return clip
    def create_rendered_transition_clip(self, path, rendered_type):
        # Create a clip for a rendered transition file and tag it with its type.
        # NOTE(review): create_file_producer_clip() can return None for a
        # missing file which would raise AttributeError below — confirm
        # callers guarantee the rendered file exists.
        clip = self.create_file_producer_clip(path)
        clip.rendered_type = rendered_type
        return clip
    def add_clip_attr(self, clip):
        """
        File producers, transitions and black clips have same
        clip attributes.
        """
        clip.id = self.get_next_id()
        # example: in 10, out 10 == 1 frame long clip
        clip.clip_in = -1 # inclusive. -1 == not set
        clip.clip_out = -1 # inclusive, -1 == not set
        clip.is_blanck_clip = False
        clip.selected = False
        clip.sync_data = None
        clip.mute_filter = None # set when clip audio/video is muted
        clip.stream_indexes = None # a, v stream indexes when not muted
        clip.clip_length = lambda: _clip_length(clip) # MLT get_length gives wrong values for blanks
        clip.waveform_data = None
        clip.color = None # None means that clip type default color is displayed
def clone_track_clip(self, track, index):
orig_clip = track.clips[index]
return self.create_clone_clip(orig_clip)
def create_clone_clip(self, clip):
if clip.media_type != appconsts.PATTERN_PRODUCER:
clone_clip = self.create_file_producer_clip(clip.path) # file producer
else:
clone_clip = self.create_pattern_producer(clip.create_data) # pattern producer
self.clone_clip_and_filters(clip, clone_clip)
return clone_clip
    def clone_clip_and_filters(self, clip, clone_clip):
        """
        Clones clip range properties and filters that are needed for clip to be
        used in another clip's place, but not id, master_clip and selection
        properties that are part of original clips state in sequence.
        """
        clone_clip.clip_in = clip.clip_in
        clone_clip.clip_out = clip.clip_out
        clone_clip.filters = []
        for f in clip.filters:
            clone_filter = mltfilters.clone_filter_object(f, self.profile)
            clone_clip.attach(clone_filter.mlt_filter)
            clone_clip.filters.append(clone_filter)
def clone_filters(self, clip):
clone_filters = []
for f in clip.filters:
clone_filter = mltfilters.clone_filter_object(f, self.profile)
clone_filters.append(clone_filter)
return clone_filters
def get_next_id(self):
"""
Growing id for newly created clip or transition.
"""
self.next_id += 1
return self.next_id - 1
# ------------------------------------------ blanks
    def create_and_insert_blank(self, track, index, length):
        """
        Insert a blank of given length at index on track and return it.
        Used for persistance.
        """
        edit._insert_blank(track, index, length)
        return track.clips[index]
    def append_blank(self, blank_length, track):
        """
        Append a blank of blank_length frames at the end of track.
        Used in hack for trim editing last clip of a track.
        """
        index = len(track.clips)
        edit._insert_blank(track, index, blank_length)
    def remove_last_clip(self, track):
        """
        Remove the last clip of track.
        Used in hack for trim editing last clip of a track.
        """
        edit._remove_clip(track, len(track.clips) - 1)
# ------------------------------------------ filters
def create_filter(self, filter_info):
filter_object = mltfilters.FilterObject(filter_info)
filter_object.create_mlt_filter(self.profile)
return filter_object
def create_multipart_filter(self, filter_info, clip):
filter_object = mltfilters.MultipartFilterObject(filter_info)
filter_object.create_mlt_filters(self.profile, clip)
return filter_object
# ------------------------------------------------------ compositors
def create_compositor(self, compositor_type):
compositor = mlttransitions.create_compositor(compositor_type)
compositor.create_mlt_objects(self.profile)
return compositor
def restack_compositors(self):
self.sort_compositors()
new_compositors = []
for compositor in self.compositors:
if compositor.planted == False:
self._plant_compositor(compositor)
new_compositors.append(compositor)
else:
clone_compositor = self._create_and_plant_clone_compositor(compositor)
new_compositors.append(clone_compositor)
self.compositors = new_compositors
    def _plant_compositor(self, compositor):
        # Plant compositor's mlt transition into tractor field between its tracks.
        self.field.plant_transition(compositor.transition.mlt_transition,
                                    int(compositor.transition.a_track),
                                    int(compositor.transition.b_track))
        compositor.planted = True
    def _create_and_plant_clone_compositor(self, old_compositor):
        """
        Disconnect old compositor and plant a fresh clone in its place.
        Returns the clone.
        """
        # Remove old compositor
        #edit.old_compositors.append(old_compositor) # HACK. Garbage collecting compositors causes crashes.
        self.field.disconnect_service(old_compositor.transition.mlt_transition)
        # Create and plant new compositor
        compositor = self.create_compositor(old_compositor.type_id)
        compositor.clone_properties(old_compositor)
        compositor.set_in_and_out(old_compositor.clip_in, old_compositor.clip_out)
        compositor.transition.set_tracks(old_compositor.transition.a_track, old_compositor.transition.b_track)
        self._plant_compositor(compositor)
        return compositor
    def clone_compositors_from_sequence(self, from_sequence, track_delta):
        # Used when cloning compositors to change track count by cloning sequence
        new_compositors = []
        video_diff = self.first_video_index - from_sequence.first_video_index
        for old_compositor in from_sequence.compositors:
            # Drop compositors whose b track would fall outside new sequence's tracks
            if old_compositor.transition.b_track + video_diff < len(self.tracks) - 1:
                clone_compositor = self._create_and_plant_clone_compositor_for_sequnce_clone(old_compositor, track_delta)
                new_compositors.append(clone_compositor)
        self.compositors = new_compositors
    def _create_and_plant_clone_compositor_for_sequnce_clone(self, old_compositor, track_delta):
        # NOTE: 'sequnce' typo kept in name for caller compatibility.
        # Create and plant new compositor with tracks shifted by track_delta
        compositor = self.create_compositor(old_compositor.type_id)
        compositor.clone_properties(old_compositor)
        compositor.set_in_and_out(old_compositor.clip_in, old_compositor.clip_out)
        compositor.transition.set_tracks(old_compositor.transition.a_track + track_delta, old_compositor.transition.b_track + track_delta)
        self._plant_compositor(compositor)
        return compositor
    def get_compositors(self):
        # Return the sequence's compositor list.
        return self.compositors
    def add_compositor(self, compositor):
        # Add compositor to sequence; planting is done separately.
        self.compositors.append(compositor)
    def remove_compositor(self, old_compositor):
        """
        Remove compositor from sequence and disconnect its mlt transition.
        If compositor was replaced with a clone in restack_compositors() the
        current object is looked up using destroy_id.
        """
        #edit.old_compositors.append(old_compositor)# HACK. Garbage collecting compositors causes crashes.
        try:
            self.compositors.remove(old_compositor)
        except ValueError: # has been restacked since creation, needs to looked up using destroy_id
            found = False
            for comp in self.compositors:
                if comp.destroy_id == old_compositor.destroy_id:
                    found = True
                    self.compositors.remove(comp)
                    #edit.old_compositors.append(comp)
                    old_compositor = comp
            if found == False:
                raise ValueError('compositor not found using destroy_id')
        self.field.disconnect_service(old_compositor.transition.mlt_transition)
def get_compositor_for_destroy_id(self, destroy_id):
for comp in self.compositors:
if comp.destroy_id == destroy_id:
return comp
raise ValueError('compositor for id not found')
    def sort_compositors(self):
        """
        Sort compositors from top-most track to bottom.
        Compositor order must be from top to bottom or will not work.
        """
        # NOTE: Python 2 cmp-function sort; needs key= conversion for Python 3.
        self.compositors.sort(_sort_compositors_comparator)
# -------------------------- monitor clip, trimming display, output mode and hidden track
    def display_monitor_clip(self, path, pattern_producer_data=None):
        """
        Adds media clip to hidden track for viewing and for setting mark
        in and mark out points.
        pattern_producer_data is MediaFile or AbstractPatternProduer object
        Returns the created monitor clip.
        """
        track = self.tracks[-1] # Always last track
        if pattern_producer_data == None:
            self.monitor_clip = self.create_file_producer_clip(path)
        else:
            if pattern_producer_data.type == IMAGE_SEQUENCE:
                self.monitor_clip = self.create_file_producer_clip(pattern_producer_data.path)
            else:
                self.monitor_clip = self.create_pattern_producer(pattern_producer_data)
        edit._insert_clip(track, self.monitor_clip, 0, 0, \
                          self.monitor_clip.get_length() - 1)
        # Editable tracks are muted so only the monitor clip is seen/heard
        self._mute_editable()
        return self.monitor_clip
    def display_trim_clip(self, path, clip_start_pos, patter_producer_data=None):
        """
        Adds clip to hidden track for trim editing display.
        clip_start_pos: timeline frame where the trimmed clip starts; may be
        negative, in which case clip display starts mid-clip.
        """
        track = self.tracks[-1] # Always last track
        track.clear() # # TRIM INIT CRASH HACK, see clear_hidden_track there may be blank clip here
        track.clips = []
        # Display trimmmed clip on hidden track by creating copy of it.
        # File producer
        if path != None:
            clip = self.create_file_producer_clip(path)
            if clip_start_pos > 0:
                # Pad with blank so clip lands at its timeline position
                edit._insert_blank(track, 0, clip_start_pos)
                edit._insert_clip(track, clip, 1, 0, clip.get_length() - 1)
            else:
                edit._insert_clip(track, clip, 1, -clip_start_pos, clip.get_length() - 1) # insert index 1 ?
        # Pattern producer (FIX ME: does not allow for keyframes in pattern producer)
        else:
            clip = self.create_pattern_producer(patter_producer_data)
            edit._insert_clip(track, clip, 0, 0, clip.get_length() - 1)
        self._mute_editable()
def hide_hidden_clips(self):
"""
Called to temporarely remove hidden clips for trim mode loop playback
"""
self.tracks[-1].clear()
self._unmute_editable()
def redisplay_hidden_clips(self):
"""
Called after trim mode loop playback to redisplay hidden track clips
"""
clips = self.tracks[-1].clips
self.tracks[-1].clips = []
for i in range(0, len(clips)):
clip = clips[i]
if clip.is_blanck_clip:
edit._insert_blank(self.tracks[-1], i,
clip.clip_out - clip.clip_in + 1)
else:
edit._insert_clip(self.tracks[-1], clip, i,
clip.clip_in, clip.clip_out)
self._mute_editable()
    def clear_hidden_track(self):
        """
        Last track is hidden track used to display clips and trim edits.
        Here that track is cleared of any content.
        """
        self.update_edit_tracks_length()
        # Empty timeline needs blank clip of len atleast 1 because
        # edit_insert_blank() always needs a clip to add attributes to
        # and that method is fundamendal and cannot be changed.
        seq_len = self.seq_len
        if seq_len < 1:
            seq_len = 1
        self.tracks[-1].clips = []
        self.tracks[-1].clear()
        edit._insert_blank(self.tracks[-1], 0, seq_len) # TRIM INIT CRASH HACK. This being empty crashes a lot, so far unexplained.
        self._unmute_editable()
def update_edit_tracks_length(self):
# NEEDED FOR TRIM CRASH HACK, REMOVE IF FIXED
self.seq_len = 0 # muuta arvoksi 1 ???
for i in range(1, len(self.tracks) - 1):
track_len = self.tracks[i].get_length()
if track_len > self.seq_len:
self.seq_len = track_len
def update_trim_hack_blank_length(self):
# NEEDED FOR TRIM CRASH HACK, REMOVE IF FIXED
self.tracks[-1].clips = []
self.tracks[-1].clear()
seq_len = self.seq_len
if seq_len < 1:
seq_len = 1
edit._insert_blank(self.tracks[-1], 0, seq_len)
def get_seq_range_frame(self, frame):
# NEEDED FOR TRIM CRASH HACK, REMOVE IF FIXED
# remove TimeLineFrameScale then too
if frame >= (self.seq_len - 1):
return self.seq_len - 1
else:
return frame
def _mute_editable(self):
for i in range(1, len(self.tracks) - 1):
track = self.tracks[i]
track.set("hide", 3)
def _unmute_editable(self):
for i in range(1, len(self.tracks) - 1):
track = self.tracks[i]
track.set("hide", int(track.mute_state))
    def set_tracks_mute_state(self):
        # Apply each track's saved mute state to MLT.
        self._unmute_editable() # same thing, this method exists to declare purpose
def set_output_mode(self, mode):
if self.outputfilter != None:
self.tractor.detach(self.outputfilter)
self.outputfilter = None
if mode == PROGRAM_OUT_MODE:
return
elif mode == VECTORSCOPE_MODE:
self.tractor.attach(self.vectorscope)
self.outputfilter = self.vectorscope
elif mode == RGB_PARADE_MODE:
self.tractor.attach(self.rgbparade)
self.outputfilter = self.rgbparade
# ---------------------------------------------------- watermark
    def add_watermark(self, watermark_file_path):
        """
        Attach an always-active "watermark" filter showing the given image
        on the tractor output.
        """
        watermark = mlt.Filter(self.profile, "watermark")
        mltrefhold.hold_ref(watermark)
        watermark.set("resource",str(watermark_file_path))
        watermark.set("composite.always_active", 1)
        self.tractor.attach(watermark)
        self.watermark_filter = watermark
        self.watermark_file_path = watermark_file_path
def remove_watermark(self):
self.tractor.detach(self.watermark_filter)
self.watermark_filter = None
self.watermark_file_path = None
# ------------------------------------------------ length, seek, misc
    def update_length(self):
        """
        Set black bg track clip to track length of sequence.
        """
        global black_track_clip
        if black_track_clip == None: # This fails for launch with assoc Gnome file because this has not been made yet.
            # This global black_track_clip is brain dead.
            self._create_black_track_clip()
        c_in = 0
        c_out = self.get_length()
        black_track_clip.clip_in = c_in
        black_track_clip.clip_out = c_out
        black_track_clip.set_in_and_out(c_in, c_out)
    def get_length(self):
        # Length of the whole sequence in frames as reported by MLT multitrack.
        return self.multitrack.get_length()
    def resize_tracks_to_fit(self, allocation):
        """
        Shrink outermost tracks to small height one pair at a time until
        all tracks fit the panel height of allocation (x, y, w, h).
        """
        x, y, w, panel_height = allocation
        count = 0
        fix_next = True
        while(fix_next):
            tracks_height = self.get_tracks_height()
            if tracks_height < panel_height:
                fix_next = False
            elif count + 1 == self.first_video_index:
                # This shold not happen because track heights should be set up so that minimized app
                # has enough space to display all tracks.
                # Yet it happens sometimes, meh.
                print "sequence.resize_tracks_to_fit (): could not make panels fit"
                fix_next = False
            else:
                # Shrink one track from the bottom and one from the top
                self.tracks[1 + count].height = TRACK_HEIGHT_SMALL
                self.tracks[len(self.tracks) - 2 - count].height = TRACK_HEIGHT_SMALL
                count += 1
    def find_next_cut_frame(self, tline_frame):
        """
        Returns frame of next cut in active tracks relative to timeline,
        or -1 if there is none.
        """
        cut_frame = -1
        for i in range(1, len(self.tracks)):
            track = self.tracks[i]
            if track.active == False:
                continue
            # Get index and clip
            index = track.get_clip_index_at(tline_frame)
            try:
                clip = track.clips[index]
            except Exception:
                continue # Frame after last clip in track
            # Get next cut frame
            clip_start_in_tline = track.clip_start(index)
            length = clip.clip_out - clip.clip_in
            next_cut_frame = clip_start_in_tline + length + 1 # +1 clip out inclusive
            # Set cut frame: keep the nearest (smallest) next cut
            if cut_frame == -1:
                cut_frame = next_cut_frame
            elif next_cut_frame < cut_frame:
                cut_frame = next_cut_frame
        return cut_frame
def find_prev_cut_frame(self, tline_frame):
"""
Returns frame of next cut in active tracks relative to timeline.
"""
cut_frame = -1
for i in range(1, len(self.tracks)):
track = self.tracks[i]
if track == False:
continue
# Get index and clip start
index = track.get_clip_index_at(tline_frame)
clip_start_frame = track.clip_start(index)
# If we are on cut, we want previous cut
if clip_start_frame == tline_frame:
index = index - 1
# Check index is good
try:
clip = track.clips[index]
except Exception:
continue # index not good clip
# Get prev cut frame
next_cut_frame = track.clip_start(index)
# Set cut frame
if cut_frame == -1:
cut_frame = next_cut_frame
elif next_cut_frame > cut_frame:
cut_frame = next_cut_frame
return cut_frame
def get_closest_cut_frame(self, track_id, frame):
track = self.tracks[track_id]
index = track.get_clip_index_at(frame)
try:
clip = track.clips[index]
except Exception:
return -1
start_frame = track.clip_start(index)
start_dist = frame - start_frame
end_frame = start_frame + (clip.clip_out - clip.clip_in + 1) # frames are inclusive
end_dist = end_frame - frame
if start_dist < end_dist:
return start_frame
else:
return end_frame
return start_frame # equal distance
    def get_first_active_track(self):
        """
        Return the active track displayed top-most to the user, or None.
        This is done in a way that the user sees the track displayed as top most
        on screen being the first active when doing for e.g. a monitor insert.
        track: 0, black bg video
        tracks: 1 - (self.first_video_index - 1), audio, numbered to user in opposite direction as 1 - n (user_index = self.first_video_index - index)
        tracks: self.first_video_index - (len - 2), video, numbered to user as 1 - n (user_index = index - self.first_video_index + 1)
        track: (len - 1). hidden video track for trim and clip display
        """
        # Video
        for i in range(len(self.tracks) - 2, self.first_video_index - 1, -1):
            if self.tracks[i].active:
                return self.tracks[i]
        # Audio
        for i in range(self.first_video_index - 1, 0, -1):
            if self.tracks[i].active:
                return self.tracks[i]
        return None
def get_clip_index(self, track, frame):
"""
Returns index or -1 if frame not on a clip
"""
index = track.get_clip_index_at(frame)
try:
clip = track.clips[index]
except Exception:
return -1
return index
"""
def next_mute_state(self, track_index):
# track.mute_state values corrspond to mlt "hide" values
track = self.tracks[track_index]
if track.type == VIDEO:
track.mute_state = track.mute_state + 1
if track.mute_state > 3:
track.mute_state = 0 # mlt "hide" all on
else:
if track.mute_state == 1:
track.mute_state = 3 # mlt "hide" all off
else:
track.mute_state = 1 # mlt "hide" video off
track.set("hide", int(track.mute_state))
"""
def set_track_mute_state(self, track_index, mute_state):
track = self.tracks[track_index]
track.mute_state = mute_state
track.set("hide", int(track.mute_state))
    def print_all(self):
        # Debug helper: dump python and mlt side clip data for all tracks.
        print "------------------------######"
        for i in range(0, len(self.tracks)):
            print "TRACK:", i
            self.print_track(i)
    def print_track(self, track_id):
        # Debug helper: dump clips on track both from the python side list
        # and from the underlying mlt.Playlist, to spot desyncs.
        track = self.tracks[track_id]
        print "PYTHON"
        for i in range(0, len(track.clips)):
            clip = track.clips[i]
            if clip.is_blank():
                msg = "BLANK"
            else:
                msg = clip.name
            print i, ": id:", clip.id, " in:",clip.clip_in," out:", \
            clip.clip_out, msg
        print "MLT"
        for i in range(0, track.count()):
            clip = track.get_clip(i)
            print i, " in:", clip.get_in()," out:", clip.get_out()
    def print_compositors(self):
        # Debug helper: dump name and tracks of all compositors.
        for compositor in self.compositors:
            print "---"
            print compositor.name
            print "a_track:" , compositor.transition.a_track
            print "b_track:" , compositor.transition.b_track
# ------------------------------------------------ module util methods
def get_media_type(file_path):
    """
    Returns media type of file: VIDEO, AUDIO, IMAGE, IMAGE_SEQUENCE,
    UNKNOWN or FILE_DOES_NOT_EXIST.
    """
    if not os.path.exists(file_path):
        # IMAGE_SEQUENCE media objects have a MLT formatted resource path that does not
        # point to an existing file in the file system.
        # We're doing a heuristic here to identify those.
        if file_path.find("%0") != -1 and file_path.find("d.") != -1:
            return IMAGE_SEQUENCE
        if file_path.find(".all") != -1:
            return IMAGE_SEQUENCE
        return FILE_DOES_NOT_EXIST

    mime_type = utils.get_file_type(file_path)
    for prefix, media_type in (("video", VIDEO), ("audio", AUDIO), ("image", IMAGE)):
        if mime_type.startswith(prefix):
            return media_type
    return UNKNOWN
def _clip_length(clip):
return clip.clip_out - clip.clip_in + 1
def _sort_compositors_comparator(a_comp, b_comp):
# compositors on top most tracks first
if a_comp.transition.b_track > b_comp.transition.b_track:
return -1
elif a_comp.transition.b_track < b_comp.transition.b_track:
return 1
else:
return 0
# ----------------------------- sequence cloning for tracks count change
def create_sequence_clone_with_different_track_count(old_seq, v_tracks, a_tracks):
    """
    Returns a clone of old_seq with v_tracks video and a_tracks audio tracks.
    Track contents, compositors and the running clip id are carried over.
    """
    # New sequences read their track counts from these module globals.
    global AUDIO_TRACKS_COUNT, VIDEO_TRACKS_COUNT
    AUDIO_TRACKS_COUNT = a_tracks
    VIDEO_TRACKS_COUNT = v_tracks

    new_seq = Sequence(old_seq.profile, old_seq.name)
    new_seq.create_default_tracks()

    # Clone track clips from old sequence to clone sequence; the direction of
    # the audio track count change decides how track indexes are mapped.
    if old_seq.first_video_index - 1 > a_tracks:
        _clone_for_fewer_tracks(old_seq, new_seq)
    else:
        _clone_for_more_tracks(old_seq, new_seq)

    # Clone compositors from old seq onto the corresponding tracks of new seq.
    track_delta = new_seq.first_video_index - old_seq.first_video_index
    new_seq.clone_compositors_from_sequence(old_seq, track_delta)

    # Keep the clip id sequence running across the clone.
    new_seq.next_id = old_seq.next_id

    return new_seq
def _clone_for_more_tracks(old_seq, new_seq):
    """Copy track contents when the audio track count grew: the old sequence's
    first editable track maps to a higher track index on the new sequence."""
    audio_tracks_count_diff = new_seq.first_video_index - old_seq.first_video_index
    first_to_track_index = audio_tracks_count_diff + 1 # +1, black bg track
    # - 3 because: black bg track, hidden track, out inclusive
    last_to_track_index = first_to_track_index + len(old_seq.tracks) - 3
    _clone_tracks(old_seq, new_seq, first_to_track_index, last_to_track_index, 1)
def _clone_for_fewer_tracks(old_seq, new_seq):
    """Copy track contents when the audio track count shrank: copying starts
    from a higher track index on the old sequence."""
    first_to_track_index = 1
    # - 3 because: black bg track, hidden track, out inclusive
    last_to_track_index = first_to_track_index + len(new_seq.tracks) - 3
    audio_tracks_count_diff = old_seq.first_video_index - new_seq.first_video_index
    from_track_index = audio_tracks_count_diff + 1 # +1, black bg track
    _clone_tracks(old_seq, new_seq, first_to_track_index, last_to_track_index, from_track_index)
def _clone_tracks(old_seq, new_seq, first_to_track_index, last_to_track_index, first_from_track_index):
    """Copy track contents old_seq -> new_seq for the given track index ranges."""
    from_track_index = first_from_track_index
    for to_track_index in range(first_to_track_index, last_to_track_index + 1):
        # when changing to a (8V,1A) tracks sequence this range needs to be checked for
        if from_track_index > len(old_seq.tracks) - 1:
            continue
        # when changing to a (1V,8A) tracks sequence this range needs to be checked for
        if to_track_index > len(new_seq.tracks) - 1:
            continue
        _copy_track_contents(old_seq.tracks[from_track_index],
                             new_seq.tracks[to_track_index],
                             new_seq)
        from_track_index += 1
def _copy_track_contents(from_track, to_track, to_sequence):
    """Move clips and copy mute/lock attributes from from_track to to_track.

    NOTE: from_track is emptied in the process; clips can only live on one
    MLT track at a time.
    """
    # Copy clips
    for index, clip in enumerate(from_track.clips):
        if not clip.is_blanck_clip:
            edit.append_clip(to_track, clip, clip.clip_in, clip.clip_out)
        else:
            edit._insert_blank(to_track, index, clip.clip_out - clip.clip_in + 1)
    from_track.clear()
    from_track.clips = []

    # Copy track attributes.
    to_sequence.set_track_mute_state(to_track.id, from_track.mute_state)
    to_track.edit_freedom = from_track.edit_freedom
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains objects used to capture project data.
"""
import datetime
import pygtk
pygtk.require('2.0');
import gtk
import mlt
import md5
import os
import shutil
import time
import appconsts
import editorpersistance
from editorstate import PLAYER
import mltprofiles
import mltrefhold
import patternproducer
import projectaction
import miscdataobjects
import respaths
import sequence
import utils
SAVEFILE_VERSION = 5 # this is changed when backwards incompatible changes are introduced
                     # to project files to allow for fixing them at load time

# Thumbnail image used when creating an icon for a media file fails.
FALLBACK_THUMB = "fallback_thumb.png"

# Project events
EVENT_CREATED_BY_NEW_DIALOG = 0
EVENT_CREATED_BY_SAVING = 1
EVENT_SAVED = 2
EVENT_SAVED_AS = 3
EVENT_RENDERED = 4
EVENT_SAVED_SNAPSHOT = 5

# Module-level Thumbnailer singleton, created lazily by Project.init_thumbnailer().
thumbnailer = None
class Project:
"""
Collection of all the data edited as a single unit.
Contains collection of media files and one or more sequences
Only one sequence is edited at a time.
"""
def __init__(self, profile): #profile is mlt.Profile here, made using file path
self.name = _("untitled") + appconsts.PROJECT_FILE_EXTENSION
self.profile = profile
self.profile_desc = profile.description()
self.bins = []
self.media_files = {} # MediaFile.id(key) -> MediaFile object(value)
self.sequences = []
self.next_media_file_id = 0
self.next_bin_number = 1 # This is for creating name for new bin
self.next_seq_number = 1 # This is for creating name for new sequence
self.last_save_path = None
self.events = []
self.media_log = []
self.media_log_groups = []
self.proxy_data = miscdataobjects.ProjectProxyEditingData()
self.SAVEFILE_VERSION = SAVEFILE_VERSION
# c_seq is the currently edited Sequence
self.add_unnamed_sequence()
self.c_seq = self.sequences[0]
# c_bin is the currently displayed bin
self.add_unnamed_bin()
self.c_bin = self.bins[0]
self.init_thumbnailer()
def init_thumbnailer(self):
global thumbnailer
if thumbnailer == None:
thumbnailer = Thumbnailer()
thumbnailer.set_context(self.profile)
def add_image_sequence_media_object(self, resource_path, name, length):
media_object = self.add_media_file(resource_path)
media_object.length = length
media_object.name = name
def add_media_file(self, file_path):
"""
Adds media file to project if exists and file is of right type.
"""
(directory, file_name) = os.path.split(file_path)
(name, ext) = os.path.splitext(file_name)
# Get media type
media_type = sequence.get_media_type(file_path)
# Get length and icon
if media_type == appconsts.AUDIO:
icon_path = respaths.IMAGE_PATH + "audio_file.png"
length = thumbnailer.get_file_length(file_path)
else: # For non-audio we need write a thumbbnail file and get file lengh while we're at it
(icon_path, length) = thumbnailer.write_image(file_path)
# Create media file object
media_object = MediaFile(self.next_media_file_id, file_path,
file_name, media_type, length, icon_path)
self._add_media_object(media_object)
return media_object
def add_pattern_producer_media_object(self, media_object):
self._add_media_object(media_object)
def _add_media_object(self, media_object):
"""
Adds media file or color clip to project data structures.
"""
self.media_files[media_object.id] = media_object
self.next_media_file_id += 1
# Add to bin
self.c_bin.file_ids.append(media_object.id)
def media_file_exists(self, file_path):
for key, media_file in self.media_files.items():
if media_file.type == appconsts.PATTERN_PRODUCER:
continue
if file_path == media_file.path:
return True
return False
def get_media_file_for_path(self, file_path):
for key, media_file in self.media_files.items():
if media_file.type == appconsts.PATTERN_PRODUCER:
continue
if file_path == media_file.path:
return media_file
return None
def delete_media_file_from_current_bin(self, media_file):
self.c_bin.file_ids.pop(media_file.id)
def get_current_proxy_paths(self):
paths_dict = {}
for idkey, media_file in self.media_files.items():
try:
if media_file.is_proxy_file:
paths_dict[media_file.path] = media_file
except AttributeError: # Pattern producers or old media files do not have these, add values
self.has_proxy_file = False
self.is_proxy_file = False
self.second_file_path = None
return paths_dict
def add_unnamed_bin(self):
"""
Adds bin with default name.
"""
name = _("bin_") + str(self.next_bin_number)
self.bins.append(Bin(name))
self.next_bin_number += 1
def add_unnamed_sequence(self):
"""
Adds sequence with default name
"""
name = _("sequence_") + str(self.next_seq_number)
self.add_named_sequence(name)
def add_named_sequence(self, name):
seq = sequence.Sequence(self.profile, name)
seq.create_default_tracks()
self.sequences.append(seq)
self.next_seq_number += 1
def get_filtered_media_log_events(self, group_index, incl_starred, incl_not_starred):
filtered_events = []
if group_index < 0:
view_items = self.media_log
else:
name, items = self.media_log_groups[group_index]
view_items = items
for media_log_event in view_items:
if self._media_log_included_by_starred(media_log_event.starred, incl_starred, incl_not_starred):
filtered_events.append(media_log_event)
return filtered_events
def _media_log_included_by_starred(self, starred, incl_starred, incl_not_starred):
if starred == True and incl_starred == True:
return True
if starred == False and incl_not_starred == True:
return True
return False
def delete_media_log_events(self, items):
for i in items:
self.media_log.remove(i)
def remove_from_group(self, group_index, items):
if group_index < 0: # -1 is used as "All" group index in medialog.py, but it isn't group, it is contents of self.media_log
return
name, group_items = self.media_log_groups[group_index]
for i in items:
group_items.remove(i)
def add_to_group(self, group_index, items):
if group_index < 0: # -1 is used as "All" group index in medialog.py, but it isn't group, it is contents of self.media_log
return
name, group_items = self.media_log_groups[group_index]
for i in items:
try:
group_items.remove(i) # single ref to item in list allowed
except:
pass
group_items.append(i)
def add_media_log_group(self, name, items):
self.media_log_groups.append((name, items))
def exit_clip_renderer_process(self):
pass
def get_last_render_folder(self):
last_render_event = None
for pe in self.events:
if pe.event_type == EVENT_RENDERED:
last_render_event = pe
if last_render_event == None:
return None
return os.path.dirname(last_render_event.data)
# ------------------------------------------------------- Snapshot save project
def save_backup_snapshot(self, root_folder_path):
media_folder = root_folder_path + "media/"
d = os.path.dirname(media_folder)
os.mkdir(d)
asset_paths = {}
# Copy media files
for idkey, media_file in self.media_files.items():
# Copy asset file and fix path
directory, file_name = os.path.split(media_file.path)
media_file_copy = media_folder + file_name
if media_file_copy in asset_paths: # Create different filename for files
# that have same filename but different path
file_name = self.get_unique_name(media_file.path, file_name)
media_file_copy = media_folder + file_name
shutil.copyfile(media_file.path, media_file_copy)
asset_paths[media_file.path] = media_file_copy
# Copy clip producers paths
for seq in self.sequences:
for track in seq.tracks:
for i in range(0, len(track.clips)):
clip = track.clips[i]
# Only producer clips are affected
if (clip.is_blanck_clip == False and (clip.media_type != appconsts.PATTERN_PRODUCER)):
directory, file_name = os.path.split(clip.path)
clip_file_copy = media_folder + file_name
if not os.path.isfile(clip_file_copy):
print "clip_file_copy", clip_file_copy
shutil.copyfile(clip.path, clip_file_copy) # only rendered files are copied here
asset_paths[clip.path] = clip_file_copy # This stuff is already md5 hashed, so no duplicate problems here
def get_unique_name(self, file_path, file_name):
(name, ext) = os.path.splitext(file_name)
return md5.new(file_path).hexdigest() + ext
class MediaFile:
    """
    Media file that can added to and edited in Sequence.
    """
    def __init__(self, id, file_path, name, media_type, length, icon_path):
        self.id = id
        self.path = file_path
        self.name = name
        self.type = media_type # media type constant, compared against appconsts.AUDIO etc. by callers
        self.length = length
        self.icon_path = icon_path
        self.icon = None
        self.create_icon()

        # Mark in/out frames; -1 means not set.
        self.mark_in = -1
        self.mark_out = -1

        # Proxy editing state.
        self.has_proxy_file = False
        self.is_proxy_file = False
        self.second_file_path = None # to proxy when original, to original when proxy

        self.current_frame = 0

        # Set default length for graphics files
        (f_name, ext) = os.path.splitext(self.name)
        if utils.file_extension_is_graphics_file(ext):
            in_fr, out_fr, l = editorpersistance.get_graphics_default_in_out_length()
            self.mark_in = in_fr
            self.mark_out = out_fr
            self.length = l

    def create_icon(self):
        """Load and scale the thumbnail icon; fall back to a stock image on failure."""
        try:
            icon = gtk.gdk.pixbuf_new_from_file(self.icon_path)
            self.icon = icon.scale_simple(appconsts.THUMB_WIDTH, appconsts.THUMB_HEIGHT, \
                                          gtk.gdk.INTERP_BILINEAR)
        except:
            # Loading failed (e.g. broken or missing thumbnail file); use fallback image.
            print "failed to make icon from:", self.icon_path
            self.icon_path = respaths.IMAGE_PATH + FALLBACK_THUMB
            icon = gtk.gdk.pixbuf_new_from_file(self.icon_path)
            self.icon = icon.scale_simple(appconsts.THUMB_WIDTH, appconsts.THUMB_HEIGHT, \
                                          gtk.gdk.INTERP_BILINEAR)

    def create_proxy_path(self, proxy_width, proxy_height, file_extesion):
        """Return a deterministic (md5-based) path for this file's proxy render target."""
        proxy_md_key = self.path + str(proxy_width) + str(proxy_height)
        if hasattr(self, "use_unique_proxy"): # This may have been added in proxyediting.py to prevent interfering with existing projects
            proxy_md_key = proxy_md_key + os.urandom(16)
        md_str = md5.new(proxy_md_key).hexdigest()
        return str(editorpersistance.prefs.render_folder + "/proxies/" + md_str + "." + file_extesion) # str() because we get unicode here

    def add_proxy_file(self, proxy_path):
        # Record that a proxy exists for this file; does not switch to it.
        self.has_proxy_file = True
        self.second_file_path = proxy_path

    def add_existing_proxy_file(self, proxy_width, proxy_height, file_extesion):
        """Attach an already rendered proxy file to this media file."""
        proxy_path = self.create_proxy_path(proxy_width, proxy_height, file_extesion)
        self.add_proxy_file(proxy_path)

    def set_as_proxy_media_file(self):
        # Swap paths so self.path points to the proxy file.
        self.path, self.second_file_path = self.second_file_path, self.path
        self.is_proxy_file = True

    def set_as_original_media_file(self):
        # Swap paths back so self.path points to the original file.
        self.path, self.second_file_path = self.second_file_path, self.path
        self.is_proxy_file = False
class BinColorClip:
    # DEPRECATED, this is replaced by patternproducer.BinColorClip.
    # This is kept for project file backwards compatibility,
    # unpickle fails for color clips if this isn't here.
    # kill 2016-ish
    def __init__(self, id, name, gdk_color_str):
        self.id = id
        self.name = name
        self.gdk_color_str = gdk_color_str
        self.length = 15000
        self.type = appconsts.PATTERN_PRODUCER
        self.icon = None
        self.create_icon()
        self.patter_producer_type = patternproducer.COLOR_CLIP

        self.mark_in = -1
        self.mark_out = -1

    def create_icon(self):
        # Thumbnail is a solid rectangle filled with the clip color.
        icon = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, appconsts.THUMB_WIDTH, appconsts.THUMB_HEIGHT)
        pixel = utils.gdk_color_str_to_int(self.gdk_color_str)
        icon.fill(pixel)
        self.icon = icon
class Bin:
    """
    Group of media files.
    """
    def __init__(self, name="name"):
        # Displayed name of the bin.
        self.name = name
        # Ids of media files contained in this bin; ids are increasing
        # integers handed out in Project.add_media_file(...).
        self.file_ids = []
class ProducerNotValidError(Exception):
    """Raised when MLT cannot create a valid producer for a media file path."""

    def __init__(self, value):
        # Path of the file that failed to produce a valid producer.
        self.value = value

    def __str__(self):
        return repr(self.value)
class Thumbnailer:
    """Writes thumbnail images and reads media lengths using MLT producers/consumers."""
    def __init__(self):
        # mlt.Profile used to create producers and consumers; set via set_context().
        self.profile = None

    def set_context(self, profile):
        self.profile = profile

    def write_image(self, file_path):
        """
        Writes thumbnail image from file producer.
        Returns (thumbnail_path, length_in_frames).
        """
        # Get data; thumbnail file name is md5 hash of the media file path.
        md_str = md5.new(file_path).hexdigest()
        thumbnail_path = editorpersistance.prefs.thumbnail_folder + "/" + md_str + ".png"

        # Create consumer
        consumer = mlt.Consumer(self.profile, "avformat",
                                thumbnail_path)
        consumer.set("real_time", 0)
        consumer.set("vcodec", "png")

        # Create one frame producer
        producer = mlt.Producer(self.profile, str(file_path))
        if producer.is_valid() == False:
            raise ProducerNotValidError(file_path)
        length = producer.get_length()
        # Thumbnail is taken from the middle frame (Python 2 integer division).
        frame = length / 2
        producer = producer.cut(frame, frame)

        # Connect and write image
        consumer.connect(producer)
        consumer.run()

        return (thumbnail_path, length)

    def get_file_length(self, file_path):
        # This is used for audio files which don't need a thumbnail written
        # but do need file length known
        # Create one frame producer
        producer = mlt.Producer(self.profile, str(file_path))
        return producer.get_length()
# ----------------------------------- project and media log events
class ProjectEvent:
    """A timestamped entry in the project's event history (saves, renders, snapshots)."""

    def __init__(self, event_type, data):
        # One of the module-level EVENT_* constants.
        self.event_type = event_type
        self.timestamp = datetime.datetime.now()
        # Payload; meaning depends on event_type (usually a path or (name, path)).
        self.data = data

    def get_date_str(self):
        """Return timestamp as 'yy-mm-dd hh:mm' with a leading zero stripped."""
        return self.timestamp.strftime('%y-%m-%d %H:%M').lstrip('0')

    def get_desc_and_path(self):
        """Return (localized description, path) for display; path may be None."""
        event_type = self.event_type
        if event_type == EVENT_CREATED_BY_NEW_DIALOG:
            return (_("Created using dialog"), None)
        if event_type == EVENT_CREATED_BY_SAVING:
            return (_("Created using Save As... "), self.data)
        if event_type == EVENT_SAVED:
            return (_("Saved "), self.data)
        if event_type == EVENT_SAVED_AS:
            name, path = self.data
            return (_("Saved as ") + name, path)
        if event_type == EVENT_RENDERED:
            return (_("Rendered "), self.data)
        if event_type == EVENT_SAVED_SNAPSHOT:
            return (_("Saved backup snapshot"), self.data)
        return ("Unknown project event, bug or data corruption", None)
# ------------------------------- MODULE FUNCTIONS
def get_default_project():
    """
    Creates the project displayed at start up.
    """
    # New project uses the default MLT profile from preferences.
    return Project(mltprofiles.get_default_profile())
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2014 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This module handles track actions; mute, active state, size change.
"""
import appconsts
import dialogutils
import gui
import guicomponents
import editorstate
from editorstate import get_track
from editorstate import current_sequence
import tlinewidgets
import updater
# --------------------------------------- menu events
def _track_menu_item_activated(widget, data):
    """Dispatch a track popup menu selection to the handler for its item id."""
    track, item_id, selection_data = data
    if selection_data == None:
        handler_args = (track,)
    else:
        handler_args = (track, selection_data)
    POPUP_HANDLERS[item_id](*handler_args)
def lock_track(track_index):
    """Set track at track_index to locked edit state and redraw timeline."""
    locked_track = get_track(track_index)
    locked_track.edit_freedom = appconsts.LOCKED
    updater.repaint_tline()
def unlock_track(track_index):
    """Set track at track_index to free edit state and redraw timeline."""
    unlocked_track = get_track(track_index)
    unlocked_track.edit_freedom = appconsts.FREE
    updater.repaint_tline()
def set_track_normal_height(track_index):
    """Set track at track_index to normal height, reverting with a warning
    dialog if the taller tracks no longer fit in the timeline canvas."""
    track = get_track(track_index)
    track.height = appconsts.TRACK_HEIGHT_NORMAL

    # Check that new height tracks can be displayed and cancel if not.
    allocation = gui.tline_canvas.widget.allocation
    x, y, w, h = allocation
    if current_sequence().get_tracks_height() > h:
        track.height = appconsts.TRACK_HEIGHT_SMALL
        dialogutils.warning_message(_("Not enough vertical space on Timeline to expand track"),
                                    _("Maximize or resize application window to get more\nspace for tracks if possible."),
                                    gui.editor_window.window,
                                    True)
        return

    tlinewidgets.set_ref_line_y(allocation)
    gui.tline_column.init_listeners()
    updater.repaint_tline()
def set_track_small_height(track_index):
    """Set track at track_index to small height (smallest on low screens)."""
    track = get_track(track_index)
    if editorstate.SCREEN_HEIGHT < 863:
        track.height = appconsts.TRACK_HEIGHT_SMALLEST
    else:
        track.height = appconsts.TRACK_HEIGHT_SMALL

    tlinewidgets.set_ref_line_y(gui.tline_canvas.widget.allocation)
    gui.tline_column.init_listeners()
    updater.repaint_tline()
def mute_track(track, new_mute_state):
    """Set mute state for a track object selected from the track popup menu."""
    # NOTE: THIS IS A SAVED EDIT OF SEQUENCE, BUT IS NOT AN UNDOABLE EDIT
    current_sequence().set_track_mute_state(track.id, new_mute_state)
    gui.tline_column.widget.queue_draw()
def all_tracks_menu_launch_pressed(widget, event):
    # Show the "all tracks" popup menu; selections land in _all_tracks_item_activated.
    guicomponents.get_all_tracks_popup_menu(event, _all_tracks_item_activated)
def _all_tracks_item_activated(widget, msg):
    """Apply the selected all-tracks height operation and refresh the timeline."""
    allocation = gui.tline_canvas.widget.allocation
    if msg == "min":
        current_sequence().minimize_tracks_height()
    elif msg == "max":
        current_sequence().maximize_tracks_height(allocation)
    elif msg == "maxvideo":
        current_sequence().maximize_video_tracks_height(allocation)
    elif msg == "maxaudio":
        current_sequence().maximize_audio_tracks_height(allocation)
    else:
        return
    _tracks_resize_update()
def _tracks_resize_update():
    # Redraw timeline and track column after any track height change.
    tlinewidgets.set_ref_line_y(gui.tline_canvas.widget.allocation)
    gui.tline_column.init_listeners()
    updater.repaint_tline()
    gui.tline_column.widget.queue_draw()
# ------------------------------------------------------------- mouse events
def track_active_switch_pressed(data):
    """Handle press on a track's active switch: left button toggles the
    active state, right button opens the track popup menu."""
    track = get_track(data.track) # data.track is index, not object

    if data.event.button == 1:
        # Flip active state, but never allow all tracks to be inactive.
        track.active = (track.active == False)
        if current_sequence().all_tracks_off() == True:
            track.active = True
        gui.tline_column.widget.queue_draw()
    elif data.event.button == 3:
        guicomponents.display_tracks_popup_menu(data.event, data.track, \
                                                _track_menu_item_activated)
def track_center_pressed(data):
    """Handle press on the center area of a track column head.

    Left button toggles mute state when a mute icon was hit,
    right button opens the track popup menu.
    """
    if data.event.button == 1:
        # handle possible mute icon presses
        press_x = data.event.x
        press_y = data.event.y
        track = tlinewidgets.get_track(press_y)
        if track == None:
            return
        # Press y position relative to track top.
        y_off = press_y - tlinewidgets._get_track_y(track.id)
        ICON_WIDTH = 12
        if press_x > tlinewidgets.COLUMN_LEFT_PAD and press_x < tlinewidgets.COLUMN_LEFT_PAD + ICON_WIDTH:
            # Mute icon x area hit
            ix, iy = tlinewidgets.MUTE_ICON_POS
            if track.height > appconsts.TRACK_HEIGHT_SMALL:
                ix, iy = tlinewidgets.MUTE_ICON_POS_NORMAL
            ICON_HEIGHT = 10
            if track.id >= current_sequence().first_video_index:
                # Video tracks
                # Test mute switches
                if y_off > iy and y_off < iy + ICON_HEIGHT:
                    # Video mute icon hit: flip the video component of the mute state.
                    if track.mute_state == appconsts.TRACK_MUTE_NOTHING:
                        new_mute_state = appconsts.TRACK_MUTE_VIDEO
                    elif track.mute_state == appconsts.TRACK_MUTE_VIDEO:
                        new_mute_state = appconsts.TRACK_MUTE_NOTHING
                    elif track.mute_state == appconsts.TRACK_MUTE_AUDIO:
                        new_mute_state = appconsts.TRACK_MUTE_ALL
                    elif track.mute_state == appconsts.TRACK_MUTE_ALL:
                        new_mute_state = appconsts.TRACK_MUTE_AUDIO
                elif y_off > iy + ICON_HEIGHT and y_off < iy + ICON_HEIGHT * 2:
                    # Audio mute icon hit: flip the audio component of the mute state.
                    if track.mute_state == appconsts.TRACK_MUTE_NOTHING:
                        new_mute_state = appconsts.TRACK_MUTE_AUDIO
                    elif track.mute_state == appconsts.TRACK_MUTE_VIDEO:
                        new_mute_state = appconsts.TRACK_MUTE_ALL
                    elif track.mute_state == appconsts.TRACK_MUTE_AUDIO:
                        new_mute_state = appconsts.TRACK_MUTE_NOTHING
                    elif track.mute_state == appconsts.TRACK_MUTE_ALL:
                        new_mute_state = appconsts.TRACK_MUTE_VIDEO
                else:
                    return
            else:
                # Audio tracks
                # Test mute switches
                iy = iy + 6 # Mute icon is lower on audio tracks
                if y_off > iy and y_off < iy + ICON_HEIGHT:
                    # Audio tracks toggle only between all-muted and video-muted.
                    if track.mute_state == appconsts.TRACK_MUTE_VIDEO:
                        new_mute_state = appconsts.TRACK_MUTE_ALL
                    else:
                        new_mute_state = appconsts.TRACK_MUTE_VIDEO
                else:
                    return

            # Update track mute state
            current_sequence().set_track_mute_state(track.id, new_mute_state)
            gui.tline_column.widget.queue_draw()

    if data.event.button == 3:
        guicomponents.display_tracks_popup_menu(data.event, data.track, \
                                                _track_menu_item_activated)
# Maps track popup menu item ids to their handler functions,
# dispatched in _track_menu_item_activated().
POPUP_HANDLERS = {"lock":lock_track,
                  "unlock":unlock_track,
                  "normal_height":set_track_normal_height,
                  "small_height":set_track_small_height,
                  "mute_track":mute_track}
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2014 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import pygtk
pygtk.require('2.0');
import gtk
import appconsts
import edit
from editorstate import current_sequence
import tlinewidgets
import updater
# Suitably large value that stands in for "no limit" when computing
# per-track backwards move limits.
MAX_DELTA = 100000000

# Dict describing the edit in progress; None when no edit is underway.
edit_data = None
# Set True on press when the press does not start a legal edit; the
# following move/release events are then ignored until the next press.
mouse_disabled = True
class MultimoveData:
    """
    This class collects and saves data that enables a "Multi" tool edit to be performed.
    """
    def __init__(self, pressed_track, first_moved_frame, move_all_tracks):
        # First frame of the pressed clip; material from this frame onwards is moved.
        self.first_moved_frame = first_moved_frame
        self.pressed_track_id = pressed_track.id
        # Max number of frames the move can go backwards without overwriting.
        self.max_backwards = 0
        self.move_all_tracks = move_all_tracks
        # Per editable track: index of the blank clip that gets trimmed and/or
        # added/removed; -1 when no blank is altered on that track.
        self.trim_blank_indexes = []
        # Per editable track: appconsts.MULTI_* op describing how the edit applies.
        self.track_edit_ops = []
        # Per editable track: True if the track takes part in the edit.
        self.track_affected = []
        self.legal_edit = True
        self._build_move_data()

    def _build_move_data(self):
        # Look at all tracks except the first and last (black bg and hidden tracks).
        tracks = current_sequence().tracks

        # Get per track:
        # * maximum length edit can be done backwards before an overwrite happens
        # * indexes of blanks that are trimmed and/or added/removed,
        #   -1 when no blanks are altered on that track
        track_max_deltas = []
        trim_blank_indexes = []
        for i in range(1, len(tracks) - 1):
            track = tracks[i]
            if len(track.clips) == 0:
                # Empty track imposes no limit on the move.
                track_max_deltas.append(MAX_DELTA)
                trim_blank_indexes.append(-1)
            else:
                clip_index = current_sequence().get_clip_index(track, self.first_moved_frame)
                # NOTE(review): these two lookups run before the clip_index == -1
                # check below; with -1 they read the last clip via negative
                # indexing. The values go unused in that case, but confirm
                # clip_start(-1) is side-effect free.
                first_frame_clip = track.clips[clip_index]
                clip_first_frame = track.clip_start(clip_index)

                # Case: frame after track last clip, no clips are moved
                if clip_index == -1:
                    track_max_deltas.append(MAX_DELTA)
                    trim_blank_indexes.append(-1)
                    continue

                # Case: Clip start in same frame as moved clip start
                if (clip_first_frame == self.first_moved_frame) and (not first_frame_clip.is_blanck_clip):
                    if clip_index == 0: # First clip on track
                        track_max_deltas.append(0)
                        trim_blank_indexes.append(0)
                    else:
                        # not first/last clip on track
                        prev_clip = track.clips[clip_index - 1]
                        if not prev_clip.is_blanck_clip:
                            # first clip to be moved is tight after clip on first move frame
                            track_max_deltas.append(0)
                            trim_blank_indexes.append(clip_index)
                        else:
                            blank_clip_start_frame = track.clip_start(clip_index + 1)
                            moved_clip_start_frame = track.clip_start(clip_index + 2)
                            track_max_deltas.append(moved_clip_start_frame - blank_clip_start_frame)
                            trim_blank_indexes.append(clip_index - 1)
                    continue

                # Case: frame on clip
                if not first_frame_clip.is_blanck_clip:
                    if clip_index == 0: # First clip on track
                        track_max_deltas.append(0)
                        trim_blank_indexes.append(0)
                    elif clip_index == len(track.clips) - 1: # last clip on track, no clips are moved
                        track_max_deltas.append(MAX_DELTA)
                        trim_blank_indexes.append(-1)
                    else:
                        # not first/last clip on track
                        next_clip = track.clips[clip_index + 1]
                        if not next_clip.is_blanck_clip:
                            # first clip to be moved is tight after clip on first move frame
                            track_max_deltas.append(0)
                            trim_blank_indexes.append(clip_index + 1)
                        else:
                            blank_clip_start_frame = track.clip_start(clip_index + 1)
                            moved_clip_start_frame = track.clip_start(clip_index + 2)
                            track_max_deltas.append(moved_clip_start_frame - blank_clip_start_frame)
                            trim_blank_indexes.append(clip_index + 1)
                # Case: frame on blank
                else:
                    track_max_deltas.append(track.clips[clip_index].clip_length())
                    trim_blank_indexes.append(clip_index)

        self.trim_blank_indexes = trim_blank_indexes

        # Pressed track max delta trim blank index is calculated differently
        # (because on pressed track to the hit clip is moved)
        # and existing values overwritten
        track = tracks[self.pressed_track_id]
        clip_index = current_sequence().get_clip_index(track, self.first_moved_frame)
        first_frame_clip = track.clips[clip_index]
        if first_frame_clip.is_blanck_clip:
            # Pressing a blank does not define a legal edit.
            self.legal_edit = False
            return

        if clip_index == 0:
            max_d = 0
            trim_index = 0
        else:
            prev_clip = track.clips[clip_index - 1]
            if prev_clip.is_blanck_clip == True:
                max_d = prev_clip.clip_length()
                trim_index = clip_index - 1
            else:
                max_d = 0
                trim_index = clip_index

        # Per-track lists are offset by 1 since track 0 (black bg) is skipped.
        track_max_deltas[self.pressed_track_id - 1] = max_d
        self.trim_blank_indexes[self.pressed_track_id - 1] = trim_index

        # Smallest track delta is the max number of frames
        # the edit can be done backwards
        smallest_max_delta = MAX_DELTA
        for i in range(1, len(tracks) - 1):
            d = track_max_deltas[i - 1]
            if d < smallest_max_delta:
                smallest_max_delta = d
        self.max_backwards = smallest_max_delta

        # Tracks have different ways the edit will need to be applied,
        # make a list of those
        track_edit_ops = []
        for i in range(1, len(tracks) - 1):
            track = tracks[i]
            track_delta = track_max_deltas[i - 1]
            if track_delta == 0:
                track_edit_ops.append(appconsts.MULTI_ADD_TRIM)
            elif track_delta == MAX_DELTA:
                track_edit_ops.append(appconsts.MULTI_NOOP)
            elif self.max_backwards > 0 and track_delta == self.max_backwards:
                track_edit_ops.append(appconsts.MULTI_TRIM_REMOVE)
            else:
                track_edit_ops.append(appconsts.MULTI_TRIM)
        self.track_edit_ops = track_edit_ops

        # Make list of boolean values of tracks affected by the edit
        if self.move_all_tracks:
            for i in range(1, len(tracks) - 1):
                self.track_affected.append(True)
        else:
            for i in range(1, len(tracks) - 1):
                self.track_affected.append(False)
            self.track_affected[self.pressed_track_id - 1] = True
def mouse_press(event, frame):
    """Start a Multi tool move edit if a non-blank clip was pressed;
    otherwise disable mouse handling until the next press."""
    global edit_data, mouse_disabled

    # Clear edit data in gui module
    edit_data = None
    mouse_disabled = False
    tlinewidgets.set_edit_mode_data(edit_data)

    # Pressing outside of tracks does not define an edit.
    pressed_track = tlinewidgets.get_track(event.y)
    if pressed_track == None:
        mouse_disabled = True
        return

    # Selecting empty track area or a blank clip does not define an edit.
    clip_index = current_sequence().get_clip_index(pressed_track, frame)
    if clip_index == -1:
        mouse_disabled = True
        return
    if pressed_track.clips[clip_index].is_blanck_clip:
        mouse_disabled = True
        return

    # CTRL held means only the pressed track moves.
    if (event.state & gtk.gdk.CONTROL_MASK):
        move_all = False
    else:
        move_all = True

    first_moved_frame = pressed_track.clip_start(clip_index)
    multi_data = MultimoveData(pressed_track, first_moved_frame, move_all)
    edit_data = {"track_id":pressed_track.id,
                 "press_frame":frame,
                 "current_frame":frame,
                 "first_moved_frame":first_moved_frame,
                 "mouse_start_x":event.x,
                 "mouse_start_y":event.y,
                 "multi_data":multi_data}
    tlinewidgets.set_edit_mode_data(edit_data)
    updater.repaint_tline()
def mouse_move(x, y, frame, state):
    # Only the current frame is tracked during the drag; the actual edit is
    # applied on mouse release.
    if mouse_disabled:
        return
    global edit_data
    edit_data["current_frame"] = frame

    updater.repaint_tline()
def mouse_release(x, y, frame, state):
    """Complete the Multi tool move: clamp the delta to the legal backwards
    range, run the edit action if anything moved, and clear edit state."""
    if mouse_disabled:
        return
    global edit_data

    multi_data = edit_data["multi_data"]
    delta = frame - edit_data["press_frame"]
    # Clamp backwards movement to the largest legal amount.
    min_allowed_delta = - multi_data.max_backwards
    if delta < min_allowed_delta:
        delta = min_allowed_delta

    if delta != 0:
        action = edit.multi_move_action({"edit_delta":delta,
                                         "multi_data":multi_data})
        action.do_edit()

    edit_data = None
    tlinewidgets.set_edit_mode_data(edit_data)
    updater.repaint_tline()
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module builds dialog windows. User input is handled at
callsites which provide callback methods for response signals.
"""
import pygtk
pygtk.require('2.0');
import gtk
import locale
import os
import pango
import appconsts
import dialogutils
import gui
import guicomponents
import guiutils
import editorstate
import editorpersistance
import mltenv
import mltprofiles
import mltfilters
import mlttransitions
import panels
import renderconsumer
import respaths
import utils
def new_project_dialog(callback):
    """Build and show the New Project dialog.

    The dialog offers a project profile selection with a live info panel
    and a track count selection. callback(dialog, response_id,
    out_profile_combo, tracks_combo, tracks_combo_values_list) reads the
    widgets and creates the project on response.
    """
    default_profile_index = mltprofiles.get_default_profile_index()
    default_profile = mltprofiles.get_default_profile()
    dialog = gtk.Dialog(_("New Project"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    out_profile_combo = gtk.combo_box_new_text()
    profiles = mltprofiles.get_profiles()
    for profile in profiles:
        out_profile_combo.append_text(profile[0])  # profile[0] is the display name
    out_profile_combo.set_active(default_profile_index)
    profile_select = panels.get_two_column_box(gtk.Label(_("Project profile:")),
                                               out_profile_combo,
                                               250)
    # Info panel lives inside this box so it can be swapped out
    # when the profile selection changes.
    profile_info_panel = guicomponents.get_profile_info_box(default_profile, False)
    profile_info_box = gtk.VBox()
    profile_info_box.add(profile_info_panel)
    profiles_vbox = guiutils.get_vbox([profile_select,profile_info_box], False)
    profiles_frame = panels.get_named_frame(_("Profile"), profiles_vbox)
    tracks_combo, tracks_combo_values_list = guicomponents.get_track_counts_combo_and_values_list()
    tracks_select = panels.get_two_column_box(gtk.Label(_("Number of tracks:")),
                                              tracks_combo, 250)
    tracks_vbox = guiutils.get_vbox([tracks_select], False)
    tracks_frame = panels.get_named_frame(_("Tracks"), tracks_vbox)
    vbox = guiutils.get_vbox([profiles_frame, tracks_frame], False)
    alignment = dialogutils.get_default_alignment(vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, out_profile_combo, tracks_combo,
                   tracks_combo_values_list)#, project_type_combo,
                   #project_folder, compact_name_entry)
    out_profile_combo.connect('changed', lambda w: _new_project_profile_changed(w, profile_info_box))
    dialog.show_all()
def _new_project_profile_changed(combo_box, profile_info_box):
    """Swap the profile info panel to match the newly selected profile."""
    profile = mltprofiles.get_profile_for_index(combo_box.get_active())
    # Empty the container before inserting the panel for the new selection.
    for old_child in profile_info_box.get_children():
        profile_info_box.remove(old_child)
    new_panel = guicomponents.get_profile_info_box(profile, True)
    profile_info_box.add(new_panel)
    profile_info_box.show_all()
    new_panel.show()
def save_backup_snapshot(name, callback):
    """Show the dialog for saving a backup snapshot of the project.

    name is the suggested project file name. callback(dialog, response_id,
    project_folder, compact_name_entry) reads the widgets on response.
    """
    dialog = gtk.Dialog(_("Save Project Backup Snapshot"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    project_folder = gtk.FileChooserButton(_("Select Snapshot Project Folder"))
    project_folder.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
    # Default to the user's home folder.
    project_folder.set_current_folder(os.path.expanduser("~") + "/")
    project_folder_label = gtk.Label(_("Snapshot Folder:"))
    project_folder_row = guiutils.get_two_column_box(project_folder_label, project_folder, 250)
    compact_name_entry = gtk.Entry(30)
    compact_name_entry.set_width_chars(30)
    compact_name_entry.set_text(name)
    compact_name_label = gtk.Label(_("Project File Name:"))
    compact_name_entry_row = guiutils.get_two_column_box(compact_name_label, compact_name_entry, 250)
    type_vbox = gtk.VBox(False, 2)
    type_vbox.pack_start(project_folder_row, False, False, 0)
    type_vbox.pack_start(compact_name_entry_row, False, False, 0)
    vbox = gtk.VBox(False, 2)
    vbox.add(type_vbox)
    alignment = dialogutils.get_default_alignment(vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, project_folder, compact_name_entry)
    dialog.show_all()
def load_project_dialog(callback):
    """Show a file chooser for opening a Flowblade project file (*.flb)."""
    buttons = (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
               _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT)
    dialog = gtk.FileChooserDialog(_("Select Project File"), None,
                                   gtk.FILE_CHOOSER_ACTION_OPEN,
                                   buttons, None)
    dialog.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
    dialog.set_select_multiple(False)

    # Limit visible files to project files.
    project_filter = gtk.FileFilter()
    project_filter.set_name(_("Flowblade Projects"))
    project_filter.add_pattern("*" + appconsts.PROJECT_FILE_EXTENSION)
    dialog.add_filter(project_filter)

    dialog.connect('response', callback)
    dialog.show()
def save_project_as_dialog(callback, current_name, open_dir):
    """Show a Save-As file chooser for the project.

    current_name is the suggested file name, open_dir (or None) the folder
    the chooser starts in. callback(dialog, response_id) handles the response.
    """
    dialog = gtk.FileChooserDialog(_("Save Project As"), None,
                                   gtk.FILE_CHOOSER_ACTION_SAVE,
                                   (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                                   _("Save").encode('utf-8'), gtk.RESPONSE_ACCEPT), None)
    dialog.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)
    dialog.set_current_name(current_name)
    dialog.set_do_overwrite_confirmation(True)
    if open_dir != None:
        dialog.set_current_folder(open_dir)
    dialog.set_select_multiple(False)
    file_filter = gtk.FileFilter()
    # Name the filter like load_project_dialog() does so the filter
    # selector is not left blank in this dialog.
    file_filter.set_name(_("Flowblade Projects"))
    file_filter.add_pattern("*" + appconsts.PROJECT_FILE_EXTENSION)
    dialog.add_filter(file_filter)
    dialog.connect('response', callback)
    dialog.show()
def export_xml_dialog(callback, project_name):
    """Ask for a target file for exporting the current project as MLT XML."""
    dialog_title = _("Export Project as XML to")
    _export_file_name_dialog(callback, project_name, dialog_title)
def _export_file_name_dialog(callback, project_name, dialog_title):
    """Show a save file chooser with a .xml default name derived from the project name.

    callback(dialog, response_id) handles the response.
    """
    dialog = gtk.FileChooserDialog(dialog_title, None,
                                   gtk.FILE_CHOOSER_ACTION_SAVE,
                                   (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                                   _("Export").encode('utf-8'), gtk.RESPONSE_ACCEPT), None)
    dialog.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)
    # Remove the project file extension explicitly. The previous
    # project_name.strip(".flb") stripped any of the characters
    # '.', 'f', 'l', 'b' from both ends (e.g. "bill.flb" -> "i").
    if project_name.endswith(".flb"):
        project_name = project_name[:-len(".flb")]
    dialog.set_current_name(project_name + ".xml")
    dialog.set_do_overwrite_confirmation(True)
    dialog.set_select_multiple(False)
    dialog.connect('response', callback)
    dialog.show()
def save_env_data_dialog(callback):
    """Show a save file chooser for writing the runtime environment data to a file."""
    buttons = (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
               _("Save").encode('utf-8'), gtk.RESPONSE_ACCEPT)
    dialog = gtk.FileChooserDialog(_("Save Runtime Environment Data"), None,
                                   gtk.FILE_CHOOSER_ACTION_SAVE,
                                   buttons, None)
    dialog.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)
    dialog.set_select_multiple(False)
    dialog.set_do_overwrite_confirmation(True)
    # Suggested output file name.
    dialog.set_current_name("flowblade_runtime_environment_data")
    dialog.connect('response', callback)
    dialog.show()
def select_thumbnail_dir(callback, parent_window, current_dir_path, retry_open_media):
    """Show the dialog for selecting the folder used to store thumbnails.

    callback(dialog, response_id, (file_select, retry_open_media)) is called
    on response. retry_open_media is passed through unchanged; presumably a
    flag telling the callback to retry opening media — verify at callsite.
    """
    panel, file_select = panels.get_thumbnail_select_panel(current_dir_path)
    cancel_str = _("Cancel").encode('utf-8')
    ok_str = _("Ok").encode('utf-8')
    dialog = gtk.Dialog(_("Select Thumbnail Folder"),
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (cancel_str, gtk.RESPONSE_CANCEL,
                        ok_str, gtk.RESPONSE_YES))
    dialog.vbox.pack_start(panel, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, (file_select, retry_open_media))
    dialog.show_all()
def select_rendred_clips_dir(callback, parent_window, current_dir_path, context_data=None):
    """Show the dialog for selecting the folder used to store rendered clips.

    callback is called on response with the folder chooser widget, and with
    context_data appended when it is given.
    """
    panel, file_select = panels.get_render_folder_select_panel(current_dir_path)
    cancel_str = _("Cancel").encode('utf-8')
    ok_str = _("Ok").encode('utf-8')
    # Bug fix: title was copy-pasted from select_thumbnail_dir() and wrongly
    # read "Select Thumbnail Folder" for this render clips folder dialog.
    dialog = gtk.Dialog(_("Select Render Clips Folder"),
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (cancel_str, gtk.RESPONSE_CANCEL,
                        ok_str, gtk.RESPONSE_YES))
    dialog.vbox.pack_start(panel, True, True, 0)
    _default_behaviour(dialog)
    if context_data == None:
        dialog.connect('response', callback, file_select)
    else:
        dialog.connect('response', callback, file_select, context_data)
    dialog.show_all()
def rendered_clips_no_home_folder_dialog():
    """Warn that the home folder itself cannot be used as the render clips folder."""
    primary_txt = _("Can't make home folder render clips folder")
    secondary_txt = (_("Please create and select some other folder then \'") +
                     os.path.expanduser("~") +
                     _("\' as render clips folder"))
    dialogutils.warning_message(primary_txt, secondary_txt, gui.editor_window.window)
def exit_confirm_dialog(callback, msg, parent_window, project_name):
    """Ask whether to save the current project before exiting the application.

    Buttons map to responses: "Don't Save" -> RESPONSE_CLOSE,
    "Cancel" -> RESPONSE_CANCEL, "Save" -> RESPONSE_YES; callback decides
    what to do for each.
    """
    title = _("Save project '") + project_name + _("' before exiting?")
    content = dialogutils.get_warning_message_dialog_panel(title, msg, False, gtk.STOCK_QUIT)
    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(0, 12, 0, 0)
    align.add(content)
    dialog = gtk.Dialog("",
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Don't Save").encode('utf-8'), gtk.RESPONSE_CLOSE,
                        _("Cancel").encode('utf-8'), gtk.RESPONSE_CANCEL,
                        _("Save").encode('utf-8'), gtk.RESPONSE_YES))
    dialog.vbox.pack_start(align, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback)
    dialog.show_all()
def close_confirm_dialog(callback, msg, parent_window, project_name):
    """Ask whether to save the current project before closing it.

    Buttons map to responses: "Don't Save" -> RESPONSE_CLOSE,
    "Cancel" -> RESPONSE_CANCEL, "Save" -> RESPONSE_YES; callback decides
    what to do for each.
    """
    title = _("Save project '") + project_name + _("' before closing project?")
    content = dialogutils.get_warning_message_dialog_panel(title, msg, False, gtk.STOCK_QUIT)
    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(0, 12, 0, 0)
    align.add(content)
    dialog = gtk.Dialog("",
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Don't Save").encode('utf-8'), gtk.RESPONSE_CLOSE,
                        _("Cancel").encode('utf-8'), gtk.RESPONSE_CANCEL,
                        _("Save").encode('utf-8'), gtk.RESPONSE_YES))
    dialog.vbox.pack_start(align, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback)
    dialog.show_all()
def about_dialog(parent_window):
    """Show the About dialog.

    Builds a notebook with four tabs: Application (icon + version),
    Thanks (upstream projects and tools), License (GPL3 text) and
    Translations. Destroys itself on any response.
    """
    dialog = gtk.Dialog(_("About"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    # --- Application tab
    img = gtk.image_new_from_file(respaths.IMAGE_PATH + "flowbladeappicon.png")
    flow_label = gtk.Label("Flowblade Movie Editor")
    ver_label = gtk.Label("0.18.0")
    janne_label = gtk.Label("Copyright 2015 Janne Liljeblad")
    flow_label.modify_font(pango.FontDescription("sans bold 14"))
    vbox = gtk.VBox(False, 4)
    vbox.pack_start(guiutils.get_pad_label(30, 12), False, False, 0)
    vbox.pack_start(img, False, False, 0)
    vbox.pack_start(guiutils.get_pad_label(30, 4), False, False, 0)
    vbox.pack_start(flow_label, False, False, 0)
    vbox.pack_start(ver_label, False, False, 0)
    vbox.pack_start(guiutils.get_pad_label(30, 22), False, False, 0)
    vbox.pack_start(janne_label, False, False, 0)
    vbox.pack_start(gtk.Label(), True, True, 0)
    alignment = dialogutils.get_default_alignment(vbox)
    alignment.set_size_request(450, 370)
    # --- Thanks tab
    up_label = gtk.Label("Upstream:")
    up_projs = gtk.Label("MLT")
    up_projs2 = gtk.Label("FFMpeg, Frei0r, LADSPA, Cairo, Gnome, Linux")
    tools_label = gtk.Label("Tools:")
    tools_list = gtk.Label("Geany, Inkscape, Gimp, ack-grep")
    up_label.modify_font(pango.FontDescription("sans bold 12"))
    tools_label.modify_font(pango.FontDescription("sans bold 12"))
    vbox2 = gtk.VBox(False, 4)
    vbox2.pack_start(guiutils.get_pad_label(30, 12), False, False, 0)
    vbox2.pack_start(up_label, False, False, 0)
    vbox2.pack_start(up_projs, False, False, 0)
    vbox2.pack_start(up_projs2, False, False, 0)
    vbox2.pack_start(guiutils.get_pad_label(30, 22), False, False, 0)
    vbox2.pack_start(tools_label, False, False, 0)
    vbox2.pack_start(tools_list, False, False, 0)
    vbox2.pack_start(guiutils.get_pad_label(30, 22), False, False, 0)
    vbox2.pack_start(gtk.Label(), True, True, 0)
    alignment2 = dialogutils.get_default_alignment(vbox2)
    alignment2.set_size_request(450, 370)
    # --- License tab
    license_view = guicomponents.get_gpl3_scroll_widget((450, 370))
    alignment3 = dialogutils.get_default_alignment(license_view)
    alignment3.set_size_request(450, 370)
    # --- Translations tab
    translations_view = guicomponents.get_translations_scroll_widget((450, 370))
    alignment4 = dialogutils.get_default_alignment(translations_view)
    alignment4.set_size_request(450, 370)
    # Assemble notebook with all tabs.
    notebook = gtk.Notebook()
    notebook.set_size_request(450 + 10, 370 + 10)
    notebook.append_page(alignment, gtk.Label(_("Application")))
    notebook.append_page(alignment2, gtk.Label(_("Thanks")))
    notebook.append_page(alignment3, gtk.Label(_("License")))
    notebook.append_page(alignment4, gtk.Label(_("Translations")))
    dialog.vbox.pack_start(notebook, True, True, 0)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
def environment_dialog(parent_window, write_data_cb):
    """Show the Runtime Environment info dialog.

    Lists MLT/GTK versions, locale and run type, the available MLT
    filters/transitions, codecs, formats and render options, and any
    missing MLT services. write_data_cb() is called when the user clicks
    the write-to-file button.
    """
    dialog = gtk.Dialog(_("Runtime Environment"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    COLUMN_WIDTH = 450
    # --- General info rows
    r1 = guiutils.get_left_justified_box([gtk.Label(_("MLT version: ")), gtk.Label(str(editorstate.mlt_version))])
    try:
        major, minor, rev = editorstate.gtk_version
        gtk_ver = str(major) + "." + str(minor) + "." + str(rev)
    except:
        # gtk_version was not a 3-tuple, display it as-is.
        gtk_ver = str(editorstate.gtk_version)
    r2 = guiutils.get_left_justified_box([gtk.Label(_("GTK version: ")), gtk.Label(gtk_ver)])
    lc, encoding = locale.getdefaultlocale()
    r3 = guiutils.get_left_justified_box([gtk.Label(_("Locale: ")), gtk.Label(str(lc))])
    if editorstate.app_running_from == editorstate.RUNNING_FROM_INSTALLATION:
        run_type = _("INSTALLATION")
    else:
        run_type = _("DEVELOPER VERSION")
    r4 = guiutils.get_left_justified_box([gtk.Label(_("Running from: ")), gtk.Label(run_type)])
    write_button = gtk.Button(_("Write Environment Data to File"))
    write_button.connect("clicked", lambda w,e: write_data_cb(), None)
    r5 = guiutils.get_left_justified_box([write_button])
    vbox = gtk.VBox(False, 4)
    vbox.pack_start(r1, False, False, 0)
    vbox.pack_start(r2, False, False, 0)
    vbox.pack_start(r3, False, False, 0)
    vbox.pack_start(r4, False, False, 0)
    vbox.pack_start(r5, False, False, 0)
    # --- Scrollable lists of detected MLT services, codecs and formats
    filters = sorted(mltenv.services)
    filters_sw = _get_items_in_scroll_window(filters, 7, COLUMN_WIDTH, 140)
    transitions = sorted(mltenv.transitions)
    transitions_sw = _get_items_in_scroll_window(transitions, 7, COLUMN_WIDTH, 140)
    v_codecs = sorted(mltenv.vcodecs)
    v_codecs_sw = _get_items_in_scroll_window(v_codecs, 6, COLUMN_WIDTH, 125)
    a_codecs = sorted(mltenv.acodecs)
    a_codecs_sw = _get_items_in_scroll_window(a_codecs, 6, COLUMN_WIDTH, 125)
    formats = sorted(mltenv.formats)
    formats_sw = _get_items_in_scroll_window(formats, 5, COLUMN_WIDTH, 105)
    # --- Render option availability
    enc_ops = renderconsumer.encoding_options + renderconsumer.not_supported_encoding_options
    enc_msgs = []
    for e_opt in enc_ops:
        if e_opt.supported:
            msg = e_opt.name + _(" AVAILABLE")
        else:
            msg = e_opt.name + _(" NOT AVAILABLE, ") + e_opt.err_msg + _(" MISSING")
        enc_msgs.append(msg)
    enc_opt_sw = _get_items_in_scroll_window(enc_msgs, 5, COLUMN_WIDTH, 115)
    # --- Missing MLT services
    missing_mlt_services = []
    for f in mltfilters.not_found_filters:
        msg = "mlt.Filter " + f.mlt_service_id + _(" FOR FILTER ") + f.name + _(" NOT FOUND")
        missing_mlt_services.append(msg)
    for t in mlttransitions.not_found_transitions:
        msg = "mlt.Transition " + t.mlt_service_id + _(" FOR TRANSITION ") + t.name + _(" NOT FOUND")
        # Bug fix: this append was missing, so missing transitions were
        # built into msg but never displayed in the dialog.
        missing_mlt_services.append(msg)
    missing_services_sw = _get_items_in_scroll_window(missing_mlt_services, 5, COLUMN_WIDTH, 60)
    # --- Layout: two panes side by side
    l_pane = gtk.VBox(False, 4)
    l_pane.pack_start(guiutils.get_named_frame(_("General"), vbox), False, False, 0)
    l_pane.pack_start(guiutils.get_named_frame(_("MLT Filters"), filters_sw), False, False, 0)
    l_pane.pack_start(guiutils.get_named_frame(_("MLT Transitions"), transitions_sw), False, False, 0)
    l_pane.pack_start(guiutils.get_named_frame(_("Missing MLT Services"), missing_services_sw), True, True, 0)
    r_pane = gtk.VBox(False, 4)
    r_pane.pack_start(guiutils.get_named_frame(_("Video Codecs"), v_codecs_sw), False, False, 0)
    r_pane.pack_start(guiutils.get_named_frame(_("Audio Codecs"), a_codecs_sw), False, False, 0)
    r_pane.pack_start(guiutils.get_named_frame(_("Formats"), formats_sw), False, False, 0)
    r_pane.pack_start(guiutils.get_named_frame(_("Render Options"), enc_opt_sw), False, False, 0)
    pane = gtk.HBox(False, 4)
    pane.pack_start(l_pane, False, False, 0)
    pane.pack_start(guiutils.pad_label(5, 5), False, False, 0)
    pane.pack_start(r_pane, False, False, 0)
    a = dialogutils.get_default_alignment(pane)
    dialog.vbox.pack_start(a, True, True, 0)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
    dialog.set_resizable(False)
def _get_items_in_scroll_window(items, rows_count, w, h):
    """Return a w x h scroll window displaying items laid out in columns of rows_count rows."""
    item_rows = [guiutils.get_left_justified_box([gtk.Label(item)]) for item in items]
    columns_panel = _get_item_columns_panel(item_rows, rows_count)

    scroll = gtk.ScrolledWindow()
    scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll.add_with_viewport(columns_panel)
    scroll.set_size_request(w, h)
    return scroll
def _get_item_columns_panel(items, rows):
    """Pack item widgets into vertical columns inside a HBox.

    A new column is started after a column has received rows + 1 items,
    matching the original fill behavior.
    """
    hbox = gtk.HBox(False, 4)
    column = gtk.VBox()
    hbox.pack_start(column, False, False, 0)
    items_in_column = 0
    for item in items:
        column.pack_start(item, False, False, 0)
        items_in_column += 1
        if items_in_column > rows:
            # Column full, start the next one.
            column = gtk.VBox()
            hbox.pack_start(column, False, False, 0)
            items_in_column = 0
    return hbox
def file_properties_dialog(data):
    """Show a modal info dialog listing media file properties; destroys itself on response."""
    dialog = gtk.Dialog(_("File Properties"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        ( _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    properties_panel = panels.get_file_properties_panel(data)
    aligned_panel = dialogutils.get_default_alignment(properties_panel)
    dialog.vbox.pack_start(aligned_panel, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
def clip_properties_dialog(data):
    """Show a modal info dialog listing clip properties; destroys itself on response."""
    dialog = gtk.Dialog(_("Clip Properties"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        ( _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    properties_panel = panels.get_clip_properties_panel(data)
    aligned_panel = dialogutils.get_default_alignment(properties_panel)
    dialog.vbox.pack_start(aligned_panel, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
def add_compositor_dialog(current_sequence, callback, data):
    """Ask for the target track when adding a compositor.

    callback(dialog, response_id, data, track_combo) completes the add
    using the selected track.
    """
    dialog = gtk.Dialog(_("Composite Target Track"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Add Compositor").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    panel, track_combo = panels.get_add_compositor_panel(current_sequence, data)
    alignment = dialogutils.get_default_alignment(panel)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, data, track_combo)
    dialog.show_all()
def _dialog_destroy(dialog, response):
    # Generic 'response' handler for info-only dialogs: just close them.
    dialog.destroy()
def _default_behaviour(dialog):
    # Apply common dialog settings used by all dialogs in this module.
    dialog.set_default_response(gtk.RESPONSE_OK)
    dialog.set_has_separator(False)
    dialog.set_resizable(False)
def load_dialog():
    """Create and show the project loading progress window.

    Returns the window with two extra attributes attached for progress
    updates from the loading thread: dialog.progress_bar and dialog.info.
    """
    dialog = gtk.Window(gtk.WINDOW_TOPLEVEL)
    dialog.set_title(_("Loading project"))
    info_label = gtk.Label("")
    status_box = gtk.HBox(False, 2)
    status_box.pack_start(info_label, False, False, 0)
    status_box.pack_start(gtk.Label(), True, True, 0)
    progress_bar = gtk.ProgressBar()
    progress_bar.set_fraction(0.2)
    progress_bar.set_pulse_step(0.1)
    est_box = gtk.HBox(False, 2)
    est_box.pack_start(gtk.Label(""),False, False, 0)
    est_box.pack_start(gtk.Label(), True, True, 0)
    progress_vbox = gtk.VBox(False, 2)
    progress_vbox.pack_start(status_box, False, False, 0)
    progress_vbox.pack_start(progress_bar, True, True, 0)
    progress_vbox.pack_start(est_box, False, False, 0)
    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(12, 12, 12, 12)
    alignment.add(progress_vbox)
    dialog.add(alignment)
    dialog.set_default_size(400, 70)
    dialog.set_position(gtk.WIN_POS_CENTER)
    dialog.show_all()
    # Make refs available for updates
    dialog.progress_bar = progress_bar
    dialog.info = info_label
    return dialog
def recreate_icons_progress_dialog():
    """Create and show the icon-recreation progress window.

    Same structure as load_dialog(); returns the window with
    dialog.progress_bar and dialog.info attached for progress updates.
    """
    dialog = gtk.Window(gtk.WINDOW_TOPLEVEL)
    dialog.set_title(_("Recreating icons"))
    info_label = gtk.Label("")
    status_box = gtk.HBox(False, 2)
    status_box.pack_start(info_label, False, False, 0)
    status_box.pack_start(gtk.Label(), True, True, 0)
    progress_bar = gtk.ProgressBar()
    progress_bar.set_fraction(0.0)
    est_box = gtk.HBox(False, 2)
    est_box.pack_start(gtk.Label(""),False, False, 0)
    est_box.pack_start(gtk.Label(), True, True, 0)
    progress_vbox = gtk.VBox(False, 2)
    progress_vbox.pack_start(status_box, False, False, 0)
    progress_vbox.pack_start(progress_bar, True, True, 0)
    progress_vbox.pack_start(est_box, False, False, 0)
    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(12, 12, 12, 12)
    alignment.add(progress_vbox)
    dialog.add(alignment)
    dialog.set_default_size(400, 70)
    dialog.set_position(gtk.WIN_POS_CENTER)
    dialog.show_all()
    # Make refs available for updates
    dialog.progress_bar = progress_bar
    dialog.info = info_label
    return dialog
def proxy_delete_warning_dialog(parent_window, callback):
    """Warn before deleting media files that are, or have, proxy files.

    Responses: RESPONSE_CANCEL aborts (also the default, so Enter does not
    accidentally delete), RESPONSE_OK force-deletes; handled by callback.
    """
    title = _("Are you sure you want to delete these media files?")
    msg1 = _("One or more of the Media Files you are deleting from the project\neither <b>have proxy files or are proxy files.</b>\n\n")
    msg2 = _("Deleting these files could <b>prevent converting</b> between\nusing proxy files and using original media.\n\n")
    msg = msg1 + msg2
    content = dialogutils.get_warning_message_dialog_panel(title, msg)
    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(0, 12, 0, 0)
    align.add(content)
    dialog = gtk.Dialog("",
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_CANCEL,
                        _("Force Delete").encode('utf-8'), gtk.RESPONSE_OK))
    dialog.vbox.pack_start(align, True, True, 0)
    _default_behaviour(dialog)
    # Override _default_behaviour(): Cancel is the safe default here.
    dialog.set_default_response(gtk.RESPONSE_CANCEL)
    dialog.connect('response', callback)
    dialog.show_all()
def autosave_recovery_dialog(callback, parent_window):
    """Ask whether to open the single autosave file found at startup.

    Responses: RESPONSE_CANCEL continues with a default project,
    RESPONSE_OK opens the autosaved project; handled by callback.
    """
    title = _("Open last autosave?")
    msg1 = _("It seems that Flowblade exited abnormally last time.\n\n")
    msg2 = _("If there is another instance of Flowblade running,\nthis dialog has probably detected its autosave file.\n\n")
    msg3 = _("It is NOT possible to open this autosaved version later.")
    msg = msg1 + msg2 + msg3
    content = dialogutils.get_warning_message_dialog_panel(title, msg)
    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(0, 12, 0, 0)
    align.add(content)
    dialog = gtk.Dialog("",
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Continue with default 'untitled' project").encode('utf-8'), gtk.RESPONSE_CANCEL,
                        _("Open Autosaved Project").encode('utf-8'), gtk.RESPONSE_OK))
    dialog.vbox.pack_start(align, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback)
    dialog.show_all()
def autosaves_many_recovery_dialog(response_callback, autosaves, parent_window):
    """Ask which of multiple found autosave files to open at startup.

    Shows a selectable list of autosaves plus buttons to delete all of
    them or all but the selected one. response_callback(dialog,
    response_id, autosaves_view, autosaves) handles the final choice.
    """
    title = _("Open a autosave file?")
    msg1 = _("There are <b>multiple autosave files</b> from application crashes.\n\n")
    msg3 = _("If you just <b>experienced a crash, select the last created autosave</b> file\nto continue working.\n\n")
    msg4 = _("If you see this at application start without a recent crash,\nyou should probably delete all autosave files to stop seeing this dialog.")
    msg = msg1 + msg3 + msg4
    info_panel = dialogutils.get_warning_message_dialog_panel(title, msg)
    autosaves_view = guicomponents.AutoSavesListView()
    autosaves_view.set_size_request(300, 300)
    autosaves_view.fill_data_model(autosaves)
    # NOTE: the lambdas below reference 'dialog' before it is assigned;
    # this works because the closure resolves the name only when the
    # button is clicked, after the dialog exists.
    delete_all = gtk.Button("Delete all autosaves")
    delete_all.connect("clicked", lambda w : _autosaves_delete_all_clicked(autosaves, autosaves_view, dialog))
    delete_all_but_selected = gtk.Button("Delete all but selected autosave")
    delete_all_but_selected.connect("clicked", lambda w : _autosaves_delete_unselected(autosaves, autosaves_view))
    # Despite the name this is a HBox: buttons laid out horizontally, centered.
    delete_buttons_vbox = gtk.HBox()
    delete_buttons_vbox.pack_start(gtk.Label(), True, True, 0)
    delete_buttons_vbox.pack_start(delete_all, False, False, 0)
    delete_buttons_vbox.pack_start(delete_all_but_selected, False, False, 0)
    delete_buttons_vbox.pack_start(gtk.Label(), True, True, 0)
    pane = gtk.VBox()
    pane.pack_start(info_panel, False, False, 0)
    pane.pack_start(delete_buttons_vbox, False, False, 0)
    pane.pack_start(guiutils.get_pad_label(12,12), False, False, 0)
    pane.pack_start(autosaves_view, False, False, 0)
    align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    align.set_padding(0, 12, 0, 0)
    align.add(pane)
    dialog = gtk.Dialog("",
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Continue with default 'untitled' project").encode('utf-8'), gtk.RESPONSE_CANCEL,
                        _("Open Selected Autosave").encode('utf-8'), gtk.RESPONSE_OK))
    dialog.vbox.pack_start(align, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', response_callback, autosaves_view, autosaves)
    dialog.show_all()
def _autosaves_delete_all_clicked(autosaves, autosaves_view, dialog):
    """Delete every autosave file, then disable opening since nothing is left to open."""
    for autosave in autosaves:
        os.remove(autosave.path)
    del autosaves[:]
    # Nothing left to open, grey out the open button.
    dialog.set_response_sensitive(gtk.RESPONSE_OK, False)
    autosaves_view.fill_data_model(autosaves)
def _autosaves_delete_unselected(autosaves, autosaves_view):
selected_autosave = autosaves.pop(autosaves_view.get_selected_indexes_list()[0])
for autosave in autosaves:
os.remove(autosave.path)
del autosaves[:]
autosaves.append(selected_autosave)
autosaves_view.fill_data_model(autosaves)
def tracks_count_change_dialog(callback):
    """Ask for a new track count for the current sequence.

    Shows a prominent warning because the operation cannot be undone and
    destroys the undo stack. callback(dialog, response_id, tracks_combo)
    performs the change.
    """
    dialog = gtk.Dialog(_("Change Sequence Tracks Count"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Change Tracks").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    tracks_combo, tracks_combo_values_list = guicomponents.get_track_counts_combo_and_values_list()
    tracks_select = panels.get_two_column_box(gtk.Label(_("New Number of Tracks:")),
                                              tracks_combo,
                                              250)
    # u"\u2022" is a bullet character for the warning list.
    info_text = _("Please note:\n") + \
                u"\u2022" + _(" It is recommended that you save Project before completing this operation\n") + \
                u"\u2022" + _(" There is no Undo for this operation\n") + \
                u"\u2022" + _(" Current Undo Stack will be destroyed\n") + \
                u"\u2022" + _(" All Clips and Compositors on deleted Tracks will be permanently destroyed")
    info_label = gtk.Label(info_text)
    info_label.set_use_markup(True)
    info_box = guiutils.get_left_justified_box([info_label])
    pad = guiutils.get_pad_label(24, 24)
    tracks_vbox = gtk.VBox(False, 2)
    tracks_vbox.pack_start(info_box, False, False, 0)
    tracks_vbox.pack_start(pad, False, False, 0)
    tracks_vbox.pack_start(tracks_select, False, False, 0)
    alignment = dialogutils.get_alignment2(tracks_vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, tracks_combo)
    dialog.show_all()
def new_sequence_dialog(callback, default_name):
    """Ask name, track count and open-for-editing flag for a new sequence.

    default_name is the prefilled sequence name. callback(dialog,
    response_id, (name_entry, tracks_combo, open_check)) creates the
    sequence on response.
    """
    dialog = gtk.Dialog(_("Create New Sequence"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Create Sequence").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    name_entry = gtk.Entry(30)
    name_entry.set_width_chars(30)
    name_entry.set_text(default_name)
    name_entry.set_activates_default(True)
    name_select = panels.get_two_column_box(gtk.Label(_("Sequence Name:")),
                                            name_entry,
                                            250)
    tracks_combo, tracks_combo_values_list = guicomponents.get_track_counts_combo_and_values_list()
    tracks_select = panels.get_two_column_box(gtk.Label(_("Number of Tracks:")),
                                              tracks_combo,
                                              250)
    # "Open For Editing" defaults to checked.
    open_check = gtk.CheckButton()
    open_check.set_active(True)
    open_label = gtk.Label(_("Open For Editing:"))
    open_hbox = gtk.HBox(False, 2)
    open_hbox.pack_start(gtk.Label(), True, True, 0)
    open_hbox.pack_start(open_label, False, False, 0)
    open_hbox.pack_start(open_check, False, False, 0)
    tracks_vbox = gtk.VBox(False, 2)
    tracks_vbox.pack_start(name_select, False, False, 0)
    tracks_vbox.pack_start(tracks_select, False, False, 0)
    tracks_vbox.pack_start(guiutils.get_pad_label(12, 12), False, False, 0)
    tracks_vbox.pack_start(open_hbox, False, False, 0)
    alignment = dialogutils.get_alignment2(tracks_vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, (name_entry, tracks_combo, open_check))
    dialog.show_all()
def new_media_name_dialog(callback, media_file):
    """Ask a new name for a media file object.

    callback(dialog, response_id, (name_entry, media_file)) performs the
    rename; Enter in the entry activates the Rename response.
    """
    dialog = gtk.Dialog(_("Rename New Media Object"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Rename").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    name_entry = gtk.Entry(30)
    name_entry.set_width_chars(30)
    name_entry.set_text(media_file.name)
    name_entry.set_activates_default(True)
    name_select = panels.get_two_column_box(gtk.Label(_("New Name:")),
                                            name_entry,
                                            180)
    tracks_vbox = gtk.VBox(False, 2)
    tracks_vbox.pack_start(name_select, False, False, 0)
    tracks_vbox.pack_start(guiutils.get_pad_label(12, 12), False, False, 0)
    alignment = dialogutils.get_alignment2(tracks_vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.set_default_response(gtk.RESPONSE_ACCEPT)
    dialog.connect('response', callback, (name_entry, media_file))
    dialog.show_all()
def new_clip_name_dialog(callback, clip):
    """Ask a new name for a timeline clip.

    callback(dialog, response_id, (name_entry, clip)) performs the
    rename; Enter in the entry activates the Rename response.
    """
    dialog = gtk.Dialog(_("Rename Clip"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Rename").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    name_entry = gtk.Entry(30)
    name_entry.set_width_chars(30)
    name_entry.set_text(clip.name)
    name_entry.set_activates_default(True)
    name_select = panels.get_two_column_box(gtk.Label(_("New Name:")),
                                            name_entry,
                                            180)
    tracks_vbox = gtk.VBox(False, 2)
    tracks_vbox.pack_start(name_select, False, False, 0)
    tracks_vbox.pack_start(guiutils.get_pad_label(12, 12), False, False, 0)
    alignment = dialogutils.get_alignment2(tracks_vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.set_default_response(gtk.RESPONSE_ACCEPT)
    dialog.connect('response', callback, (name_entry, clip))
    dialog.show_all()
def new_media_log_group_name_dialog(callback, next_index, add_selected):
    """Ask a name for a new range log item group.

    next_index is used to build the suggested default name. callback(dialog,
    response_id, (name_entry, add_selected)) creates the group; Enter in the
    entry activates the Create response.
    """
    dialog = gtk.Dialog(_("New Range Item Group"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                        _("Create").encode('utf-8'), gtk.RESPONSE_OK))
    name_entry = gtk.Entry(30)
    name_entry.set_width_chars(30)
    name_entry.set_text(_("User Group ") + str(next_index))
    name_entry.set_activates_default(True)
    name_select = panels.get_two_column_box(gtk.Label(_("New Group Name:")),
                                            name_entry,
                                            180)
    vbox = gtk.VBox(False, 2)
    vbox.pack_start(name_select, False, False, 0)
    alignment = dialogutils.get_default_alignment(vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    # Bug fix: the accept button emits RESPONSE_OK (see buttons above), but
    # the default response was set to RESPONSE_ACCEPT, so Enter-to-accept
    # targeted a response no button provides.
    dialog.set_default_response(gtk.RESPONSE_OK)
    dialog.connect('response', callback, (name_entry, add_selected))
    dialog.show_all()
def group_rename_dialog(callback, group_name):
    """Ask a new name for a range log item group; callback receives the text entry."""
    dialog, name_entry = dialogutils.get_single_line_text_input_dialog(
                                        30, 130,
                                        _("Rename Range Log Item Group"),
                                        _("Rename").encode('utf-8'),
                                        _("New Group Name:"),
                                        group_name)
    dialog.connect('response', callback, name_entry)
    dialog.show_all()
def not_valid_producer_dialog(file_path, parent_window):
    """Inform the user that file_path could not be opened as valid media."""
    primary = _("Can't open non-valid media")
    secondary = _("File: ") + file_path + _("\nis not a valid media file.")
    dialogutils.warning_message(primary, secondary, parent_window, is_info=True)
def marker_name_dialog(frame_str, callback):
    """Ask a name for a new timeline marker.

    frame_str is the marker position as display text. callback(dialog,
    response_id, name_entry) adds the marker; Enter in the entry activates
    the Add Marker response.
    """
    dialog = gtk.Dialog(_("New Marker"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Add Marker").encode('utf-8'), gtk.RESPONSE_ACCEPT))
    name_entry = gtk.Entry(30)
    name_entry.set_width_chars(30)
    name_entry.set_text("")
    name_entry.set_activates_default(True)
    name_select = panels.get_two_column_box(gtk.Label(_("Name for marker at ") + frame_str),
                                            name_entry,
                                            250)
    alignment = dialogutils.get_default_alignment(name_select)
    dialog.vbox.pack_start(alignment, True, True, 0)
    dialog.set_default_response(gtk.RESPONSE_ACCEPT)
    _default_behaviour(dialog)
    dialog.connect('response', callback, name_entry)
    dialog.show_all()
def open_image_sequence_dialog(callback, parent_window):
    """Ask the first frame file and frames-per-image for an image sequence clip.

    callback(dialog, response_id, (file_chooser, frames_per_image)) creates
    the clip on response.
    """
    cancel_str = _("Cancel").encode('utf-8')
    ok_str = _("Ok").encode('utf-8')
    dialog = gtk.Dialog(_("Add Image Sequence Clip"),
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (cancel_str, gtk.RESPONSE_CANCEL,
                        ok_str, gtk.RESPONSE_YES))
    file_chooser = gtk.FileChooserButton(_("Select First Frame"))
    file_chooser.set_size_request(250, 25)
    # Only show file types usable as image sequence frames.
    filt = utils.get_image_sequence_file_filter()
    file_chooser.add_filter(filt)
    row1 = guiutils.get_two_column_box(gtk.Label(_("First frame:")), file_chooser, 220)
    # Each source image is held for 1..250 frames.
    adj = gtk.Adjustment(value=1, lower=1, upper=250, step_incr=1)
    frames_per_image = gtk.SpinButton(adjustment=adj, climb_rate=1.0, digits=0)
    row2 = guiutils.get_two_column_box(gtk.Label(_("Frames per Source Image:")), frames_per_image, 220)
    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    alignment = dialogutils.get_alignment2(vbox)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', callback, (file_chooser, frames_per_image))
    dialog.show_all()
def export_edl_dialog(callback, parent_window, project_name):
cancel_str = _("Cancel").encode('utf-8')
ok_str = _("Export To EDL").encode('utf-8')
dialog = gtk.Dialog(_("Export EDL"),
parent_window,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(cancel_str, gtk.RESPONSE_CANCEL,
ok_str, gtk.RESPONSE_YES))
INPUT_LABELS_WITDH = 220
project_name = project_name.strip(".flb")
file_name = gtk.Entry()
file_name.set_text(project_name)
extension_label = gtk.Label(".edl")
extension_label.set_size_request(35, 20)
name_pack = gtk.HBox(False, 4)
name_pack.pack_start(file_name, True, True, 0)
name_pack.pack_start(extension_label, False, False, 0)
name_row = guiutils.get_two_column_box(gtk.Label(_("Export file name:")), name_pack, INPUT_LABELS_WITDH)
out_folder = gtk.FileChooserButton(_("Select target folder"))
out_folder.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
out_folder.set_current_folder(os.path.expanduser("~") + "/")
folder_row = guiutils.get_two_column_box(gtk.Label(_("Export folder:")), out_folder, INPUT_LABELS_WITDH)
file_frame = guiutils.get_named_frame_with_vbox(_("File"), [name_row, folder_row])
seq = editorstate.current_sequence()
track_select_combo = gtk.combo_box_new_text()
for i in range(seq.first_video_index, len(seq.tracks) - 1):
track_select_combo.append_text(utils.get_track_name(seq.tracks[i], seq))
track_select_combo.set_active(0)
track_row = guiutils.get_two_column_box(gtk.Label(_("Exported video track:")), track_select_combo, INPUT_LABELS_WITDH)
cascade_check = gtk.CheckButton()
cascade_check.connect("toggled", _cascade_toggled, track_select_combo)
cascade_row = guiutils.get_left_justified_box( [cascade_check, gtk.Label(_("Cascade video tracks"))])
audio_track_select_combo = gtk.combo_box_new_text()
for i in range(1, seq.first_video_index):
audio_track_select_combo.append_text(utils.get_track_name(seq.tracks[i], seq))
audio_track_select_combo.set_active(seq.first_video_index - 2)
audio_track_select_combo.set_sensitive(False)
audio_track_row = guiutils.get_two_column_box(gtk.Label(_("Exported audio track:")), audio_track_select_combo, INPUT_LABELS_WITDH)
op_combo = gtk.combo_box_new_text()
op_combo.append_text(_("Audio From Video"))
op_combo.append_text(_("Separate Audio Track"))
op_combo.append_text(_("No Audio"))
op_combo.set_active(0)
op_combo.connect("changed", _audio_op_changed, audio_track_select_combo)
op_row = guiutils.get_two_column_box(gtk.Label(_("Audio export:")), op_combo, INPUT_LABELS_WITDH)
tracks_frame = guiutils.get_named_frame_with_vbox(_("Tracks"), [cascade_row, track_row, op_row, audio_track_row])
vbox = gtk.VBox(False, 2)
vbox.pack_start(file_frame, False, False, 0)
vbox.pack_start(tracks_frame, False, False, 0)
alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
alignment.set_padding(12, 12, 12, 12)
alignment.add(vbox)
dialog.vbox.pack_start(alignment, True, True, 0)
_default_behaviour(dialog)
dialog.connect('response', callback, (file_name, out_folder, track_select_combo, cascade_check, op_combo, audio_track_select_combo))
dialog.show_all()
def _cascade_toggled(check, track_select_combo):
if check.get_active() == True:
track_select_combo.set_sensitive(False)
else:
track_select_combo.set_sensitive(True)
def _audio_op_changed(combo, audio_track_select_combo):
if combo.get_active() == 1:
audio_track_select_combo.set_sensitive(True)
else:
audio_track_select_combo.set_sensitive(False)
def transition_edit_dialog(callback, transition_data):
    """Dialog for configuring and applying a rendered transition.

    The editor panel comes from panels.get_transition_panel(); its widgets
    are forwarded to callback together with transition_data on response.
    """
    dialog = gtk.Dialog(_("Add Transition").encode('utf-8'), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("Apply").encode('utf-8'), gtk.RESPONSE_ACCEPT))

    (alignment, type_combo, length_entry, encodings_cb,
     quality_cb, wipe_luma_combo_box, color_button) = panels.get_transition_panel(transition_data)
    editor_widgets = (type_combo, length_entry, encodings_cb,
                      quality_cb, wipe_luma_combo_box, color_button)

    dialog.connect('response', callback, editor_widgets, transition_data)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.show_all()
def fade_edit_dialog(callback, transition_data):
    """Dialog for configuring and applying a rendered fade.

    The editor panel comes from panels.get_fade_panel(); its widgets are
    forwarded to callback together with transition_data on response.
    """
    dialog = gtk.Dialog(_("Add Fade"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                         _("Apply").encode('utf-8'), gtk.RESPONSE_ACCEPT))

    (alignment, type_combo, length_entry,
     encodings_cb, quality_cb, color_button) = panels.get_fade_panel(transition_data)
    editor_widgets = (type_combo, length_entry, encodings_cb, quality_cb, color_button)

    dialog.connect('response', callback, editor_widgets, transition_data)
    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.show_all()
def keyboard_shortcuts_dialog(parent_window):
    """Show a read-only, scrollable listing of the application's keyboard
    shortcuts, grouped into Tools / Timeline / Playback / General / Geometry
    Editor frames. The dialog destroys itself on any response.
    """
    dialog = gtk.Dialog(_("Keyboard Shortcuts"),
                        parent_window,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Close").encode('utf-8'), gtk.RESPONSE_CLOSE))

    # --- General application shortcuts
    general_vbox = gtk.VBox()
    general_vbox.pack_start(_get_kb_row(_("Control + N"), _("Create New Project")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + S"), _("Save Project")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Delete"), _("Delete Selected Item")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Escape"), _("Stop Rendering Audio Levels")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + Q"), _("Quit")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + Z"), _("Undo")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + Y"), _("Redo")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + O"), _("Open Project")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("TAB"), _("Switch Monitor Source")), False, False, 0)
    general_vbox.pack_start(_get_kb_row(_("Control + L"), _("Log Marked Clip Range")), False, False, 0)
    general = guiutils.get_named_frame(_("General"), general_vbox)

    # --- Timeline editing shortcuts
    tline_vbox = gtk.VBox()
    tline_vbox.pack_start(_get_kb_row("I", _("Set Mark In")))
    tline_vbox.pack_start(_get_kb_row("O", _("Set Mark Out")))
    tline_vbox.pack_start(_get_kb_row("Alt + I", _("Go To Mark In")))
    tline_vbox.pack_start(_get_kb_row("Alt + O", _("Go To Mark Out")))
    tline_vbox.pack_start(_get_kb_row("X", _("Cut Clip")))
    tline_vbox.pack_start(_get_kb_row(_("Delete"), _("Splice Out")))
    tline_vbox.pack_start(_get_kb_row("Y", _("Insert")))
    tline_vbox.pack_start(_get_kb_row("U", _("Append")))
    tline_vbox.pack_start(_get_kb_row("T", _("3 Point Overwrite Insert")))
    tline_vbox.pack_start(_get_kb_row("M", _("Add Mark")))
    tline_vbox.pack_start(_get_kb_row("Control + C", _("Copy Clips")))
    tline_vbox.pack_start(_get_kb_row("Control + V", _("Paste Clips")))
    tline_vbox.pack_start(_get_kb_row(_("G"), _("Log Marked Clip Range")), False, False, 0)
    tline = guiutils.get_named_frame(_("Timeline"), tline_vbox)

    # --- Playback / navigation shortcuts (JKL-style transport)
    play_vbox = gtk.VBox()
    play_vbox.pack_start(_get_kb_row(_("Space"), _("Start / Stop Playback")))
    play_vbox.pack_start(_get_kb_row("J", _("Backwards Faster")))
    play_vbox.pack_start(_get_kb_row("K", _("Stop")))
    play_vbox.pack_start(_get_kb_row("L", _("Forward Faster")))
    play_vbox.pack_start(_get_kb_row(_("Left Arrow "), _("Prev Frame")))
    play_vbox.pack_start(_get_kb_row(_("Right Arrow"), _("Next Frame")))
    play_vbox.pack_start(_get_kb_row(_("Up Arrow"), _("Next Edit/Mark")))
    play_vbox.pack_start(_get_kb_row(_("Down Arrow"), _("Prev Edit/Mark")))
    play_vbox.pack_start(_get_kb_row(_("Home"), _("Go To Start")))
    play_vbox.pack_start(_get_kb_row(_("Shift + I"), _("To Mark In")))
    play_vbox.pack_start(_get_kb_row(_("Shift + O"), _("To Mark Out")))
    play = guiutils.get_named_frame(_("Playback"), play_vbox)

    # --- Edit tool selection shortcuts (number keys)
    tools_vbox = gtk.VBox()
    tools_vbox.pack_start(_get_kb_row("1", _("Insert")))
    tools_vbox.pack_start(_get_kb_row("2", _("Overwrite")))
    tools_vbox.pack_start(_get_kb_row("3", _("Trim")))
    tools_vbox.pack_start(_get_kb_row("4", _("Roll")))
    tools_vbox.pack_start(_get_kb_row("5", _("Slip")))
    tools_vbox.pack_start(_get_kb_row("6", _("Spacer")))
    tools = guiutils.get_named_frame(_("Tools"), tools_vbox)

    # --- Geometry editor shortcuts
    geom_vbox = gtk.VBox()
    geom_vbox.pack_start(_get_kb_row(_("Left Arrow "), _("Move Source Video Left")))
    geom_vbox.pack_start(_get_kb_row(_("Right Arrow"), _("Move Source Video Right")))
    geom_vbox.pack_start(_get_kb_row(_("Up Arrow"), _("Move Source Video Up")))
    geom_vbox.pack_start(_get_kb_row(_("Down Arrow"), _("Move Source Video Down")))
    geom = guiutils.get_named_frame(_("Geometry Editor"), geom_vbox)

    # --- assemble frames into one padded, scrollable column
    panel = gtk.VBox()
    panel.pack_start(tools, False, False, 0)
    panel.pack_start(guiutils.pad_label(12,12), False, False, 0)
    panel.pack_start(tline, False, False, 0)
    panel.pack_start(guiutils.pad_label(12,12), False, False, 0)
    panel.pack_start(play, False, False, 0)
    panel.pack_start(guiutils.pad_label(12,12), False, False, 0)
    panel.pack_start(general, False, False, 0)
    panel.pack_start(guiutils.pad_label(12,12), False, False, 0)
    panel.pack_start(geom, False, False, 0)

    pad_panel = gtk.HBox()
    pad_panel.pack_start(guiutils.pad_label(12,12), False, False, 0)
    pad_panel.pack_start(panel, True, False, 0)
    pad_panel.pack_start(guiutils.pad_label(12,12), False, False, 0)

    sw = gtk.ScrolledWindow()
    sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
    sw.add_with_viewport(pad_panel)
    sw.set_size_request(420, 400)

    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(24, 24, 24, 24)
    alignment.add(sw)

    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
def _get_kb_row(msg1, msg2):
    """Return one fixed-size shortcut row: key text (msg1) on the left,
    action description (msg2) on the right."""
    ROW_WIDTH = 400
    ROW_HEIGHT = 22
    row = guiutils.get_two_column_box(gtk.Label(msg1), gtk.Label(msg2), 170)
    row.set_size_request(ROW_WIDTH, ROW_HEIGHT)
    return row
def watermark_dialog(add_callback, remove_callback):
    """Show the per-sequence watermark dialog.

    Displays the current sequence's watermark file path (if any) and offers
    Set/Remove buttons; exactly one of them is sensitive at a time,
    depending on whether a watermark is currently set. add_callback and
    remove_callback receive the (add_button, remove_button, path_label)
    widget tuple so they can flip sensitivity and update the path label.
    """
    dialog = gtk.Dialog(_("Sequence Watermark"), None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (_("Close").encode('utf-8'), gtk.RESPONSE_CLOSE))

    seq_label = guiutils.bold_label(_("Sequence:") + " ")
    seq_name = gtk.Label(editorstate.current_sequence().name)
    file_path_label = guiutils.bold_label(_("Watermark:") + " ")
    add_button = gtk.Button(_("Set Watermark File"))
    remove_button = gtk.Button(_("Remove Watermark"))

    # Only one of add/remove makes sense at a time.
    if editorstate.current_sequence().watermark_file_path == None:
        # NOTE(review): "Not Set" is not wrapped in _() unlike other
        # user-visible strings in this file — confirm if intentional.
        file_path_value_label = gtk.Label("Not Set")
        add_button.set_sensitive(True)
        remove_button.set_sensitive(False)
    else:
        file_path_value_label = gtk.Label(editorstate.current_sequence().watermark_file_path)
        add_button.set_sensitive(False)
        remove_button.set_sensitive(True)

    row1 = guiutils.get_left_justified_box([seq_label, seq_name])
    row2 = guiutils.get_left_justified_box([file_path_label, file_path_value_label])
    row3 = guiutils.get_left_justified_box([gtk.Label(), remove_button, guiutils.pad_label(8, 8), add_button])
    row3.set_size_request(470, 30)

    widgets = (add_button, remove_button, file_path_value_label)
    add_button.connect("clicked", add_callback, widgets)
    remove_button.connect("clicked", remove_callback, widgets)

    vbox = gtk.VBox(False, 2)
    vbox.pack_start(row1, False, False, 0)
    vbox.pack_start(row2, False, False, 0)
    vbox.pack_start(guiutils.pad_label(12, 8), False, False, 0)
    vbox.pack_start(row3, False, False, 0)

    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(12, 12, 12, 12)
    alignment.add(vbox)

    dialog.vbox.pack_start(alignment, True, True, 0)
    _default_behaviour(dialog)
    dialog.connect('response', _dialog_destroy)
    dialog.show_all()
def watermark_file_dialog(callback, widgets):
    """File chooser for picking a watermark graphic (png/jpeg/jpg/tga).

    callback(dialog, response, widgets) is invoked on response; widgets is
    the (add_button, remove_button, path_label) tuple created by
    watermark_dialog().
    """
    dialog = gtk.FileChooserDialog(_("Select Watermark File"), None,
                                   gtk.FILE_CHOOSER_ACTION_OPEN,
                                   (_("Cancel").encode('utf-8'), gtk.RESPONSE_REJECT,
                                    _("OK").encode('utf-8'), gtk.RESPONSE_ACCEPT), None)
    # The redundant dialog.set_action(gtk.FILE_CHOOSER_ACTION_OPEN) call was
    # removed: the action is already set by the constructor argument above.
    dialog.set_select_multiple(False)

    file_filter = gtk.FileFilter()
    # NOTE(review): filter name is not wrapped in _() unlike other
    # user-visible strings in this file — confirm if intentional.
    file_filter.set_name("Accepted Watermark Files")
    for pattern in ("*.png", "*.jpeg", "*.jpg", "*.tga"):
        file_filter.add_pattern(pattern)
    dialog.add_filter(file_filter)

    dialog.connect('response', callback, widgets)
    dialog.show()
def media_file_dialog(text, callback, multiple_select, data=None):
    """Show a non-blocking open-file dialog for media files.

    text is the dialog title; callback is connected to the response signal,
    with data as an extra argument when given. multiple_select allows
    picking several files at once. Starts in the last used media directory
    when that preference is enabled.
    """
    file_select = gtk.FileChooserDialog(text, None, gtk.FILE_CHOOSER_ACTION_OPEN,
                                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                         gtk.STOCK_OPEN, gtk.RESPONSE_OK))
    file_select.set_default_response(gtk.RESPONSE_CANCEL)
    file_select.set_select_multiple(multiple_select)

    all_filter = gtk.FileFilter()
    all_filter.set_name(_("All files"))
    all_filter.add_pattern("*.*")
    file_select.add_filter(utils.get_media_source_file_filter())  # media filter listed first
    file_select.add_filter(all_filter)

    prefs = editorpersistance.prefs
    if prefs.open_in_last_opended_media_dir == True and prefs.last_opened_media_dir != None:
        file_select.set_current_folder(prefs.last_opened_media_dir)

    if data == None:
        file_select.connect('response', callback)
    else:
        file_select.connect('response', callback, data)

    file_select.set_modal(True)
    file_select.show()
def save_snaphot_progess(media_copy_txt, project_txt):
    """Build and show the 'saving project snapshot' progress window.

    (The typos in the function name — 'snaphot', 'progess' — are kept
    because callers use this exact name.)

    Returns the window. The two info labels are attached to it as
    attributes (media_copy_info, saving_project_info) so the caller can
    update their text while the snapshot save progresses.
    """
    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    window.set_title(_("Saving project snapshot"))

    window.media_copy_info = gtk.Label(media_copy_txt)
    window.saving_project_info = gtk.Label(project_txt)

    rows = gtk.VBox(False, 2)
    rows.pack_start(guiutils.get_left_justified_box([window.media_copy_info]), False, False, 0)
    rows.pack_start(guiutils.get_left_justified_box([window.saving_project_info]), True, True, 0)

    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(12, 12, 12, 12)
    alignment.add(rows)

    window.add(alignment)
    window.set_default_size(400, 70)
    window.set_position(gtk.WIN_POS_CENTER)
    window.show_all()
    return window
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
import gettext
import locale
import os
import respaths
# gettext domain name.
APP_NAME = "Flowblade"

# Installed gettext translation object; set by init_languages().
lang = None

# Translation lookup tables filled by load_filters_translations():
# original (English) string -> translated string.
filter_groups = {}
filter_names = {}
param_names = {}
combo_options = {}
def init_languages():
    """Initialize gettext for the application.

    Builds the candidate language list from the default locale plus the
    LANGUAGE environment variable, binds the Flowblade text domain, and
    installs the translation so _() becomes a builtin in all modules.
    """
    langs = []
    lc, encoding = locale.getdefaultlocale()
    if (lc):
        langs = [lc]
    print "Locale:", lc

    # LANGUAGE may list several languages separated by ':'.
    language = os.environ.get('LANGUAGE', None)
    if (language):
        langs += language.split(":")

    gettext.bindtextdomain(APP_NAME, respaths.LOCALE_PATH)
    gettext.textdomain(APP_NAME)

    # Get the language to use
    global lang
    #lang = gettext.translation(APP_NAME, respaths.LOCALE_PATH, languages=["fi"], fallback=True) # Testing, comment out for production
    lang = gettext.translation(APP_NAME, respaths.LOCALE_PATH, languages=langs, fallback=True)
    lang.install(APP_NAME) # makes _() a built-in available in all modules without imports
def get_filter_name(f_name):
    """Return the translated display name for filter f_name, or f_name
    itself when no translation entry exists."""
    return filter_names.get(f_name, f_name)
def get_filter_group_name(group_name):
    """Return the translated filter group name, or group_name itself when
    no translation entry exists.

    The previous bare 'except:' swallowed every exception (including
    NameError and KeyboardInterrupt); only a missing key is expected here,
    matching the sibling lookup functions.
    """
    try:
        return filter_groups[group_name]
    except KeyError:
        return group_name
def get_param_name(name):
    """Return the translated filter/compositor parameter name, or name
    itself when no translation entry exists."""
    return param_names.get(name, name)
def get_combo_option(c_opt):
    """Return the translated combo box option text, or c_opt itself when
    no translation entry exists."""
    return combo_options.get(c_opt, c_opt)
def load_filters_translations():
    """Populate the translation lookup dicts (filter_groups, filter_names,
    param_names, combo_options) mapping original English strings to their
    translations.

    Every entry is written out as a literal _("...") call on purpose: the
    gettext extraction tools scan the source for literal strings, so these
    assignments must not be collapsed into loops. Repeated assignments of
    the same key (e.g. "Dry/Wet") occur because names are listed
    per-filter; the re-assignments are harmless.
    """
    # filter group names
    global filter_groups
    filter_groups["Color"] = _("Color")
    filter_groups["Color Effect"] = _("Color Effect")
    filter_groups["Audio"] = _("Audio")
    filter_groups["Audio Filter"] = _("Audio Filter")
    filter_groups["Blur"] = _("Blur")
    filter_groups["Distort"] = _("Distort")
    filter_groups["Alpha"] = _("Alpha")
    filter_groups["Movement"] = _("Movement")
    filter_groups["Transform"] = _("Transform")
    filter_groups["Edge"] = _("Edge")
    filter_groups["Fix"] = _("Fix")
    filter_groups["Artistic"] = _("Artistic")

    # filter names
    global filter_names
    filter_names["Alpha Gradient"] = _("Alpha Gradient")
    filter_names["Crop"] = _("Crop")
    filter_names["Alpha Shape"]= _("Alpha Shape")
    filter_names["Volume"]= _("Volume")
    filter_names["Pan"]= _("Pan")
    filter_names["Pan Keyframed"]= _("Pan Keyframed")
    filter_names["Mono to Stereo"]= _("Mono to Stereo")
    filter_names["Swap Channels"]= _("Swap Channels")
    filter_names["Pitchshifter"]= _("Pitchshifter")
    filter_names["Distort - Barry's Satan"]= _("Distort - Barry's Satan")
    filter_names["Frequency Shift - Bode/Moog"]= _("Frequency Shift - Bode/Moog")
    filter_names["Equalize - DJ 3-band"]= _("Equalize - DJ 3-band")
    filter_names["Flanger - DJ"]= _("Flanger - DJ")
    filter_names["Declipper"]= _("Declipper")
    filter_names["Delayorama"]= _("Delayorama")
    filter_names["Distort - Diode Processor"]= _("Distort - Diode Processor")
    filter_names["Distort - Foldover"]= _("Distort - Foldover")
    filter_names["Highpass - Butterworth"]= _("Highpass - Butterworth")
    filter_names["Lowpass - Butterworth"]= _("Lowpass - Butterworth")
    filter_names["GSM Simulator"]= _("GSM Simulator")
    filter_names["Reverb - GVerb"]= _("Reverb - GVerb")
    filter_names["Noise Gate"]= _("Noise Gate")
    filter_names["Bandpass"]= _("Bandpass")
    filter_names["Pitchscaler - High Quality"]= _("Pitchscaler - High Quality")
    filter_names["Equalize - Multiband"]= _("Equalize - Multiband")
    filter_names["Reverb - Plate"]= _("Reverb - Plate")
    filter_names["Distort - Pointer cast"]= _("Distort - Pointer cast")
    filter_names["Rate Shifter"]= _("Rate Shifter")
    filter_names["Signal Shifter"]= _("Signal Shifter")
    filter_names["Distort - Sinus Wavewrap"]= _("Distort - Sinus Wavewrap")
    filter_names["Vinyl Effect"]= _("Vinyl Effect")
    filter_names["Chorus - Multivoice"]= _("Chorus - Multivoice")
    filter_names["Charcoal"]= _("Charcoal")
    filter_names["Glow"]= _("Glow")
    filter_names["Old Film"]= _("Old Film")
    filter_names["Scanlines"]= _("Scanlines")
    filter_names["Cartoon"]= _("Cartoon")
    filter_names["Pixelize"]= _("Pixelize")
    filter_names["Blur"]= _("Blur")
    filter_names["Grain"]= _("Grain")
    filter_names["Grayscale"]= _("Grayscale")
    filter_names["Contrast"]= _("Contrast")
    filter_names["Saturation"]= _("Saturation")
    filter_names["Invert"]= _("Invert")
    filter_names["Hue"]= _("Hue")
    filter_names["Brightness"]= _("Brightness")
    filter_names["Sepia"]= _("Sepia")
    filter_names["Tint"]= _("Tint")
    filter_names["White Balance"]= _("White Balance")
    filter_names["Levels"]= _("Levels")
    filter_names["Color Clustering"]= _("Color Clustering")
    filter_names["Chroma Hold"]= _("Chroma Hold")
    filter_names["Three Layer"]= _("Three Layer")
    filter_names["Threshold0r"]= _("Threshold0r")
    filter_names["Technicolor"]= _("Technicolor")
    filter_names["Primaries"]= _("Primaries")
    filter_names["Color Distance"]= _("Color Distance")
    filter_names["Threshold"]= _("Threshold")
    filter_names["Waves"]= _("Waves")
    filter_names["Lens Correction"]= _("Lens Correction")
    filter_names["Flip"]= _("Flip")
    filter_names["Mirror"]= _("Mirror")
    filter_names["V Sync"]= _("V Sync")
    filter_names["Edge Glow"]= _("Edge Glow")
    filter_names["Sobel"]= _("Sobel")
    filter_names["Denoise"]= _("Denoise")
    filter_names["Sharpness"]= _("Sharpness")
    filter_names["Letterbox"]= _("Letterbox")
    filter_names["Baltan"]= _("Baltan")
    filter_names["Vertigo"]= _("Vertigo")
    filter_names["Nervous"]= _("Nervous")
    filter_names["Freeze"]= _("Freeze")
    filter_names["Rotate"]= _("Rotate")
    filter_names["Shear"]= _("Shear")
    filter_names["Translate"]= _("Translate")
    # 0.8 added
    filter_names["Color Select"]= _("Color Select")
    filter_names["Alpha Modify"]= _("Alpha Modify")
    filter_names["Spill Supress"]= _("Spill Supress")
    filter_names["RGB Noise"]= _("RGB Noise")
    filter_names["Box Blur"]= _("Box Blur")
    filter_names["IRR Blur"]= _("IRR Blur")
    filter_names["Color Halftone"]= _("Color Halftone")
    filter_names["Dither"]= _("Dither")
    filter_names["Vignette"]= _("Vignette")
    filter_names["Emboss"]= _("Emboss")
    filter_names["3 Point Balance"]= _("3 Point Balance")
    filter_names["Colorize"]= _("Colorize")
    filter_names["Brightness Keyframed"]= _("Brightness Keyframed")
    filter_names["RGB Adjustment"]= _("RGB Adjustment")
    filter_names["Color Tap"]= _("Color Tap")
    filter_names["Posterize"]= _("Posterize")
    filter_names["Soft Glow"]= _("Soft Glow")
    filter_names["Newspaper"]= _("Newspaper")
    # 0.16 added
    filter_names["Luma Key"] = _("Luma Key")
    filter_names["Chroma Key"] = _("Chroma Key")
    filter_names["Affine"] = _("Affine")
    filter_names["Color Adjustment"] = _("Color Adjustment")
    filter_names["Color Grading"] = _("Color Grading")
    filter_names["Curves"] = _("Curves")
    filter_names["Lift Gain Gamma"] = _("Lift Gain Gamma")
    filter_names["Image Grid"] = _("Image Grid")
    # 0.18
    filter_names["Color Lift Gain Gamma"] = _("Color Lift Gain Gamma")

    # param names
    global param_names
    # param names for filters
    param_names["Position"] = _("Position")
    param_names["Grad width"] = _("Grad width")
    param_names["Tilt"] = _("Tilt")
    param_names["Min"] = _("Min")
    param_names["Max"] = _("Max")
    param_names["Left"] = _("Left")
    param_names["Right"] = _("Right")
    param_names["Top"] = _("Top")
    param_names["Bottom"] = _("Bottom")
    param_names["Shape"] = _("Shape")
    param_names["Pos X"] = _("Pos X")
    param_names["Pos Y"] = _("Pos Y")
    param_names["Size X"] = _("Size X")
    param_names["Size Y"] = _("Size Y")
    param_names["Tilt"] = _("Tilt")
    param_names["Trans. Width"] = _("Trans. Width")
    param_names["Volume"] = _("Volume")
    param_names["Left/Right"] = _("Left/Right")
    param_names["Left/Right"] = _("Left/Right")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Pitch Shift"] = _("Pitch Shift")
    param_names["Buffer Size"] = _("Buffer Size")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Decay Time(samples)"] = _("Decay Time(samples)")
    param_names["Knee Point(dB)"] = _("Knee Point(dB)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Frequency shift"] = _("Frequency shift")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Low Gain(dB)"] = _("Low Gain(dB)")
    param_names["Mid Gain(dB)"] = _("Mid Gain(dB)")
    param_names["High Gain(dB)"] = _("High Gain(dB)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Oscillation period(s)"] = _("Oscillation period(s)")
    param_names["Oscillation depth(ms)"] = _("Oscillation depth(ms)")
    param_names["Feedback%"] = _("Feedback%")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Random seed"] = _("Random seed")
    param_names["Input Gain(dB)"] = _("Input Gain(dB)")
    param_names["Feedback(%)"] = _("Feedback(%)")
    param_names["Number of taps"] = _("Number of taps")
    param_names["First Delay(s)"] = _("First Delay(s)")
    param_names["Delay Range(s)"] = _("Delay Range(s)")
    param_names["Delay Change"] = _("Delay Change")
    param_names["Delay Random(%)"] = _("Delay Random(%)")
    param_names["Amplitude Change"] = _("Amplitude Change")
    param_names["Amplitude Random(%)"] = _("Amplitude Random(%)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Amount"] = _("Amount")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Drive"] = _("Drive")
    param_names["Skew"] = _("Skew")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Cutoff Frequency(Hz)"] = _("Cutoff Frequency(Hz)")
    param_names["Resonance"] = _("Resonance")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Cutoff Frequency(Hz)"] = _("Cutoff Frequency(Hz)")
    param_names["Resonance"] = _("Resonance")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Passes"] = _("Passes")
    param_names["Error Rate"] = _("Error Rate")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Roomsize"] = _("Roomsize")
    param_names["Reverb time(s)"] = _("Reverb time(s)")
    param_names["Damping"] = _("Damping")
    param_names["Input bandwith"] = _("Input bandwith")
    param_names["Dry signal level(dB)"] = _("Dry signal level(dB)")
    param_names["Early reflection level(dB)"] = _("Early reflection level(dB)")
    param_names["Tail level(dB)"] = _("Tail level(dB)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["LF keyfilter(Hz)"] = _("LF keyfilter(Hz)")
    param_names["HF keyfilter(Hz)"] = _("HF keyfilter(Hz)")
    param_names["Threshold(dB)"] = _("Threshold(dB)")
    param_names["Attack(ms)"] = _("Attack(ms)")
    param_names["Hold(ms)"] = _("Hold(ms)")
    param_names["Decay(ms)"] = _("Decay(ms)")
    param_names["Range(dB)"] = _("Range(dB)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Center Frequency(Hz)"] = _("Center Frequency(Hz)")
    param_names["Bandwidth(Hz)"] = _("Bandwidth(Hz)")
    param_names["Stages"] = _("Stages")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Pitch-coefficient"] = _("Pitch-coefficient")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["50Hz gain"] = _("50Hz gain")
    param_names["100Hz gain"] = _("100Hz gain")
    param_names["156Hz gain"] = _("156Hz gain")
    param_names["220Hz gain"] = _("220Hz gain")
    param_names["311Hz gain"] = _("311Hz gain")
    param_names["440Hz gain"] = _("440Hz gain")
    param_names["622Hz gain"] = _("622Hz gain")
    param_names["880Hz gain"] = _("880Hz gain")
    param_names["1250Hz gain"] = _("1250Hz gain")
    param_names["1750Hz gain"] = _("1750Hz gain")
    param_names["2500Hz gain"] = _("2500Hz gain")
    param_names["3500Hz gain"] = _("3500Hz gain")
    param_names["5000Hz gain"] = _("5000Hz gain")
    param_names["100000Hz gain"] = _("100000Hz gain")
    param_names["200000Hz gain"] = _("200000Hz gain")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Reverb time"] = _("Reverb time")
    param_names["Damping"] = _("Damping")
    param_names["Dry/Wet mix"] = _("Dry/Wet mix")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Effect cutoff(Hz)"] = _("Effect cutoff(Hz)")
    param_names["Dry/Wet mix"] = _("Dry/Wet mix")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Rate"] = _("Rate")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Sift"] = _("Sift")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Amount"] = _("Amount")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Year"] = _("Year")
    param_names["RPM"] = _("RPM")
    param_names["Surface warping"] = _("Surface warping")
    param_names["Cracle"] = _("Cracle")
    param_names["Wear"] = _("Wear")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["Number of voices"] = _("Number of voices")
    param_names["Delay base(ms)"] = _("Delay base(ms)")
    param_names["Voice separation(ms)"] = _("Voice separation(ms)")
    param_names["Detune(%)"] = _("Detune(%)")
    param_names["Oscillation frequency(Hz)"] = _("Oscillation frequency(Hz)")
    param_names["Output attenuation(dB)"] = _("Output attenuation(dB)")
    param_names["Dry/Wet"] = _("Dry/Wet")
    param_names["X Scatter"] = _("X Scatter")
    param_names["Y Scatter"] = _("Y Scatter")
    param_names["Scale"] = _("Scale")
    param_names["Mix"] = _("Mix")
    param_names["Invert"] = _("Invert")
    param_names["Blur"] = _("Blur")
    param_names["Delta"] = _("Delta")
    param_names["Duration"] = _("Duration")
    param_names["Bright. up"] = _("Bright. up")
    param_names["Bright. down"] = _("Bright. down")
    param_names["Bright. dur."] = _("Bright. dur.")
    param_names["Develop up"] = _("Develop up")
    param_names["Develop down"] = _("Develop down")
    param_names["Develop dur."] = _("Develop dur.")
    param_names["Triplevel"] = _("Triplevel")
    param_names["Difference Space"] = _("Difference Space")
    param_names["Block width"] = _("Block width")
    param_names["Block height"] = _("Block height")
    param_names["Size"] = _("Size")
    param_names["Noise"] = _("Noise")
    param_names["Contrast"] = _("Contrast")
    param_names["Brightness"] = _("Brightness")
    param_names["Contrast"] = _("Contrast")
    param_names["Saturation"] = _("Saturation")
    param_names["Hue"] = _("Hue")
    param_names["Brightness"] = _("Brightness")
    param_names["Brightness"] = _("Brightness")
    param_names["U"] = _("U")
    param_names["V"] = _("V")
    param_names["Black"] = _("Black")
    param_names["White"] = _("White")
    param_names["Amount"] = _("Amount")
    param_names["Neutral Color"] = _("Neutral Color")
    param_names["Input"] = _("Input")
    param_names["Input"] = _("Input")
    param_names["Gamma"] = _("Gamma")
    param_names["Black"] = _("Black")
    param_names["White"] = _("White")
    param_names["Num"] = _("Num")
    param_names["Dist. weight"] = _("Dist. weight")
    param_names["Color"] = _("Color")
    param_names["Variance"] = _("Variance")
    param_names["Threshold"] = _("Threshold")
    param_names["Red Saturation"] = _("Red Saturation")
    param_names["Yellow Saturation"] = _("Yellow Saturation")
    param_names["Factor"] = _("Factor")
    param_names["Source color"] = _("Source color")
    param_names["Threshold"] = _("Threshold")
    param_names["Amplitude"] = _("Amplitude")
    param_names["Frequency"] = _("Frequency")
    param_names["Rotate"] = _("Rotate")
    param_names["Tilt"] = _("Tilt")
    param_names["Center Correct"] = _("Center Correct")
    param_names["Edges Correct"] = _("Edges Correct")
    param_names["Flip"] = _("Flip")
    param_names["Axis"] = _("Axis")
    param_names["Invert"] = _("Invert")
    param_names["Position"] = _("Position")
    param_names["Edge Lightning"] = _("Edge Lightning")
    param_names["Edge Brightness"] = _("Edge Brightness")
    param_names["Non-Edge Brightness"] = _("Non-Edge Brightness")
    param_names["Spatial"] = _("Spatial")
    param_names["Temporal"] = _("Temporal")
    param_names["Amount"] = _("Amount")
    param_names["Size"] = _("Size")
    param_names["Border width"] = _("Border width")
    param_names["Phase Incr."] = _("Phase Incr.")
    param_names["Zoom"] = _("Zoom")
    param_names["Freeze Frame"] = _("Freeze Frame")
    param_names["Freeze After"] = _("Freeze After")
    param_names["Freeze Before"] = _("Freeze Before")
    param_names["Angle"] = _("Angle")
    param_names["transition.geometry"] = _("transition.geometry")
    param_names["Shear X"] = _("Shear X")
    param_names["Shear Y"] = _("Shear Y")
    param_names["transition.geometry"] = _("transition.geometry")
    param_names["transition.geometry"] = _("transition.geometry")
    param_names["Left"] = _("Left")
    param_names["Right"] = _("Right")
    param_names["Top"] = _("Top")
    param_names["Bottom"] = _("Bottom")
    param_names["Invert"] = _("Invert")
    param_names["Blur"] = _("Blur")
    param_names["Opacity"] = _("Opacity")
    param_names["Opacity"] = _("Opacity")
    param_names["Rotate X"] = _("Rotate X")
    param_names["Rotate Y"] = _("Rotate Y")
    param_names["Rotate Z"] = _("Rotate Z")
    # added 0.8
    param_names["Edge Mode"] = _("Edge Mode")
    param_names["Sel. Space"] = _("Sel. Space")
    param_names["Operation"] = _("Operation")
    param_names["Hard"] = _("Hard")
    param_names["R/A/Hue"] = _("R/A/Hue")
    # NOTE(review): key "G/B/Chromae" maps to value "G/B/Chroma" — the only
    # key/value mismatch here; looks like the key mirrors a typo in the
    # filter definition it must match. Confirm before "fixing".
    param_names["G/B/Chromae"] = _("G/B/Chroma")
    param_names["B/I/I"] = _("B/I/I")
    param_names["Supress"] = _("Supress")
    param_names["Horizontal"] = _("Horizontal")
    param_names["Vertical"] = _("Vertical")
    param_names["Type"] = _("Type")
    param_names["Edge"] = _("Edge")
    param_names["Dot Radius"] = _("Dot Radius")
    param_names["Cyan Angle"] = _("Cyan Angle")
    param_names["Magenta Angle"] = _("Magenta Angle")
    param_names["Yellow Angle"] = _("Yellow Angle")
    param_names["Levels"] = _("Levels")
    param_names["Matrix Type"] = _("Matrix Type")
    param_names["Aspect"] = _("Aspect")
    param_names["Center Size"] = _("Center Size")
    param_names["Azimuth"] = _("Azimuth")
    param_names["Lightness"] = _("Lightness")
    param_names["Bump Height"] = _("Bump Height")
    param_names["Gray"] = _("Gray")
    param_names["Split Preview"] = _("Split Preview")
    param_names["Source on Left"] = _("Source on Left")
    param_names["Lightness"] = _("Lightness")
    param_names["Input black level"] = _("Input black level")
    param_names["Input white level"] = _("Input white level")
    param_names["Black output"] = _("Black output")
    param_names["White output"] = _("White output")
    param_names["Red"] = _("Red")
    param_names["Green"] = _("Green")
    param_names["Blue"] = _("Blue")
    param_names["Action"] = _("Action")
    param_names["Keep Luma"] = _("Keep Luma")
    param_names["Luma Formula"] = _("Luma Formula")
    param_names["Effect"] = _("Effect")
    param_names["Sharpness"] = _("Sharpness")
    param_names["Blend Type"] = _("Blend Type")
    # added 0.16
    param_names["Key Color"] = _("Key Color")
    param_names["Pre-Level"] = _("Pre-Level")
    param_names["Post-Level"] = _("Post-Level")
    param_names["Slope"] = _("Slope")
    param_names["Luma Band"] = _("Luma Band")
    param_names["Lift"] = _("Lift")
    param_names["Gain"] = _("Gain")
    param_names["Input White Level"] = _("Input White Level")
    param_names["Input Black Level"] = _("Input Black Level")
    param_names["Black Output"] = _("Black Output")
    param_names["White Output"] = _("White Output")
    param_names["Rows"] = _("Rows")
    param_names["Columns"] = _("Columns")
    param_names["Color Temperature"] = _("Color Temperature")
    # param names for compositors
    param_names["Opacity"] = _("Opacity")
    param_names["Shear X"] = _("Shear X")
    param_names["Shear Y"] = _("Shear Y")
    param_names["Distort"] = _("Distort")
    param_names["Opacity"] = _("Opacity")
    param_names["Wipe Type"] = _("Wipe Type")
    param_names["Invert"] = _("Invert")
    param_names["Softness"] = _("Softness")
    param_names["Wipe Amount"] = _("Wipe Amount")
    param_names["Wipe Type"] = _("Wipe Type")
    param_names["Invert"] = _("Invert")
    param_names["Softness"] = _("Softness")

    # Combo options
    global combo_options
    combo_options["Shave"] = _("Shave")
    combo_options["Rectangle"] = _("Rectangle")
    combo_options["Ellipse"] = _("Ellipse")
    combo_options["Triangle"] = _("Triangle")
    combo_options["Diamond"] = _("Diamond")
    combo_options["Shave"] = _("Shave")
    combo_options["Shrink Hard"] = _("Shrink Hard")
    combo_options["Shrink Soft"] = _("Shrink Soft")
    combo_options["Grow Hard"] = _("Grow Hard")
    combo_options["Grow Soft"] = _("Grow Soft")
    combo_options["RGB"] = _("RGB")
    combo_options["ABI"] = _("ABI")
    combo_options["HCI"] = _("HCI")
    combo_options["Hard"] = _("Hard")
    combo_options["Fat"] = _("Fat")
    combo_options["Normal"] = _("Normal")
    combo_options["Skinny"] = _("Skinny")
    combo_options["Ellipsoid"] = _("Ellipsoid")
    combo_options["Diamond"] = _("Diamond")
    combo_options["Overwrite"] = _("Overwrite")
    combo_options["Max"] = _("Max")
    combo_options["Min"] = _("Min")
    combo_options["Add"] = _("Add")
    combo_options["Subtract"] = _("Subtract")
    combo_options["Green"] = _("Green")
    combo_options["Blue"] = _("Blue")
    combo_options["Sharper"] = _("Sharper")
    combo_options["Fuzzier"] = _("Fuzzier")
    combo_options["Luma"] = _("Luma")
    combo_options["Red"] = _("Red")
    combo_options["Green"] = _("Green")
    combo_options["Blue"] = _("Blue")
    combo_options["Add Constant"] = _("Add Constant")
    combo_options["Change Gamma"] = _("Change Gamma")
    combo_options["Multiply"] = _("Multiply")
    combo_options["XPro"] = _("XPro")
    combo_options["OldPhoto"] = _("OldPhoto")
    combo_options["Sepia"] = _("Sepia")
    combo_options["Heat"] = _("Heat")
    combo_options["XRay"] = _("XRay")
    combo_options["RedGreen"] = _("RedGreen")
    combo_options["YellowBlue"] = _("YellowBlue")
    combo_options["Esses"] = _("Esses")
    combo_options["Horizontal"] = _("Horizontal")
    combo_options["Vertical"] = _("Vertical")
    combo_options["Shadows"] = _("Shadows")
    combo_options["Midtones"] = _("Midtones")
    combo_options["Highlights"] = _("Highlights")
| Python |
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2014 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module handles button edit events from buttons in the middle bar.
"""
import pygtk
pygtk.require('2.0');
import gtk
import os
from operator import itemgetter
import appconsts
import dialogs
import dialogutils
import gui
import guicomponents
import edit
import editevent
import editorpersistance
import editorstate
from editorstate import get_track
from editorstate import current_sequence
from editorstate import PLAYER
from editorstate import timeline_visible
from editorstate import MONITOR_MEDIA_FILE
from editorstate import EDIT_MODE
import movemodes
import mlttransitions
import render
import renderconsumer
import syncsplitevent
import updater
import utils
# Used to store transition render data to be used at render complete callback
transition_render_data = None
# --------------------------- module funcs
def _get_new_clip_from_clip_monitor():
    """
    Creates and returns a new clip from the current clip monitor media file
    with the user set in and out points, or None if no media file is loaded.
    """
    media_file = MONITOR_MEDIA_FILE() # hoisted: the same object is read many times below
    if media_file == None:
        # Info window here
        return None
    if media_file.type != appconsts.PATTERN_PRODUCER:
        new_clip = current_sequence().create_file_producer_clip(media_file.path)
    else:
        new_clip = current_sequence().create_pattern_producer(media_file)
    # Set clip in and out points
    new_clip.mark_in = media_file.mark_in
    new_clip.mark_out = media_file.mark_out
    new_clip.name = media_file.name
    # -1 means the mark was never set; default to the full clip range
    if new_clip.mark_in == -1:
        new_clip.mark_in = 0
    if new_clip.mark_out == -1:
        new_clip.mark_out = new_clip.get_length() - 1 #-1 == out inclusive
    return new_clip
# How to get this depends on what is displayed on monitor
def _current_tline_frame():
    # Returns the current timeline playhead frame as reported by editorstate.
    return editorstate.current_tline_frame()
# ---------------------------------- edit button events
def cut_pressed():
    """Cuts clips at the current frame on all active, unlocked tracks."""
    if not timeline_visible():
        updater.display_sequence_in_monitor()
    # Trim modes have no meaningful cut; re-init the trim mode instead of editing.
    if EDIT_MODE() == editorstate.ONE_ROLL_TRIM:
        editevent.oneroll_trim_no_edit_init()
        return
    if EDIT_MODE() == editorstate.TWO_ROLL_TRIM:
        editevent.tworoll_trim_no_edit_init()
        return
    tline_frame = PLAYER().current_frame()
    movemodes.clear_selected_clips()
    # Iterate tracks and do cut on all active that have non-blanck
    # clips and frame is not on previous edits
    for i in range(1, len(current_sequence().tracks)):
        track = get_track(i)
        if track.active == False:
            continue
        if editevent.track_lock_check_and_user_info(track, cut_pressed, "cut"): # so the other tracks get cut...
            continue
        # Get index and clip
        index = track.get_clip_index_at(int(tline_frame))
        try:
            clip = track.clips[index]
            # don't cut blanck clip
            if clip.is_blanck_clip:
                continue
        except Exception:
            continue # Frame after last clip in track
        # Get cut frame in clip frames
        clip_start_in_tline = track.clip_start(index)
        clip_frame = tline_frame - clip_start_in_tline + clip.clip_in
        # Dont edit if frame on cut.
        if clip_frame == clip.clip_in:
            continue
        # Do edit
        data = {"track":track,
                "index":index,
                "clip":clip,
                "clip_cut_frame":clip_frame}
        action = edit.cut_action(data)
        action.do_edit()
    updater.repaint_tline()
def splice_out_button_pressed():
    """
    Removes the selected continuous clip range from its track and closes
    the created gap.
    """
    if movemodes.selected_track == -1:
        return
    # The edit consumes the selection, so drop the selected flag from the clips first.
    movemodes.set_range_selection(movemodes.selected_track,
                                  movemodes.selected_range_in,
                                  movemodes.selected_range_out,
                                  False)
    track = get_track(movemodes.selected_track)
    if editevent.track_lock_check_and_user_info(track, splice_out_button_pressed, "splice out"):
        movemodes.clear_selection_values()
        return
    action = edit.remove_multiple_action({"track":track,
                                          "from_index":movemodes.selected_range_in,
                                          "to_index":movemodes.selected_range_out})
    action.do_edit()
    # Nothing remains selected after the edit.
    movemodes.clear_selection_values()
    updater.repaint_tline()
def lift_button_pressed():
    """
    Removes the selected continuous clip range from its track, filling
    the created gap with a black clip.
    """
    if movemodes.selected_track == -1:
        return
    # The edit consumes the selection, so drop the selected flag from the clips first.
    movemodes.set_range_selection(movemodes.selected_track,
                                  movemodes.selected_range_in,
                                  movemodes.selected_range_out,
                                  False)
    track = get_track(movemodes.selected_track)
    if editevent.track_lock_check_and_user_info(track, lift_button_pressed, "lift"):
        movemodes.clear_selection_values()
        return
    action = edit.lift_multiple_action({"track":track,
                                        "from_index":movemodes.selected_range_in,
                                        "to_index":movemodes.selected_range_out})
    action.do_edit()
    # Nothing is left selected after the edit.
    movemodes.clear_selection_values()
    updater.repaint_tline()
def insert_button_pressed():
    """Inserts the monitor clip into the first active track at the playhead."""
    track = current_sequence().get_first_active_track()
    if editevent.track_lock_check_and_user_info(track, insert_button_pressed, "insert"):
        return
    insert_frame = _current_tline_frame()
    monitor_clip = _get_new_clip_from_clip_monitor()
    if monitor_clip == None:
        no_monitor_clip_info(gui.editor_window.window)
        return
    updater.save_monitor_frame = False # hack to not get wrong value saved in MediaFile.current_frame
    editevent.do_clip_insert(track, monitor_clip, insert_frame)
def append_button_pressed():
    """Appends the monitor clip at the end of the first active track."""
    track = current_sequence().get_first_active_track()
    # BUGFIX: the track lock info previously reported this edit as "insert";
    # sibling handlers pass their own action name here.
    if editevent.track_lock_check_and_user_info(track, append_button_pressed, "append"):
        return
    # Appending == inserting at the current end of the track.
    tline_pos = track.get_length()
    new_clip = _get_new_clip_from_clip_monitor()
    if new_clip == None:
        no_monitor_clip_info(gui.editor_window.window)
        return
    updater.save_monitor_frame = False # hack to not get wrong value saved in MediaFile.current_frame
    editevent.do_clip_insert(track, new_clip, tline_pos)
def three_point_overwrite_pressed():
    """Overwrites the selected clip range with the monitor clip.

    The selected range defines the overwrite area; the monitor clip must be
    at least as long as that range, and is written from its mark in.
    """
    # Check that state is good for edit
    if movemodes.selected_track == -1:
        primary_txt = _("No Clips are selected!")
        secondary_txt = _("You need to select clips to overwrite to perform this edit.")
        dialogutils.info_message(primary_txt, secondary_txt, gui.editor_window.window)
        return
    # Get data
    track = get_track(movemodes.selected_track)
    if editevent.track_lock_check_and_user_info(track, three_point_overwrite_pressed, "3 point overwrite"):
        return
    # Timeline extent of the selected range, end inclusive.
    range_start_frame = track.clip_start(movemodes.selected_range_in)
    out_clip = track.clips[movemodes.selected_range_out]
    out_start = track.clip_start(movemodes.selected_range_out)
    range_end_frame = out_start + out_clip.clip_out - out_clip.clip_in
    range_length = range_end_frame - range_start_frame + 1 # calculated end is incl.
    over_clip = _get_new_clip_from_clip_monitor()
    if over_clip == None:
        no_monitor_clip_info(gui.editor_window.window)
        return
    over_length = over_clip.mark_out - over_clip.mark_in + 1 # + 1 out incl ?????????? what if over_clip.mark_out == -1 ??????????
    if over_length < range_length:
        monitor_clip_too_short(gui.editor_window.window)
        return
    over_clip_out = over_clip.mark_in + range_length - 1 # -1 out incl
    range_in = movemodes.selected_range_in
    range_out = movemodes.selected_range_out
    movemodes.clear_selected_clips() # edit consumes selection
    updater.save_monitor_frame = False # hack to not get wrong value saved in MediaFile.current_frame
    data = {"track":track,
            "clip":over_clip,
            "clip_in":over_clip.mark_in,
            "clip_out":over_clip_out,
            "in_index":range_in,
            "out_index":range_out}
    action = edit.three_point_overwrite_action(data)
    action.do_edit()
    updater.display_tline_cut_frame(track, range_in)
def range_overwrite_pressed():
    """Overwrites the timeline Mark In - Mark Out range on the first active
    track with the monitor clip, starting from the monitor clip's mark in.
    """
    # Get data
    track = current_sequence().get_first_active_track()
    if editevent.track_lock_check_and_user_info(track, range_overwrite_pressed, "range overwrite"):
        return
    # tractor is has mark in and mark
    mark_in_frame = current_sequence().tractor.mark_in
    mark_out_frame = current_sequence().tractor.mark_out
    range_length = mark_out_frame - mark_in_frame + 1 # end is incl.
    # -1 means the mark is not set.
    if mark_in_frame == -1 or mark_out_frame == -1:
        primary_txt = _("Timeline Range not set!")
        secondary_txt = _("You need to set Timeline Range using Mark In and Mark Out buttons\nto perform this edit.")
        dialogutils.info_message(primary_txt, secondary_txt, gui.editor_window.window)
        return
    # Get over clip and check it overwrite range area
    over_clip = _get_new_clip_from_clip_monitor()
    if over_clip == None:
        no_monitor_clip_info(gui.editor_window.window)
        return
    over_length = over_clip.mark_out - over_clip.mark_in + 1 # + 1 out incl
    if over_length < range_length:
        monitor_clip_too_short(gui.editor_window.window)
        return
    over_clip_out = over_clip.mark_in + range_length - 1
    movemodes.clear_selected_clips() # edit consumes selection
    updater.save_monitor_frame = False # hack to not get wrong value saved in MediaFile.current_frame
    data = {"track":track,
            "clip":over_clip,
            "clip_in":over_clip.mark_in,
            "clip_out":over_clip_out,
            "mark_in_frame":mark_in_frame,
            "mark_out_frame":mark_out_frame + 1} # +1 because mark is displayed and end of frame end this
                                                 # confirms to user expectation of
                                                 # of how this should work
    action = edit.range_overwrite_action(data)
    action.do_edit()
    updater.display_tline_cut_frame(track, track.get_clip_index_at(mark_in_frame))
def resync_button_pressed():
    # Delegates resyncing of the current selection to the syncsplitevent module.
    syncsplitevent.resync_selected()
def add_transition_menu_item_selected():
    """Menu handler: starts rendered transition creation for exactly two selected clips."""
    if movemodes.selected_track == -1:
        # INFOWINDOW
        return
    selected_count = movemodes.selected_range_out - movemodes.selected_range_in + 1 # +1 out incl.
    if selected_count != 2:
        # INFOWINDOW
        return
    add_transition_pressed()
def add_fade_menu_item_selected():
if movemodes.selected_track == -1:
print "so selection track"
# INFOWINDOW
return
clip_count = movemodes.selected_range_out - movemodes.selected_range_in + 1 # +1 out incl.
if not (clip_count == 1):
# INFOWINDOW
return
add_transition_pressed()
def add_transition_pressed(retry_from_render_folder_select=False):
if movemodes.selected_track == -1:
print "so selection track"
# INFOWINDOW
return
track = get_track(movemodes.selected_track)
clip_count = movemodes.selected_range_out - movemodes.selected_range_in + 1 # +1 out incl.
if not ((clip_count == 2) or (clip_count == 1)):
# INFOWINDOW
print "clip count"
return
if track.id < current_sequence().first_video_index and clip_count == 1:
_no_audio_tracks_mixing_info()
return
if editorpersistance.prefs.render_folder == None:
if retry_from_render_folder_select == True:
return
dialogs.select_rendred_clips_dir(_add_transition_render_folder_select_callback,
gui.editor_window.window,
editorpersistance.prefs.render_folder)
return
if clip_count == 2:
_do_rendered_transition(track)
else:
_do_rendered_fade(track)
def _do_rendered_transition(track):
    """Computes available handle lengths for the two selected clips and opens
    the transition edit dialog (video tracks only).
    """
    from_clip = track.clips[movemodes.selected_range_in]
    to_clip = track.clips[movemodes.selected_range_out]
    # Get available clip handles to do transition
    # (handle == media beyond the clip's current in/out points)
    from_handle = from_clip.get_length() - from_clip.clip_out
    from_clip_length = from_clip.clip_out - from_clip.clip_in
    to_handle = to_clip.clip_in
    to_clip_length = to_clip.clip_out - to_clip.clip_in
    # Cap each handle at the other clip's visible length.
    if to_clip_length < from_handle:
        from_handle = to_clip_length
    if from_clip_length < to_handle:
        to_handle = from_clip_length
    # Images have limitless handles, but we simulate that with big value
    IMAGE_MEDIA_HANDLE_LENGTH = 1000
    if from_clip.media_type == appconsts.IMAGE:
        from_handle = IMAGE_MEDIA_HANDLE_LENGTH
    if to_clip.media_type == appconsts.IMAGE:
        to_handle = IMAGE_MEDIA_HANDLE_LENGTH
    max_length = from_handle + to_handle
    transition_data = {"track":track,
                       "from_clip":from_clip,
                       "to_clip":to_clip,
                       "from_handle":from_handle,
                       "to_handle":to_handle,
                       "max_length":max_length}
    if track.id >= current_sequence().first_video_index:
        dialogs.transition_edit_dialog(_add_transition_dialog_callback,
                                       transition_data)
    else:
        _no_audio_tracks_mixing_info()
def _add_transition_render_folder_select_callback(dialog, response_id, file_select):
    """Saves the user-selected render folder and retries the transition add."""
    try:
        folder = file_select.get_filenames()[0]
    except Exception: # BUGFIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        # No folder selection available.
        dialog.destroy()
        return
    dialog.destroy()
    if response_id == gtk.RESPONSE_YES:
        if folder == os.path.expanduser("~"):
            # Refuse the home folder as a render clip dump.
            dialogs.rendered_clips_no_home_folder_dialog()
        else:
            editorpersistance.prefs.render_folder = folder
            editorpersistance.save()
            # True == this is a retry; prevents an endless dialog loop.
            add_transition_pressed(True)
def _add_transition_dialog_callback(dialog, response_id, selection_widgets, transition_data):
    """Validates transition dialog input, builds the transition render sequence
    and starts its render; _transition_render_complete finishes the edit.
    """
    if response_id != gtk.RESPONSE_ACCEPT:
        dialog.destroy()
        return
    # Get input data
    type_combo, length_entry, enc_combo, quality_combo, wipe_luma_combo_box, color_button = selection_widgets
    encoding_option_index = enc_combo.get_active()
    quality_option_index = quality_combo.get_active()
    extension_text = "." + renderconsumer.encoding_options[encoding_option_index].extension
    sorted_wipe_luma_index = wipe_luma_combo_box.get_active()
    color_str = color_button.get_color().to_string()
    try:
        length = int(length_entry.get_text())
    except Exception, e:
        # INFOWINDOW, bad input
        print str(e)
        print "entry"
        return
    dialog.destroy()
    from_clip = transition_data["from_clip"]
    to_clip = transition_data["to_clip"]
    # Get values to build transition render sequence
    # Divide transition lenght between clips, odd frame goes to from_clip
    real_length = length + 1 # first frame is full from clip frame so we are going to have to drop that
    to_part = real_length / 2 # NOTE: Python 2 integer division
    from_part = real_length - to_part
    # HACKFIX, I just tested this till it worked, not entirely sure on math here
    if to_part == from_part:
        add_thingy = 0
    else:
        add_thingy = 1
    # Abort if either clip lacks the handle material the split requires.
    if _check_transition_handles((from_part - add_thingy),
                                 transition_data["from_handle"],
                                 to_part - (1 - add_thingy),
                                 transition_data["to_handle"]) == False:
        return
    # Get from in and out frames
    from_in = from_clip.clip_out - from_part + add_thingy
    from_out = from_in + length # or transition will include one frame too many
    # Get to in and out frames
    to_in = to_clip.clip_in - to_part - 1
    to_out = to_in + length # or transition will include one frame too many
    # Edit clears selection, get track index before selection is cleared
    trans_index = movemodes.selected_range_out
    movemodes.clear_selected_clips()
    transition_type_selection_index = type_combo.get_active() # these corespond with ...
    producer_tractor = mlttransitions.get_rendered_transition_tractor( editorstate.current_sequence(),
                                                                      from_clip,
                                                                      to_clip,
                                                                      from_out,
                                                                      from_in,
                                                                      to_out,
                                                                      to_in,
                                                                      transition_type_selection_index,
                                                                      sorted_wipe_luma_index,
                                                                      color_str)
    # Save transition data into global variable to be available at render complete callback
    global transition_render_data
    transition_render_data = (trans_index, from_clip, to_clip, transition_data["track"], from_in, to_out, transition_type_selection_index)
    window_text, type_id = mlttransitions.rendered_transitions[transition_type_selection_index]
    window_text = _("Rendering ") + window_text
    render.render_single_track_transition_clip(producer_tractor,
                                               encoding_option_index,
                                               quality_option_index,
                                               str(extension_text),
                                               _transition_render_complete,
                                               window_text)
def _transition_render_complete(clip_path):
print "render complete"
global transition_render_data
transition_index, from_clip, to_clip, track, from_in, to_out, transition_type = transition_render_data
transition_clip = current_sequence().create_rendered_transition_clip(clip_path, transition_type)
data = {"transition_clip":transition_clip,
"transition_index":transition_index,
"from_clip":from_clip,
"to_clip":to_clip,
"track":track,
"from_in":from_in,
"to_out":to_out}
action = edit.add_centered_transition_action(data)
action.do_edit()
def _check_transition_handles(from_req, from_handle, to_req, to_handle):
    """Returns True if both clips have the required handle frames available
    for the transition; otherwise shows an info dialog and returns False."""
    if from_req > from_handle:
        info_text = (_("There is not enough material available in the FROM clip after the cut")
                     + _("\nto create the transition.\n\n")
                     + _("<b>Available:</b> ") + str(from_handle) + _(" frame(s)\n")
                     + _("<b>Required:</b> ") + str(from_req) + _(" frame(s)"))
        dialogutils.info_message(_("FROM Clip Handle is too short!"),
                                 info_text,
                                 gui.editor_window.window)
        return False
    if to_req > to_handle:
        info_text = (_("There is not enough material available in the TO clip before the cut")
                     + _("\nto create the transition.\n\n")
                     + _("<b>Available:</b> ") + str(to_handle) + _(" frame(s)\n")
                     + _("<b>Required:</b> ") + str(to_req) + _(" frame(s)"))
        dialogutils.info_message(_("TO Clip Handle is too short!"),
                                 info_text,
                                 gui.editor_window.window)
        return False
    return True
def _do_rendered_fade(track):
    """Opens the fade edit dialog for the single selected clip (video tracks only)."""
    fade_clip = track.clips[movemodes.selected_range_in]
    transition_data = {"track":track,
                       "clip":fade_clip}
    if track.id < current_sequence().first_video_index:
        # Rendered fades are not offered for audio tracks.
        _no_audio_tracks_mixing_info()
        return
    dialogs.fade_edit_dialog(_add_fade_dialog_callback, transition_data)
def _no_audio_tracks_mixing_info():
    """Tells the user that rendered mixes and fades only work on video tracks."""
    dialogutils.info_message(_("Only Video Track mix / fades available"),
                             _("Unfortunately rendered mixes and fades can currently\nonly be applied on clips on Video Tracks."),
                             gui.editor_window.window)
def _add_fade_dialog_callback(dialog, response_id, selection_widgets, transition_data):
    """Validates fade dialog input, builds the fade render sequence and starts
    its render; _fade_render_complete finishes the edit.
    """
    if response_id != gtk.RESPONSE_ACCEPT:
        dialog.destroy()
        return
    # Get input data
    type_combo, length_entry, enc_combo, quality_combo, color_button = selection_widgets
    encoding_option_index = enc_combo.get_active()
    quality_option_index = quality_combo.get_active()
    extension_text = "." + renderconsumer.encoding_options[encoding_option_index].extension
    color_str = color_button.get_color().to_string()
    try:
        length = int(length_entry.get_text())
    except Exception, e:
        # INFOWINDOW, bad input
        print str(e)
        print "entry"
        return
    dialog.destroy()
    # Zero length fade is a no-op.
    if length == 0:
        return
    clip = transition_data["clip"]
    # The fade cannot be longer than the clip it is applied on.
    if length > clip.clip_length():
        info_text = _("Clip is too short for the requested fade:\n\n") + \
                    _("<b>Clip Length:</b> ") + str(clip.clip_length()) + _(" frame(s)\n") + \
                    _("<b>Fade Length:</b> ") + str(length) + _(" frame(s)\n")
        dialogutils.info_message(_("Clip is too short!"),
                                 info_text,
                                 gui.editor_window.window)
        return
    # Edit clears selection, get track index before selection is cleared
    clip_index = movemodes.selected_range_in
    movemodes.clear_selected_clips()
    transition_type_selection_index = type_combo.get_active() + 3 # +3 because mlttransitions.RENDERED_FADE_IN = 3 and mlttransitions.RENDERED_FADE_OUT = 4
                                                                  # and fade in/out selection indexes are 0 and 1
    producer_tractor = mlttransitions.get_rendered_transition_tractor( editorstate.current_sequence(),
                                                                      clip,
                                                                      None,
                                                                      length,
                                                                      None,
                                                                      None,
                                                                      None,
                                                                      transition_type_selection_index,
                                                                      None,
                                                                      color_str)
    print "producer_tractor length:" + str(producer_tractor.get_length())
    # Save transition data into global variable to be available at render complete callback
    global transition_render_data
    transition_render_data = (clip_index, transition_type_selection_index, clip, transition_data["track"], length)
    window_text, type_id = mlttransitions.rendered_transitions[transition_type_selection_index]
    window_text = _("Rendering ") + window_text
    render.render_single_track_transition_clip(producer_tractor,
                                               encoding_option_index,
                                               quality_option_index,
                                               str(extension_text),
                                               _fade_render_complete,
                                               window_text)
def _fade_render_complete(clip_path):
print "fade render complete"
global transition_render_data
clip_index, fade_type, clip, track, length = transition_render_data
fade_clip = current_sequence().create_rendered_transition_clip(clip_path, fade_type)
data = {"fade_clip":fade_clip,
"index":clip_index,
"track":track,
"length":length}
if fade_type == mlttransitions.RENDERED_FADE_IN:
action = edit.add_rendered_fade_in_action(data)
action.do_edit()
else: # mlttransitions.RENDERED_FADE_OUT
action = edit.add_rendered_fade_out_action(data)
action.do_edit()
# --------------------------------------------------------- view move setting
def view_mode_menu_lauched(launcher, event):
    # Opens the monitor view mode popup menu; selections are handled below.
    guicomponents.get_monitor_view_popupmenu(launcher, event, _view_mode_menu_item_item_activated)
def _view_mode_menu_item_item_activated(widget, msg):
    # Sets the sequence output mode and updates the selector icon to match.
    editorstate.current_sequence().set_output_mode(msg)
    gui.editor_window.view_mode_select.set_pixbuf(msg)
# ------------------------------------------------------- dialogs
def no_monitor_clip_info(parent_window):
    """Shows an info dialog saying that no clip is loaded into the monitor."""
    dialogutils.info_message(_("No Clip loaded into Monitor"),
                             _("Can't do the requested edit because there is no Clip in Monitor."),
                             parent_window)
def monitor_clip_too_short(parent_window):
    """Shows an info dialog saying that the monitor clip range is too short."""
    dialogutils.info_message(_("Defined range in Monitor Clip is too short"),
                             _("Can't do the requested edit because Mark In -> Mark Out Range or Clip is too short."),
                             parent_window)
# ------------------------------------------------- clip to rang log d'n'd
def mouse_dragged_out(event):
    # Starts drag'n'drop of the selected clip range out of the timeline,
    # but only if a range selection exists (-1 == no selection).
    if movemodes.selected_range_in != -1:
        movemodes.clips_drag_out_started(event)
# --------------------------------------------------- copy/paste
def do_timeline_objects_copy():
    """Clones the selected clip range into the copy/paste buffer."""
    if movemodes.selected_track == -1:
        return
    # copying clips
    track = current_sequence().tracks[movemodes.selected_track]
    clone_clips = [current_sequence().clone_track_clip(track, index)
                   for index in range(movemodes.selected_range_in,
                                      movemodes.selected_range_out + 1)]
    editorstate.set_copy_paste_objects(clone_clips)
def do_timeline_objects_paste():
    """Inserts the copy/paste buffer clips at the playhead on the first active track."""
    track = current_sequence().get_first_active_track()
    if track == None:
        return
    paste_objs = editorstate.get_copy_paste_objects()
    if paste_objs == None:
        return
    tline_pos = editorstate.current_tline_frame()
    # Refill the buffer with fresh clones so the same material can be pasted again.
    editorstate.set_copy_paste_objects([current_sequence().create_clone_clip(clip)
                                        for clip in paste_objs])
    # Paste clips
    editevent.do_multiple_clip_insert(track, paste_objs, tline_pos)
#------------------------------------------- markers
def marker_menu_lauch_pressed(widget, event):
    # Opens the markers popup menu; selections go to _marker_menu_item_activated.
    guicomponents.get_markers_popup_menu(event, _marker_menu_item_activated)
def _marker_menu_item_activated(widget, msg):
    """Handles marker popup menu selections: add, delete, delete all, or seek."""
    current_frame = PLAYER().current_frame()
    if msg == "add":
        dialogs.marker_name_dialog(utils.get_tc_string(current_frame), _marker_add_dialog_callback)
    elif msg == "delete":
        # Find a marker on the current frame and remove it if one exists.
        delete_index = -1
        for index, marker in enumerate(current_sequence().markers):
            name, frame = marker
            if frame == current_frame:
                delete_index = index
        if delete_index != -1:
            current_sequence().markers.pop(delete_index)
            updater.repaint_tline()
    elif msg == "deleteall":
        current_sequence().markers = []
        updater.repaint_tline()
    else: # seek to marker, msg is the marker's list index as a string
        name, frame = current_sequence().markers[int(msg)]
        PLAYER().seek_frame(frame)
def add_marker():
    """Opens the marker name dialog for the current frame; the callback adds the marker."""
    current_frame = PLAYER().current_frame()
    dialogs.marker_name_dialog(utils.get_tc_string(current_frame), _marker_add_dialog_callback)
def _marker_add_dialog_callback(dialog, response_id, name_entry):
    """Adds a named marker at the current frame, replacing any marker already there,
    and keeps the marker list sorted by frame."""
    marker_name = name_entry.get_text()
    dialog.destroy()
    current_frame = PLAYER().current_frame()
    # A frame holds at most one marker; drop an existing one on this frame.
    existing_index = -1
    for index, marker in enumerate(current_sequence().markers):
        name, frame = marker
        if frame == current_frame:
            existing_index = index
    if existing_index != -1:
        current_sequence().markers.pop(existing_index)
    current_sequence().markers.append((marker_name, current_frame))
    current_sequence().markers = sorted(current_sequence().markers, key=itemgetter(1))
    updater.repaint_tline()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests that leaked mock objects can be caught be Google Mock."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gmock_test_utils
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_leak_test_')

# Command lines that run the leak test binary filtered to specific test groups.
TEST_WITH_EXPECT_CALL = [PROGRAM_PATH, '--gtest_filter=*ExpectCall*']
TEST_WITH_ON_CALL = [PROGRAM_PATH, '--gtest_filter=*OnCall*']
TEST_MULTIPLE_LEAKS = [PROGRAM_PATH, '--gtest_filter=*MultipleLeaked*']
class GMockLeakTest(gmock_test_utils.TestCase):
  """Verifies the leak detection behavior of the gmock_leak_test_ binary.

  A non-zero exit code from the child binary indicates that it reported
  the leaked mock(s).
  """

  def testCatchesLeakedMockByDefault(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL).exit_code)

  def testDoesNotCatchLeakedMockWhenDisabled(self):
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=0']).exit_code)
    self.assertEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks=0']).exit_code)

  def testCatchesLeakedMockWhenEnabled(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks']).exit_code)
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_ON_CALL +
                                    ['--gmock_catch_leaked_mocks']).exit_code)

  def testCatchesLeakedMockWhenEnabledWithExplictFlagValue(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_WITH_EXPECT_CALL +
                                    ['--gmock_catch_leaked_mocks=1']).exit_code)

  def testCatchesMultipleLeakedMocks(self):
    self.assertNotEqual(
        0,
        gmock_test_utils.Subprocess(TEST_MULTIPLE_LEAKS +
                                    ['--gmock_catch_leaked_mocks']).exit_code)
if __name__ == '__main__':
  # Runs the tests through the shared gmock test runner.
  gmock_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'

# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
  # Fallback for a source layout where gmock sits one level deeper.
  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Delegates to the shared gtest implementation.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  # Delegates to the shared gtest implementation.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the argument to exit() directly,
    # so no decoding is needed.
    return exit_code
  # On Unix the result is a 16-bit wait status that must be decoded.
  if not os.WIFEXITED(exit_code):
    # The process was terminated by a signal rather than calling exit().
    return -1
  return os.WEXITSTATUS(exit_code)
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes Subprocess from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
# Exposes TestCase from gtest_test_utils.
TestCase = gtest_test_utils.TestCase
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # Delegates to the shared gtest test runner.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
# Stack traces and timing are disabled so the output is stable across runs.
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # Replace CRLF first so a lone CR pass cannot split a Windows ending in two.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""
  # The substitutions are applied in this exact order.
  header_footer_patterns = (r'.*gtest_main.*\n',
                            r'\[.*\d+ tests.*\n',
                            r'\[.* test environment .*\n',
                            r'\[=+\] \d+ tests .* ran.*',
                            r'.* FAILED TESTS\n')
  for pattern in header_footer_patterns:
    output = re.sub(pattern, '', output)
  return output
def RemoveLocations(output):
  """Replaces file location info in a Google Test program's output.

  Args:
    output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE:#: '.
  """
  # One pattern covers both the POSIX ':LINE:' and Windows '(LINE):' forms.
  location_regex = r'.*[/\\](.+)(\:\d+|\(\d+\))\:'
  return re.sub(location_regex, 'FILE:#:', output)
def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""
  # The pattern is a literal, so str.replace is equivalent to re.sub here.
  return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""
  # Any '@<word>' is assumed to be an address; normalize to a fixed token so
  # output is comparable across runs.
  address_re = re.compile(r'@\w+')
  return address_re.sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""
  annotation_re = re.compile(r'\(used in test .+\) ')
  return annotation_re.sub('', output)
def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""
  # Each leaked mock is reported with '(used in test <name>)'; collect every
  # <name>, including duplicates, in order of appearance.
  leaky_test_re = re.compile(r'\(used in test (.+)\)')
  return leaky_test_re.findall(output)
def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """
  # Apply each normalization pass in order; later passes assume the UNIX
  # line endings produced by the first.
  for normalize in (ToUnixLineEnding,
                    RemoveReportHeaderAndFooter,
                    NormalizeErrorMarker,
                    RemoveLocations,
                    RemoveMemoryAddresses):
    output = normalize(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string.

  Args:
    cmd: the command as an argument list (program path plus flags).
  """
  # capture_stderr=False: only stdout is captured, so stderr noise cannot
  # perturb the golden-file comparison.
  return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky tests.

  Args:
    cmd: the shell command.

  Returns:
    A (normalized output, leaky test names) tuple, as produced by
    GetNormalizedOutputAndLeakyTests.
  """
  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares gmock_output_test_'s normalized output against the golden file."""

  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'rb')
    golden = golden_file.read()
    golden_file.close()

    # The normalized output should match the golden file.
    self.assertEquals(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    expected_leaks = ['GMockOutputTest.CatchesLeakedMocks',
                      'GMockOutputTest.CatchesLeakedMocks']
    self.assertEquals(expected_leaks, leaky_tests)
if __name__ == '__main__':
  # With --gengolden, (re)generate the golden file from the current test
  # binary's output instead of comparing against it.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'wb')
    golden_file.write(output)
    golden_file.close()
  else:
    # Normal mode: run the test case defined above.
    gmock_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/gtest
sub-directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into gtest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, 'gtest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
  """Returns the root directory of Google Test.

  Google Test is assumed to live in the 'gtest' sub-directory of the
  Google Mock root.
  """
  gtest_root = os.path.join(gmock_root, 'gtest')
  return gtest_root
def ValidateGMockRootDir(gmock_root):
  """Makes sure gmock_root points to a valid gmock root directory.

  The function aborts the program on failure.
  """
  # A valid gmock root must contain a valid gtest root plus the two seed
  # files that the fusion starts from.
  gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
  gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
  gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # All three fused output paths must be writable.
  gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
  gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
  gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
  # NOTE: file() and sets.Set are Python 2 only; this script predates Python 3.
  output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
  processed_files = sets.Set()  # Holds all gmock headers we've processed.

  def ProcessFile(gmock_header_path):
    """Processes the given gmock header file."""
    # We don't process the same header twice.
    if gmock_header_path in processed_files:
      return

    processed_files.add(gmock_header_path)

    # Reads each line in the given gmock header.
    for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/foo.h"'. We translate it to
          # "gtest/gtest.h", regardless of what foo is, since all
          # gtest headers are fused into gtest/gtest.h.

          # There is no need to #include gtest.h twice.
          if not gtest.GTEST_H_SEED in processed_files:
            processed_files.add(gtest.GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)

  ProcessFile(GMOCK_H_SEED)
  output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
  """Scans folder gmock_root to fuse gmock-all.cc into output_file.

  Args:
    gmock_root: path of the Google Mock root directory.
    output_file: an open, writable file object the fused source is appended to.
  """
  processed_files = sets.Set()

  def ProcessFile(gmock_source_file):
    """Processes the given gmock source file."""
    # We don't process the same #included file twice.
    if gmock_source_file in processed_files:
      return

    processed_files.add(gmock_source_file)

    # Reads each line in the given gmock source file.
    for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
      m = INCLUDE_GMOCK_FILE_REGEX.match(line)
      if m:
        # It's '#include "gmock/foo.h"'. We treat it as '#include
        # "gmock/gmock.h"', as all other gmock headers are being fused
        # into gmock.h and cannot be #included directly.

        # There is no need to #include "gmock/gmock.h" more than once.
        if not GMOCK_H_SEED in processed_files:
          processed_files.add(GMOCK_H_SEED)
          output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
      else:
        m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."'.
          # There is no need to #include gtest.h as it has been
          # #included by gtest-all.cc.
          pass
        else:
          m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            # Otherwise we copy the line unchanged to the output file.
            output_file.write(line)

  ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
  """Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
  output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
  # First, fuse gtest-all.cc into gmock-gtest-all.cc.
  gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
  # Next, append fused gmock-all.cc to gmock-gtest-all.cc.
  FuseGMockAllCcToFile(gmock_root, output_file)
  output_file.close()
def FuseGMock(gmock_root, output_dir):
  """Fuses gtest.h, gmock.h, and gmock-gtest-all.cc.

  Validates both directories first; validation aborts the program on failure.
  """
  ValidateGMockRootDir(gmock_root)
  ValidateOutputDir(output_dir)
  gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
  FuseGMockH(gmock_root, output_dir)
  FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
  """Parses the command line and runs the fusion.

  Accepts either 'OUTPUT_DIR' (gmock root defaults to the parent of this
  script's directory) or 'GMOCK_ROOT_DIR OUTPUT_DIR'; any other usage
  prints the module docstring and exits with status 1.
  """
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gmock_files.py OUTPUT_DIR
    FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
    FuseGMock(sys.argv[1], sys.argv[2])
  else:
    # Wrong usage: show the synopsis (Python 2 print statement).
    print __doc__
    sys.exit(1)
if __name__ == '__main__':
  # Script entry point.
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for starting up Google Mock class generator."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
if __name__ == '__main__':
  # Add the directory of this script to the path so we can import gmock_class.
  sys.path.append(os.path.dirname(__file__))

  from cpp import gmock_class
  # Fix the docstring in case they require the usage.
  # (Makes usage text show this wrapper's filename instead of gmock_class.py.)
  gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__)
  gmock_class.main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# C++ keywords grouped by role.  Sets give O(1) membership tests.
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
# The union of every keyword group above.
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
  """Returns True if token is any C++ keyword known to this module."""
  return token in ALL
def IsBuiltinType(token):
  """Returns True if token names or modifies a built-in C++ type."""
  if token in ('virtual', 'inline'):
    # These only apply to methods, they can't be types by themselves.
    return False
  return token in TYPES or token in TYPE_MODIFIERS
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
try:
_dummy = set
except NameError:
import sets
set = sets.Set
_VERSION = (1, 0, 1) # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
  """Appends MOCK_*METHOD* lines for every virtual method of class_node.

  Args:
    output_lines: list of str; generated lines are appended in place.
    source: the full C++ source text (used to extract parameter text).
    class_node: ast.Class whose virtual, non-ctor/dtor methods are mocked.
  """
  function_type = ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL
  ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
  indent = ' ' * _INDENT

  for node in class_node.body:
    # We only care about virtual functions.
    if (isinstance(node, ast.Function) and
        node.modifiers & function_type and
        not node.modifiers & ctor_or_dtor):
      # Pick out all the elements we need from the original function.
      const = ''
      if node.modifiers & ast.FUNCTION_CONST:
        const = 'CONST_'  # Selects MOCK_CONST_METHODn for const methods.
      return_type = 'void'
      if node.return_type:
        # Add modifiers like 'const'.
        modifiers = ''
        if node.return_type.modifiers:
          modifiers = ' '.join(node.return_type.modifiers) + ' '
        return_type = modifiers + node.return_type.name
        template_args = [arg.name for arg in node.return_type.templated_types]
        if template_args:
          return_type += '<' + ', '.join(template_args) + '>'
          if len(template_args) > 1:
            # MOCK_METHOD's macro expansion cannot handle a comma inside the
            # return type, so warn the user in the generated output.
            for line in [
                '// The following line won\'t really compile, as the return',
                '// type has multiple template arguments. To fix it, use a',
                '// typedef for the return type.']:
              output_lines.append(indent + line)
        if node.return_type.pointer:
          return_type += '*'
        if node.return_type.reference:
          return_type += '&'
      mock_method_macro = 'MOCK_%sMETHOD%d' % (const, len(node.parameters))
      args = ''
      if node.parameters:
        # Get the full text of the parameters from the start
        # of the first parameter to the end of the last parameter.
        start = node.parameters[0].start
        end = node.parameters[-1].end
        # Remove // comments.
        args_strings = re.sub(r'//.*', '', source[start:end])
        # Condense multiple spaces and eliminate newlines putting the
        # parameters together on a single line. Ensure there is a
        # space in an argument which is split by a newline without
        # intervening whitespace, e.g.: int\nBar
        args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
      # Create the mock method definition.
      output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
                           '%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
  """Returns the generated Mock* class lines for the requested classes.

  Args:
    filename: name of the input file (used only in diagnostics).
    source: the full C++ source text.
    ast_list: iterable of top-level AST nodes.
    desired_class_names: set of class names to mock, or None for all classes.

  Returns:
    list of str: lines of the generated mock class definitions.
  """
  processed_class_names = set()
  lines = []
  for node in ast_list:
    if (isinstance(node, ast.Class) and node.body and
        # desired_class_names being None means that all classes are selected.
        (not desired_class_names or node.name in desired_class_names)):
      class_name = node.name
      processed_class_names.add(class_name)
      class_node = node
      # Add namespace before the class.
      if class_node.namespace:
        lines.extend(['namespace %s {' % n for n in class_node.namespace])  # }
        lines.append('')
      # Add the class prolog.
      lines.append('class Mock%s : public %s {' % (class_name, class_name))  # }
      lines.append('%spublic:' % (' ' * (_INDENT // 2)))
      # Add all the methods.
      _GenerateMethods(lines, source, class_node)
      # Close the class.
      if lines:
        # If there are no virtual methods, no need for a public label.
        # NOTE(review): len(lines) counts ALL lines generated so far, so this
        # prune can only fire for the first class processed -- confirm intended.
        if len(lines) == 2:
          del lines[-1]
        # Only close the class if there really is a class.
        lines.append('};')
        lines.append('')  # Add an extra newline.
      # Close the namespace.
      if class_node.namespace:
        for i in range(len(class_node.namespace)-1, -1, -1):
          lines.append('} // namespace %s' % class_node.namespace[i])
        lines.append('')  # Add an extra newline.
  if desired_class_names:
    # Report any requested classes that were not found in the file.
    missing_class_name_list = list(desired_class_names - processed_class_names)
    if missing_class_name_list:
      missing_class_name_list.sort()
      sys.stderr.write('Class(es) not found in %s: %s\n' %
                       (filename, ', '.join(missing_class_name_list)))
  elif not processed_class_names:
    sys.stderr.write('No class found in %s\n' % filename)
  return lines
def main(argv=sys.argv):
  """Entry point: parses argv, generates mock classes, writes them to stdout.

  Args:
    argv: command-line arguments; argv[1] is the header file and any further
        arguments name the classes to mock (default: all classes).

  Returns:
    1 on usage or read errors, None otherwise.
  """
  if len(argv) < 2:
    sys.stderr.write('Google Mock Class Generator v%s\n\n' %
                     '.'.join(map(str, _VERSION)))
    sys.stderr.write(__doc__)
    return 1

  global _INDENT
  try:
    _INDENT = int(os.environ['INDENT'])
  except KeyError:
    pass  # INDENT simply isn't set; keep the default.
  except ValueError:
    # Narrowed from a bare 'except:' -- int() reports a bad value with
    # ValueError; anything else should propagate rather than be hidden.
    sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))

  filename = argv[1]
  desired_class_names = None  # None means all classes in the source file.
  if len(argv) >= 3:
    desired_class_names = set(argv[2:])
  source = utils.ReadFile(filename)
  if source is None:
    return 1

  builder = ast.BuilderFromSource(source, filename)
  try:
    entire_ast = filter(None, builder.Generate())
  except KeyboardInterrupt:
    return
  except Exception:
    # Narrowed from a bare 'except:' so SystemExit is not swallowed.
    # An error message was already printed since we couldn't parse.
    pass
  else:
    lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
    sys.stdout.write('\n'.join(lines))
if __name__ == '__main__':
  # Pass the real argv explicitly (main also accepts a custom one for testing).
  main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
  """Returns the contents of filename, or None if it cannot be read.

  Args:
    filename: path of the file to read.
    print_error: if True, print a diagnostic when reading fails.
  """
  try:
    with open(filename) as fp:
      return fp.read()
  except IOError:
    if print_error:
      print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
    return None
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
  # Support Python 2.3 and earlier.
  def reversed(seq):
    # Yield seq's elements from the last index down to 0.
    for i in range(len(seq)-1, -1, -1):
      yield seq[i]

if not hasattr(builtins, 'next'):
  # Support Python 2.5 and earlier.
  def next(obj):
    # Delegate to the pre-2.6 iterator protocol method.
    return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
  """Base AST node."""

  def __init__(self, start, end):
    # start/end delimit the span of source tokens this node was parsed from.
    self.start = start
    self.end = end

  def IsDeclaration(self):
    """Returns bool if this node is a declaration."""
    return False

  def IsDefinition(self):
    """Returns bool if this node is a definition."""
    return False

  def IsExportable(self):
    """Returns bool if this node exportable from a header file."""
    return False

  def Requires(self, node):
    """Does this AST node require the definition of the node passed in?"""
    return False

  def XXX__str__(self):
    # NOTE: deliberately NOT named __str__ so subclasses provide their own;
    # rename to __str__ to enable generic printing via _StringHelper.
    return self._StringHelper(self.__class__.__name__, '')

  def _StringHelper(self, name, suffix):
    # Include the start/end positions only when utils.DEBUG is on.
    if not utils.DEBUG:
      return '%s(%s)' % (name, suffix)
    return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)

  def __repr__(self):
    return str(self)
class Define(Node):
  """A '#define NAME definition' preprocessor directive."""

  def __init__(self, start, end, name, definition):
    Node.__init__(self, start, end)
    self.name = name
    self.definition = definition

  def __str__(self):
    value = '%s %s' % (self.name, self.definition)
    return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
  """An '#include <...>' or '#include "..."' directive."""

  def __init__(self, start, end, filename, system):
    Node.__init__(self, start, end)
    self.filename = filename
    # True for <...> (system) includes, False for "..." includes.
    self.system = system

  def __str__(self):
    # Render with the bracket style matching the include kind.
    fmt = '"%s"'
    if self.system:
      fmt = '<%s>'
    return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
  """A 'goto label;' statement."""

  def __init__(self, start, end, label):
    Node.__init__(self, start, end)
    self.label = label

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
  """A generic expression statement, stored as a token sequence."""

  def __init__(self, start, end, expr):
    Node.__init__(self, start, end)
    self.expr = expr

  def Requires(self, node):
    # TODO(nnorwitz): impl.
    return False

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
  """A 'return <expression>;' statement."""
  pass
class Delete(Expr):
  """A 'delete <expression>;' statement."""
  pass
class Friend(Expr):
  """A 'friend ...;' declaration inside a class."""

  def __init__(self, start, end, expr, namespace):
    Expr.__init__(self, start, end, expr)
    # Copy so later namespace-stack mutations don't affect this node.
    self.namespace = namespace[:]
class Using(Node):
  """A 'using ...;' declaration or directive."""

  def __init__(self, start, end, names):
    Node.__init__(self, start, end)
    self.names = names

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
  """A single function/method parameter."""

  def __init__(self, start, end, name, parameter_type, default):
    Node.__init__(self, start, end)
    self.name = name
    self.type = parameter_type
    # Default value as a token sequence (falsy when none was given).
    self.default = default

  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    return self.type.name == node.name

  def __str__(self):
    name = str(self.type)
    suffix = '%s %s' % (name, self.name)
    if self.default:
      suffix += ' = ' + ''.join([d.name for d in self.default])
    return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
  """Base for declarations that carry a name and an enclosing namespace."""

  def __init__(self, start, end, name, namespace):
    Node.__init__(self, start, end)
    self.name = name
    # Copy so later namespace-stack mutations don't affect this node.
    self.namespace = namespace[:]

  def FullName(self):
    """Returns the fully qualified 'ns::...::name' form of this declaration."""
    prefix = ''
    if self.namespace and self.namespace[-1]:
      prefix = '::'.join(self.namespace) + '::'
    return prefix + self.name

  def _TypeStringHelper(self, suffix):
    # Like _StringHelper, but also appends the namespace information.
    if self.namespace:
      names = [n or '<anonymous>' for n in self.namespace]
      suffix += ' in ' + '::'.join(names)
    return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
  """A variable declaration, optionally with an initial value."""

  def __init__(self, start, end, name, var_type, initial_value, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.type = var_type
    self.initial_value = initial_value

  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    return self.type.name == node.name

  def ToString(self):
    """Return a string that tries to reconstitute the variable decl."""
    suffix = '%s %s' % (self.type, self.name)
    if self.initial_value:
      suffix += ' = ' + self.initial_value
    return suffix

  def __str__(self):
    return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
  """A 'typedef <alias> <name>;' declaration."""

  def __init__(self, start, end, name, alias, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    # The aliased type, kept as a token sequence.
    self.alias = alias

  def IsDefinition(self):
    return True

  def IsExportable(self):
    return True

  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    name = node.name
    for token in self.alias:
      if token is not None and name == token.name:
        return True
    return False

  def __str__(self):
    suffix = '%s, %s' % (self.name, self.alias)
    return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
  """Base for nested type definitions (union, enum) that carry a field list."""

  def __init__(self, start, end, name, fields, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    self.fields = fields

  def IsDefinition(self):
    return True

  def IsExportable(self):
    return True

  def __str__(self):
    suffix = '%s, {%s}' % (self.name, self.fields)
    return self._TypeStringHelper(suffix)
class Union(_NestedType):
  """A union definition."""
  pass
class Enum(_NestedType):
  """An enum definition."""
  pass
class Class(_GenericDeclaration):
  """A class declaration or definition."""

  def __init__(self, start, end, name, bases, templated_types, body, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    # bases and body are both None for a forward declaration.
    self.bases = bases
    self.body = body
    self.templated_types = templated_types

  def IsDeclaration(self):
    return self.bases is None and self.body is None

  def IsDefinition(self):
    return not self.IsDeclaration()

  def IsExportable(self):
    return not self.IsDeclaration()

  def Requires(self, node):
    # TODO(nnorwitz): handle namespaces, etc.
    if self.bases:
      for token_list in self.bases:
        # TODO(nnorwitz): bases are tokens, do name comparision.
        for token in token_list:
          if token.name == node.name:
            return True
    # TODO(nnorwitz): search in body too.
    return False

  def __str__(self):
    name = self.name
    if self.templated_types:
      name += '<%s>' % self.templated_types
    suffix = '%s, %s, %s' % (name, self.bases, self.body)
    return self._TypeStringHelper(suffix)
class Struct(Class):
  """A struct; identical to Class apart from the keyword."""
  pass
class Function(_GenericDeclaration):
  """A free function declaration or definition."""

  def __init__(self, start, end, name, return_type, parameters,
               modifiers, templated_types, body, namespace):
    _GenericDeclaration.__init__(self, start, end, name, namespace)
    converter = TypeConverter(namespace)
    self.return_type = converter.CreateReturnType(return_type)
    self.parameters = converter.ToParameters(parameters)
    # Bitmask of FUNCTION_* flags (const, virtual, ctor, ...).
    self.modifiers = modifiers
    self.body = body
    self.templated_types = templated_types

  def IsDeclaration(self):
    return self.body is None

  def IsDefinition(self):
    return self.body is not None

  def IsExportable(self):
    if self.return_type and 'static' in self.return_type.modifiers:
      return False
    # A None namespace entry marks an anonymous namespace: not exportable.
    return None not in self.namespace

  def Requires(self, node):
    if self.parameters:
      # TODO(nnorwitz): parameters are tokens, do name comparision.
      for p in self.parameters:
        if p.name == node.name:
          return True
    # TODO(nnorwitz): search in body too.
    return False

  def __str__(self):
    # TODO(nnorwitz): add templated_types.
    suffix = ('%s %s(%s), 0x%02x, %s' %
              (self.return_type, self.name, self.parameters,
               self.modifiers, self.body))
    return self._TypeStringHelper(suffix)
class Method(Function):
  """A function declared inside a class."""

  def __init__(self, start, end, name, in_class, return_type, parameters,
               modifiers, templated_types, body, namespace):
    Function.__init__(self, start, end, name, return_type, parameters,
                      modifiers, templated_types, body, namespace)
    # TODO(nnorwitz): in_class could also be a namespace which can
    # mess up finding functions properly.
    self.in_class = in_class
class Type(_GenericDeclaration):
  """Type used for any variable (eg class, primitive, struct, etc)."""

  def __init__(self, start, end, name, templated_types, modifiers,
               reference, pointer, array):
    """
    Args:
      name: str name of main type
      templated_types: [Class (Type?)] template type info between <>
      modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
      reference, pointer, array: bools
    """
    _GenericDeclaration.__init__(self, start, end, name, [])
    self.templated_types = templated_types
    # Modifier-only type (e.g. 'unsigned'): promote the last modifier to
    # serve as the type name.
    if not name and modifiers:
      self.name = modifiers.pop()
    self.modifiers = modifiers
    self.reference = reference
    self.pointer = pointer
    self.array = array

  def __str__(self):
    prefix = ''
    if self.modifiers:
      prefix = ' '.join(self.modifiers) + ' '
    name = str(self.name)
    if self.templated_types:
      name += '<%s>' % self.templated_types
    suffix = prefix + name
    if self.reference:
      suffix += '&'
    if self.pointer:
      suffix += '*'
    if self.array:
      suffix += '[]'
    return self._TypeStringHelper(suffix)

  # By definition, Is* are always False. A Type can only exist in
  # some sort of variable declaration, parameter, or return value.
  def IsDeclaration(self):
    return False

  def IsDefinition(self):
    return False

  def IsExportable(self):
    return False
class TypeConverter(object):
def __init__(self, namespace_stack):
  # The parser's current namespace stack, consulted when building
  # declarations from token sequences.
  self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
def ToType(self, tokens):
    """Convert [Token,...] to [Class(...), ] useful for base classes.
    For example, code like class Foo : public Bar<x, y> { ... };
    the "Bar<x, y>" portion gets converted to an AST.

    Returns:
      [Class(...), ...]
    """
    result = []
    name_tokens = []
    reference = pointer = array = False

    def AddType(templated_types):
        # Partition tokens into name and modifier tokens.
        names = []
        modifiers = []
        for t in name_tokens:
            if keywords.IsKeyword(t.name):
                modifiers.append(t.name)
            else:
                names.append(t.name)
        name = ''.join(names)
        # reference/pointer/array are read from the enclosing scope at
        # call time, so they reflect the tokens seen since the last reset.
        result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                           name, templated_types, modifiers,
                           reference, pointer, array))
        # Clear in place: name_tokens is closed over, not rebindable here.
        del name_tokens[:]

    i = 0
    end = len(tokens)
    while i < end:
        token = tokens[i]
        if token.name == '<':
            new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
            AddType(self.ToType(new_tokens))
            # If there is a comma after the template, we need to consume
            # that here otherwise it becomes part of the name.
            i = new_end
            reference = pointer = array = False
        elif token.name == ',':
            AddType([])
            reference = pointer = array = False
        elif token.name == '*':
            pointer = True
        elif token.name == '&':
            reference = True
        elif token.name == '[':
            pointer = True
        elif token.name == ']':
            pass
        else:
            name_tokens.append(token)
        i += 1

    if name_tokens:
        # No '<' in the tokens, just a simple name and no template.
        AddType([])
    return result
def DeclarationToParts(self, parts, needs_name_removed):
    """Split declaration tokens into their components.

    Args:
      parts: [Token, ...] for one declaration.
      needs_name_removed: when true, the declared name (and any '=default'
        tail) is stripped from parts before the type is analyzed.

    Returns:
      (name, type_name, templated_types, modifiers, default, other_tokens)
    """
    name = None
    default = []
    if needs_name_removed:
        # Handle default (initial) values properly.
        for i, t in enumerate(parts):
            if t.name == '=':
                default = parts[i+1:]
                name = parts[i-1].name
                # Array declarator: the name sits before the '[...]'.
                if name == ']' and parts[i-2].name == '[':
                    name = parts[i-3].name
                    i -= 1
                parts = parts[:i-1]
                break
        else:
            # No '=' found: the trailing NAME token is the declared name.
            if parts[-1].token_type == tokenize.NAME:
                name = parts.pop().name
            else:
                # TODO(nnorwitz): this is a hack that happens for code like
                # Register(Foo<T>); where it thinks this is a function call
                # but it's actually a declaration.
                name = '???'
    modifiers = []
    type_name = []
    other_tokens = []
    templated_types = []
    i = 0
    end = len(parts)
    while i < end:
        p = parts[i]
        if keywords.IsKeyword(p.name):
            modifiers.append(p.name)
        elif p.name == '<':
            templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
            templated_types = self.ToType(templated_tokens)
            i = new_end - 1
            # Don't add a spurious :: to data members being initialized.
            next_index = i + 1
            if next_index < end and parts[next_index].name == '::':
                i += 1
        elif p.name in ('[', ']', '='):
            # These are handled elsewhere.
            other_tokens.append(p)
        elif p.name not in ('*', '&', '>'):
            # Ensure that names have a space between them.
            if (type_name and type_name[-1].token_type == tokenize.NAME and
                p.token_type == tokenize.NAME):
                type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
            type_name.append(p)
        else:
            other_tokens.append(p)
        i += 1
    type_name = ''.join([t.name for t in type_name])
    return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
    """Convert parameter-list tokens into [Parameter, ...]."""
    if not tokens:
        return []

    result = []
    name = type_name = ''
    type_modifiers = []
    pointer = reference = array = False
    first_token = None
    default = []

    def AddParameter():
        # Builds one Parameter from the state accumulated since the last
        # ','; reads the enclosing locals at call time.
        if default:
            del default[0]  # Remove flag.
        end = type_modifiers[-1].end
        parts = self.DeclarationToParts(type_modifiers, True)
        (name, type_name, templated_types, modifiers,
         unused_default, unused_other_tokens) = parts
        parameter_type = Type(first_token.start, first_token.end,
                              type_name, templated_types, modifiers,
                              reference, pointer, array)
        p = Parameter(first_token.start, end, name,
                      parameter_type, default)
        result.append(p)

    template_count = 0
    for s in tokens:
        if not first_token:
            first_token = s
        # Track template nesting so commas inside <...> don't split params.
        if s.name == '<':
            template_count += 1
        elif s.name == '>':
            template_count -= 1
        if template_count > 0:
            type_modifiers.append(s)
            continue

        if s.name == ',':
            AddParameter()
            name = type_name = ''
            type_modifiers = []
            pointer = reference = array = False
            first_token = None
            default = []
        elif s.name == '*':
            pointer = True
        elif s.name == '&':
            reference = True
        elif s.name == '[':
            array = True
        elif s.name == ']':
            pass  # Just don't add to type_modifiers.
        elif s.name == '=':
            # Got a default value.  Add any value (None) as a flag.
            default.append(None)
        elif default:
            default.append(s)
        else:
            type_modifiers.append(s)
    AddParameter()
    return result
def CreateReturnType(self, return_type_seq):
    """Build a Type node from return-type tokens; None for an empty seq."""
    if not return_type_seq:
        return None
    first = return_type_seq[0]
    last = return_type_seq[-1]
    (_, name, templated_types, modifiers,
     default, other_tokens) = self.DeclarationToParts(return_type_seq, False)
    # The leftover syntax tokens tell us whether this is a ref/ptr/array.
    other_names = set(t.name for t in other_tokens)
    return Type(first.start, last.end, name, templated_types, modifiers,
                '&' in other_names, '*' in other_names, '[' in other_names)
def GetTemplateIndices(self, names):
    """Return (start, end) bounding the '<'...'>' span in names.

    Args:
      names: list of strings.

    Returns:
      start: index of the first '<'; end: one past the last '>'.
    """
    start = names.index('<')
    end = len(names) - 1
    while end > 0 and names[end] != '>':
        end -= 1
    return start, end + 1
class AstBuilder(object):
    """Parses a token stream into a sequence of AST nodes (via Generate)."""

    def __init__(self, token_stream, filename, in_class='', visibility=None,
                 namespace_stack=None):
        """
        Args:
          token_stream: iterator yielding tokenize.Token objects.
          filename: name of the file being parsed, used in error messages.
          in_class: fully qualified name of the enclosing class, '' at
            file scope (None is also accepted).
          visibility: current access specifier while parsing a class body.
          namespace_stack: list of enclosing namespace names; it is copied,
            so the caller's list is never mutated.  Fix: the original used
            a mutable default argument (namespace_stack=[]), harmless only
            because it was copied; a None sentinel is the safe idiom.
        """
        self.tokens = token_stream
        self.filename = filename
        # TODO(nnorwitz): use a better data structure (deque) for the queue.
        # Switching directions of the "queue" improved perf by about 25%.
        # Using a deque should be even better since we access from both sides.
        self.token_queue = []
        self.namespace_stack = list(namespace_stack) if namespace_stack else []
        self.in_class = in_class
        if in_class is None:
            self.in_class_name_only = None
        else:
            self.in_class_name_only = in_class.split('::')[-1]
        self.visibility = visibility
        self.in_function = False
        self.current_token = None
        # Keep the state whether we are currently handling a typedef or not.
        self._handling_typedef = False

        self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
    """Report a parse problem to stderr without aborting the parse."""
    # Show the most recent queued tokens in the order they will be consumed.
    recent = list(reversed(self.token_queue[-20:]))
    sys.stderr.write('Got %s in %s @ %s %s\n' %
                     (msg, self.filename, token, recent))
def Generate(self):
    """Yield top-level AST nodes parsed from the token stream."""
    while 1:
        token = self._GetNextToken()
        if not token:
            break

        # Get the next token.
        self.current_token = token

        # Dispatch on the next token type.
        if token.token_type == _INTERNAL_TOKEN:
            # Internal tokens mark namespace boundaries, not real syntax.
            if token.name == _NAMESPACE_POP:
                self.namespace_stack.pop()
            continue

        try:
            result = self._GenerateOne(token)
            if result is not None:
                yield result
        except:
            # Report context before propagating so the caller sees where
            # the parse failed.
            self.HandleError('exception', token)
            raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
                    ref_pointer_name_seq, templated_types, value=None):
    """Build a VariableDeclaration node positioned at pos_token."""
    seq = ref_pointer_name_seq
    # The declarator token names tell us whether this is a ref/ptr/array.
    is_ref = '&' in seq
    is_ptr = '*' in seq
    is_arr = '[' in seq
    var_type = Type(pos_token.start, pos_token.end, type_name,
                    templated_types, type_modifiers,
                    is_ref, is_ptr, is_arr)
    return VariableDeclaration(pos_token.start, pos_token.end,
                               name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
    """Parse one construct that starts with token.

    Returns an AST node for the construct, or None when it produces
    nothing (consumed, skipped, or unsupported).
    """
    if token.token_type == tokenize.NAME:
        if (keywords.IsKeyword(token.name) and
            not keywords.IsBuiltinType(token.name)):
            # Non-builtin keywords dispatch to a handle_<keyword> method.
            method = getattr(self, 'handle_' + token.name)
            return method()
        elif token.name == self.in_class_name_only:
            # The token name is the same as the class, must be a ctor if
            # there is a paren.  Otherwise, it's the return type.
            # Peek ahead to get the next token to figure out which.
            next = self._GetNextToken()
            self._AddBackToken(next)
            if next.token_type == tokenize.SYNTAX and next.name == '(':
                return self._GetMethod([token], FUNCTION_CTOR, None, True)
            # Fall through--handle like any other method.

        # Handle data or function declaration/definition.
        syntax = tokenize.SYNTAX
        temp_tokens, last_token = \
            self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
        temp_tokens.insert(0, token)
        if last_token.name == '(':
            # If there is an assignment before the paren,
            # this is an expression, not a method.
            expr = bool([e for e in temp_tokens if e.name == '='])
            if expr:
                new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.append(last_token)
                temp_tokens.extend(new_temp)
                last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)

        if last_token.name == '[':
            # Handle array, this isn't a method, unless it's an operator.
            # TODO(nnorwitz): keep the size somewhere.
            # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
            temp_tokens.append(last_token)
            if temp_tokens[-2].name == 'operator':
                temp_tokens.append(self._GetNextToken())
            else:
                temp_tokens2, last_token = \
                    self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
                temp_tokens.extend(temp_tokens2)

        if last_token.name == ';':
            # Handle data, this isn't a method.
            parts = self.converter.DeclarationToParts(temp_tokens, True)
            (name, type_name, templated_types, modifiers, default,
             unused_other_tokens) = parts

            t0 = temp_tokens[0]
            names = [t.name for t in temp_tokens]
            if templated_types:
                # Drop the template argument tokens from the declarator seq.
                start, end = self.converter.GetTemplateIndices(names)
                names = names[:start] + names[end:]
            default = ''.join([t.name for t in default])
            return self._CreateVariable(t0, name, type_name, modifiers,
                                        names, templated_types, default)
        if last_token.name == '{':
            self._AddBackTokens(temp_tokens[1:])
            self._AddBackToken(last_token)
            method_name = temp_tokens[0].name
            method = getattr(self, 'handle_' + method_name, None)
            if not method:
                # Must be declaring a variable.
                # TODO(nnorwitz): handle the declaration.
                return None
            return method()
        return self._GetMethod(temp_tokens, 0, None, False)
    elif token.token_type == tokenize.SYNTAX:
        if token.name == '~' and self.in_class:
            # Must be a dtor (probably not in method body).
            token = self._GetNextToken()
            # self.in_class can contain A::Name, but the dtor will only
            # be Name.  Make sure to compare against the right value.
            if (token.token_type == tokenize.NAME and
                token.name == self.in_class_name_only):
                return self._GetMethod([token], FUNCTION_DTOR, None, True)
        # TODO(nnorwitz): handle a lot more syntax.
    elif token.token_type == tokenize.PREPROCESSOR:
        # TODO(nnorwitz): handle more preprocessor directives.
        # token starts with a #, so remove it and strip whitespace.
        name = token.name[1:].lstrip()
        if name.startswith('include'):
            # Remove "include".
            name = name[7:].strip()
            assert name
            # Handle #include \<newline> "header-on-second-line.h".
            if name.startswith('\\'):
                name = name[1:].strip()
            assert name[0] in '<"', token
            assert name[-1] in '>"', token
            system = name[0] == '<'
            filename = name[1:-1]
            return Include(token.start, token.end, filename, system)
        if name.startswith('define'):
            # Remove "define".
            name = name[6:].strip()
            assert name
            value = ''
            # First whitespace separates the macro name from its value.
            for i, c in enumerate(name):
                if c.isspace():
                    value = name[i:].lstrip()
                    name = name[:i]
                    break
            return Define(token.start, token.end, name, value)
        if name.startswith('if') and name[2:3].isspace():
            condition = name[3:].strip()
            if condition.startswith('0') or condition.startswith('(0)'):
                self._SkipIf0Blocks()
    return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
    """Consume and return tokens up to (not including) the expected one."""
    return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]

def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
    """Consume tokens until one matches; returns (tokens, matching_token)."""
    last_token = self._GetNextToken()
    tokens = []
    while (last_token.token_type != expected_token_type or
           last_token.name not in expected_tokens):
        tokens.append(last_token)
        last_token = self._GetNextToken()
    return tokens, last_token

# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necesary.
def _IgnoreUpTo(self, token_type, token):
    # Consume and discard tokens through the given terminator.
    unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
    """Consume tokens through the #endif matching the current '#if 0'."""
    depth = 1
    while depth:
        token = self._GetNextToken()
        # Only preprocessor directives affect the nesting depth.
        if token.token_type != tokenize.PREPROCESSOR:
            continue
        directive = token.name[1:].lstrip()
        if directive.startswith('endif'):
            depth -= 1
        elif directive.startswith('if'):
            depth += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
    """Generator yielding tokens up to AND INCLUDING the balanced close_paren."""
    if GetNextToken is None:
        GetNextToken = self._GetNextToken
    # Assumes the current token is open_paren and we will consume
    # and return up to the close_paren.
    count = 1
    token = GetNextToken()
    while 1:
        if token.token_type == tokenize.SYNTAX:
            if token.name == open_paren:
                count += 1
            elif token.name == close_paren:
                count -= 1
                if count == 0:
                    break
        yield token
        token = GetNextToken()
    # Yield the closing token too; callers typically strip it with del.
    yield token

def _GetParameters(self):
    """Return a generator over parameter-list tokens incl. trailing ')'."""
    return self._GetMatchingChar('(', ')')

def GetScope(self):
    """Return a generator over '{...}' scope tokens incl. trailing '}'."""
    return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
    """Return the next token, preferring pushed-back tokens over the stream."""
    if self.token_queue:
        return self.token_queue.pop()
    return next(self.tokens)

def _AddBackToken(self, token):
    """Push one token back so _GetNextToken() returns it next."""
    if token.whence == tokenize.WHENCE_STREAM:
        token.whence = tokenize.WHENCE_QUEUE
        self.token_queue.insert(0, token)
    else:
        assert token.whence == tokenize.WHENCE_QUEUE, token
        self.token_queue.append(token)

def _AddBackTokens(self, tokens):
    """Push back a sequence of tokens, preserving their original order."""
    if tokens:
        if tokens[-1].whence == tokenize.WHENCE_STREAM:
            for token in tokens:
                token.whence = tokenize.WHENCE_QUEUE
            self.token_queue[:0] = reversed(tokens)
        else:
            assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
            self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
    """Returns ([tokens], next_token_info).

    Collects a (possibly qualified, possibly templated) identifier from
    the stream, or from seq when given.
    """
    GetNextToken = self._GetNextToken
    if seq is not None:
        it = iter(seq)
        GetNextToken = lambda: next(it)
    next_token = GetNextToken()
    tokens = []
    last_token_was_name = False
    while (next_token.token_type == tokenize.NAME or
           (next_token.token_type == tokenize.SYNTAX and
            next_token.name in ('::', '<'))):
        # Two NAMEs in a row means the identifier should terminate.
        # It's probably some sort of variable declaration.
        if last_token_was_name and next_token.token_type == tokenize.NAME:
            break
        last_token_was_name = next_token.token_type == tokenize.NAME
        tokens.append(next_token)
        # Handle templated names.
        if next_token.name == '<':
            tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
            last_token_was_name = True
        next_token = GetNextToken()
    return tokens, next_token
def GetMethod(self, modifiers, templated_types):
    """Parse a method whose return type and name precede the next '('."""
    preamble = self._GetTokensUpTo(tokenize.SYNTAX, '(')
    assert len(preamble) >= 1
    return self._GetMethod(preamble, modifiers, templated_types, False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
               get_paren):
    """Parse the remainder of a function/method declaration or definition.

    Args:
      return_type_and_name: tokens for the return type and name (name last).
      modifiers: FUNCTION_* bitmask accumulated so far.
      templated_types: template parameter info, or None.
      get_paren: when True, the '(' has not been consumed yet
        (ctor/dtor path).

    Returns a Function or Method node, or a VariableDeclaration for
    function-pointer data members that merely look like methods.
    """
    template_portion = None
    if get_paren:
        token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        if token.name == '<':
            # Handle templatized dtors.
            template_portion = [token]
            template_portion.extend(self._GetMatchingChar('<', '>'))
            token = self._GetNextToken()
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '(', token

    name = return_type_and_name.pop()
    # Handle templatized ctors.
    if name.name == '>':
        index = 1
        while return_type_and_name[index].name != '<':
            index += 1
        template_portion = return_type_and_name[index:] + [name]
        del return_type_and_name[index:]
        name = return_type_and_name.pop()
    elif name.name == ']':
        # operator[]: rebuild the name from the 'operator' '[' ']' tokens.
        rt = return_type_and_name
        assert rt[-1].name == '[', return_type_and_name
        assert rt[-2].name == 'operator', return_type_and_name
        name_seq = return_type_and_name[-2:]
        del return_type_and_name[-2:]
        name = tokenize.Token(tokenize.NAME, 'operator[]',
                              name_seq[0].start, name.end)
        # Get the open paren so _GetParameters() below works.
        unused_open_paren = self._GetNextToken()

    # TODO(nnorwitz): store template_portion.
    return_type = return_type_and_name
    # indices is just a token used for its start/end source positions.
    indices = name
    if return_type:
        indices = return_type[0]

    # Force ctor for templatized ctors.
    if name.name == self.in_class and not modifiers:
        modifiers |= FUNCTION_CTOR
    parameters = list(self._GetParameters())
    del parameters[-1]  # Remove trailing ')'.

    # Handling operator() is especially weird.
    if name.name == 'operator' and not parameters:
        token = self._GetNextToken()
        assert token.name == '(', token
        parameters = list(self._GetParameters())
        del parameters[-1]  # Remove trailing ')'.

    token = self._GetNextToken()
    # Consume trailing modifiers (const, throw(...), __attribute__, macros).
    while token.token_type == tokenize.NAME:
        modifier_token = token
        token = self._GetNextToken()
        if modifier_token.name == 'const':
            modifiers |= FUNCTION_CONST
        elif modifier_token.name == '__attribute__':
            # TODO(nnorwitz): handle more __attribute__ details.
            modifiers |= FUNCTION_ATTRIBUTE
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == 'throw':
            modifiers |= FUNCTION_THROW
            assert token.name == '(', token
            # Consume everything between the (parens).
            unused_tokens = list(self._GetMatchingChar('(', ')'))
            token = self._GetNextToken()
        elif modifier_token.name == modifier_token.name.upper():
            # HACK(nnorwitz):  assume that all upper-case names
            # are some macro we aren't expanding.
            modifiers |= FUNCTION_UNKNOWN_ANNOTATION
        else:
            self.HandleError('unexpected token', modifier_token)

    assert token.token_type == tokenize.SYNTAX, token
    # Handle ctor initializers.
    if token.name == ':':
        # TODO(nnorwitz): anything else to handle for initializer list?
        while token.name != ';' and token.name != '{':
            token = self._GetNextToken()

    # Handle pointer to functions that are really data but look
    # like method declarations.
    if token.name == '(':
        if parameters[0].name == '*':
            # name contains the return type.
            name = parameters.pop()
            # parameters contains the name of the data.
            modifiers = [p.name for p in parameters]
            # Already at the ( to open the parameter list.
            function_parameters = list(self._GetMatchingChar('(', ')'))
            del function_parameters[-1]  # Remove trailing ')'.
            # TODO(nnorwitz): store the function_parameters.
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == ';', token
            return self._CreateVariable(indices, name.name, indices.name,
                                        modifiers, '', None)
        # At this point, we got something like:
        #  return_type (type::*name_)(params);
        # This is a data member called name_ that is a function pointer.
        # With this code: void (sq_type::*field_)(string&);
        # We get: name=void return_type=[] parameters=sq_type ... field_
        # TODO(nnorwitz): is return_type always empty?
        # TODO(nnorwitz): this isn't even close to being correct.
        # Just put in something so we don't crash and can move on.
        real_name = parameters[-1]
        modifiers = [p.name for p in self._GetParameters()]
        del modifiers[-1]  # Remove trailing ')'.
        return self._CreateVariable(indices, real_name.name, indices.name,
                                    modifiers, '', None)

    if token.name == '{':
        body = list(self.GetScope())
        del body[-1]  # Remove trailing '}'.
    else:
        body = None
        if token.name == '=':
            # Pure virtual: '= 0'.
            token = self._GetNextToken()
            assert token.token_type == tokenize.CONSTANT, token
            assert token.name == '0', token
            modifiers |= FUNCTION_PURE_VIRTUAL
            token = self._GetNextToken()

        if token.name == '[':
            # TODO(nnorwitz): store tokens and improve parsing.
            # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
            tokens = list(self._GetMatchingChar('[', ']'))
            token = self._GetNextToken()

        assert token.name == ';', (token, return_type_and_name, parameters)

    # Looks like we got a method, not a function.
    if len(return_type) > 2 and return_type[-1].name == '::':
        return_type, in_class = \
            self._GetReturnTypeAndClassName(return_type)
        return Method(indices.start, indices.end, name.name, in_class,
                      return_type, parameters, modifiers, templated_types,
                      body, self.namespace_stack)
    return Function(indices.start, indices.end, name.name, return_type,
                    parameters, modifiers, templated_types, body,
                    self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
    """Split tokens into (return_type tokens, class_name tokens)."""
    # Splitting the return type from the class name in a method
    # can be tricky.  For example, Return::Type::Is::Hard::To::Find().
    # Where is the return type and where is the class name?
    # The heuristic used is to pull the last name as the class name.
    # This includes all the templated type info.
    # TODO(nnorwitz): if there is only One name like in the
    # example above, punt and assume the last bit is the class name.

    # Ignore a :: prefix, if exists so we can find the first real name.
    i = 0
    if token_seq[0].name == '::':
        i = 1
    # Ignore a :: suffix, if exists.
    end = len(token_seq) - 1
    if token_seq[end-1].name == '::':
        end -= 1

    # Make a copy of the sequence so we can append a sentinel
    # value. This is required for GetName will has to have some
    # terminating condition beyond the last name.
    seq_copy = token_seq[i:end]
    seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
    names = []
    while i < end:
        # Iterate through the sequence parsing out each name.
        new_name, next = self.GetName(seq_copy[i:])
        assert new_name, 'Got empty new_name, next=%s' % next
        # We got a pointer or ref.  Add it to the name.
        if next and next.token_type == tokenize.SYNTAX:
            new_name.append(next)
        names.append(new_name)
        i += len(new_name)

    # Now that we have the names, it's time to undo what we did.

    # Remove the sentinel value.
    names[-1].pop()
    # Flatten the token sequence for the return type.
    return_type = [e for seq in names[:-1] for e in seq]
    # The class name is the last name.
    class_name = names[-1]
    return return_type, class_name
# Builtin type keywords produce no AST node by themselves; the rest of
# the declaration is parsed by the NAME branch of _GenerateOne.
def handle_bool(self):
    pass

def handle_char(self):
    pass

def handle_int(self):
    pass

def handle_long(self):
    pass

def handle_short(self):
    pass

def handle_double(self):
    pass

def handle_float(self):
    pass

def handle_void(self):
    pass

def handle_wchar_t(self):
    pass

def handle_unsigned(self):
    pass

def handle_signed(self):
    pass
def _GetNestedType(self, ctor):
    """Parse a nested type (enum/union); ctor builds the AST node."""
    name = None
    name_tokens, token = self.GetName()
    if name_tokens:
        name = ''.join([t.name for t in name_tokens])

    # Handle forward declarations.
    if token.token_type == tokenize.SYNTAX and token.name == ';':
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)

    if token.token_type == tokenize.NAME and self._handling_typedef:
        self._AddBackToken(token)
        return ctor(token.start, token.end, name, None,
                    self.namespace_stack)

    # Must be the type declaration.
    fields = list(self._GetMatchingChar('{', '}'))
    del fields[-1]  # Remove trailing '}'.
    if token.token_type == tokenize.SYNTAX and token.name == '{':
        next = self._GetNextToken()
        new_type = ctor(token.start, token.end, name, fields,
                        self.namespace_stack)
        # A name means this is an anonymous type and the name
        # is the variable declaration.
        if next.token_type != tokenize.NAME:
            return new_type
        name = new_type
        token = next

    # Must be variable declaration using the type prefixed with keyword.
    assert token.token_type == tokenize.NAME, token
    return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
    """Parse a declaration after the 'struct' keyword has been consumed."""
    # Special case the handling typedef/aliasing of structs here.
    # It would be a pain to handle in the class code.
    name_tokens, var_token = self.GetName()
    if name_tokens:
        next_token = self._GetNextToken()
        is_syntax = (var_token.token_type == tokenize.SYNTAX and
                     var_token.name[0] in '*&')
        is_variable = (var_token.token_type == tokenize.NAME and
                       next_token.name == ';')
        variable = var_token
        if is_syntax and not is_variable:
            variable = next_token
            temp = self._GetNextToken()
            if temp.token_type == tokenize.SYNTAX and temp.name == '(':
                # Handle methods declared to return a struct.
                t0 = name_tokens[0]
                # Fabricate the 'struct' token the stream already consumed.
                struct = tokenize.Token(tokenize.NAME, 'struct',
                                        t0.start-7, t0.start-2)
                type_and_name = [struct]
                type_and_name.extend(name_tokens)
                type_and_name.extend((var_token, next_token))
                return self._GetMethod(type_and_name, 0, None, False)
            assert temp.name == ';', (temp, name_tokens, var_token)
        if is_syntax or (is_variable and not self._handling_typedef):
            modifiers = ['struct']
            type_name = ''.join([t.name for t in name_tokens])
            position = name_tokens[0]
            return self._CreateVariable(position, variable.name, type_name,
                                        modifiers, var_token.name, None)
        name_tokens.extend((var_token, next_token))
        self._AddBackTokens(name_tokens)
    else:
        self._AddBackToken(var_token)
    return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
    return self._GetNestedType(Union)

def handle_enum(self):
    return self._GetNestedType(Enum)

# Storage-class and qualifier keywords below need no AST representation.
def handle_auto(self):
    # TODO(nnorwitz): warn about using auto?  Probably not since it
    # will be reclaimed and useful for C++0x.
    pass

def handle_register(self):
    pass

def handle_const(self):
    pass

def handle_inline(self):
    pass

def handle_extern(self):
    pass

def handle_static(self):
    pass
def handle_virtual(self):
    """Parse the declaration following 'virtual' (must be a method)."""
    # What follows must be a method.
    token = token2 = self._GetNextToken()
    if token.name == 'inline':
        # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
        token2 = self._GetNextToken()
    if token2.token_type == tokenize.SYNTAX and token2.name == '~':
        return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
    assert token.token_type == tokenize.NAME or token.name == '::', token
    return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
    return_type_and_name.insert(0, token)
    if token2 is not token:
        # Restore the token that followed 'inline'.
        return_type_and_name.insert(1, token2)
    return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                           None, False)
def handle_volatile(self):
    pass

def handle_mutable(self):
    pass

# Access specifiers just update the builder's current visibility.
def handle_public(self):
    assert self.in_class
    self.visibility = VISIBILITY_PUBLIC

def handle_protected(self):
    assert self.in_class
    self.visibility = VISIBILITY_PROTECTED

def handle_private(self):
    assert self.in_class
    self.visibility = VISIBILITY_PRIVATE

def handle_friend(self):
    """Record a friend declaration (all tokens up to ';')."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    t0 = tokens[0]
    return Friend(t0.start, t0.end, tokens, self.namespace_stack)

def handle_static_cast(self):
    pass

def handle_const_cast(self):
    pass

def handle_dynamic_cast(self):
    pass

def handle_reinterpret_cast(self):
    pass

def handle_new(self):
    pass

def handle_delete(self):
    """Record a delete expression (all tokens up to ';')."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
    """Parse a typedef; returns a Typedef node."""
    token = self._GetNextToken()
    if (token.token_type == tokenize.NAME and
        keywords.IsKeyword(token.name)):
        # Token must be struct/enum/union/class.
        method = getattr(self, 'handle_' + token.name)
        self._handling_typedef = True
        tokens = [method()]
        self._handling_typedef = False
    else:
        tokens = [token]

    # Get the remainder of the typedef up to the semi-colon.
    tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))

    # TODO(nnorwitz): clean all this up.
    assert tokens
    name = tokens.pop()
    # indices is only used for its start/end source positions.
    indices = name
    if tokens:
        indices = tokens[0]
    if not indices:
        indices = token
    if name.name == ')':
        # HACK(nnorwitz): Handle pointers to functions "properly".
        if (len(tokens) >= 4 and
            tokens[1].name == '(' and tokens[2].name == '*'):
            tokens.append(name)
            name = tokens[3]
    elif name.name == ']':
        # HACK(nnorwitz): Handle arrays properly.
        if len(tokens) >= 2:
            tokens.append(name)
            name = tokens[1]
    new_type = tokens
    if tokens and isinstance(tokens[0], tokenize.Token):
        new_type = self.converter.ToType(tokens)[0]
    return Typedef(indices.start, indices.end, name.name,
                   new_type, self.namespace_stack)
def handle_typeid(self):
    pass  # Not needed yet.

def handle_typename(self):
    pass  # Not needed yet.

def _GetTemplatedTypes(self):
    """Parse template parameters inside '<...>'.

    Returns a dict mapping parameter name -> (type_name_token, default),
    where either tuple element may be None.
    """
    result = {}
    tokens = list(self._GetMatchingChar('<', '>'))
    len_tokens = len(tokens) - 1  # Ignore trailing '>'.
    i = 0
    while i < len_tokens:
        key = tokens[i].name
        i += 1
        if keywords.IsKeyword(key) or key == ',':
            continue
        type_name = default = None
        if i < len_tokens:
            i += 1
            if tokens[i-1].name == '=':
                assert i < len_tokens, '%s %s' % (i, tokens)
                default, unused_next_token = self.GetName(tokens[i:])
                i += len(default)
            else:
                if tokens[i-1].name != ',':
                    # We got something like: Type variable.
                    # Re-adjust the key (variable) and type_name (Type).
                    key = tokens[i-1].name
                    type_name = tokens[i-2]
        result[key] = (type_name, default)
    return result
def handle_template(self):
    """Parse a 'template<...>' prefix and the declaration it modifies."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX, token
    assert token.name == '<', token
    templated_types = self._GetTemplatedTypes()
    # TODO(nnorwitz): for now, just ignore the template params.
    token = self._GetNextToken()
    if token.token_type == tokenize.NAME:
        if token.name == 'class':
            return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
        elif token.name == 'struct':
            return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
        elif token.name == 'friend':
            return self.handle_friend()
    self._AddBackToken(token)
    # Not a type: peek ahead to distinguish a function from a variable.
    tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
    tokens.append(last)
    self._AddBackTokens(tokens)
    if last.name == '(':
        return self.GetMethod(FUNCTION_NONE, templated_types)
    # Must be a variable definition.
    return None

def handle_true(self):
    pass  # Nothing to do.

def handle_false(self):
    pass  # Nothing to do.

def handle_asm(self):
    pass  # Not needed yet.
def handle_class(self):
    return self._GetClass(Class, VISIBILITY_PRIVATE, None)

def _GetBases(self):
    """Parse a base-class list; returns (bases, token) with token == '{'."""
    # Get base classes.
    bases = []
    while 1:
        token = self._GetNextToken()
        assert token.token_type == tokenize.NAME, token
        # TODO(nnorwitz): store kind of inheritance...maybe.
        if token.name not in ('public', 'protected', 'private'):
            # If inheritance type is not specified, it is private.
            # Just put the token back so we can form a name.
            # TODO(nnorwitz): it would be good to warn about this.
            self._AddBackToken(token)
        else:
            # Check for virtual inheritance.
            token = self._GetNextToken()
            if token.name != 'virtual':
                self._AddBackToken(token)
            else:
                # TODO(nnorwitz): store that we got virtual for this base.
                pass
        base, next_token = self.GetName()
        bases_ast = self.converter.ToType(base)
        assert len(bases_ast) == 1, bases_ast
        bases.append(bases_ast[0])
        assert next_token.token_type == tokenize.SYNTAX, next_token
        if next_token.name == '{':
            token = next_token
            break
        # Support multiple inheritance.
        assert next_token.name == ',', next_token
    return bases, token
def _GetClass(self, class_type, visibility, templated_types):
    """Parse a class/struct after its keyword; returns an AST node.

    class_type is the node constructor (Class or Struct); visibility is
    the default member visibility for that kind.
    """
    class_name = None
    class_token = self._GetNextToken()
    if class_token.token_type != tokenize.NAME:
        assert class_token.token_type == tokenize.SYNTAX, class_token
        token = class_token
    else:
        # Skip any macro (e.g. storage class specifiers) after the
        # 'class' keyword.
        next_token = self._GetNextToken()
        if next_token.token_type == tokenize.NAME:
            self._AddBackToken(next_token)
        else:
            self._AddBackTokens([class_token, next_token])
        name_tokens, token = self.GetName()
        class_name = ''.join([t.name for t in name_tokens])
    bases = None
    if token.token_type == tokenize.SYNTAX:
        if token.name == ';':
            # Forward declaration.
            return class_type(class_token.start, class_token.end,
                              class_name, None, templated_types, None,
                              self.namespace_stack)
        if token.name in '*&':
            # Inline forward declaration.  Could be method or data.
            name_token = self._GetNextToken()
            next_token = self._GetNextToken()
            if next_token.name == ';':
                # Handle data
                modifiers = ['class']
                return self._CreateVariable(class_token, name_token.name,
                                            class_name,
                                            modifiers, token.name, None)
            else:
                # Assume this is a method.
                tokens = (class_token, token, name_token, next_token)
                self._AddBackTokens(tokens)
                return self.GetMethod(FUNCTION_NONE, None)
        if token.name == ':':
            bases, token = self._GetBases()

    body = None
    if token.token_type == tokenize.SYNTAX and token.name == '{':
        assert token.token_type == tokenize.SYNTAX, token
        assert token.name == '{', token

        # Parse the class body with a nested builder scoped to this class.
        ast = AstBuilder(self.GetScope(), self.filename, class_name,
                         visibility, self.namespace_stack)
        body = list(ast.Generate())

        if not self._handling_typedef:
            token = self._GetNextToken()
            if token.token_type != tokenize.NAME:
                assert token.token_type == tokenize.SYNTAX, token
                assert token.name == ';', token
            else:
                # 'class X {...} var;' declares a variable of the new type.
                new_class = class_type(class_token.start, class_token.end,
                                       class_name, bases, None,
                                       body, self.namespace_stack)

                modifiers = []
                return self._CreateVariable(class_token,
                                            token.name, new_class,
                                            modifiers, token.name, None)
    else:
        if not self._handling_typedef:
            self.HandleError('non-typedef token', token)
        self._AddBackToken(token)

    return class_type(class_token.start, class_token.end, class_name,
                      bases, None, body, self.namespace_stack)
def handle_namespace(self):
    """Parse a namespace; pushes its name and queues an internal pop token."""
    token = self._GetNextToken()
    # Support anonymous namespaces.
    name = None
    if token.token_type == tokenize.NAME:
        name = token.name
        token = self._GetNextToken()
    self.namespace_stack.append(name)
    assert token.token_type == tokenize.SYNTAX, token
    # Create an internal token that denotes when the namespace is complete.
    internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                    None, None)
    internal_token.whence = token.whence
    if token.name == '=':
        # TODO(nnorwitz): handle aliasing namespaces.
        name, next_token = self.GetName()
        assert next_token.name == ';', next_token
        self._AddBackToken(internal_token)
    else:
        assert token.name == '{', token
        tokens = list(self.GetScope())
        # Replace the trailing } with the internal namespace pop token.
        tokens[-1] = internal_token
        # Handle namespace with nothing in it.
        self._AddBackTokens(tokens)
    return None
def handle_using(self):
    """Handle a 'using' declaration/directive; consumes tokens up to ';'.

    Returns:
      Using node spanning the consumed tokens.
    """
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert tokens
    return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
    """Handle 'explicit'; parses the following declaration as a constructor."""
    assert self.in_class
    # Nothing much to do.
    # TODO(nnorwitz): maybe verify the method name == class name.
    # This must be a ctor.
    return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
    """'this' requires no handling; it is ignored."""
    pass  # Nothing to do.

def handle_operator(self):
    """Handle 'operator'; currently a no-op."""
    # Pull off the next token(s?) and make that part of the method name.
    pass

def handle_sizeof(self):
    """'sizeof' requires no handling; it is ignored."""
    pass

def handle_case(self):
    """'case' labels require no handling; they are ignored."""
    pass

def handle_switch(self):
    """'switch' statements require no handling; they are ignored."""
    pass
def handle_default(self):
    """Handle 'default' in a switch; consumes and verifies the trailing ':'."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX
    assert token.name == ':'
def handle_if(self):
    """'if' statements require no handling; they are ignored."""
    pass

def handle_else(self):
    """'else' clauses require no handling; they are ignored."""
    pass
def handle_return(self):
    """Handle a 'return' statement.

    Returns:
      Return node; its expression is None for a bare 'return;'.
    """
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    if not tokens:
        # 'return;' with no expression: anchor the node on the current token.
        return Return(self.current_token.start, self.current_token.end, None)
    return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
    """Handle a 'goto' statement; expects exactly one label token before ';'."""
    tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
    assert len(tokens) == 1, str(tokens)
    return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
    """No-op handler for 'try'."""
    pass  # Not needed yet.

def handle_catch(self):
    """No-op handler for 'catch'."""
    pass  # Not needed yet.

def handle_throw(self):
    """No-op handler for 'throw'."""
    pass  # Not needed yet.

def handle_while(self):
    """'while' loops require no handling; they are ignored."""
    pass

def handle_do(self):
    """'do' loops require no handling; they are ignored."""
    pass

def handle_for(self):
    """'for' loops require no handling; they are ignored."""
    pass
def handle_break(self):
    """Handle 'break'; discards tokens through the terminating ';'."""
    self._IgnoreUpTo(tokenize.SYNTAX, ';')

def handle_continue(self):
    """Handle 'continue'; discards tokens through the terminating ';'."""
    self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
    """Construct an AstBuilder for the given C++ source text.

    Args:
      source: 'C++ source code'
      filename: 'file1'

    Returns:
      AstBuilder ready to Generate() AST nodes.
    """
    token_stream = tokenize.GetTokens(source)
    return AstBuilder(token_stream, filename)
def PrintIndentifiers(filename, should_print):
    """Prints all identifiers for a C++ source file.

    (The misspelled name is kept for backward compatibility with callers.)

    Args:
      filename: 'file1'
      should_print: predicate with signature: bool Function(token)
    """
    source = utils.ReadFile(filename, False)
    if source is None:
        sys.stderr.write('Unable to find: %s\n' % filename)
        return

    #print('Processing %s' % actual_filename)
    builder = BuilderFromSource(source, filename)
    try:
        for node in builder.Generate():
            if should_print(node):
                print(node.name)
    except KeyboardInterrupt:
        return
    except Exception:
        # FIX: was a bare 'except:', which also swallows SystemExit.
        # Parsing remains best-effort: files the parser cannot handle are
        # silently skipped.
        pass
def PrintAllIndentifiers(filenames, should_print):
    """Prints all identifiers for each C++ source file in filenames.

    Args:
      filenames: ['file1', 'file2', ...]
      should_print: predicate with signature: bool Function(token)
    """
    for filename in filenames:
        PrintIndentifiers(filename, should_print)
def main(argv):
    """Parse each C++ file named on the command line; dump its AST when DEBUG.

    Args:
      argv: command line arguments; argv[1:] are the files to process.
    """
    for filename in argv[1:]:
        source = utils.ReadFile(filename)
        if source is None:
            continue

        print('Processing %s' % filename)
        builder = BuilderFromSource(source, filename)
        try:
            entire_ast = filter(None, builder.Generate())
        except KeyboardInterrupt:
            return
        except Exception:
            # FIX: was a bare 'except:', which also swallows SystemExit.
            # Already printed a warning, print the traceback and continue.
            traceback.print_exc()
        else:
            if utils.DEBUG:
                for ast in entire_ast:
                    print(ast)


if __name__ == '__main__':
    main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
    # Nominal support for Python 2.3.
    from sets import Set as set

# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')

# C++0x string prefixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))

# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'

# Where the token originated from.  This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
    """Data container to represent a C++ token.

    Tokens can be identifiers, syntax char(s), constants, or
    pre-processor directives.

    start contains the index of the first char of the token in the source.
    end contains the index of the last char of the token in the source.
    """

    def __init__(self, token_type, name, start, end):
        self.token_type = token_type
        self.name = name
        self.start = start
        self.end = end
        self.whence = WHENCE_STREAM

    def __str__(self):
        if utils.DEBUG:
            # Verbose form including the source span.
            return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
        return 'Token(%r)' % self.name

    __repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
    """Returns a sequence of Tokens.

    Args:
      source: string of C++ source code.

    Yields:
      Token that represents the next token in the source.
    """
    # Cache various valid character sets for speed.
    valid_identifier_chars = VALID_IDENTIFIER_CHARS
    hex_digits = HEX_DIGITS
    int_or_float_digits = INT_OR_FLOAT_DIGITS
    int_or_float_digits2 = int_or_float_digits | set('.')

    # Only ignore errors while in a #if 0 block.
    ignore_errors = False
    count_ifs = 0

    i = 0
    end = len(source)
    while i < end:
        # Skip whitespace.
        while i < end and source[i].isspace():
            i += 1
        if i >= end:
            return

        token_type = UNKNOWN
        start = i
        c = source[i]
        if c.isalpha() or c == '_':              # Find a string token.
            token_type = NAME
            while source[i] in valid_identifier_chars:
                i += 1
            # String and character constants can look like a name if
            # they are something like L"".
            if (source[i] == "'" and (i - start) == 1 and
                source[start:i] in 'uUL'):
                # u, U, and L are valid C++0x character prefixes.
                token_type = CONSTANT
                i = _GetChar(source, start, i)
            # NOTE(review): this branch tests for "'" but _STR_PREFIXES are
            # *string* prefixes and _GetString scans for '"' -- looks like the
            # condition should be source[i] == '"'; confirm against upstream.
            elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
                token_type = CONSTANT
                i = _GetString(source, start, i)
        elif c == '/' and source[i+1] == '/':    # Find // comments.
            i = source.find('\n', i)
            if i == -1:  # Handle EOF.
                i = end
            continue
        elif c == '/' and source[i+1] == '*':    # Find /* comments. */
            i = source.find('*/', i) + 2
            continue
        elif c in ':+-<>&|*=':                   # : or :: (plus other chars).
            token_type = SYNTAX
            i += 1
            new_ch = source[i]
            # Accept doubled (::, ++, <<), '->', and '=' suffixed (+=, <=)
            # two-char operators.
            if new_ch == c:
                i += 1
            elif c == '-' and new_ch == '>':
                i += 1
            elif new_ch == '=':
                i += 1
        elif c in '()[]{}~!?^%;/.,':             # Handle single char tokens.
            token_type = SYNTAX
            i += 1
            if c == '.' and source[i].isdigit():
                # A leading '.' followed by a digit is a float constant.
                token_type = CONSTANT
                i += 1
                while source[i] in int_or_float_digits:
                    i += 1
                # Handle float suffixes.
                for suffix in ('l', 'f'):
                    if suffix == source[i:i+1].lower():
                        i += 1
                        break
        elif c.isdigit():                        # Find integer.
            token_type = CONSTANT
            if c == '0' and source[i+1] in 'xX':
                # Handle hex digits.
                i += 2
                while source[i] in hex_digits:
                    i += 1
            else:
                while source[i] in int_or_float_digits2:
                    i += 1
            # Handle integer (and float) suffixes.
            for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
                size = len(suffix)
                if suffix == source[i:i+size].lower():
                    i += size
                    break
        elif c == '"':                           # Find string.
            token_type = CONSTANT
            i = _GetString(source, start, i)
        elif c == "'":                           # Find char.
            token_type = CONSTANT
            i = _GetChar(source, start, i)
        elif c == '#':                           # Find pre-processor command.
            token_type = PREPROCESSOR
            got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
            if got_if:
                count_ifs += 1
            elif source[i:i+6] == '#endif':
                count_ifs -= 1
                if count_ifs == 0:
                    ignore_errors = False

            # TODO(nnorwitz): handle preprocessor statements (\ continuations).
            while 1:
                i1 = source.find('\n', i)
                i2 = source.find('//', i)
                i3 = source.find('/*', i)
                i4 = source.find('"', i)
                # NOTE(nnorwitz): doesn't handle comments in #define macros.
                # Get the first important symbol (newline, comment, EOF/end).
                i = min([x for x in (i1, i2, i3, i4, end) if x != -1])

                # Handle #include "dir//foo.h" properly.
                if source[i] == '"':
                    i = source.find('"', i+1) + 1
                    assert i > 0
                    continue
                # Keep going if end of the line and the line ends with \.
                if not (i == i1 and source[i-1] == '\\'):
                    if got_if:
                        # '#if 0' (or '#if (0)') starts a block whose bogus
                        # contents should not raise tokenizer errors.
                        condition = source[start+4:i].lstrip()
                        if (condition.startswith('0') or
                            condition.startswith('(0)')):
                            ignore_errors = True
                    break
                i += 1
        elif c == '\\':                          # Handle \ in code.
            # This is different from the pre-processor \ handling.
            i += 1
            continue
        elif ignore_errors:
            # The tokenizer seems to be in pretty good shape.  This
            # raise is conditionally disabled so that bogus code
            # in an #if 0 block can be handled.  Since we will ignore
            # it anyways, this is probably fine.  So disable the
            # exception and return the bogus char.
            i += 1
        else:
            sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
                             ('?', i, c, source[i-10:i+10]))
            raise RuntimeError('unexpected token')

        if i <= 0:
            print('Invalid index, exiting now.')
            return
        yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
    # Defined under the guard so importing this module has no side effects.
    def main(argv):
        """Driver mostly for testing purposes."""
        for filename in argv[1:]:
            source = utils.ReadFile(filename)
            if source is None:
                continue
            for token in GetTokens(source):
                print('%-12s: %s' % (token.token_type, token.name))
                # print('\r%6.2f%%' % (100.0 * index / token.end),)
            sys.stdout.write('\n')

    main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Stack traces are disabled here, so the expected XML below contains no
# trace section (the template expands to the empty string).
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
    STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
    STACK_TRACE_TEMPLATE = ""
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" name="AllTests">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
    """Unit test for Google Test's XML output functionality."""

    def testNonEmptyXmlOutput(self):
        """
        Runs a test program that generates a non-empty XML output, and
        tests that the XML output is expected.
        """
        self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)

    def testEmptyXmlOutput(self):
        """
        Runs a test program that generates an empty XML output, and
        tests that the XML output is expected.
        """
        self._TestXmlOutput("gtest_no_test_unittest",
                            EXPECTED_EMPTY_XML, 0)

    def testDefaultOutputFile(self):
        """
        Confirms that Google Test produces an XML output file with the expected
        default name if no name is explicitly specified.
        """
        output_file = os.path.join(gtest_test_utils.GetTempDir(),
                                   GTEST_DEFAULT_OUTPUT_FILE)
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
            "gtest_no_test_unittest")
        try:
            os.remove(output_file)
        except OSError as e:
            # FIX: 'except OSError, e' is Python-2-only syntax; 'as' works on
            # Python 2.6+ and 3.x.  A missing file is fine; anything else is not.
            if e.errno != errno.ENOENT:
                raise

        p = gtest_test_utils.Subprocess(
            [gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
            working_dir=gtest_test_utils.GetTempDir())
        self.assert_(p.exited)
        self.assertEquals(0, p.exit_code)
        self.assert_(os.path.isfile(output_file))

    def testSuppressedXmlOutput(self):
        """
        Tests that no XML file is generated if the default XML listener is
        shut down before RUN_ALL_TESTS is invoked.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                GTEST_PROGRAM_NAME + "out.xml")
        if os.path.isfile(xml_path):
            os.remove(xml_path)

        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
            GTEST_PROGRAM_NAME)

        command = [gtest_prog_path,
                   "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
                   "--shut_down_xml"]
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            # BUGFIX: this message used the undefined name 'gtest_prog_name'
            # (a parameter of _TestXmlOutput, not of this method), which would
            # have raised NameError instead of reporting the failure.
            self.assert_(False,
                         "%s was killed by signal %d" % (GTEST_PROGRAM_NAME,
                                                         p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(1, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              "the expected exit code %s."
                              % (command, p.exit_code, 1))

        self.assert_(not os.path.isfile(xml_path))

    def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
        """
        Asserts that the XML document generated by running the program
        gtest_prog_name matches expected_xml, a string containing another
        XML document.  Furthermore, the program's exit code must be
        expected_exit_code.
        """
        xml_path = os.path.join(gtest_test_utils.GetTempDir(),
                                gtest_prog_name + "out.xml")
        gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
            gtest_prog_name)

        command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
        p = gtest_test_utils.Subprocess(command)
        if p.terminated_by_signal:
            self.assert_(False,
                         "%s was killed by signal %d" % (gtest_prog_name,
                                                         p.signal))
        else:
            self.assert_(p.exited)
            self.assertEquals(expected_exit_code, p.exit_code,
                              "'%s' exited with code %s, which doesn't match "
                              "the expected exit code %s."
                              % (command, p.exit_code, expected_exit_code))

        expected = minidom.parseString(expected_xml)
        actual = minidom.parse(xml_path)
        self.NormalizeXml(actual.documentElement)
        self.AssertEquivalentNodes(expected.documentElement,
                                   actual.documentElement)
        expected.unlink()
        actual.unlink()
if __name__ == '__main__':
    # Pin GTEST_STACK_TRACE_DEPTH before running the tests.
    os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
    gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
    """Runs gtest_list_tests_unittest_ and returns the list of tests printed.

    Args:
      args: extra command line flags to pass to the test binary.
    """
    # NOTE(review): capture_stderr=False presumably leaves stderr uncaptured
    # so only stdout lands in .output -- confirm in gtest_test_utils.
    return gtest_test_utils.Subprocess([EXE_PATH] + args,
                                       capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
    """Tests using the --gtest_list_tests flag to list all tests."""

    def RunAndVerify(self, flag_value, expected_output, other_flag):
        """Runs gtest_list_tests_unittest_ and verifies that it prints
        the correct tests.

        Args:
          flag_value: value of the --gtest_list_tests flag;
                      None if the flag should not be present.
          expected_output: the expected output after running command;
          other_flag: a different flag to be passed to command
                      along with gtest_list_tests;
                      None if the flag should not be present.
        """
        # Map flag_value to the actual argument plus a human-readable
        # description used in failure messages.
        if flag_value is None:
            flag, flag_expression = '', 'not set'
        elif flag_value == '0':
            flag, flag_expression = '--%s=0' % LIST_TESTS_FLAG, '0'
        else:
            flag, flag_expression = '--%s' % LIST_TESTS_FLAG, '1'

        args = [flag]
        if other_flag is not None:
            args.append(other_flag)

        output = Run(args)
        msg = ('when %s is %s, the output of "%s" is "%s".' %
               (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))

        if expected_output is None:
            # With listing disabled the binary must NOT print the full list.
            self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
        else:
            self.assert_(output == expected_output, msg)

    def testDefaultBehavior(self):
        """Tests the behavior of the default mode."""
        self.RunAndVerify(flag_value=None,
                          expected_output=None,
                          other_flag=None)

    def testFlag(self):
        """Tests using the --gtest_list_tests flag."""
        self.RunAndVerify(flag_value='0',
                          expected_output=None,
                          other_flag=None)
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag=None)

    def testOverrideNonFilterFlags(self):
        """Tests that --gtest_list_tests overrides the non-filter flags."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_NO_FILTER,
                          other_flag='--gtest_break_on_failure')

    def testWithFilterFlags(self):
        """Tests that --gtest_list_tests takes into account the
        --gtest_filter flag."""
        self.RunAndVerify(flag_value='1',
                          expected_output=EXPECTED_OUTPUT_FILTER_FOO,
                          other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os

import gtest_test_utils

# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

# SEH support is detected from the binary's own test list: it registers
# ThrowsSehException only where SEH is available.
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
    BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output

EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""
    def TestSehExceptions(self, test_output):
      # Shared helper: verifies that an SEH exception raised in every phase
      # of a test's lifecycle (fixture ctor/dtor, SetUpTestCase(),
      # TearDownTestCase(), SetUp(), TearDown(), and the test body) was
      # caught by Google Test and reported in 'test_output'.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)
    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      # Output of the binary compiled with C++ exceptions enabled.
      self.TestSehExceptions(EX_BINARY_OUTPUT)
    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      # Output of the binary compiled with C++ exceptions disabled.
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
    * C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exception thrown affect the remainder of the test work flow in the
      expected manner.

  Each test method greps EX_BINARY_OUTPUT (the output of the binary built
  with C++ exceptions enabled) for the expected diagnostic strings.
  """
  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInFixtureDestructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s destructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)
  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # (Local name fixed: was misspelled FITLER_OUT_SEH_TESTS_FLAG.)
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG]).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError when 'condition' evaluates false; else no-op."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Asserts that two values are equal.

  Args:
    expected: the expected value.
    actual:   the value actually observed.

  Raises:
    AssertionError: if expected != actual (after printing both values).
  """
  if expected != actual:
    # Parenthesized print: identical output under Python 2 (parentheses
    # around a single expression are a no-op) and valid under Python 3.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output.

  The command is expected to exit with code 1 and to mention
  'InitGoogleTest' (the missing-initialization warning) in its output.
  """
  # Verifies that 'command' exits with code 1.
  p = gtest_test_utils.Subprocess(command)
  Assert(p.exited)
  AssertEq(1, p.exit_code)
  # The uninitialized binary must tell the user to call InitGoogleTest().
  Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  """Tests a Google Test binary that was run without InitGoogleTest()."""
  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Updates the module-level 'environ' dict.

  Stores 'value' under 'env_var'; a value of None instead removes the
  entry (if present).
  """
  if value is None:
    if env_var in environ:
      del environ[env_var]
  else:
    environ[env_var] = value
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  # The module-level 'environ' copy is passed so SetEnvVar() changes reach
  # the child without mutating this process's own environment.
  process = gtest_test_utils.Subprocess(command, env=environ)
  if process.terminated_by_signal:
    return 1
  return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """
  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Human-readable description of the env var state for the message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # '--flag=0' disables the mode, bare '--flag' enables it, and the
    # empty string omits the flag from the command line entirely.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    # Run() returns 1 iff the child process was killed by a signal.
    has_seg_fault = Run(command)
    # Unset the variable so it doesn't leak into subsequent tests.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)
  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)
  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)
  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)
  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
  # Unhandled SEH exceptions would pop up interactive dialogs, so this
  # combination is only exercised on Windows.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets environment variable 'env_var' (upper-cased) to 'value'.

  A 'value' of None unsets the variable instead (if it is set).
  """
  key = env_var.upper()
  if value is None:
    if key in os.environ:
      del os.environ[key]
  else:
    os.environ[key] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0.

  Args:
    command: the command to run, as a sys.argv-style list.

  Returns:
    True iff the child process exited normally with status 0.
  """
  # Parenthesized print: identical output under Python 2 (parentheses
  # around a single expression are a no-op) and valid under Python 3.
  print('Running "%s". . .' % ' '.join(command))
  p = gtest_test_utils.Subprocess(command)
  return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""
  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
                     (The original docstring wrongly said
                     GTEST_BREAK_ON_FAILURE; this method sets
                     THROW_ON_FAILURE via SetEnvVar.)
      flag_value:    value of the --gtest_throw_on_failure flag;
                     None if the flag should not be present.
      should_fail:   True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    # Human-readable description of the env var state for the message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # '--flag=0' disables the mode, bare '--flag' enables it, and the
    # empty string omits the flag from the command line entirely.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    # Run() returns True on a zero exit code, so 'failed' is the negation.
    failed = not Run(command)
    # Unset the variable so it doesn't leak into subsequent tests.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)
  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)
  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent.

  For each flag in _flag_map, the precedence is:
  command-line '--flag=value' (removed from argv in place) >
  upper-cased environment variable > default already in _flag_map.
  """
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        # NOTE: argv is mutated so unittest.main() never sees gtest flags.
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag.

  Raises KeyError if 'flag' is not a key of _flag_map
  ('source_dir' or 'build_dir').
  """
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
_temp_dir = None
def _RemoveTempDir():
  # atexit hook: best-effort removal of the shared temporary directory
  # created by GetTempDir(); errors (e.g. already removed) are ignored.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files.

  The directory is created lazily, shared by the whole process, and
  deleted at interpreter exit by the _RemoveTempDir() atexit hook.
  """
  global _temp_dir
  if not _temp_dir:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # On Windows/Cygwin the binaries carry an '.exe' suffix that callers
  # are not expected to include in 'executable_name'.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    print >> sys.stderr, message
    sys.exit(1)
  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  # On Windows, os.system() already returns the argument to exit()
  # directly, and os.WEXITSTATUS() doesn't exist there.
  if os.name == 'nt':
    return exit_code
  # On Unix the raw status must be unpacked.  A process terminated by a
  # signal never reached exit(), hence -1.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal  True iff the child process has been terminated
                              by a signal.
        signal                Signal that terminated the child process.
        exited                True iff the child process exited normally.
        exit_code             The code with which the child process exited.
        output                Child process's stdout and stderr output
                              combined in a string.
    """
    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()
      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        # NOTE(review): deleting while iterating relies on Python 2
        # os.environ semantics; this branch is only reachable on Python < 2.4.
        for key in dest:
          del dest[key]
        dest.update(src)
      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        # Closing stdin signals the child that no input is coming.
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode: negated signal number on signal death.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # A negative return code means the child was killed by a signal
    # (subprocess.Popen.returncode convention).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""
  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main(). Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]
  # _test_module is the unittest module (see the top of this file).
  _test_module.main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: this line used to read "IS_WINDOWS = os.name = 'nt'" -- a chained
# assignment that set os.name to 'nt' on EVERY platform and made IS_WINDOWS
# the truthy string 'nt'.  '==' is the intended comparison.
IS_WINDOWS = os.name == 'nt'
# Environment variable and command line flag controlling colored output.
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    if env_var in os.environ:
      del os.environ[env_var]
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code.

  Sets TERM and GTEST_COLOR in the environment, optionally appends the
  --gtest_color flag, and reports a truthy value when the child did not
  exit cleanly with status 0.
  """
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  args = []
  if color_flag is not None:
    args.append('--%s=%s' % (COLOR_FLAG, color_flag))
  process = gtest_test_utils.Subprocess([COMMAND] + args)
  if not process.exited:
    return True
  return process.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Verifies Google Test's decision to use colors for each configuration."""
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    # Terminals without color support should get plain output...
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    # ...while color-capable terminals should get colored output.
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color even on a dumb terminal.
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
    self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag should always win over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    # Unrecognized values are treated as 'no'.
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  # Delegates to the shared gtest test-runner utilities.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection; os.uname() exists only on POSIX systems.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
# Path to the built gtest_help_test_ binary under test.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Malformed spellings of --gtest_list_tests that should also trigger help.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probes the binary's test list to learn whether death tests are compiled in.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def _AssertContainsIff(self, condition, substring, output):
    """Asserts that substring is in output exactly when condition holds."""
    if condition:
      self.assert_(substring in output, output)
    else:
      self.assert_(substring not in output, output)

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.

    The right message must be printed and the tests must
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # Result streaming is advertised only on Linux.
    self._AssertContainsIff(IS_LINUX, STREAM_RESULT_TO_FLAG, output)
    # Death-test flags show up only when supported and not on Windows.
    self._AssertContainsIff(SUPPORTS_DEATH_TESTS and not IS_WINDOWS,
                            DEATH_TEST_STYLE_FLAG, output)

  def TestNonHelpFlag(self, flag):
    """Verifies that a non-help flag makes the binary run tests, not print help.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for wrong_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(wrong_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
  # Delegates to the shared gtest test-runner utilities.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # The trailing "" component gives the path a trailing separator, which
    # tells Google Test to treat it as an output *directory*, not a file.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Removes leftover XML files and the output dir, ignoring OS errors."""
    for test_name in (GTEST_OUTPUT_1_TEST, GTEST_OUTPUT_2_TEST):
      try:
        os.remove(os.path.join(self.output_dir_, test_name + ".xml"))
      except os.error:
        pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs the named binary and compares its XML output to expected_xml."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_.  To account for this possibillity, we
    # allow both names in the following code.  We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    candidates = [os.path.join(self.output_dir_, test_name + ".xml"),
                  os.path.join(self.output_dir_, 'lt-' + test_name + ".xml")]
    existing = [f for f in candidates if os.path.isfile(f)]
    self.assert_(existing, candidates[0])
    expected = minidom.parseString(expected_xml)
    actual = minidom.parse(existing[0])
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # NOTE(review): presumably zeroing the stack-trace depth keeps traces out
  # of the XML so it matches the golden documents above -- confirm.
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
# Env var controlling whether gtest catches exceptions thrown by tests.
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# Each command below is an (extra_env_dict, argv) pair for the subprocess.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])
# The golden file lives next to this script's sources.
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Returns s with Windows (\\r\\n) and Mac (\\r) line endings turned into \\n."""
  # A single pass: \r followed by an optional \n collapses to one \n.
  return re.sub(r'\r\n?', '\n', s)
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE_NAME:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
  return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Collapses each stack trace in output to 'Stack trace: (omitted)'."""
  # Non-greedy (*?) so every trace stops at its own trailing blank line.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Strips every stack trace (through its trailing blank line) from output."""
  # Non-greedy (*?) so each match ends at the first blank line after a trace.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Replaces elapsed-time figures like '(12 ms' with '(? ms' everywhere."""
  time_re = re.compile(r'\(\d+ ms')
  return time_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as just 'unsigned'; a plain
  # string replacement matches the original literal-pattern re.sub exactly.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Non-Windows output is already in canonical form.
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Replaces concrete test counts in gtest output with '?' placeholders."""
  # Order matters: the specific patterns must run before the generic
  # "N tests." one, mirroring the original substitution sequence.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop each whole RUN ... FAILED/OK span of a matching test, then
  # drop any stray single lines that still mention the pattern.
  span_re = r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
      pattern, pattern)
  test_output = re.sub(span_re, '', test_output)
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Apply each normalization pass in order; the sequence matches the
  # original chained calls.
  for transform in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = transform(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run with the current environment augmented by the command's extras.
  environ = os.environ.copy()
  environ.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=environ).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd:  The shell command. A 2-tuple where element 0 is a dict of extra
              environment variables to set, and element 1 is a string with
              the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Work on a copy so the caller's dict is untouched, and disable
  # exception pop-ups on Windows.
  augmented_env = dict(extra_env)
  augmented_env[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((augmented_env, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR, COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED, COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(command) for command in commands)
# Probes the binary's test list once to detect compiled-in capabilities.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Hard-coded off: the golden file never contains real stack traces.
SUPPORTS_STACK_TRACES = False
# The golden file can only be (re)generated by a fully-featured build.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  # Compares the combined output of several gtest invocations against a
  # checked-in golden file, after normalizing run-dependent details.

  def RemoveUnsupportedTests(self, test_output):
    # Strips from the golden text the output of tests that this build of
    # the binary cannot run, so the comparison stays meaningful.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()
    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      # Partial builds: drop unsupported tests and exact counts before
      # comparing, since the golden file assumes a full-featured binary.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # --gengolden regenerates the golden file instead of running the tests.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path to the gtest_env_var_test_ binary under test.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
# Private copy of the environment; SetEnvVar below mutates this copy (not
# os.environ) and it is passed to every subprocess.
environ = os.environ.copy()
def AssertEq(expected, actual):
  # Poor man's assertEqual: prints both values, then raises on mismatch.
  # (Python 2 print statements; this helper predates unittest's messages.)
  if expected != actual:
    print 'Expected: %s' % (expected,)
    print ' Actual: %s' % (actual,)
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None.

  Operates on the module-level `environ` copy, never on os.environ itself.
  """
  if value is None:
    # Removing an absent variable is a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  if flag is None:
    args = [COMMAND]
  else:
    args = [COMMAND, flag]
  # Pass the (possibly modified) private environment to the child.
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  var_name = 'GTEST_' + flag.upper()
  # With the env var set, the binary must report test_val...
  SetEnvVar(var_name, test_val)
  AssertEq(test_val, GetFlag(flag))
  # ...and with it unset, the built-in default.
  SetEnvVar(var_name, None)
  AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Checks that each GTEST_* env var drives its corresponding flag."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # (flag, value set via GTEST_<FLAG>, expected default) triples.
    cases = [
        ('break_on_failure', '1', '0'),
        ('color', 'yes', 'auto'),
        ('filter', 'FooTest.Bar', '*'),
        ('output', 'xml:tmp/foo.xml', ''),
        ('print_time', '0', '1'),
        ('repeat', '999', '1'),
        ('throw_on_failure', '1', '0'),
        ('death_test_style', 'threadsafe', 'fast'),
        ('catch_exceptions', '0', '1'),
    ]
    if IS_LINUX:
      cases.append(('death_test_use_fork', '1', '0'))
      cases.append(('stack_trace_depth', '0', '100'))
    for flag, test_val, default_val in cases:
      TestFlag(flag, test_val, default_val)
if __name__ == '__main__':
  # Delegates to the shared gtest test-runner utilities.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
# Name of the gtest flag that selects the output format/path.
GTEST_OUTPUT_FLAG = "--gtest_output"
# File name gtest uses when no explicit XML output path is given.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which needs only be
convertible to a floating-point number and any attribute named
"type_param" which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
"attribute numbers differ in element " + actual_node.tagName)
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
"expected attribute %s not found in element %s" %
(expected_attr.name, actual_node.tagName))
self.assertEquals(expected_attr.value, actual_attr.value,
" values of attribute %s in element %s differ" %
(expected_attr.name, actual_node.tagName))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
"number of child elements differ in element " + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
"testsuites": "name",
"testsuite": "name",
"testcase": "name",
"failure": "message",
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
"Encountered unknown element <%s>" % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if "detail" not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children["detail"] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children["detail"].nodeValue += child.nodeValue
else:
self.fail("Encountered unexpected node type %d" % child.nodeType)
return children
def NormalizeXml(self, element):
  """
  Normalizes Google Test's XML output to eliminate references to transient
  information that may change from run to run.

  *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
     elements is replaced with a single asterisk, if it contains
     only digit characters.
  *  The "type_param" attribute of <testcase> elements is replaced with a
     single asterisk (if it is non-empty) as it is the type name returned
     by the compiler and is platform dependent.
  *  The line number reported in the first line of the "message"
     attribute of <failure> elements is replaced with a single asterisk.
  *  The directory names in file paths are removed.
  *  The stack traces are removed.
  """
  if element.tagName == "failure":
    for node in element.childNodes:
      if node.nodeType == Node.CDATA_SECTION_NODE:
        # Removes the source line number, then the actual stack trace.
        without_line_number = re.sub(r"^.*[/\\](.*:)\d+\n", "\\1*\n",
                                     node.nodeValue)
        node.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*", "",
                                without_line_number)
  elif element.tagName in ("testsuites", "testsuite", "testcase"):
    time_attr = element.getAttributeNode("time")
    time_attr.value = re.sub(r"^\d+(\.\d+)?$", "*", time_attr.value)
    type_param_attr = element.getAttributeNode("type_param")
    if type_param_attr and type_param_attr.value:
      type_param_attr.value = "*"
  # Recursively normalizes all child elements.
  for node in element.childNodes:
    if node.nodeType == Node.ELEMENT_NODE:
      self.NormalizeXml(node)
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# The filter used when computing FILTERED_TESTS below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Caches of the test lists seen under the various flag/environment
# combinations.  They start empty and are each populated at most once by
# CalculateTestLists().
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test run disabled tests too."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns the --gtest_filter flag selecting 'test_filter'."""
  return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
  """Returns the --gtest_repeat flag for iterating the tests n times."""
  return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
  """Returns the flag that turns on test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns the --gtest_random_seed flag carrying seed n."""
  return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output.

  Args:
    extra_env: a map of environment variables added on top of os.environ
    args: command line flags passed to the binary
  """
  child_env = os.environ.copy()
  child_env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=child_env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.  A line of dashes in the output starts a new
    iteration; every other non-blank line names one test.
  """
  test_iterations = []
  for raw_line in RunAndReturnOutput(extra_env, args).split('\n'):
    stripped = raw_line.strip()
    if raw_line.startswith('----'):
      current_iteration = []
      test_iterations.append(current_iteration)
    elif stripped:
      # 'TestCaseName.TestName'
      current_iteration.append(stripped)
  return test_iterations
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    A list of the distinct test case names from 'tests', in the order of
    their first appearance.
  """
  seen_cases = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in seen_cases:
      seen_cases.append(case_name)
  return seen_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Each module-level cache list is populated at most once; a list that
  already holds data is left untouched, so repeated calls (one per test
  via setUp) only run the binary the first time.
  """
  test_list_specs = [
      (ALL_TESTS, {}, [AlsoRunDisabledTestsFlag()]),
      (ACTIVE_TESTS, {}, []),
      (FILTERED_TESTS, {}, [FilterFlag(TEST_FILTER)]),
      (SHARDED_TESTS,
       {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'},
       []),
      (SHUFFLED_ALL_TESTS, {},
       [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_ACTIVE_TESTS, {}, [ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_FILTERED_TESTS, {},
       [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)]),
      (SHUFFLED_SHARDED_TESTS,
       {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'},
       [ShuffleFlag(), RandomSeedFlag(1)]),
      ]
  for target_list, extra_env, args in test_list_specs:
    if not target_list:
      # Only the first iteration's test list is of interest here.
      target_list.extend(GetTestsForAllIterations(extra_env, args)[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Populates the module-level test-list caches on first use; later
    # test methods reuse the cached results instead of re-running the
    # binary.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Once a non-death test has been seen, no death test may follow.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Appends a test case name each time a *different* case starts; if the
    # same case name then appears twice in the list, its tests were
    # interleaved with another case's tests.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
  # Script entry point: hands control to the shared gtest test runner.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.

# Checks if this platform can pass empty environment variables to child
# processes.  We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ.  We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
# NOTE(review): the constant name misspells "DISABLED"; kept as-is since
# renaming a module-level constant could break code importing this module.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Whether the binary was built with value-parameterized tests enabled;
# lazily set in GTestFilterUnitTest.setUp().
param_tests_present = None

# Utilities.

# A mutable copy of the environment, handed to every child process; tests
# adjust it via SetEnvVar() and InvokeWithModifiedEnv().
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program with the given flags and returns its output."""
  command_line = [COMMAND] + (args or [])
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def RunAndExtractTestList(args = None):
  """Runs the test program and reports what it ran.

  Returns:
    A pair (tests_run, exit_code): the list of 'TestCase.TestName'
    strings the program ran, and the program's exit code.
  """
  process = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  test = ''
  for line in process.output.split('\n'):
    # A "[----] N tests from Case" header updates the current test case;
    # each "[ RUN ]" line records one test under that case.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      test_case = case_match.group(1)
      continue
    run_match = TEST_REGEX.match(line)
    if run_match is not None:
      test = run_match.group(1)
      tests_run.append(test_case + '.' + test)
  return (tests_run, process.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment.

  The module-level 'environ' map is extended with 'extra_env' for the
  duration of the call and restored afterwards, even on exceptions.
  """
  saved_env = environ.copy()
  environ.update(extra_env)
  try:
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(saved_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs one shard of the test program.

  Returns:
    A pair (tests_run, exit_code) as produced by RunAndExtractTestList.
  """
  sharding_env = {TOTAL_SHARDS_ENV_VAR: str(total_shards),
                  SHARD_INDEX_ENV_VAR: str(shard_index)}
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""

    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    # Equal length plus equal element sets rules out both missing and
    # duplicated elements across the slices.
    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(sets.Set(set_var), sets.Set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process.  This means it is impossible to pass an empty filter
    # into a process using the environment variable.  However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    # Probed once and cached in the module-level variable.
    global param_tests_present
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with negative patterns (after the '-' sign)."""

    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    # The flag value, not the env variable, determines what ran.
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
if __name__ == '__main__':
  # Script entry point: hands control to the shared gtest test runner.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files, relative to the gtest root directory.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files, relative to the output directory.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """

  if not os.path.isfile(os.path.join(directory, relative_path)):
    print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory)
    print ('Please either specify a valid project root directory '
           'or omit it on the command line.')
    # Exit with a non-zero status so callers/build scripts notice the failure.
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """

  # Both seed files must be present for the fusion to proceed.
  VerifyFileExists(gtest_root, GTEST_H_SEED)
  VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  """

  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print ('%s already exists in directory %s - overwrite it? (y/N) ' %
           (relative_path, output_dir))
    # Anything other than 'y'/'Y' (including an empty answer) aborts.
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print 'ABORTED.'
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """

  # Checks (and possibly prompts the user about) each file we will write.
  VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
  VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""

  output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = sets.Set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file (depth-first), inlining every
    gtest header it #includes into output_file."""

    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  output_file is any writable file-like object; the fused source is written
  to it and the caller is responsible for closing it.
  """

  processed_files = sets.Set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file, inlining #included src/ files
    and rewriting gtest-header #includes."""

    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.

          # There is no need to #include "gtest/gtest.h" more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""

  # Opens the destination file and delegates the real work to
  # FuseGTestAllCcToFile, which can fuse into any writable stream.
  output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  FuseGTestAllCcToFile(gtest_root, output_file)
  output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc."""

  # Validates everything up front so we fail before writing any output.
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)

  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Parses the command line and fuses Google Test accordingly."""

  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Wrong number of arguments: print the usage (module docstring) and fail.
    print __doc__
    sys.exit(1)
# Allows this file to be used both as a script and as an importable module.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header (relative to this script's directory).
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test (relative to this script's directory).
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n:  the maximum arity of the predicate macros to be generated.
  """

  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
    'n' : n
    }

  # The preamble is the generated file's license, the generation stamp,
  # and the GTEST_ASSERT_ building block all other macros expand to.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.

#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_

// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
#error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.
#endif  // GTEST_INCLUDE_GTEST_GTEST_H_

// This header implements a family of generic predicate assertion
// macros:
//
//   ASSERT_PRED_FORMAT1(pred_format, v1)
//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
//   ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult.  See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
//   ASSERT_PRED1(pred, v1)
//   ASSERT_PRED2(pred, v1, v2)
//   ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.

// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce.  Don't use this in your code.

#define GTEST_ASSERT_(expression, on_failure) \\
  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
  if (const ::testing::AssertionResult gtest_ar = (expression)) \\
    ; \\
  else \\
    on_failure(gtest_ar.failure_message())

""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity (None for a negative n)."""
  if n < 0:
    return None
  # The first few arities have special English names; the rest are formed
  # with the '-ary' suffix.
  special_names = ['nullary', 'unary', 'binary', 'ternary']
  if n < len(special_names):
    return special_names[n]
  return '%s-ary' % n
def Title(word):
  """Returns the given word in title case.  The difference between
  this and string's title() method is that Title('4-ary') is '4-ary'
  while '4-ary'.title() is '4-Ary'."""
  # Only the very first character is upper-cased; the rest is untouched.
  first_char = word[0]
  remainder = word[1:]
  return first_char.upper() + remainder
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  Args:
    n:  the inclusive upper bound; n <= 0 yields the empty list.
  """
  # The explicit list() keeps the documented "returns the list" contract on
  # both Python 2 (where range() already builds a list) and Python 3 (where
  # range() is a lazy sequence); callers here concatenate and iterate it.
  return list(range(1, n + 1))
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """

  # Each iteration must supply one value per '%s' placeholder.
  spec_count = format.count('%s')
  pieces = []
  for index in range(1, n + 1):
    pieces.append(format % (spec_count * (index,)))
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  The result is C++ source text: the AssertPredNHelper template function
  plus the GTEST_PRED*/EXPECT_PRED*/ASSERT_PRED* macro definitions.
  """

  # A map the defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
    }

  impl = """

// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
          typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
                                  const char* e%s""")

  impl += """,
                                  Pred pred"""

  impl += Iter(n, """,
                                  const T%s& v%s""")

  impl += """) {
  if (pred(%(vs)s)) return AssertionSuccess();

""" % DEFS

  impl += '  return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
                            << e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
      << "\\n" << e%s << " evaluates to " << v%s""")

  impl += """;
}

// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s),\\
                on_failure)

// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s.  Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
                                             #v%s""")

  impl += """, \\
                                             pred"""

  impl += Iter(n, """, \\
                                             v%s""")

  impl += """), on_failure)

// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
  GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
  GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)

""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""
  # Closes the include guard opened in HeaderPreamble().
  postamble = """
#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
  return postamble
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content."""

  print 'Updating file %s . . .' % path

  # 'w+' truncates any existing file before writing the new content.
  f = file(path, 'w+')
  print >>f, content,
  f.close()

  print 'File %s has been updated.' % path
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""

  # The header is the preamble, one implementation section per arity
  # from 1 to n, and the closing include guard.
  GenerateFile(HEADER,
               HeaderPreamble(n)
               + ''.join([ImplementationForArity(i) for i in OneTo(n)])
               + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file.

  The preamble carries the generated file's license, the generation stamp,
  the needed #includes, and the user-defined Bool type the tests exercise.
  """

  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
    }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'.  DO NOT EDIT BY HAND!

// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long.  If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions.  We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon.  In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.

#include <iostream>

#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"

// A user-defined data type.
struct Bool {
  explicit Bool(int val) : value(val != 0) {}

  bool operator>(int n) const { return value > Bool(n).value; }

  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }

  bool operator==(const Bool& rhs) const { return value == rhs.value; }

  bool value;
};

// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
  return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions.

  The result is C++ source text: sample predicate functions/functors and
  predicate-formatters of arity n, a test fixture that verifies argument
  evaluation counts, and one TEST_F per macro/argument-type/outcome combo.
  """

  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
    }

  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.

// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
  return %(v_sum)s > 0;
}

// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
  return %(v_sum)s > 0;
}

bool PredFunction%(n)sBool(%(Bool_vs)s) {
  return %(v_sum)s > 0;
}
""" % DEFS)

  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
  template <%(types)s>
  bool operator()(""" % DEFS

  tests += Iter(n, 'const T%s& v%s', sep=""",
                  """)

  tests += """) {
    return %(v_sum)s > 0;
  }
};
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
                                                 """)

  tests += Iter(n, """,
                                                 const T%s& v%s""")

  tests += """) {
  if (PredFunction%(n)s(%(vs)s))
    return testing::AssertionSuccess();

  return testing::AssertionFailure()
      << """ % DEFS

  tests += Iter(n, 'e%s', sep=' << " + " << ')

  tests += """
      << " is expected to be positive, but evaluates to "
      << %(v_sum)s << ".";
}
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
  template <%(types)s>
  testing::AssertionResult operator()(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
                                      """)

  tests += Iter(n, """,
                                      const T%s& v%s""")

  tests += """) const {
    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
  }
};
""" % DEFS

  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.

class Predicate%(n)sTest : public testing::Test {
 protected:
  virtual void SetUp() {
    expected_to_finish_ = true;
    finished_ = false;""" % DEFS

  tests += """
    """ + Iter(n, 'n%s_ = ') + """0;
  }
"""

  tests += """
  virtual void TearDown() {
    // Verifies that each of the predicate's arguments was evaluated
    // exactly once."""

  tests += ''.join(["""
    EXPECT_EQ(1, n%s_) <<
        "The predicate assertion didn't evaluate argument %s "
        "exactly once.";""" % (i, i + 1) for i in OneTo(n)])

  tests += """

    // Verifies that the control flow in the test function is expected.
    if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpactedly aborted the test.";
    } else if (!expected_to_finish_ && finished_) {
      FAIL() << "The failed predicate assertion didn't abort the test "
                "as expected.";
    }
  }

  // true iff the test function is expected to run to finish.
  static bool expected_to_finish_;

  // true iff the test function did run to finish.
  static bool finished_;
""" % DEFS

  tests += Iter(n, """
  static int n%s_;""")

  tests += """
};

bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS

  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS

  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format:     true iff the assertion is a *_PRED_FORMAT*.
      use_assert:     true iff the assertion is an ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor:    true iff the first argument of the assertion is
                      a functor (as opposed to a function)
      use_user_type:  true iff the predicate functor/function takes
                      argument(s) of a user-defined type.

    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""

    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'

    assertion = assrt + '_PRED'

    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'

    assertion += '%(n)s' % DEFS

    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        # The non-FORMAT function predicates come in the Int/Bool flavors
        # generated above (the gcc 2.95.3 workaround).
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'

    test_name = pred_format_type.title()

    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'

    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'

    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
      })

    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs

    indent = (len(assertion) + 3)*' '
    extra_indent = ''

    if expect_failure:
      extra_indent = '  '
      if use_assert:
        test += """
  expected_to_finish_ = false;
  EXPECT_FATAL_FAILURE({  // NOLINT"""
      else:
        test += """
  EXPECT_NONFATAL_FAILURE({  // NOLINT"""

    test += '\n' + extra_indent + """  %(assertion)s(%(pf)s""" % defs

    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + '  finished_ = true;\n'

    if expect_failure:
      test += '  }, "");\n'

    test += '}\n'
    return test

  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])

  return tests
def UnitTestPostamble():
  """Returns the postamble for the tests."""
  # Nothing needs to follow the generated tests.
  empty_postamble = ''
  return empty_postamble
def GenerateUnitTest(n):
  """Generates the file of tests for up-to n-ary predicate assertions."""

  # The unit test is the preamble, one test section per arity from 1 to n,
  # and the (currently empty) postamble.
  GenerateFile(UNIT_TEST,
               UnitTestPreamble()
               + ''.join([TestsForArity(i) for i in OneTo(n)])
               + UnitTestPostamble())
def _Main():
  """The entry point of the script.  Generates the header file and its
  unit test."""

  # Exactly one argument (MAX_ARITY) is required; otherwise print the
  # usage (module docstring) and fail.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)

  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Allows this file to be used both as a script and as an importable module.
if __name__ == '__main__':
  _Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Maps a regex that recognizes a Pump directive to the token type it
# produces.  Order matters: when two patterns match at the same position,
# FindFirstInLine keeps the one listed first, so the specific directives
# must precede the catch-all '$id' and bare '$' entries.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """Represents a position (line and column) in a text file.

  Cursors order lexicographically: first by line, then by column.
  """

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return not rhs < self

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return not self < rhs

  def __str__(self):
    # The EOF sentinel prints specially; otherwise use 1-based line numbers.
    if self == Eof():
      return 'EOF'
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    # Moving a cursor only shifts the column, never the line.
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file.

  The sentinel is the (line=-1, column=-1) cursor; callers compare against
  it with == (see SubString and Cursor.__str__).
  """
  return Cursor(-1, -1)
class Token:
  """Represents a token in a Pump source file.

  A token carries its [start, end) cursor span, its text (value), and a
  token_type tag (one of the tags in TOKEN_TABLE, or 'code'/'id'/'exp').
  """

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing cursors default to the EOF sentinel so every Token has a
    # comparable position.
    if start is None:
      self.start = Eof()
    else:
      self.start = start
    if end is None:
      self.end = Eof()
    else:
      self.end = end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    # The cursors are cloned too so the copy is fully independent.
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder_of_line = lines[pos.line][pos.column:]
  return remainder_of_line.startswith(string)
def FindFirstInLine(line, token_table):
  """Returns (start, length, token_type) for the earliest match in line of
  any regex in token_table, or None when nothing matches.  Ties go to the
  entry listed first in token_table."""
  best = None
  for regex, token_type in token_table:
    m = regex.search(line)
    if m is None:
      continue
    # Keep this match only if it starts strictly earlier than the best so
    # far, which preserves token_table order on equal starts.
    if best is None or m.start() < best[0]:
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Returns the first token at/after cursor that any regex in token_table
  matches, scanning line by line; returns None when nothing matches."""
  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    # On the starting line, only look at/after the cursor's column.
    if cur_line_number == start.line:
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      # Translate the column back to the full (unsliced) line.
      if cur_line_number == start.line:
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
def SubString(lines, start, end):
  """Returns the text between cursors start (inclusive) and end (exclusive)
  in lines; an EOF end means "through the end of the last line"."""
  # Normalize the EOF sentinel to the concrete end of the last line.
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  # The span covers several lines: the tail of the first line, all full
  # middle lines, and the head of the last line.
  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)
def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""
  # Pass 1: drop any line that holds nothing but a '$$' meta comment,
  # including its trailing newline.  (Because \s matches '\n', pass 2 below
  # also swallows whole-line comments further into the string.)
  without_comment_lines = re.sub(r'^\s*\$\$.*\n', '', str)
  # Pass 2: trim trailing meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', without_comment_lines)
def MakeToken(lines, start, end, token_type):
  """Creates a Token of token_type whose value is the text spanning
  [start, end) in lines."""
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses a token that must match regex exactly at pos, or exits."""
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  # 'not m.start()' means the match begins at column 0 of the remainder,
  # i.e. exactly at pos; a match further along the line is an error.
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
# Pre-compiled regexes for the Pump token grammar.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')             # a meta-variable identifier
EQ_REGEX = re.compile(r'=')                        # the '=' in a $var binding
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # text up to EOL or a $$ meta comment
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')   # zero or more spaces
WHITE_SPACE_REGEX = re.compile(r'\s')              # exactly one space
DOT_DOT_REGEX = re.compile(r'\.\.')                # the '..' separator in $range
def Skip(lines, pos, regex):
  """Advances pos past regex if it matches right at pos; otherwise a no-op.

  regex may be a compiled pattern or a pattern string.
  """
  remainder = lines[pos.line][pos.column:]
  match = re.search(regex, remainder)
  if match and match.start() == 0:
    return pos + match.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos.

  The search is restricted to pos's line; exits with an error (naming
  token_type) when there is no match on that line.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at pos into an 'exp' Token.

  The token covers everything from pos through the matching close paren,
  including nested parentheses.
  """
  def ParseInParens(pos):
    # Consume optional whitespace, the opening '(', the (possibly nested)
    # contents, and finally the closing ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Scan to the next paren of either kind; recurse on '(' to balance
    # nested groups, return at ')' (the caller consumes it).
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing '\\n' dropped from its value, if any."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """Generator yielding the Pump tokens in lines, starting at pos.

  Literal text between directives is yielded as 'code' tokens; each
  directive ($var, $for, $range, $, $if, ...) yields its keyword token
  followed by the tokens of its arguments.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more directives: the rest of the input is one 'code' token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # Literal text precedes the directive; keep both the raw form and a
      # form with one trailing newline stripped (directive lines should
      # not emit their own newline).
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      # $var id = <exp or [[ code ]]>
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # Value is an expression on the rest of the line rather than a
        # [[ code ]] block; continue tokenizing on the next line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      # $for id <separator code> [[ body ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range id exp1..exp2
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      # The second bound runs to the end of the line.
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(exp) — an inline expression in parentheses.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # Keyword tokens that carry no immediate arguments.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s == '':
    return
  # keepends=True so token values preserve their newlines.
  lines = s.splitlines(True)
  for token in TokenizeLines(lines, Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""
  def __init__(self, atomic_code_list=None):
    # List of child AST nodes, executed in order by RunCode.
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node for a $var binding: identifier = expression-or-code-block."""
  def __init__(self, identifier=None, atomic_code=None):
    # 'id' token naming the meta variable.
    self.identifier = identifier
    # Node producing the variable's value (ExpNode or CodeNode).
    self.atomic_code = atomic_code
class RangeNode:
  """AST node for a $range declaration: identifier exp1..exp2."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier  # 'id' token naming the range
    self.exp1 = exp1              # ExpNode for the lower bound
    self.exp2 = exp2              # ExpNode for the upper bound
class ForNode:
  """AST node for a $for loop over a previously declared $range."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier  # loop variable 'id' token
    self.sep = sep                # optional separator token emitted between iterations
    self.code = code              # CodeNode for the loop body
class ElseNode:
  """AST node wrapping the else branch of an $if."""
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node for $if / $elif / $else."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp                  # ExpNode condition
    self.then_branch = then_branch  # CodeNode run when the condition is truthy
    self.else_branch = else_branch  # optional node run otherwise
class RawCodeNode:
  """AST node for literal source text copied verbatim to the output."""
  def __init__(self, token=None):
    self.raw_code = token  # the 'code' token holding the literal text
class LiteralDollarNode:
  """AST node for an escaped dollar sign that emits a literal '$'."""
  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node for a meta expression."""
  def __init__(self, token, python_exp):
    self.token = token            # the original 'exp' token
    # Python source string eval'd by Env.EvalExp (identifiers already
    # rewritten to self.GetValue(...) lookups).
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list (mutates it in place)."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list (mutates it in place)."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token; exits if its type mismatches.

  When token_type is None, any token is accepted.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)

  return token
def PeekToken(a_list):
  """Returns the first element without removing it, or None when empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Parses a meta expression token into an ExpNode."""
  # Rewrite every identifier in the expression to self.GetValue("name"),
  # so the string can later be eval'd by Env.EvalExp against the current
  # variable bindings.
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $else / $elif continuation of an $if.

  Returns the node for the branch, or None when the next tokens do not
  start an else-like construct.  Whitespace-only 'code' tokens between
  the $if body and the $else/$elif are skipped.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    # Desugar '$elif cond [[...]]' into a nested IfNode so evaluation
    # only needs to understand plain if/else.
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only token: skip it and keep looking.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic AST node from the front of tokens.

  Consumes the tokens it uses and returns the node; returns None (pushing
  the inspected token back) when the next token cannot start an atomic
  node — e.g. the ']]' closing an enclosing block.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    # $var id = <exp>  or  $var id = [[ code ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for id <optional separator code> [[ body ]]
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Bare $name reference: strip the leading '$' and treat as an expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not the start of an atomic node; restore the token for the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses consecutive atomic nodes from tokens into one CodeNode.

  Stops when tokens is exhausted or the next token cannot start an
  atomic node (e.g. a closing ']]').
  """
  children = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if node is None:
      break
    children.append(node)
  return CodeNode(children)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  token_list = list(Tokenize(pump_src_text))
  return ParseCodeNode(token_list)
class Env:
  """Evaluation environment: stacks of variable and range bindings.

  Both stacks are lists of tuples with the most recent binding first,
  so lookups naturally see the innermost definition.
  """
  def __init__(self):
    self.variables = []  # list of (name, value), innermost first
    self.ranges = []     # list of (name, lower, upper), innermost first

  def Clone(self):
    """Returns a shallow copy so nested scopes don't leak bindings out."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the innermost binding of identifier, or exits if undefined."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode against this environment; exits on failure.

    NOTE(review): uses eval() on the translated expression — acceptable
    here because Pump source is trusted build-time input, not user data.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns (lower, upper) for a $range name, or exits if undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text and exposes the last, unterminated line."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline ('' when there is none)."""
    newline_index = self.string.rfind('\n')
    if newline_index < 0:
      # No newline at all: by convention the "last line" is empty.
      return ''
    return self.string[newline_index + 1:]

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes a single AST node, appending generated text to output.

  env may gain bindings ($var/$range mutate it); node bodies run in a
  clone of env so their inner bindings don't escape.
  """
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    # Evaluate the value into a scratch Output, then bind the resulting
    # string (PushVariable converts int-looking strings to ints).
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # The loop variable iterates the inclusive range declared by $range.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      # Emit the separator between iterations, but not after the last one.
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # Unknown node type: a bug in the parser, not in the input.
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every child of code_node in order, appending text to output."""
  for child in code_node.atomic_code:
    RunAtomicCode(env, child, output)
def IsComment(cur_line):
  """True when the line contains a // comment marker anywhere."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirevative(prev_lines, cur_line):
  """True when cur_line starts a preprocessor directive or continues one.

  A directive continues when the previously emitted line ends with a
  backslash.  (The misspelled name is kept: it is the public interface.)
  """
  if cur_line.lstrip().startswith('#'):
    return True
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a // comment to 80 columns, appending to output.

  Any code before the comment is emitted unchanged on its own line; the
  comment text is then re-flowed word by word under a '// ' prefix at the
  appropriate indentation.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Comment-only line: keep the comment at its original column.
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-plus-trailing-punctuation segments so punctuation
  # stays attached to the word before it.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      # Current line is full: flush it and start a new one with this segment.
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps a code line to 80 columns, appending the pieces to output.

  line_concat is appended to every wrapped (non-final) piece — e.g. ' \\'
  for preprocessor directives, '' for plain code.  Continuation lines are
  indented four extra spaces.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line

  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']

  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      # Flush the full line and continue with this segment on a new,
      # further-indented continuation line.
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()

  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirevative(line, output):
  """Wraps a preprocessor directive, continuing each piece with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps an ordinary code line with no continuation marker."""
  WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
  """Truthy when line is a header-guard directive or an #include.

  Returns a match object (truthy) or None, matching the original's
  truthiness-based contract.
  """
  guard = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  if guard:
    return guard
  return re.match(r'^#include\s', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it when it exceeds 80 columns.

  Comments, preprocessor directives, and plain code each use their own
  wrapping strategy; header guards and #includes are exempt per the
  style guide.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsComment(line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard lines
      # and includes.
      output.append(line)
    else:
      WrapComment(line, output)
    return
  if IsInPreprocessorDirevative(output, line):
    if IsHeaderGuardOrInclude(line):
      # Same exception for guards and includes inside directives.
      output.append(line)
    else:
      WrapPreprocessorDirevative(line, output)
    return
  WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from generated code."""
  wrapped = []
  for source_line in string.splitlines():
    WrapLongLine(source_line, wrapped)
  cleaned = [out_line.rstrip() for out_line in wrapped]
  return '\n'.join(cleaned) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Strip $$ comments, parse, execute against an empty environment,
  # then reformat the generated code.
  ast = ParseToAST(StripMetaComments(src_text))
  generated = Output()
  RunCode(Env(), ast, generated)
  return BeautifyCode(generated.string)
def main(argv):
  """Entry point: converts the .pump file named by the last argument.

  Writes foo.h for foo.h.pump; prints to stdout for any other extension.
  """
  if len(argv) == 1:
    # No input file given: show usage (the module docstring) and fail.
    print __doc__
    sys.exit(1)

  file_path = argv[-1]
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    # foo.h.pump -> foo.h
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # '-' means stdout; the trailing comma suppresses an extra newline.
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('//     %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
# Script entry point: the last command-line argument is the input file.
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()

# Extract the version string from the AC_INIT macro
#   The following version_expression means:
#     Extract three integers separated by periods and surrounded by square
#     brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
#     (*? is the non-greedy flag) since that would pull in everything between
#     the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# The command-line flag that selects Google Test's XML output.
GTEST_OUTPUT_FLAG = "--gtest_output"
# File name Google Test uses when no explicit XML path is given.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
# Name of the helper test binary exercised by this script.
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Whether failure messages are expected to carry stack traces; the
# expected-XML template below is adjusted accordingly.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
  STACK_TRACE_TEMPLATE = ""
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" name="AllTests">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput("gtest_no_test_unittest",
EXPECTED_EMPTY_XML, 0)
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
"gtest_no_test_unittest")
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + "out.xml")
if os.path.isfile(xml_path):
os.remove(xml_path)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
command = [gtest_prog_path,
"%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
"--shut_down_xml"]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + "out.xml")
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, expected_exit_code))
expected = minidom.parseString(expected_xml)
actual = minidom.parse(xml_path)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual .unlink()
if __name__ == '__main__':
  # Limit stack trace depth so the expected XML is stable across machines.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with different command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
  """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
  # stderr is left uncaptured so diagnostics stay visible; only stdout
  # (the test listing) is returned for verification.
  return gtest_test_utils.Subprocess([EXE_PATH] + args,
                                     capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value:      value of the --gtest_list_tests flag;
                       None if the flag should not be present.
      expected_output: the expected output after running command;
      other_flag:      a different flag to be passed to command
                       along with gtest_list_tests;
                       None if the flag should not be present.
    """
    # Translate flag_value into the actual argument and a human-readable
    # description for failure messages.
    if flag_value is None:
      flag, flag_expression = '', 'not set'
    elif flag_value == '0':
      flag, flag_expression = '--%s=0' % LIST_TESTS_FLAG, '0'
    else:
      flag, flag_expression = '--%s' % LIST_TESTS_FLAG, '1'

    args = [flag]
    if other_flag is not None:
      args.append(other_flag)

    output = Run(args)
    msg = ('when %s is %s, the output of "%s" is "%s".' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))

    if expected_output is None:
      # No exact expectation: it must at least differ from a full listing.
      self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
    else:
      self.assert_(output == expected_output, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None, expected_output=None, other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    self.RunAndVerify(flag_value='0', expected_output=None, other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_NO_FILTER,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_NO_FILTER,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_FILTER_FOO,
                      other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
  # Main() strips the --gtest_* flags before handing control to unittest.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.

FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'

# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_ex_test_')

# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_catch_exceptions_no_ex_test_')

# Probe the binary's test list; the SEH test case below runs only when an
# SEH test is present (presumably only on Windows builds -- TODO confirm).
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output

SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST

if SUPPORTS_SEH_EXCEPTIONS:
  BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output

# The exceptions-enabled binary's output is used by every CatchCxxExceptions
# test below, so it is captured unconditionally.
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Each location below must have produced an SEH-exception report
      # in the binary's output.
      for location in ("in the test fixture's constructor",
                       "in the test fixture's destructor",
                       'in SetUpTestCase()',
                       'in TearDownTestCase()',
                       'in SetUp()',
                       'in TearDown()',
                       'in the test body'):
        self.assert_('SEH exception with code 0x2a thrown ' + location
                     in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exceptions thrown affect the remainder of the test work flow in the
    expected manner.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInFixtureDestructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s destructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    # The fixture machinery around the failed SetUpTestCase() must still
    # run to completion.
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # (Local constant name fixed from the misspelled FITLER_OUT_....)
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG]).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
  # Main() strips the --gtest_* flags before handing control to unittest.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
# Path to the gtest_uninitialized_test_ binary under test.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError when the given condition does not hold."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Asserts that two values are equal, printing both values on mismatch.

  Raises:
    AssertionError: if expected != actual.
  """
  if expected != actual:
    # Parenthesized print calls work under both Python 2 and Python 3;
    # the bare 'print x' statement form this replaces is a Python 3
    # syntax error.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""
  result = gtest_test_utils.Subprocess(command)
  # The binary must exit normally (not be killed) with code 1, and its
  # output must mention InitGoogleTest.
  Assert(result.exited)
  AssertEq(1, result.exit_code)
  Assert('InitGoogleTest' in result.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  # Single test: the uninitialized binary must fail with exit code 1 and
  # mention InitGoogleTest in its output.
  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
  # Main() strips the --gtest_* flags before handing control to unittest.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.

IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')

# Utilities.

# A private copy of os.environ: the tests mutate it via SetEnvVar() and
# Run() passes it to the child process, so this process's real environment
# stays untouched.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """
  # Operates on the module-level 'environ' copy, not on os.environ.
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  process = gtest_test_utils.Subprocess(command, env=environ)
  if process.terminated_by_signal:
    return 1
  return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    # Apply the environment setting first; Run() hands the module-level
    # 'environ' copy to the child process.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Describe the environment setting for the failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # Translate flag_value (None / '0' / anything else) into the actual
    # command line flag text.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    has_seg_fault = Run(command)
    # Unset the variable so state does not leak into the next invocation.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      # Always restore the variable, even if the verification fails.
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
  # Main() strips the --gtest_* flags before handing control to unittest.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.

# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.
  """
  # Normalize to upper case to match the GTEST_* environment variable
  # naming used by this script.
  env_var = env_var.upper()
  if value is None:
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""
  # Parenthesized print works under both Python 2 and Python 3; the bare
  # 'print x' statement form it replaces is a Python 3 syntax error.
  print('Running "%s". . .' % ' '.join(command))
  p = gtest_test_utils.Subprocess(command)
  return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value:    value of the --gtest_throw_on_failure flag;
                     None if the flag should not be present.
      should_fail:   True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    # Describe the environment setting for the failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # Translate flag_value (None / '0' / anything else) into the actual
    # command line flag text.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Unset the variable so state does not leak into the next invocation.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
  # Main() strips the --gtest_* flags before handing control to unittest.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent."""
  # Module-wide state: remember whether parsing already happened so that
  # repeated calls (e.g. GetFlag() before Main()) are no-ops.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True

  for flag in _flag_map:
    # The environment variable overrides the default value.
    env_name = flag.upper()
    if env_name in os.environ:
      _flag_map[flag] = os.environ[env_name]

    # The command line flag overrides the environment variable.  Only the
    # first occurrence is consumed, and it is removed from argv so that
    # unittest never sees it.
    prefix = '--' + flag + '='
    for i in range(1, len(argv)):  # Skips the program name at argv[0].
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
def GetFlag(flag):
  """Returns the value of the given flag."""
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed (the call is idempotent, so this is cheap).
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
# Lazily-created temporary directory shared by all callers of GetTempDir();
# removed on interpreter exit via the atexit hook below.
_temp_dir = None

def _RemoveTempDir():
  # Best-effort cleanup; errors from rmtree are deliberately ignored.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)

def GetTempDir():
  """Returns a directory for temporary files."""
  global _temp_dir
  if _temp_dir is None:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    # sys.stderr.write() works under both Python 2 and Python 3, unlike
    # the Python 2-only 'print >> sys.stderr' statement it replaces.
    sys.stderr.write(message + '\n')
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the argument to exit() directly,
    # so no decoding is needed.
    return exit_code
  # On Unix, the value is a wait status; exit() was only called if the
  # process terminated normally.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.
    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    # The subprocess module is the preferable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        # Merge stderr into stdout so both land in self.output.
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest:
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        # The child gets no stdin; read its entire output before waiting.
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        # Restore the working directory and (if replaced) the environment,
        # even when the child fails.
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # A negative return code means the child was killed by a signal
    # (subprocess.Popen.returncode semantics).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""
  # Strip the --gtest_* flags before handing argv to unittest.main(),
  # which would otherwise choke on options it does not recognize.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: the original read "IS_WINDOWS = os.name = 'nt'", a chained
# *assignment* that overwrote os.name with 'nt' on every platform and made
# IS_WINDOWS the truthy string 'nt' everywhere.  An equality comparison is
# what is intended.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'  # environment variable Google Test consults
COLOR_FLAG = 'gtest_color'     # the equivalent command-line flag
# Path of the gtest_color_test_ binary that the tests below spawn.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop with a default is a no-op when the variable is already absent.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)

  flag_args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG,
                                                          color_flag)]
  proc = gtest_test_utils.Subprocess([COMMAND] + flag_args)
  # A crashed child (not exited) is reported the same as a non-zero exit.
  return not proc.exited or proc.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """End-to-end checks of Google Test's color-output decision."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Monochrome or unknown terminals must not get color by default.
      for mono_term in ('dumb', 'emacs', 'xterm-mono', 'unknown', None):
        self.assert_(not UsesColor(mono_term, None, None))
    # Color-capable terminals get color by default.
    for color_term in ('linux', 'cygwin', 'xterm', 'xterm-color',
                       'xterm-256color'):
      self.assert_(UsesColor(color_term, None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    for term in ('dumb', 'xterm-color'):
      self.assert_(not UsesColor(term, None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal's capability.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    # 'yes' forces color regardless of the terminal.
    for term in ('dumb', 'xterm'):
      self.assert_(UsesColor(term, None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    for term in ('dumb', 'xterm-color'):
      self.assert_(not UsesColor(term, 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    for term in ('dumb', 'xterm-color'):
      self.assert_(UsesColor(term, 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag always overrides the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    for yes_alias in ('true', 'YES', 'T', '1'):
      self.assert_(UsesColor('dumb', None, yes_alias))
    for no_alias in ('f', 'false', '0', 'unknown'):
      self.assert_(not UsesColor('xterm', None, no_alias))
if __name__ == '__main__':
  # Delegates gtest-flag stripping and unittest discovery to the shared helper.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection.  os.uname() exists only on POSIX; the short-circuit
# 'and' keeps this line safe on other platforms.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

# Path of the gtest_help_test_ binary under test.
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')

FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Malformed spellings of --gtest_list_tests ('-', '/', dashes instead of
# underscores) that should still make the binary print the help message.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probes the binary once, at import time, for death-test support by
# checking its test listing.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when a help flag is specified.

    The right message must be printed and the tests must be skipped
    when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # Printing help is a successful run.
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # The stream_result_to flag is a Linux-only feature.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # Death-test flags show up only when the binary supports death tests
    # (and not on Windows).
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # A non-zero exit code is expected when the tests actually run.
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
  # Delegates gtest-flag stripping and unittest discovery to the shared helper.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Directory (under the temp dir) that the tested binaries write XML into,
# and the names of the two test binaries exercised below.
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"

# Expected XML documents; they are compared only after NormalizeXml is run
# on the actual output (see _TestOutFile below).
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""

EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    # Best-effort cleanup: each removal is attempted independently so a
    # missing file/directory never aborts the others.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    # Runs the named binary with --gtest_output pointing at a directory and
    # verifies the XML file it drops there is equivalent to expected_xml.
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)

    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_.  To account for this possibility, we
    # allow both names in the following code.  We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    # Normalize run-dependent details (e.g. timing) before comparing.
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # Exported to the spawned test binaries via the environment; depth 0
  # suppresses stack traces in their output.
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'

IS_WINDOWS = os.name == 'nt'

# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')

# Each command below is a (extra_environment_dict, argv_list) tuple, the
# shape GetShellCommandOutput/GetCommandOutput expect.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])

GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # \r\n must be collapsed first so that a lone \r from a Mac ending is
  # not produced by the first replacement and doubled by the second.
  unix = s.replace('\r\n', '\n')
  return unix.replace('\r', '\n')
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  # Group 1 keeps the bare file name; group 2 swallows either the POSIX
  # ':NN' or the Windows '(NN)' line-number form.
  location_re = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_re, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces each stack trace in the output with a one-line placeholder."""
  # The non-greedy (.|\n)*? stops at the first blank line, which
  # terminates a stack trace.
  trace_re = r'Stack trace:(.|\n)*?\n\n'
  return re.sub(trace_re, 'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # Same non-greedy span as RemoveStackTraceDetails, but deleted outright
  # (including the trailing blank line) instead of being replaced.
  return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
  """Removes all time information from a Google Test program's output."""
  # Matches the opening of '(NN ms...' so both '(5 ms)' and
  # '(5 ms total)' are normalized.
  elapsed_re = re.compile(r'\(\d+ ms')
  return elapsed_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as plain 'unsigned';
  # the pattern is a fixed string, so str.replace suffices.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Non-Windows output is already in the canonical form.
    return test_output
  # Strips the ANSI color escapes that are absent on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Rewrites POSIX-style failure headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Rewrites file(line_number) into file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""
  # Applied in order; the more specific patterns must run before the
  # generic '\d+ tests? from <letter>' one.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop every RUN ... FAILED/OK span belonging to a matching test
  # (non-greedy so spans do not swallow each other)...
  span_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
             % (pattern, pattern))
  test_output = re.sub(span_re, '', test_output)
  # ...then drop any leftover single lines that mention the pattern.
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # The four normalizers are order-independent except that line endings
  # are fixed first so the \n-based patterns below match.
  for transform in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = transform(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Run with the current environment augmented by the extra variables.
  merged_env = os.environ.copy()
  merged_env.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=merged_env).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Copy before mutating so the caller's dict stays untouched; catching
  # exceptions disables exception pop-ups on Windows.
  env_with_catch = dict(extra_env)
  env_with_catch[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((env_with_catch, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  representative_commands = (COMMAND_WITH_COLOR, COMMAND_WITH_TIME,
                             COMMAND_WITH_DISABLED, COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(cmd) for cmd in representative_commands)
# Probes the test binary once (at import time) for the optional feature
# sets it was built with, by inspecting its test listing.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False

# The golden file can only be reproduced exactly when all of the optional
# features above are available.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares the normalized output of gtest_output_test_ to a golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Strips from the golden text the sections for features this build of
    # the test binary does not support, so the comparison stays fair.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()
    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      # All features available: the outputs must match exactly.
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      # Otherwise compare only the parts this build can produce, with
      # counts and platform details normalized away.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # With --gengolden, (re)generate the golden file instead of testing.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      # Refuse to write a golden file from a build missing optional
      # features; it would not match a fully-featured run.
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform detection; os.uname() exists only on POSIX, guarded by the
# short-circuit 'and'.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'

# Path of the gtest_env_var_test_ binary under test.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')

# Private copy of the environment, mutated by SetEnvVar below and passed
# explicitly to the child process; the real os.environ is left untouched.
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, unless they are equal.

  Args:
    expected: the expected value.
    actual: the value actually observed.
  """
  if expected != actual:
    # Parenthesized print of a single expression behaves identically under
    # Python 2 and 3; the original py2-only 'print x' statements made the
    # module a SyntaxError under Python 3.
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  # Operates on the module-level 'environ' copy, not os.environ.
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  argv = [COMMAND] if flag is None else [COMMAND, flag]
  # The child runs under the module-level 'environ' copy so that
  # SetEnvVar's changes take effect without touching os.environ.
  return gtest_test_utils.Subprocess(argv, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # With the variable set, the binary must report the overridden value;
  # with it unset again, the documented default.
  for env_value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, env_value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Checks GTEST_* environment variables against their flags' defaults."""

  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    # (flag, value set through the env var, documented default) triples.
    cases = [
        ('break_on_failure', '1', '0'),
        ('color', 'yes', 'auto'),
        ('filter', 'FooTest.Bar', '*'),
        ('output', 'xml:tmp/foo.xml', ''),
        ('print_time', '0', '1'),
        ('repeat', '999', '1'),
        ('throw_on_failure', '1', '0'),
        ('death_test_style', 'threadsafe', 'fast'),
        ('catch_exceptions', '0', '1'),
    ]
    if IS_LINUX:
      # These flags exist only on Linux builds.
      cases += [
          ('death_test_use_fork', '1', '0'),
          ('stack_trace_depth', '0', '100'),
      ]
    for flag, test_val, default_val in cases:
      TestFlag(flag, test_val, default_val)
if __name__ == '__main__':
  # Delegates gtest-flag stripping and unittest discovery to the shared helper.
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
# Flag understood by gtest binaries for selecting the XML output path.
GTEST_OUTPUT_FLAG = "--gtest_output"
# File name a gtest binary writes when given a directory instead of a file.
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
class GTestXMLTestCase(gtest_test_utils.TestCase):
  """
  Base class for tests of Google Test's XML output functionality.
  """

  def AssertEquivalentNodes(self, expected_node, actual_node):
    """
    Asserts that actual_node (a DOM node object) is equivalent to
    expected_node (another DOM node object), in that either both of
    them are CDATA nodes and have the same value, or both are DOM
    elements and actual_node meets all of the following conditions:

    *  It has the same tag name as expected_node.
    *  It has the same set of attributes as expected_node, each with
       the same value as the corresponding attribute of expected_node.
       Exceptions are any attribute named "time", which needs only be
       convertible to a floating-point number and any attribute named
       "type_param" which only has to be non-empty.
    *  It has an equivalent set of child nodes (including elements and
       CDATA sections) as expected_node.  Note that we ignore the
       order of the children as they are not guaranteed to be in any
       particular order.
    """
    # CDATA nodes carry no attributes or children; comparing their
    # values is sufficient.
    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
      return

    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
    self.assertEquals(expected_node.tagName, actual_node.tagName)

    # Compare attributes: same count, and every expected attribute must
    # be present with an equal value.
    expected_attributes = expected_node.attributes
    actual_attributes = actual_node.attributes
    self.assertEquals(
        expected_attributes.length, actual_attributes.length,
        "attribute numbers differ in element " + actual_node.tagName)
    for i in range(expected_attributes.length):
      expected_attr = expected_attributes.item(i)
      actual_attr = actual_attributes.get(expected_attr.name)
      self.assert_(
          actual_attr is not None,
          "expected attribute %s not found in element %s" %
          (expected_attr.name, actual_node.tagName))
      self.assertEquals(expected_attr.value, actual_attr.value,
                        " values of attribute %s in element %s differ" %
                        (expected_attr.name, actual_node.tagName))

    # Compare children keyed by their identifying attribute so the
    # comparison is insensitive to child order.
    expected_children = self._GetChildren(expected_node)
    actual_children = self._GetChildren(actual_node)
    self.assertEquals(
        len(expected_children), len(actual_children),
        "number of child elements differ in element " + actual_node.tagName)
    for child_id, child in expected_children.iteritems():
      self.assert_(child_id in actual_children,
                   '<%s> is not in <%s> (in element %s)' %
                   (child_id, actual_children, actual_node.tagName))
      self.AssertEquivalentNodes(child, actual_children[child_id])

  # Maps each recognized element tag to the attribute that uniquely
  # identifies a child of that kind within its parent.
  identifying_attribute = {
    "testsuites": "name",
    "testsuite": "name",
    "testcase": "name",
    "failure": "message",
    }

  def _GetChildren(self, element):
    """
    Fetches all of the child nodes of element, a DOM Element object.
    Returns them as the values of a dictionary keyed by the IDs of the
    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
    is the value of their "name" attribute; for <failure> elements, it is
    the value of the "message" attribute; CDATA sections and non-whitespace
    text nodes are concatenated into a single CDATA section with ID
    "detail".  An exception is raised if any element other than the above
    four is encountered, if two child elements with the same identifying
    attributes are encountered, or if any other type of node is encountered.
    """
    children = {}
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.assert_(child.tagName in self.identifying_attribute,
                     "Encountered unknown element <%s>" % child.tagName)
        childID = child.getAttribute(self.identifying_attribute[child.tagName])
        self.assert_(childID not in children)
        children[childID] = child
      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
        if "detail" not in children:
          # Whitespace-only text nodes are ignored; any other text or
          # CDATA content starts the "detail" entry.
          if (child.nodeType == Node.CDATA_SECTION_NODE or
              not child.nodeValue.isspace()):
            children["detail"] = child.ownerDocument.createCDATASection(
                child.nodeValue)
        else:
          children["detail"].nodeValue += child.nodeValue
      else:
        self.fail("Encountered unexpected node type %d" % child.nodeType)
    return children

  def NormalizeXml(self, element):
    """
    Normalizes Google Test's XML output to eliminate references to transient
    information that may change from run to run.

    *  The "time" attribute of <testsuites>, <testsuite> and <testcase>
       elements is replaced with a single asterisk, if it contains
       only digit characters.
    *  The "type_param" attribute of <testcase> elements is replaced with a
       single asterisk (if it is non-empty) as it is the type name returned
       by the compiler and is platform dependent.
    *  The line number reported in the first line of the "message"
       attribute of <failure> elements is replaced with a single asterisk.
    *  The directory names in file paths are removed.
    *  The stack traces are removed.
    """
    if element.tagName in ("testsuites", "testsuite", "testcase"):
      time = element.getAttributeNode("time")
      time.value = re.sub(r"^\d+(\.\d+)?$", "*", time.value)
      # "type_param" may be absent (getAttributeNode returns None then).
      type_param = element.getAttributeNode("type_param")
      if type_param and type_param.value:
        type_param.value = "*"
    elif element.tagName == "failure":
      for child in element.childNodes:
        if child.nodeType == Node.CDATA_SECTION_NODE:
          # Removes the source line number.
          cdata = re.sub(r"^.*[/\\](.*:)\d+\n", "\\1*\n", child.nodeValue)
          # Removes the actual stack trace.
          child.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*",
                                   "", cdata)
    # Recurse into all child elements.
    for child in element.childNodes:
      if child.nodeType == Node.ELEMENT_NODE:
        self.NormalizeXml(child)
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used by the tests that combine shuffling with filtering.
TEST_FILTER = 'A*.A:A*.B:C*'

# Caches of the test lists observed under various flag combinations.
# They are populated lazily by CalculateTestLists() (called from setUp)
# and shared by all test methods below.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test run disabled tests as well."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Builds the --gtest_filter flag for the given filter string."""
  return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
  """Builds the --gtest_repeat flag for the given repetition count."""
  return '--gtest_repeat={0}'.format(n)
def ShuffleFlag():
  """Returns the flag that enables test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Builds the --gtest_random_seed flag for the given seed value."""
  return '--gtest_random_seed={0}'.format(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program with extra_env merged into the current
  environment and returns its console output."""
  run_env = os.environ.copy()
  run_env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=run_env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  test_iterations = []
  output = RunAndReturnOutput(extra_env, args)
  for raw_line in output.split('\n'):
    stripped = raw_line.strip()
    # A '----' separator line marks the start of a new iteration's
    # test listing; any other non-blank line is a full test name.
    if raw_line.startswith('----'):
      tests = []
      test_iterations.append(tests)
    elif stripped:
      tests.append(stripped)  # 'TestCaseName.TestName'
  return test_iterations
def GetTestCases(tests):
  """Returns the test case names appearing in the given full test names.

  Args:
    tests: a list of full test names of the form 'TestCase.TestName'

  Returns:
    The distinct test case names, in order of first appearance
    (every duplicate after the first occurrence is dropped).
  """
  seen_cases = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in seen_cases:
      seen_cases.append(case_name)
  return seen_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Each module-level cache list is filled exactly once (subsequent calls
  see a non-empty list and skip the subprocess invocation).
  """
  shard_env = {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}
  # (cache list, extra environment, command-line flags) per scenario.
  scenarios = [
      (ALL_TESTS, {}, [AlsoRunDisabledTestsFlag()]),
      (ACTIVE_TESTS, {}, []),
      (FILTERED_TESTS, {}, [FilterFlag(TEST_FILTER)]),
      (SHARDED_TESTS, shard_env, []),
      (SHUFFLED_ALL_TESTS, {},
       [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_ACTIVE_TESTS, {}, [ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_FILTERED_TESTS, {},
       [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)]),
      (SHUFFLED_SHARDED_TESTS, shard_env,
       [ShuffleFlag(), RandomSeedFlag(1)]),
      ]
  for cache, extra_env, flags in scenarios:
    if not cache:
      # Only the first iteration's test list is of interest here.
      cache.extend(GetTestsForAllIterations(extra_env, flags)[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Populates the module-level test-list caches; cheap after the
    # first call since CalculateTestLists() skips non-empty lists.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    """Shuffling must not add or drop tests."""
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    """Shuffling must actually change the order of the test list."""
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    """Shuffling must reorder the test cases themselves, not only the
    tests within each case."""
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    """Each test must appear exactly once in a shuffled run."""
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    """A shuffled run must not contain tests absent from the normal run."""
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    """Every test from the normal run must appear in the shuffled run."""
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    """All death tests must come before any non-death test after
    shuffling (names containing 'DeathTest.' are treated as death tests)."""
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Records a case name each time it differs from the previous one;
    # a case recorded twice means its tests were interleaved with
    # another case's.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    """Tests belonging to the same test case must stay contiguous."""
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    """Each repeated iteration must use a fresh shuffle order."""
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    # The union of the three shards must equal the full active test set.
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Entry point: delegate to the shared gtest test runner.
if __name__ == '__main__':
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.

# Checks if this platform can pass empty environment variables to child
# processes.  We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ.  We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is neither 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter.  This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
# NOTE(review): the constant's name has a typo ("DISABED"); kept as-is
# since renaming would touch every use site.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

# Death tests are only present in the binary on platforms that support
# them (detected above via SUPPORTS_DEATH_TESTS).
if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',
    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',
    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Whether the binary was built with value-parameterized tests enabled;
# determined lazily (in the test class's setUp) by probing the binary's
# output with PARAM_TEST_REGEX.
param_tests_present = None

# Utilities.

# Mutable copy of the environment; manipulated by SetEnvVar() and
# passed to every subprocess this script spawns.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # pop() with a default is a no-op when the variable is absent.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""
  command_line = [COMMAND]
  if args:
    command_line.extend(args)
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""
  proc = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  test = ''
  for line in proc.output.split('\n'):
    # A test-case banner updates the current case name; a RUN line
    # yields one full 'TestCase.TestName' entry under that case.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      test_case = case_match.group(1)
      continue
    run_match = TEST_REGEX.match(line)
    if run_match is not None:
      test = run_match.group(1)
      tests_run.append(test_case + '.' + test)
  return (tests_run, proc.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment.

  The module-level 'environ' dict is updated with extra_env for the
  duration of the call and restored afterwards, even if 'function'
  raises.

  Args:
    extra_env: a map of environment variables to temporarily set
    function: the callable to invoke
    *args, **kwargs: forwarded to 'function'

  Returns:
    Whatever 'function' returns.
  """
  # Take the snapshot BEFORE entering the try block.  In the previous
  # version it was taken inside 'try'; had the snapshot (or anything
  # preceding the update) failed, the 'finally' clause would have hit a
  # NameError on 'original_env' and masked the real error.
  original_env = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""
  shard_env = {
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
      SHARD_INDEX_ENV_VAR: str(shard_index),
  }
  return InvokeWithModifiedEnv(shard_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
  """Tests filters that exclude tests using '-' (negative) patterns."""
  # Everything except BazTest.TestOne.
  self.RunAndVerify('*-BazTest.TestOne', [
      'FooTest.Abc',
      'FooTest.Xyz',
      'BarTest.TestOne',
      'BarTest.TestTwo',
      'BarTest.TestThree',
      'BazTest.TestA',
      'BazTest.TestB',
      ] + DEATH_TESTS + PARAM_TESTS)
  # Multiple negative patterns after the '-'.
  self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
      'FooTest.Xyz',
      'BarTest.TestOne',
      'BarTest.TestTwo',
      'BarTest.TestThree',
      ] + DEATH_TESTS + PARAM_TESTS)
  # Positive and negative patterns combined.
  self.RunAndVerify('BarTest.*-BarTest.TestOne', [
      'BarTest.TestTwo',
      'BarTest.TestThree',
      ])
  # Tests without leading '*'.
  self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
      'BarTest.TestOne',
      'BarTest.TestTwo',
      'BarTest.TestThree',
      ] + DEATH_TESTS + PARAM_TESTS)
  # Value parameterized tests.
  self.RunAndVerify('*/*', PARAM_TESTS)
  # Value parameterized tests filtering by the sequence name.
  self.RunAndVerify('SeqP/*', [
      'SeqP/ParamTest.TestX/0',
      'SeqP/ParamTest.TestX/1',
      'SeqP/ParamTest.TestY/0',
      'SeqP/ParamTest.TestY/1',
      ])
  # Value parameterized tests filtering by the test name.
  self.RunAndVerify('*/0', [
      'SeqP/ParamTest.TestX/0',
      'SeqP/ParamTest.TestY/0',
      'SeqQ/ParamTest.TestX/0',
      'SeqQ/ParamTest.TestY/0',
      ])
def testFlagOverridesEnvVar(self):
  """Tests that the filter flag overrides the filtering env. variable."""
  SetEnvVar(FILTER_ENV_VAR, 'Foo*')
  args = ['--%s=%s' % (FILTER_FLAG, '*One')]
  tests_run = RunAndExtractTestList(args)[0]
  # Restore the environment so later test methods are unaffected.
  SetEnvVar(FILTER_ENV_VAR, None)
  # The flag value '*One' must win over the env. variable value 'Foo*'.
  self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
  """Tests that the shard file is created if specified in the environment."""
  shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                   'shard_status_file')
  self.assert_(not os.path.exists(shard_status_file))
  extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
  try:
    InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
  finally:
    # The status file must exist even if the run itself failed.
    self.assert_(os.path.exists(shard_status_file))
    os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
  """Tests that the shard file is created with the "list_tests" flag."""
  shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                   'shard_status_file2')
  self.assert_(not os.path.exists(shard_status_file))
  extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
  try:
    output = InvokeWithModifiedEnv(extra_env,
                                   RunAndReturnOutput,
                                   [LIST_TESTS_FLAG])
  finally:
    # This assertion ensures that Google Test enumerated the tests as
    # opposed to running them.
    self.assert_('[==========]' not in output,
                 'Unexpected output during test enumeration.\n'
                 'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                 'correct flag value for listing Google Test tests.')
    self.assert_(os.path.exists(shard_status_file))
    os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
  # Only define this test when the platform supports death tests
  # (see the SUPPORTS_DEATH_TESTS module flag).
  def testShardingWorksWithDeathTests(self):
    """Tests integration with death tests and sharding."""
    gtest_filter = 'HasDeathTest.*:SeqP/*'
    expected_tests = [
        'HasDeathTest.Test1',
        'HasDeathTest.Test2',
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ]
    # The same expectation must hold for both death test styles.
    for flag in ['--gtest_death_test_style=threadsafe',
                 '--gtest_death_test_style=fast']:
      self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                    check_exit_0=True, args=[flag])
      self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                    check_exit_0=True, args=[flag])
# Script entry point: delegates to the shared gtest test-runner utility.
if __name__ == '__main__':
  gtest_test_utils.Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files.  The fusing process starts from
# these files and follows their #include directives recursively.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """
  if not os.path.isfile(os.path.join(directory, relative_path)):
    # Single-argument print(...) works identically under Python 2 and 3
    # and matches the parenthesized form already used in this script.
    print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory))
    print('Please either specify a valid project root directory '
          'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # Both seed files must be present for the fusing to make sense.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    # Single-argument print(...) works identically under Python 2 and 3.
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Check both generated files; each check may prompt or create dirs.
  for generated in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, generated)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
  # 'file()' was removed in Python 3; 'open()' is the portable spelling.
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  # The deprecated 'sets.Set' is replaced by the built-in set type.
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    input_file = open(os.path.join(gtest_root, gtest_header_path), 'r')
    try:
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."' - let's process it recursively.
          ProcessFile('include/' + m.group(1))
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)
    finally:
      # Close the input handle explicitly instead of leaking it.
      input_file.close()

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
  # The deprecated 'sets.Set' is replaced by the built-in set type.
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    # 'file()' was removed in Python 3; 'open()' is the portable spelling.
    input_file = open(os.path.join(gtest_root, gtest_source_file), 'r')
    try:
      for line in input_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
            # It's '#include "gtest/gtest-spi.h"'. This file is not
            # #included by "gtest/gtest.h", so we need to process it.
            ProcessFile(GTEST_SPI_H_SEED)
          else:
            # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
            # We treat it as '#include "gtest/gtest.h"', as all other
            # gtest headers are being fused into gtest.h and cannot be
            # #included directly.
            # There is no need to #include "gtest/gtest.h" more than once.
            if not GTEST_H_SEED in processed_files:
              processed_files.add(GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
        else:
          m = INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            output_file.write(line)
    finally:
      # Close the input handle explicitly instead of leaking it.
      input_file.close()

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # 'file()' was removed in Python 3; 'open()' is the portable spelling.
  output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  try:
    FuseGTestAllCcToFile(gtest_root, output_file)
  finally:
    # Always release the handle, even if the fusing fails midway.
    output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (each validation aborts the program
  on failure), then generates the two fused files.
  """
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)

  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Parses the command line (see the module docstring) and runs the fuser."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Single-argument print(...) works identically under Python 2 and 3.
    print(__doc__)
    sys.exit(1)
# Script entry point.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.  Output paths below are resolved relative to it.
SCRIPT_DIR = os.path.dirname(sys.argv[0])

# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')

# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n: the maximum arity of the predicate macros to be generated.
  """
  # A map that defines the values used in the preamble template.
  # 'command' records the exact invocation so the generated header
  # states how to regenerate it.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
      'n' : n
      }

  # Note: '\\' inside this (non-raw) template emits a single '\',
  # i.e. a C-macro line continuation, into the generated header.
  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
#error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
  """Returns the English name of the given arity."""
  if n < 0:
    return None
  # The first few arities have conventional English names; everything
  # beyond falls back to the generic 'N-ary' form.
  named = ['nullary', 'unary', 'binary', 'ternary']
  return named[n] if n < len(named) else '%s-ary' % n
def Title(word):
  """Returns the given word in title case.  The difference between
  this and string's title() method is that Title('4-ary') is '4-ary'
  while '4-ary'.title() is '4-Ary'."""
  # Upper-case only the very first character; leave the rest untouched.
  head = word[0].upper()
  tail = word[1:]
  return head + tail
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n]."""
  # Wrap in list() so the documented contract (a list) also holds on
  # Python 3, where range() is a lazy sequence; on Python 2 the result
  # is identical.
  return list(range(1, n + 1))
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Every '%s' spec in the format receives the same iteration index.
  spec_count = len(format.split('%s')) - 1
  pieces = [format % ((i,) * spec_count) for i in range(1, n + 1)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions."""

  # A map that defines the values used in the implementation template.
  # 'vs' is 'v1, v2, ...'; 'vts' the stringized '#v1, #v2, ...' forms.
  DEFS = {
      'n' : str(n),
      'vs' : Iter(n, 'v%s', sep=', '),
      'vts' : Iter(n, '#v%s', sep=', '),
      'arity' : Arity(n),
      'Arity' : Title(Arity(n))
      }

  # The helper function template; per-argument pieces are appended
  # with Iter() so the same code serves every arity.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  # Failure message: lists each argument expression and its value.
  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  # The internal and public macro definitions for this arity.
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s),\\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file."""
  # Closes the include guard opened in HeaderPreamble().
  return '\n#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_\n'
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content."""
  print('Updating file %s . . .' % path)
  # 'file()' was removed in Python 3 and 'print >>f, content,' is
  # Python-2-only syntax; open() + write() is equivalent and portable.
  f = open(path, 'w+')
  f.write(content)
  f.close()
  print('File %s has been updated.' % path)
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions."""
  # Preamble + one implementation section per arity + closing guard.
  body = ''.join(ImplementationForArity(i) for i in OneTo(n))
  GenerateFile(HEADER, HeaderPreamble(n) + body + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file."""

  # A map that defines the values used in the preamble template.
  # 'command' records the exact invocation (script name + max arity
  # from the command line) for the generated-file banner.
  DEFS = {
      'today' : time.strftime('%m/%d/%Y'),
      'year' : time.strftime('%Y'),
      'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
      }

  return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions."""

  # A map that defines the values used in the template for the tests.
  DEFS = {
      'n' : n,
      'es' : Iter(n, 'e%s', sep=', '),
      'vs' : Iter(n, 'v%s', sep=', '),
      'vts' : Iter(n, '#v%s', sep=', '),
      'tvs' : Iter(n, 'T%s v%s', sep=', '),
      'int_vs' : Iter(n, 'int v%s', sep=', '),
      'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
      'types' : Iter(n, 'typename T%s', sep=', '),
      'v_sum' : Iter(n, 'v%s', sep=' + '),
      'arity' : Arity(n),
      'Arity' : Title(Arity(n)),
      }

  # Sample predicates under test: a template function plus int/Bool
  # overloads (to dodge an old gcc bug with the template version).
  tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)

  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS

  tests += Iter(n, 'const T%s& v%s', sep=""",
""")

  tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
""")

  tests += Iter(n, """,
const T%s& v%s""")

  tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS

  tests += Iter(n, 'e%s', sep=' << " + " << ')

  tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS

  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
""")

  tests += Iter(n, """,
const T%s& v%s""")

  tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS

  # Test fixture: tracks per-argument evaluation counters and whether
  # the test function ran to completion.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS

  tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""

  tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""

  tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])

  tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS

  tests += Iter(n, """
static int n%s_;""")

  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS

  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS

  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
      use_format: true iff the assertion is a *_PRED_FORMAT*.
      use_assert: true iff the assertion is a ASSERT_*.
      expect_failure: true iff the assertion is expected to fail.
      use_functor: true iff the first argument of the assertion is
          a functor (as opposed to a function)
      use_user_type: true iff the predicate functor/function takes
          argument(s) of a user-defined type.

    Example:
      GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
      of a successful EXPECT_PRED_FORMATn() that takes a functor
      whose arguments have built-in types."""

    if use_assert:
      assrt = 'ASSERT'  # 'assert' is reserved, so we cannot use
                        # that identifier here.
    else:
      assrt = 'EXPECT'

    assertion = assrt + '_PRED'

    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'

    assertion += '%(n)s' % DEFS

    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'

    test_name = pred_format_type.title()

    # On failure the argument uses post-increment so the counter is
    # still bumped exactly once even though the assertion aborts.
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'

    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'

    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
        'assert' : assrt,
        'assertion' : assertion,
        'test_name' : test_name,
        'pf_type' : pred_format_type,
        'pf' : pred_format,
        'arg_type' : arg_type,
        'arg' : arg,
        'successful' : successful_or_failed,
        'expected' : expected_or_not,
        })

    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs

    indent = (len(assertion) + 3)*' '
    extra_indent = ''

    # Failing assertions are wrapped in EXPECT_{NON,}FATAL_FAILURE,
    # which adds one level of brace nesting (hence extra_indent).
    if expect_failure:
      extra_indent = ' '
      if use_assert:
        test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
      else:
        test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""

    test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + ' finished_ = true;\n'

    if expect_failure:
      test += ' }, "");\n'

    test += '}\n'
    return test

  # Generates tests for all 2**5 = 32 combinations of the five flags.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])

  return tests
def UnitTestPostamble():
  """Returns the postamble for the tests."""
  # The generated unit test needs no trailing boilerplate.
  return ''
def GenerateUnitTest(n):
  """Generates the unit test for up-to n-ary predicate assertions."""
  # Preamble + one test section per arity + (empty) postamble.
  body = ''.join(TestsForArity(i) for i in OneTo(n))
  GenerateFile(UNIT_TEST, UnitTestPreamble() + body + UnitTestPostamble())
def _Main():
  """The entry point of the script.  Generates the header file and its
  unit test.

  Usage: the single command-line argument is the maximum predicate
  arity to generate code for.  Exits with status 1 on bad usage.
  """
  # Exactly one argument (the arity) is required.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)

  # n is the maximum arity of the predicate assertions to generate.
  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Run the generator only when executed as a script, not when imported.
if __name__ == '__main__':
  _Main()
| Python |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
# Module author metadata.
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Table of (compiled regex, token type) pairs driving the tokenizer.
# FindFirstInLine() takes the match that starts earliest, breaking ties
# by table order, so the specific directives must precede the bare '$'
# fallback entry.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """Represents a position (line and column) in a text file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    # Line number dominates; column breaks ties.
    if self.line != rhs.line:
      return self.line < rhs.line
    return self.column < rhs.column

  def __le__(self, rhs):
    return self == rhs or self < rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    # Lines are displayed 1-based, columns 0-based.
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the sentinel cursor that denotes the end-of-file position."""
  return Cursor(line=-1, column=-1)
class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing cursors default to the EOF sentinel.
    self.start = start
    self.end = end
    if self.start is None:
      self.start = Eof()
    if self.end is None:
      self.end = Eof()
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Returns (start, length, token type) of the earliest match in line.

  Tries every regex in token_table and keeps the match that begins
  first; ties are broken by table order.  Returns None when nothing
  matches.
  """
  best = None
  for (regex, token_type) in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first token at or after cursor; returns it or None."""
  start = cursor.Clone()
  for line_number in range(start.line, len(lines)):
    line = lines[line_number]
    offset = 0
    if line_number == start.line:
      # On the first line, only look at/after the starting column.
      line = line[start.column:]
      offset = start.column
    found = FindFirstInLine(line, token_table)
    if found:
      (column, length, token_type) = found
      token_start = Cursor(line_number, column + offset)
      return MakeToken(lines, token_start, token_start + length, token_type)
  # No token found in the remainder of lines.
  return None
def SubString(lines, start, end):
  """Returns the text between cursors start and end in lines."""
  if end == Eof():
    # Treat EOF as the position just past the last character.
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
def StripMetaComments(str):
  """Strip meta comments from each line in the given string.

  A meta comment starts with '$$' and runs to the end of its line.
  Lines consisting of nothing but a meta comment are removed entirely,
  including their trailing newline.
  """
  # First, completely remove lines containing nothing but a meta
  # comment, including the trailing \n.  The inline (?m) flag makes ^
  # match at the start of every line instead of only at the start of
  # the whole string; without it only a leading meta-comment line was
  # removed and interior ones left blank lines behind.
  str = re.sub(r'(?m)^\s*\$\$.*\n', '', str)

  # Then, remove meta comments from contentful lines.
  return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
  """Creates a Token of token_type spanning [start, end) in lines."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
def ParseToken(lines, pos, regex, token_type):
  """Returns the token_type token matching regex exactly at pos.

  Exits the program with an error message when the expected token is
  not found at that position.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  # m.start() must be 0, i.e. the match begins exactly at pos.
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
# Regexes for the pieces of Pump directives.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')             # a meta identifier
EQ_REGEX = re.compile(r'=')                        # the '=' in $var
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # up to EOL or meta comment
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')                # the '..' in $range
def Skip(lines, pos, regex):
  """Returns the position right after a regex match starting at pos.

  When regex does not match exactly at pos, pos is returned unchanged.
  """
  rest_of_line = lines[pos.line][pos.column:]
  m = re.search(regex, rest_of_line)
  if m and not m.start():
    return pos + m.end()
  return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first regex match at or after pos.

  Only the current line is searched.  When there is no match the
  program exits; token_type is used in the error message only.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses the parenthesized expression of a $(EXPRESSION) construct.

  Scans from pos over a balanced-parentheses group and returns the
  whole span (including the parentheses) as one 'exp' token.
  """
  def ParseInParens(pos):
    # Consume optional whitespace, '(', the balanced interior, and ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Advance to the next parenthesis; recurse on '(' to keep the
    # nesting balanced, stop on ')'.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline stripped from its value.

  Tokens whose value does not end in a newline are returned unchanged.
  """
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """A generator yielding the Pump tokens in lines, starting at pos.

  Raw text between directives is yielded as 'code' tokens.  For the
  line-oriented directives the newline ending the preceding code token
  is stripped so the directive line leaves no blank line behind.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No directive left: the rest of the input is one code token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # The raw code preceding the directive.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      # $var ID = EXPRESSION   or   $var ID = [[ CODE ]]
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # A plain expression value: runs to the end of the line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      # $range ID EXP..EXP, where the second EXP ends the line.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(EXPRESSION): the parenthesized expression is one token.
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s == '':
    # An empty source produces no tokens at all.
    return
  for token in TokenizeLines(s.splitlines(True), Cursor(0, 0)):
    yield token
class CodeNode:
  """AST node for CODE: a sequence of atomic code nodes."""

  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node for a $var definition (identifier bound to code/exp)."""

  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node for a $range ID EXP..EXP declaration."""

  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node for a $for loop: identifier, separator, and body code."""

  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node wrapping an else branch."""

  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node for $if/$elif/$else: condition plus two branches."""

  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node holding a raw code token to be emitted verbatim."""

  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node for the $($) escape, which emits a literal '$'."""

  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node for a meta expression plus its compiled Python form."""

  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token, optionally checking its type.

  When token_type is given and the popped token does not match, an
  error is printed and the program exits.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)

  return token
def PeekToken(a_list):
  """Returns the first element of a_list without removing it, or None."""
  if a_list:
    return a_list[0]
  return None
def ParseExpNode(token):
  """Compiles an 'exp' token into an ExpNode.

  Every identifier in the expression is rewritten into a lookup of the
  corresponding meta variable on the evaluation environment, so the
  result can be eval()-ed with 'self' bound to an Env.
  """
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses the ELSE_BRANCH production: $else, $elif, or nothing.

  Consumes its tokens from the front of 'tokens' and returns the
  branch's CodeNode, or None when there is no else branch.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    # $elif desugars into a nested $if living in the else branch.
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Skip whitespace-only code between the then-branch and a possible
    # $else/$elif, then retry.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production from the front of tokens.

  Returns the corresponding AST node, or None (with the lookahead
  token pushed back) when the next token cannot start an atomic code
  construct.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    # $var ID = EXPRESSION   or   $var ID = [[ CODE ]]
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    # $for ID SEPARATOR [[ CODE ]]; the separator is optional.
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    # $range ID EXPRESSION..EXPRESSION
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # A bare $identifier: drop the '$' and treat the rest as an expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    # $(EXPRESSION)
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not the start of an atomic code construct: push back and report.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Parses atomic code nodes from tokens until none can be parsed."""
  atomic_code_list = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    atomic_code_list.append(node)
  return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  return ParseCodeNode(list(Tokenize(pump_src_text)))
class Env:
  """Evaluation environment: stacks of meta variables and ranges."""

  def __init__(self):
    # Stacks of (name, value) and (name, lower, upper) tuples; the
    # newest binding of a name is found first.
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns a copy with independent variable/range stacks."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value; the new binding shadows older ones."""
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      # Deliberate best-effort: non-numeric values are kept as-is.
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    """Removes the most recent variable binding."""
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    """Removes the most recent range binding."""
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the value bound to identifier; exits when undefined."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print 'ERROR: meta variable %s is undefined.' % (identifier,)
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode in this environment; exits on failure.

    NOTE(review): eval() runs arbitrary Python from the .pump source;
    acceptable here since the input is the developer's own file.
    """
    try:
      result = eval(exp.python_exp)
    except Exception, e:
      print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
      print ('ERROR: failed to evaluate meta expression %s at %s' %
             (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns (lower, upper) for a range name; exits when undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print 'ERROR: range %s is undefined.' % (identifier,)
    sys.exit(1)
class Output:
  """Accumulates generated text and exposes the last (partial) line."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline, or '' if there is no
    newline yet."""
    newline_at = self.string.rfind('\n')
    if newline_at < 0:
      return ''
    return self.string[newline_at + 1:]

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes one AST node, appending any generated text to output.

  env is mutated by $var/$range nodes; child code runs against a clone
  of env so inner bindings do not leak outward.  Exits on an unknown
  node type.
  """
  if isinstance(node, VarNode):
    # Evaluate the variable's body in a cloned env, then bind the
    # resulting text (or expression value) in the current env.
    identifier = node.identifier.value.strip()
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    # The loop variable iterates over the range bound earlier by a
    # $range with the same name; the separator goes between iterations,
    # not after the last one.
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    print 'BAD'
    print node
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node of code_node against env, in order."""
  for node in code_node.atomic_code:
    RunAtomicCode(env, node, output)
def IsComment(cur_line):
  """Returns True iff the line contains '//' anywhere in it."""
  return cur_line.find('//') != -1
def IsInPreprocessorDirevative(prev_lines, cur_line):
  """Returns True iff cur_line belongs to a preprocessor directive.

  That is the case when the line itself starts with '#', or when the
  previous emitted line continues a directive with a trailing
  backslash.
  """
  if cur_line.lstrip().startswith('#'):
    return True
  return bool(prev_lines) and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Re-flows an over-long '//' comment to 80 columns.

  Any code preceding the comment is emitted on its own line first; the
  comment text is word-wrapped under it at the same indentation.
  """
  slashes_at = line.find('//')
  code_part = line[:slashes_at].rstrip()
  if code_part == '':
    indent = slashes_at
  else:
    output.append(code_part)
    indent = len(code_part) - len(code_part.lstrip())
  prefix = ' '*indent + '// '
  max_len = 80 - len(prefix)
  comment_text = line[slashes_at + 2:].strip()
  # Split into word-plus-punctuation chunks so wrapping never breaks a word.
  chunks = [c for c in re.split(r'(\w+\W*)', comment_text) if c != '']
  cur_line = ''
  for chunk in chunks:
    if len((cur_line + chunk).rstrip()) < max_len:
      cur_line += chunk
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = chunk.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps an over-long code line to 80 columns, appending to output.

  line_concat is appended at every continuation point (' \\' for
  preprocessor directives, '' for plain code).  Continuation lines get
  4 extra spaces of indentation.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      # After the first emitted piece, switch to continuation settings.
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()

  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirevative(line, output):
  """Wraps a long preprocessor line, continuing lines with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long plain code line with no continuation marker."""
  WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
  """Returns a truthy value iff line is a header guard or an #include."""
  guard_match = re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line)
  return guard_match or re.match(r'^#include\s', line)
def WrapLongLine(line, output):
  """Appends line to output, wrapping it when it exceeds 80 columns."""
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsComment(line):
    if IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard
      # lines and includes.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirevative(output, line):
    if IsHeaderGuardOrInclude(line):
      # Same exception as above.
      output.append(line)
    else:
      WrapPreprocessorDirevative(line, output)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from the code."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  stripped = [line.rstrip() for line in wrapped]
  return '\n'.join(stripped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  output = Output()
  RunCode(Env(), ParseToAST(StripMetaComments(src_text)), output)
  return BeautifyCode(output.string)
def main(argv):
  """Converts the .pump file named by the last command-line argument.

  foo.cc.pump is written to foo.cc with a generated-file banner; any
  other input name sends the result to stdout instead.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)

  file_path = argv[-1]
  # file() is the Python-2 alias of open().
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    # Strip the '.pump' suffix to get the output file name.
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
# Run the converter only when executed as a script, not when imported.
if __name__ == '__main__':
  main(sys.argv)
| Python |
# SCons build script for the gambatte core library.
# Compiler and flags can be overridden on the command line, e.g.:
#   scons CC=clang CFLAGS='-O3'
global_cflags = ARGUMENTS.get('CFLAGS', '-Wall -Wextra -O2 -fomit-frame-pointer')
global_cxxflags = ARGUMENTS.get('CXXFLAGS', global_cflags + ' -fno-exceptions -fno-rtti')
global_defines = ' -DHAVE_STDINT_H'
vars = Variables()
vars.Add('CC')
vars.Add('CXX')
env = Environment(CPPPATH = ['src', 'include', '../common'],
                  CFLAGS = global_cflags + global_defines,
                  CXXFLAGS = global_cxxflags + global_defines,
                  variables = vars)
# Core emulator sources; Split() tokenizes the string on whitespace.
sourceFiles = Split('''
    src/bitmap_font.cpp
    src/cpu.cpp
    src/gambatte.cpp
    src/initstate.cpp
    src/interrupter.cpp
    src/interruptrequester.cpp
    src/memory.cpp
    src/sound.cpp
    src/state_osd_elements.cpp
    src/statesaver.cpp
    src/tima.cpp
    src/video.cpp
    src/mem/cartridge.cpp
    src/mem/memptrs.cpp
    src/mem/rtc.cpp
    src/sound/channel1.cpp
    src/sound/channel2.cpp
    src/sound/channel3.cpp
    src/sound/channel4.cpp
    src/sound/duty_unit.cpp
    src/sound/envelope_unit.cpp
    src/sound/length_counter.cpp
    src/video/ly_counter.cpp
    src/video/lyc_irq.cpp
    src/video/next_m0_time.cpp
    src/video/ppu.cpp
    src/video/sprite_mapper.cpp
    ''')
# Use zipped-ROM support when zlib headers are available; otherwise
# fall back to the plain file loader.
conf = env.Configure()
if conf.CheckHeader('zlib.h'):
    sourceFiles.append('src/file/unzip/unzip.c')
    sourceFiles.append('src/file/unzip/ioapi.c')
    sourceFiles.append('src/file/file_zip.cpp')
else:
    sourceFiles.append('src/file/file.cpp')
conf.Finish()
env.Library('gambatte', sourceFiles)
| Python |
#!/usr/bin/python
'''* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Mupen64plus - code_convert.c *
* Mupen64Plus homepage: http://code.google.com/p/mupen64plus/ *
* Copyright (C) 2010 Rhett Osborne *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Usage:
python code_convert.py > ../data/mupencheat.txt < ../data/mupen64plus.cht
'''
from sys import stdin
class cheat:
    """Accumulates one cheat entry: name, description, and code lines."""

    def __init__(self):
        self.n = ""     # cheat name
        self.d = ""     # cheat description
        self.c = []     # code lines
        self.v = 0      # index of the code line holding a variable value
        self.hb = '00'  # high byte used with '??' value placeholders

    def add(self, l):
        """Appends this cheat's formatted lines to list l.

        Entries without a name are considered empty and emit nothing.
        """
        if self.n == "":
            return
        l.append(" cn %s" % (self.n))
        if self.d != "":
            l.append(" cd %s" % (self.d))
        for code in self.c:
            l.append(" " + code)

    def clear(self):
        """Resets name/description/codes (hb is deliberately kept)."""
        self.n = ""
        self.d = ""
        self.c = []
        self.v = 0
# Output lines accumulated for the current game section.
l=[]
# Highest cheat index seen so far in the current section.
cCount=0
# Fixed pool of 225 cheat slots, indexed by cheat number.
_cs = []
for i in range(225):
    _cs.append(cheat())
# cs is a shallow copy of the list; it aliases the same cheat objects.
cs = _cs[:]
def print_l():
    """Flushes the current section: prints l plus all cheats, then
    resets the accumulated state for the next [CRC] section."""
    global l, cs
    for cheat in cs:
        cheat.add(l)
    for line in l:
        # Strip NUL bytes that may survive from the source file.
        print line.replace("\x00", "")
    l=[]
    # NOTE(review): this creates a new *local* cCount; the global is
    # not in the 'global' statement and so is never reset -- confirm
    # whether that is intended.
    cCount=0
    for i in range(225):
        cs[i].clear()
# Parse the old-format .cht file from stdin, accumulating output in l
# and cheat entries in cs; each [CRC] game section is flushed via
# print_l() when the next section header is seen.
lines = stdin.read().split("\n")
for line in lines:
    if len(line) < 2: continue
    elif(line[:2] == "//" and line != "//----" and line != "//---" ):
        # Ordinary comment lines pass straight through.
        l.append(line)
    elif len(line) < 4: continue
    elif(line[0] == '[' and line[-1] == ']' and len(line) > 23):
        # A new game section header: flush the previous section first.
        print_l()
        l.append("\ncrc %s" % line[1:-1])
    elif(line[:5] == "Name="):
        l.append("gn %s" % (line[5:]))
    elif(line[:5] == "Cheat"):
        t = line[5:].split('=')[0]
        if (len(t)>1 and t[-2] == '_'):
            # 'CheatN_N' adds a description, 'CheatN_O' adds options,
            # to the already-defined cheat number N.
            n = int(t[:-2])
            if(t[-1] == 'N'):
                cs[n].d = line.split("=")[1]
            else:
                # Each '$'-separated option becomes value:"label".
                for option in line.split("=")[1].split("$")[1:]:
                    if(len(option) < 4):
                        break;
                    if(option[-1]==','): end =-1
                    else: end = None
                    if(option[2] == " "):
                        # Two-digit value: prepend the remembered high byte.
                        cs[n].c[cs[n].v] += "%s%s:\"%s\""%(cs[n].hb,option[:2],option[3:end].replace("\"", "\\\""))
                    else:
                        cs[n].c[cs[n].v] += "%s:\"%s\""%(option[:4],option[5:end].replace("\"", "\\\""))
                    cs[n].c[cs[n].v]+=','
                # Drop the trailing comma added by the loop above.
                cs[n].c[cs[n].v] = cs[n].c[cs[n].v][:-1]
        else:
            # A plain 'CheatN=' line defines name and code list.
            n = int(t)
            cn = line.split('"')
            cs[n].c = cn[2][1:].split(',')
            cs[n].n = cn[1];
            i=0
            for cheat in cs[n].c:
                if(cheat[-1] == '?'):
                    # '?' digits mark the variable code word: remember
                    # its high byte and position for later options.
                    if(cheat[-2:] == '??' and cheat[-4:-2] != '??'):
                        cs[n].hb = cheat[-4:-2]
                    else:
                        cs[n].hb = '00'
                    cs[n].c[i] = cheat[:9] + "???? ";
                    cs[n].v=i
                i+=1
            if(n > cCount):
                cCount = n
    elif(line != "//----" and line != "//---" ):
        # Anything unrecognized is preserved as a comment.
        l.append("//%s" %line)
| Python |
#!/usr/bin/env python
#/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * Mupen64plus - regression-video.py *
# * Mupen64Plus homepage: http://code.google.com/p/mupen64plus/ *
# * Copyright (C) 2008-2009 Richard Goedeken *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program; if not, write to the *
# * Free Software Foundation, Inc., *
# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
from optparse import OptionParser
from threading import Thread
from datetime import date
import subprocess
import commands
import shutil
import stat
import sys
import os
# set global report string
# Accumulated by every step below and sent/archived at the end of main().
report = "Mupen64Plus Regression Test report\n----------------------------------\n"
#******************************************************************************
# main functions
#
def main(rootdir, cfgfile, nobuild):
    """Runs the whole regression procedure; returns 0 on success.

    Non-zero values identify the first failing step: 1 config load,
    2 checkout, 3 video build, 4 test run, 5 result check, 6 report
    mail, 7 archiving.  nobuild skips checkout and all builds.
    """
    global report
    # set up child directory paths
    srcdir = os.path.join(rootdir, "source")
    shotdir = os.path.join(rootdir, "current")
    refdir = os.path.join(rootdir, "reference")
    archivedir = os.path.join(rootdir, "archive")
    # run the test procedure
    tester = RegTester(rootdir, srcdir, shotdir)
    rval = 0
    # The while True / break construct is a single-pass "goto": any
    # failing step breaks out to the report/archive code at the bottom.
    while True:
        # Step 1: load the test config file
        if not tester.LoadConfig(cfgfile):
            rval = 1
            break
        # Step 2: check out from Mercurial
        if not nobuild:
            if not tester.CheckoutSource(srcdir):
                rval = 2
                break
        # Step 3: run test builds
        if not nobuild:
            for modname in tester.modulesAndParams:
                module = tester.modulesAndParams[modname]
                if "testbuilds" not in module:
                    continue
                modurl = module["url"]
                modfilename = modurl.split('/')[-1]
                testlist = [ name.strip() for name in module["testbuilds"].split(',') ]
                makeparams = [ params.strip() for params in module["testbuildparams"].split(',') ]
                if len(testlist) != len(makeparams):
                    report += "Config file error for test builds in %s. Build name list and makefile parameter list have different lengths.\n" % modname
                # Only the pairs present in both lists are built.
                testbuilds = min(len(testlist), len(makeparams))
                for i in range(testbuilds):
                    buildname = testlist[i]
                    buildmake = makeparams[i]
                    # Test builds are informational; failures don't abort.
                    BuildSource(srcdir, modfilename, modname, buildname, buildmake, module["outputfiles"], True)
        # Step 4: build the binary for the video regression test
        if not nobuild:
            for modname in tester.modulesAndParams:
                module = tester.modulesAndParams[modname]
                modurl = module["url"]
                modfilename = modurl.split('/')[-1]
                videobuild = module["videobuild"]
                videomake = module["videobuildparams"]
                if not BuildSource(srcdir, modfilename, modname, videobuild, videomake, module["outputfiles"], False):
                    rval = 3
                    break
            if rval != 0:
                break
        # Step 5: run the tests, check the results
        if not tester.RunTests():
            rval = 4
            break
        if not tester.CheckResults(refdir):
            rval = 5
            break
        # test procedure is finished
        break
    # Step 6: send email report and archive the results
    if not tester.SendReport():
        rval = 6
    if not tester.ArchiveResults(archivedir):
        rval = 7
    # all done with test process
    return rval
#******************************************************************************
# Checkout & build functions
#
def BuildSource(srcdir, moddir, modname, buildname, buildmake, outputfiles, istest):
    """Builds one source module with make, logging into the report.

    When istest is True the build is informational: warnings/errors are
    reported, the tree is cleaned, and True is returned regardless.
    Otherwise False is returned on any error, and on success the files
    in outputfiles (comma-separated) plus any data/ directory are moved
    into srcdir.
    """
    global report
    makepath = os.path.join(srcdir, moddir, "projects", "unix")
    # print build report message and clear counters
    testbuildcommand = "make -C %s %s" % (makepath, buildmake)
    if istest:
        report += "Running %s test build \"%s\" with command \"%s\"\n" % (modname, buildname, testbuildcommand)
    else:
        report += "Building %s \"%s\" for video test with command \"%s\"\n" % (modname, buildname, testbuildcommand)
    warnings = 0
    errors = 0
    # run make and capture the output
    output = commands.getoutput(testbuildcommand)
    makelines = output.split("\n")
    # print warnings and errors
    for line in makelines:
        if "error:" in line:
            report += " " + line + "\n"
            errors += 1
        if "warning:" in line:
            report += " " + line + "\n"
            warnings += 1
    report += "%i errors. %i warnings.\n" % (errors, warnings)
    if errors > 0 and not istest:
        return False
    # check for output files
    for filename in outputfiles.split(','):
        if not os.path.exists(os.path.join(makepath, filename)):
            report += "Build failed: '%s' not found\n" % filename
            errors += 1
    if errors > 0 and not istest:
        return False
    # clean up if this was a test
    if istest:
        os.system("make -C %s clean" % makepath)
    # if this wasn't a test, then copy our output files and data files
    if not istest:
        for filename in outputfiles.split(','):
            shutil.move(os.path.join(makepath, filename), srcdir)
        datapath = os.path.join(srcdir, moddir, "data")
        if os.path.isdir(datapath):
            # NOTE(review): copytree is presumably a helper defined
            # elsewhere in this script (shutil.copytree refuses an
            # existing destination) -- confirm.
            copytree(datapath, os.path.join(srcdir, "data"))
    # build was successful!
    return True
#******************************************************************************
# Test execution classes
#
class RegTester:
    def __init__(self, rootdir, bindir, screenshotdir):
        """Stores the working directories and initializes parameter tables."""
        self.rootdir = rootdir
        self.bindir = bindir
        self.screenshotdir = screenshotdir
        self.generalParams = { }     # top-level key=value pairs from the config
        self.gamesAndParams = { }    # per-[game] parameter dicts
        self.modulesAndParams = { }  # per-{module} parameter dicts
        self.videoplugins = [ "mupen64plus-video-rice.so" ]
        self.thisdate = str(date.today())
    def LoadConfig(self, filename):
        """Parses the regression-test config file into the parameter dicts.

        '[game]' sections populate gamesAndParams, '{module}' sections
        populate modulesAndParams, and key=value lines before any
        section go to generalParams; a key is attributed to the most
        specific active section (game > module > general).  Returns
        False on a read error or when the required 'rompath' key is
        missing.
        """
        global report
        # read the config file
        report += "\nLoading regression test configuration.\n"
        try:
            cfgfile = open(os.path.join(self.rootdir, filename), "r")
            cfglines = cfgfile.read().split("\n")
            cfgfile.close()
        except Exception, e:
            report += "Error in RegTestConfigParser::LoadConfig(): %s" % e
            return False
        # parse the file
        GameFilename = None
        ModuleName = None
        for line in cfglines:
            # strip leading and trailing whitespace
            line = line.strip()
            # test for comment
            if len(line) == 0 or line[0] == '#':
                continue
            # test for new game filename
            if line[0] == '[' and line [-1] == ']':
                GameFilename = line[1:-1]
                if GameFilename in self.gamesAndParams:
                    report += " Warning: Config file '%s' contains duplicate game entry '%s'\n" % (filename, GameFilename)
                else:
                    self.gamesAndParams[GameFilename] = { }
                continue
            # test for new source module build
            if line[0] == '{' and line [-1] == '}':
                ModuleName = line[1:-1]
                if ModuleName in self.modulesAndParams:
                    report += " Warning: Config file '%s' contains duplicate source module '%s'\n" % (filename, ModuleName)
                else:
                    self.modulesAndParams[ModuleName] = { }
                continue
            # print warning and continue if it's not a (key = value) pair
            pivot = line.find('=')
            if pivot == -1:
                report += " Warning: Config file '%s' contains unrecognized line: '%s'\n" % (filename, line)
                continue
            # parse key, value
            key = line[:pivot].strip().lower()
            value = line[pivot+1:].strip()
            if ModuleName is None:
                paramDict = self.generalParams
            elif GameFilename is None:
                paramDict = self.modulesAndParams[ModuleName]
            else:
                paramDict = self.gamesAndParams[GameFilename]
            if key in paramDict:
                report += " Warning: duplicate key '%s'\n" % key
                continue
            paramDict[key] = value
        # check for required parameters
        if "rompath" not in self.generalParams:
            report += " Error: rompath is not given in config file\n"
            return False
        # config is loaded
        return True
def CheckoutSource(self, srcdir):
    """Wipe srcdir and clone every configured Mupen64Plus source module
    from its Mercurial repository.

    The checked-out changeset of each module is recorded in the global
    report.  Returns True on success, False on any failure.
    """
    global report
    # start from an empty source tree
    if not deltree(srcdir):
        return False
    os.mkdir(srcdir)
    os.mkdir(os.path.join(srcdir, "data"))
    # clone each configured source module in turn
    for modname, module in self.modulesAndParams.items():
        if "url" not in module:
            report += "Error: no Hg repository URL for module %s\n\n" % modname
            return False
        modurl = module["url"]
        modfilename = modurl.split("/")[-1]
        # run the clone; hg reports a merge summary on its final line
        output = commands.getoutput("hg clone --cwd %s %s" % (srcdir, modurl))
        lastline = output.split("\n")[-1]
        if "0 files unresolved" not in lastline:
            report += "Hg Error: %s\n\n" % lastline
            return False
        # record the tip changeset of the fresh clone in the report
        output = commands.getoutput("hg tip -R %s" % os.path.join(srcdir, modfilename))
        found_revision = False
        for line in output.split('\n'):
            words = line.split()
            if len(words) == 2 and words[0] == 'changeset:':
                report += "Hg Checkout %s: changeset %s\n" % (modfilename, words[1])
                found_revision = True
        if not found_revision:
            report += "Hg Error: couldn't find revision information\n\n"
            return False
    return True
def RunTests(self):
    """Run each configured game under every video plugin, collecting
    screenshots into self.screenshotdir.

    A test process that has not finished after 60 seconds is killed
    (SIGKILL).  Returns False on setup errors (missing ROM directory or
    undeletable screenshot directory), True once all tests were run.
    """
    global report
    rompath = self.generalParams["rompath"]
    if not os.path.exists(rompath):
        report += " Error: ROM directory '%s' does not exist!\n" % rompath
        return False
    # Remove any current screenshot directory
    if not deltree(self.screenshotdir):
        return False
    # Data initialization and start message: one subdirectory per plugin
    os.mkdir(self.screenshotdir)
    for plugin in self.videoplugins:
        # plugin name without its '.so' suffix
        videoname = plugin[:plugin.find('.')]
        os.mkdir(os.path.join(self.screenshotdir, videoname))
    report += "\nRunning regression tests on %i games.\n" % len(self.gamesAndParams)
    # loop over each game filename given in regtest config file
    for GameFilename in self.gamesAndParams:
        GameParams = self.gamesAndParams[GameFilename]
        # if no screenshots parameter given for this game then skip it
        if "screenshots" not in GameParams:
            report += " Warning: no screenshots taken for game '%s'\n" % GameFilename
            continue
        # build the list of frame numbers at which to take screenshots
        shotlist = [ str(int(framenum.strip())) for framenum in GameParams["screenshots"].split(',') ]
        if len(shotlist) < 1 or (len(shotlist) == 1 and shotlist[0] == '0'):
            report += " Warning: invalid screenshot list for game '%s'\n" % GameFilename
            continue
        # run a test for each video plugin
        for plugin in self.videoplugins:
            videoname = plugin[:plugin.find('.')]
            # 'skipvideo' lists substrings matched case-insensitively
            # against plugin filenames; matching plugins are skipped
            if "skipvideo" in GameParams:
                skipit = False
                skiplist = [ name.strip() for name in GameParams["skipvideo"].split(',') ]
                for skiptag in skiplist:
                    if skiptag.lower() in plugin.lower():
                        skipit = True
                if skipit:
                    continue
            # construct the command line for the emulator run
            exepath = os.path.join(self.bindir, "mupen64plus")
            exeparms = [ "--corelib", os.path.join(self.bindir, "libmupen64plus.so") ]
            exeparms += [ "--testshots", ",".join(shotlist) ]
            exeparms += [ "--sshotdir", os.path.join(self.screenshotdir, videoname) ]
            exeparms += [ "--plugindir", self.bindir ]
            exeparms += [ "--datadir", os.path.join(self.bindir, "data") ]
            # use a local config directory if one is present
            myconfig = os.path.join(self.rootdir, "config")
            if os.path.exists(myconfig):
                exeparms += [ "--configdir", myconfig ]
            exeparms += [ "--gfx", plugin ]
            exeparms += [ os.path.join(rompath, GameFilename) ]
            # run it, but if it takes too long print an error and kill it
            testrun = RegTestRunner(exepath, exeparms)
            testrun.start()
            testrun.join(60.0)
            if testrun.isAlive():
                report += " Error: Test run timed out after 60 seconds: '%s'\n" % " ".join(exeparms)
                os.kill(testrun.pid, 9)
                testrun.join(10.0)
    # all tests have been run
    return True
def CheckResults(self, refdir):
    """Compare the current run's screenshots against the reference set.

    Missing reference images are copied from the current run (warning);
    missing test images count as errors; matching pairs are compared with
    ImageMagick's /usr/bin/compare using the PSNR metric, where > 60 dB
    counts as identical.  Always returns True; totals go into the global
    report.
    """
    global report
    warnings = 0
    errors = 0
    report += "\nChecking regression test results\n"
    # get lists of files in the reference folders, creating them on the
    # first run (so the current screenshots seed the reference set)
    refshots = { }
    if not os.path.exists(refdir):
        os.mkdir(refdir)
    for plugin in self.videoplugins:
        videoname = plugin[:plugin.find('.')]
        videodir = os.path.join(refdir, videoname)
        if not os.path.exists(videodir):
            os.mkdir(videodir)
            refshots[videoname] = [ ]
        else:
            refshots[videoname] = [ filename for filename in os.listdir(videodir) ]
    # get lists of files produced by current test runs
    newshots = { }
    for plugin in self.videoplugins:
        videoname = plugin[:plugin.find('.')]
        videodir = os.path.join(self.screenshotdir, videoname)
        if not os.path.exists(videodir):
            newshots[videoname] = [ ]
        else:
            newshots[videoname] = [ filename for filename in os.listdir(videodir) ]
    # make a list of matching ref/test screenshots, and copy any test
    # screenshot with no reference into the reference directory
    checklist = { }
    for plugin in self.videoplugins:
        videoname = plugin[:plugin.find('.')]
        checklist[videoname] = [ ]
        for filename in newshots[videoname]:
            if filename in refshots[videoname]:
                checklist[videoname] += [ filename ]
            else:
                report += " Warning: reference screenshot '%s/%s' missing. Copying from current test run\n" % (videoname, filename)
                shutil.copy(os.path.join(self.screenshotdir, videoname, filename), os.path.join(refdir, videoname))
                warnings += 1
    # look for missing test screenshots
    for plugin in self.videoplugins:
        videoname = plugin[:plugin.find('.')]
        for filename in refshots[videoname]:
            if filename not in newshots[videoname]:
                report += " Error: Test screenshot '%s/%s' missing.\n" % (videoname, filename)
                errors += 1
    # do image comparisons with ImageMagick's 'compare' tool
    for plugin in self.videoplugins:
        videoname = plugin[:plugin.find('.')]
        for filename in checklist[videoname]:
            refimage = os.path.join(refdir, videoname, filename)
            testimage = os.path.join(self.screenshotdir, videoname, filename)
            diffimage = os.path.join(self.screenshotdir, videoname, os.path.splitext(filename)[0] + "_DIFF.png")
            cmd = ("/usr/bin/compare", "-metric", "PSNR", refimage, testimage, diffimage)
            pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
            similarity = pipe.read().strip()
            pipe.close()
            # 'compare' prints the PSNR value; non-numeric output means
            # the comparison itself failed, which we treat as 0 dB
            try:
                db = float(similarity)
            except:
                db = 0
            if db > 60.0:
                # images effectively identical: discard the diff image
                os.unlink(diffimage)
            else:
                report += " Warning: test image '%s/%s' does not match reference. PSNR = %s\n" % (videoname, filename, similarity)
                warnings += 1
    # give report and return
    report += "%i errors. %i warnings.\n" % (errors, warnings)
    return True
def SendReport(self):
    """Email the accumulated report through the local sendmail binary.

    Silently succeeds (returns True) when no plausible 'sendemail'
    address is configured; returns False if the sendmail pipe fails.
    """
    global report
    # nothing to do unless a plausible recipient list is configured
    recipients = self.generalParams.get("sendemail")
    if recipients is None or len(recipients) < 5:
        return True
    # build the message header block
    emailheader = "\n".join([
        "To: %s" % recipients,
        "From: Mupen64Plus-Tester@fascination.homelinux.net",
        "Subject: %s Regression Test Results for Mupen64Plus" % self.thisdate,
        "Reply-to: do-not-reply@fascination.homelinux.net",
        "Content-Type: text/plain; charset=UTF-8",
        "Content-Transfer-Encoding: 8bit",
    ]) + "\n\n"
    # pipe the header and the report body through sendmail
    try:
        pipe = subprocess.Popen(("/usr/sbin/sendmail", "-t"), stdin=subprocess.PIPE).stdin
        pipe.write(emailheader)
        pipe.write(report)
        pipe.close()
    except Exception as e:
        report += "Exception encountered when calling sendmail: '%s'\n" % e
        report += "Email header:\n%s\n" % emailheader
        return False
    return True
def ArchiveResults(self, archivedir):
    """Archive this run's screenshots and report under archivedir.

    Screenshots are moved into a subdirectory named after self.thisdate
    (replacing any archive already made today); the report text is
    written to report_<date>.txt.  Returns False only when an existing
    same-day archive cannot be removed, True otherwise.
    """
    global report
    # create archive dir if it doesn't exist
    if not os.path.exists(archivedir):
        os.mkdir(archivedir)
    # move the images into a subdirectory of 'archive' given by date
    subdir = os.path.join(archivedir, self.thisdate)
    if os.path.exists(subdir):
        if not deltree(subdir):
            return False
    if os.path.exists(self.screenshotdir):
        shutil.move(self.screenshotdir, subdir)
    # copy the report into the archive directory; 'with' guarantees the
    # handle is closed even if the write raises (the original leaked it)
    with open(os.path.join(archivedir, "report_%s.txt" % self.thisdate), "w") as f:
        f.write(report)
    # archival is complete
    return True
class RegTestRunner(Thread):
    """Worker thread that runs one emulator test process.

    The child's PID is published in self.pid so the parent thread can
    kill a hung test; the exit status is stored in self.returnval once
    the process finishes.
    """

    def __init__(self, exepath, exeparms):
        Thread.__init__(self)
        self.exepath = exepath    # path to the executable to run
        self.exeparms = exeparms  # argument list (excluding argv[0])
        self.pid = 0              # child PID, valid once run() has started it
        self.returnval = None     # child exit status, set when it terminates

    def run(self):
        # launch the child, publish its PID, then block until it exits
        child = subprocess.Popen([self.exepath] + self.exeparms)
        self.pid = child.pid
        self.returnval = child.wait()
#******************************************************************************
# Generic helper functions
#
def deltree(dirname):
    """Recursively delete dirname and everything beneath it.

    A nonexistent directory counts as success.  Returns True on
    success, False on error (the error text is appended to the global
    report string).
    """
    global report
    if not os.path.exists(dirname):
        return True
    try:
        for entry in os.listdir(dirname):
            entrypath = os.path.join(dirname, entry)
            if os.path.isdir(entrypath):
                # recurse into subdirectories first
                if not deltree(entrypath):
                    return False
            else:
                os.unlink(entrypath)
        os.rmdir(dirname)
    except Exception as e:
        report += "Error in deltree(): %s\n" % e
        return False
    return True
def copytree(srcpath, dstpath):
    """Recursively copy the contents of srcpath into dstpath.

    Both arguments must name existing directories; returns False
    otherwise.  Subdirectories are created in dstpath as needed and
    regular files are copied with shutil.copy.  Returns True once the
    walk completes.
    """
    if not (os.path.isdir(srcpath) and os.path.isdir(dstpath)):
        return False
    for entry in os.listdir(srcpath):
        entrypath = os.path.join(srcpath, entry)
        if os.path.isdir(entrypath):
            # mirror the subdirectory, then copy its contents
            newdst = os.path.join(dstpath, entry)
            os.mkdir(newdst)
            copytree(entrypath, newdst)
        else:
            shutil.copy(entrypath, dstpath)
    return True
#******************************************************************************
# main function call for standard script execution
#
if __name__ == "__main__":
    # Command-line entry point: parse options and dispatch to main().
    parser = OptionParser()
    parser.add_option("-n", "--nobuild", dest="nobuild", default=False, action="store_true",
                      help="Assume source code is present; don't check out and build")
    parser.add_option("-t", "--testpath", dest="testpath",
                      help="Set root of testing directory to PATH", metavar="PATH")
    parser.add_option("-c", "--cfgfile", dest="cfgfile", default="daily-tests.cfg",
                      help="Use regression test config file FILE", metavar="FILE")
    (opts, args) = parser.parse_args()
    # default the test root to the directory containing this script
    if opts.testpath is not None:
        rootdir = opts.testpath
    else:
        os.chdir(os.path.dirname(sys.argv[0]))
        rootdir = "."
    # call the main function and exit with its return code
    sys.exit(main(rootdir, opts.cfgfile, opts.nobuild))
| Python |
#!/usr/bin/python
# coding=utf8
# Copyright (C) 2007 Mashrab Kuvatov <kmashrab@uni-bremen.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
def needsPrefix(inStr, curIndx):
    """Return True when the char at curIndx must get an apostrophe prefix,
    i.e. when it directly follows a Cyrillic 's' (either case)."""
    return curIndx > 0 and inStr[curIndx - 1] in (u'с', u'С')
def isPrevVocal(inStr, curIndx):
    """Return True when the character before curIndx is one of the
    Cyrillic Uzbek vowels (either case)."""
    if curIndx <= 0:
        return False
    return inStr[curIndx - 1] in u'аиеоўуАИЕОЎУ'
def needsUpperCasing(inStr, curIndx):
    """Return True when a multi-letter transliteration at curIndx should
    be fully uppercased: either neighbouring character is uppercase."""
    if curIndx > 0 and inStr[curIndx - 1].isupper():
        return True
    if curIndx < len(inStr) - 1:
        return inStr[curIndx + 1].isupper()
    return False
def convertLine(cyr_str):
    """Transliterate one line of Cyrillic Uzbek text into Latin Uzbek.

    Characters outside the Uzbek Cyrillic alphabet are passed through
    unchanged.  Casing of multi-letter transliterations (e.g. 'Ch' vs
    'CH') is decided from the neighbouring characters via
    needsUpperCasing().
    """
    # Latin equivalents for the contiguous Cyrillic range 'а'..'я',
    # indexed by ord(char) - ord(u'а'); special letters outside that
    # range are handled explicitly below.
    lookUpTbl = [u'a', u'b', u'v', u'g', u'd', u'e', u'j', u'z',
                 u'i', u'y', u'k', u'l', u'm', u'n', u'o', u'p',
                 u'r', u's', u't', u'u', u'f', u'x', u'ts', u'ch',
                 u'sh', u'sh', u'ʼ', u'i', u'', u'e', u'yu', u'ya']
    lat_str = u''
    cyr_str_len = len(cyr_str)
    if cyr_str_len == 0:
        return cyr_str
    isWordBegin = True
    # loop over the chars in the string
    for curCyrIndx in range(cyr_str_len):
        curCyrChar = cyr_str[curCyrIndx]
        # is this a letter of the Uzbek Cyrillic alphabet?
        if (curCyrChar >= u'а' and curCyrChar <= u'я') or \
           curCyrChar in [u'ў', u'ҳ', u'қ', u'ғ', u'ё'] or \
           (curCyrChar >= u'А' and curCyrChar <= u'Я') or \
           curCyrChar in [u'Ў', u'Ҳ', u'Қ', u'Ғ', u'Ё']:
            # remember the case, then work on the lowercase form
            wasCurCharUpper = curCyrChar.isupper()
            curCyrChar = curCyrChar.lower()
            if curCyrChar == u'ў':
                curLatChar = u'oʻ'
            elif curCyrChar == u'ҳ':
                # after 'с' the 'h' needs an apostrophe so that the pair
                # is not read as the digraph 'sh'
                if needsPrefix(cyr_str, curCyrIndx):
                    curLatChar = u'ʼh'
                else:
                    curLatChar = u'h'
            elif curCyrChar == u'қ':
                curLatChar = u'q'
            elif curCyrChar == u'ғ':
                curLatChar = u'gʻ'
            # TODO: Convert 'ё' properly
            elif curCyrChar == u'ё':
                curLatChar = u'yo'
            elif curCyrChar == u'е' and isWordBegin:
                # word-initial 'е' is 'ye', not plain 'e'
                curLatChar = u'ye'
            elif curCyrChar == u'ц':
                # 'ц' is 'ts' only between a vowel and the rest of a word
                if isWordBegin or not isPrevVocal(cyr_str, curCyrIndx):
                    curLatChar = u's'
                else:
                    curLatChar = u'ts'
            # TODO: Take care of 'ю' 'я'
            else:
                # regular letter: direct table lookup
                curLatChar = lookUpTbl[ord(curCyrChar) - ord(u'а')]
            if wasCurCharUpper:
                # restore case; digraphs are fully uppercased only when
                # the neighbouring characters are uppercase too
                if curLatChar in [u'ch', u'sh', u'yo', u'yu', u'ya', u'ye', u'ts'] and \
                   needsUpperCasing(cyr_str, curCyrIndx):
                    curLatChar = curLatChar.upper()
                elif curLatChar == u'ʼh':
                    curLatChar = u'ʼH'
                else:
                    curLatChar = curLatChar.capitalize()
        else:
            # non-alphabet character: copy through unchanged
            curLatChar = curCyrChar
        lat_str = lat_str + curLatChar
        # a new word starts after any non-alphanumeric character
        isWordBegin = not curCyrChar.isalnum()
    return lat_str
# beginning of the program: command-line driver
if len(sys.argv) != 3:
    # wrong argument count: print usage to stdout and exit with failure
    print 'Converts UTF-8 encoded Cyrillic Uzbek text file into',
    print 'Latin Uzbek text file.'
    print 'Usage: %s cyr.txt lat.txt' % sys.argv[0]
    sys.exit(1)
else:
    cyr_filename = sys.argv[1]
    lat_filename = sys.argv[2]
    # read the Cyrillic input file
    cyr_file = open(cyr_filename, 'r')
    latLines = ''
    # convert each UTF-8 line to its Latin transliteration
    for eachCyrLine in cyr_file:
        latLines = latLines + convertLine(eachCyrLine.decode('utf-8'))
    cyr_file.close()
    # save the converted text, re-encoded as UTF-8
    latLines = latLines.encode('utf-8')
    lat_file = open(lat_filename, 'w')
    lat_file.write(latLines)
    lat_file.close()
| Python |
#!/usr/bin/python
# coding=utf8
# Copyright (C) 2007 Mashrab Kuvatov <kmashrab@uni-bremen.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
def needsPrefix(inStr, curIndx):
    """Return True when the char at curIndx must get an apostrophe prefix,
    i.e. when it directly follows a Cyrillic 's' (either case)."""
    return curIndx > 0 and inStr[curIndx - 1] in (u'с', u'С')
def isPrevVocal(inStr, curIndx):
    """Return True when the character before curIndx is one of the
    Cyrillic Uzbek vowels (either case)."""
    if curIndx <= 0:
        return False
    return inStr[curIndx - 1] in u'аиеоўуАИЕОЎУ'
def needsUpperCasing(inStr, curIndx):
    """Return True when a multi-letter transliteration at curIndx should
    be fully uppercased: either neighbouring character is uppercase."""
    if curIndx > 0 and inStr[curIndx - 1].isupper():
        return True
    if curIndx < len(inStr) - 1:
        return inStr[curIndx + 1].isupper()
    return False
def convertLine(cyr_str):
    """Transliterate one line of Cyrillic Uzbek text into Latin Uzbek.

    Characters outside the Uzbek Cyrillic alphabet are passed through
    unchanged.  Casing of multi-letter transliterations (e.g. 'Ch' vs
    'CH') is decided from the neighbouring characters via
    needsUpperCasing().
    """
    # Latin equivalents for the contiguous Cyrillic range 'а'..'я',
    # indexed by ord(char) - ord(u'а'); special letters outside that
    # range are handled explicitly below.
    lookUpTbl = [u'a', u'b', u'v', u'g', u'd', u'e', u'j', u'z',
                 u'i', u'y', u'k', u'l', u'm', u'n', u'o', u'p',
                 u'r', u's', u't', u'u', u'f', u'x', u'ts', u'ch',
                 u'sh', u'sh', u'ʼ', u'i', u'', u'e', u'yu', u'ya']
    lat_str = u''
    cyr_str_len = len(cyr_str)
    if cyr_str_len == 0:
        return cyr_str
    isWordBegin = True
    # loop over the chars in the string
    for curCyrIndx in range(cyr_str_len):
        curCyrChar = cyr_str[curCyrIndx]
        # is this a letter of the Uzbek Cyrillic alphabet?
        if (curCyrChar >= u'а' and curCyrChar <= u'я') or \
           curCyrChar in [u'ў', u'ҳ', u'қ', u'ғ', u'ё'] or \
           (curCyrChar >= u'А' and curCyrChar <= u'Я') or \
           curCyrChar in [u'Ў', u'Ҳ', u'Қ', u'Ғ', u'Ё']:
            # remember the case, then work on the lowercase form
            wasCurCharUpper = curCyrChar.isupper()
            curCyrChar = curCyrChar.lower()
            if curCyrChar == u'ў':
                curLatChar = u'oʻ'
            elif curCyrChar == u'ҳ':
                # after 'с' the 'h' needs an apostrophe so that the pair
                # is not read as the digraph 'sh'
                if needsPrefix(cyr_str, curCyrIndx):
                    curLatChar = u'ʼh'
                else:
                    curLatChar = u'h'
            elif curCyrChar == u'қ':
                curLatChar = u'q'
            elif curCyrChar == u'ғ':
                curLatChar = u'gʻ'
            # TODO: Convert 'ё' properly
            elif curCyrChar == u'ё':
                curLatChar = u'yo'
            elif curCyrChar == u'е' and isWordBegin:
                # word-initial 'е' is 'ye', not plain 'e'
                curLatChar = u'ye'
            elif curCyrChar == u'ц':
                # 'ц' is 'ts' only between a vowel and the rest of a word
                if isWordBegin or not isPrevVocal(cyr_str, curCyrIndx):
                    curLatChar = u's'
                else:
                    curLatChar = u'ts'
            # TODO: Take care of 'ю' 'я'
            else:
                # regular letter: direct table lookup
                curLatChar = lookUpTbl[ord(curCyrChar) - ord(u'а')]
            if wasCurCharUpper:
                # restore case; digraphs are fully uppercased only when
                # the neighbouring characters are uppercase too
                if curLatChar in [u'ch', u'sh', u'yo', u'yu', u'ya', u'ye', u'ts'] and \
                   needsUpperCasing(cyr_str, curCyrIndx):
                    curLatChar = curLatChar.upper()
                elif curLatChar == u'ʼh':
                    curLatChar = u'ʼH'
                else:
                    curLatChar = curLatChar.capitalize()
        else:
            # non-alphabet character: copy through unchanged
            curLatChar = curCyrChar
        lat_str = lat_str + curLatChar
        # a new word starts after any non-alphanumeric character
        isWordBegin = not curCyrChar.isalnum()
    return lat_str
# beginning of the program: command-line driver
if len(sys.argv) != 3:
    # wrong argument count: print usage to stdout and exit with failure
    print 'Converts UTF-8 encoded Cyrillic Uzbek text file into',
    print 'Latin Uzbek text file.'
    print 'Usage: %s cyr.txt lat.txt' % sys.argv[0]
    sys.exit(1)
else:
    cyr_filename = sys.argv[1]
    lat_filename = sys.argv[2]
    # read the Cyrillic input file
    cyr_file = open(cyr_filename, 'r')
    latLines = ''
    # convert each UTF-8 line to its Latin transliteration
    for eachCyrLine in cyr_file:
        latLines = latLines + convertLine(eachCyrLine.decode('utf-8'))
    cyr_file.close()
    # save the converted text, re-encoded as UTF-8
    latLines = latLines.encode('utf-8')
    lat_file = open(lat_filename, 'w')
    lat_file.write(latLines)
    lat_file.close()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Peter Elespuru
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
class Main(webapp.RequestHandler):
    """Catch-all request handler: redirect every GET to the static
    cloudflow index page."""
    def get (self, q):
        # 'q' receives the path captured by the '/(.*)' route; it is
        # ignored because every request gets the same redirect.
        self.redirect("/cloudflow/index.html")
def main ():
    """Build the catch-all WSGI application and hand it to the runner."""
    util.run_wsgi_app(webapp.WSGIApplication([('/(.*)', Main)], debug=False))
if __name__ == '__main__':
main () | Python |
#!/usr/bin/env python
#
# (derivative) Copyright 2011 Peter Elespuru
# (original) Copyright 2008 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Derived from this appengine app:
# http://gregdoesit.com/2010/12/using-google-app-engine-as-proxy-for-cross-domain-requests/
# modified to be a more specific resource caching proxy (not general url=?)
# for the Auroral Resources Toolkit
#
#
__author__ = "(Derivative) Peter Elespuru, (Original) Brett Slatkin"
import datetime
import hashlib
import logging
import pickle
import urllib
import re
import time
import urllib
import wsgiref.handlers
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext.webapp.util import run_wsgi_app
#
# Cache lifetime passed straight to memcache.add().
# NOTE(review): memcache expiration is specified in seconds, so 1440 is
# 24 minutes, not the "1-day" the original comment claimed (one day
# would be 86400) -- confirm the intended lifetime before changing.
#
CACHE_TIME = 1440
#
# Upstream services this proxy is allowed to fetch, keyed by the short
# name accepted in the 'service' query parameter; each maps to the base
# URL that the 'args' query string is appended to.
#
SERVICES = {
    'spidr.ngdc.GetData': "http://spidr.ngdc.noaa.gov/spidr/servlet/GetData",
    'spidr.ngdc.GetMetadata': "http://spidr.ngdc.noaa.gov/spidr/servlet/GetMetadata",
    'lasp.tsi.sorce': "http://lasp.colorado.edu/lisird/tss/sorce_tsi_6hr.csv",
    'lasp.tsi.sorce.meta': "http://lasp.colorado.edu/lisird/tss/sorce_tsi_6hr.html"
}
# Content-Type header returned for each proxied service's response.
SERVICES_DATA_TYPES = {
    'spidr.ngdc.GetData': "text/plain",
    'spidr.ngdc.GetMetadata': "application/xml",
    'lasp.tsi.sorce': "text/plain",
    'lasp.tsi.sorce.meta': "text/html"
}
#
# Build a stable memcache key from the "service,args" request mapping.
#
def getMemcacheKey(mapping):
    """Return 'hash_' + the SHA-256 hex digest of the mapping string."""
    return "hash_" + hashlib.sha256(mapping).hexdigest()
#
# has an allowed service been requested ?
#
def isAllowedService(service):
    """Return True when 'service' is one of the whitelisted SERVICES keys."""
    # Direct dict membership replaces the original hand-rolled linear
    # scan over the keys: same result, O(1) instead of O(n).
    return service in SERVICES
#
# Caching proxy handler: fetches a whitelisted upstream service and
# memoizes the response body in memcache for CACHE_TIME.
#
class ProxyHandler(webapp.RequestHandler):
    """Handles GET /proxy?service=<name>&args=<query-string>."""
    #
    # Serve the proxied (and possibly cached) upstream response.
    #
    def get(self):
        # decode the requested service name and its upstream arguments
        service = self.request.get('service')
        service = urllib.unquote(service)
        servarg = self.request.get('args')
        servarg = urllib.unquote(servarg)
        # refuse anything that is not an explicitly whitelisted service
        if not isAllowedService(service):
            return # sec check fail
        memcacheKey = getMemcacheKey(service+','+servarg)
        # Use memcache to store the request for CACHE_TIME
        proxiedContent = memcache.get(memcacheKey)
        proxiedContentInMemcache = True
        if proxiedContent is None:
            # cache miss: fetch the content from the upstream service
            proxiedContentInMemcache = False
            try:
                response = urlfetch.fetch(SERVICES[service]+"?"+servarg)
            except (urlfetch.Error, apiproxy_errors.Error):
                return self.error(404)
            # did something legit come back ?
            proxiedContent = response.content
            if proxiedContent is None:
                return self.error(404)
        # Add the freshly fetched content to memcache
        if (not proxiedContentInMemcache):
            memcache.add(memcacheKey, proxiedContent, CACHE_TIME)
        # reply with the service's declared content type
        self.response.headers['Content-Type'] = SERVICES_DATA_TYPES[service]
        self.response.out.write(proxiedContent)
#
# WSGI bootstrap: expose the caching proxy at /proxy.
#
def main():
    """Create the proxy WSGI application and run it."""
    application = webapp.WSGIApplication([("/proxy", ProxyHandler)], debug=False)
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Peter Elespuru
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
class Main(webapp.RequestHandler):
    """Catch-all request handler: redirect every GET to the static
    cloudflow index page."""
    def get (self, q):
        # 'q' receives the path captured by the '/(.*)' route; it is
        # ignored because every request gets the same redirect.
        self.redirect("/cloudflow/index.html")
def main ():
    """Build the catch-all WSGI application and hand it to the runner."""
    util.run_wsgi_app(webapp.WSGIApplication([('/(.*)', Main)], debug=False))
if __name__ == '__main__':
main () | Python |
#!/usr/bin/env python
#
# (derivative) Copyright 2011 Peter Elespuru
# (original) Copyright 2008 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Derived from this appengine app:
# http://gregdoesit.com/2010/12/using-google-app-engine-as-proxy-for-cross-domain-requests/
# modified to be a more specific resource caching proxy (not general url=?)
# for the Auroral Resources Toolkit
#
#
__author__ = "(Derivative) Peter Elespuru, (Original) Brett Slatkin"
import datetime
import hashlib
import logging
import pickle
import urllib
import re
import time
import urllib
import wsgiref.handlers
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext.webapp.util import run_wsgi_app
#
# Cache lifetime passed straight to memcache.add().
# NOTE(review): memcache expiration is specified in seconds, so 1440 is
# 24 minutes, not the "1-day" the original comment claimed (one day
# would be 86400) -- confirm the intended lifetime before changing.
#
CACHE_TIME = 1440
#
# Upstream services this proxy is allowed to fetch, keyed by the short
# name accepted in the 'service' query parameter; each maps to the base
# URL that the 'args' query string is appended to.
#
SERVICES = {
    'spidr.ngdc.GetData': "http://spidr.ngdc.noaa.gov/spidr/servlet/GetData",
    'spidr.ngdc.GetMetadata': "http://spidr.ngdc.noaa.gov/spidr/servlet/GetMetadata",
    'lasp.tsi.sorce': "http://lasp.colorado.edu/lisird/tss/sorce_tsi_6hr.csv",
    'lasp.tsi.sorce.meta': "http://lasp.colorado.edu/lisird/tss/sorce_tsi_6hr.html"
}
# Content-Type header returned for each proxied service's response.
SERVICES_DATA_TYPES = {
    'spidr.ngdc.GetData': "text/plain",
    'spidr.ngdc.GetMetadata': "application/xml",
    'lasp.tsi.sorce': "text/plain",
    'lasp.tsi.sorce.meta': "text/html"
}
#
# Build a stable memcache key from the "service,args" request mapping.
#
def getMemcacheKey(mapping):
    """Return 'hash_' + the SHA-256 hex digest of the mapping string."""
    return "hash_" + hashlib.sha256(mapping).hexdigest()
#
# has an allowed service been requested ?
#
def isAllowedService(service):
    """Return True when 'service' is one of the whitelisted SERVICES keys."""
    # Direct dict membership replaces the original hand-rolled linear
    # scan over the keys: same result, O(1) instead of O(n).
    return service in SERVICES
#
# Caching proxy handler: fetches a whitelisted upstream service and
# memoizes the response body in memcache for CACHE_TIME.
#
class ProxyHandler(webapp.RequestHandler):
    """Handles GET /proxy?service=<name>&args=<query-string>."""
    #
    # Serve the proxied (and possibly cached) upstream response.
    #
    def get(self):
        # decode the requested service name and its upstream arguments
        service = self.request.get('service')
        service = urllib.unquote(service)
        servarg = self.request.get('args')
        servarg = urllib.unquote(servarg)
        # refuse anything that is not an explicitly whitelisted service
        if not isAllowedService(service):
            return # sec check fail
        memcacheKey = getMemcacheKey(service+','+servarg)
        # Use memcache to store the request for CACHE_TIME
        proxiedContent = memcache.get(memcacheKey)
        proxiedContentInMemcache = True
        if proxiedContent is None:
            # cache miss: fetch the content from the upstream service
            proxiedContentInMemcache = False
            try:
                response = urlfetch.fetch(SERVICES[service]+"?"+servarg)
            except (urlfetch.Error, apiproxy_errors.Error):
                return self.error(404)
            # did something legit come back ?
            proxiedContent = response.content
            if proxiedContent is None:
                return self.error(404)
        # Add the freshly fetched content to memcache
        if (not proxiedContentInMemcache):
            memcache.add(memcacheKey, proxiedContent, CACHE_TIME)
        # reply with the service's declared content type
        self.response.headers['Content-Type'] = SERVICES_DATA_TYPES[service]
        self.response.out.write(proxiedContent)
#
# WSGI bootstrap: expose the caching proxy at /proxy.
#
def main():
    """Create the proxy WSGI application and run it."""
    application = webapp.WSGIApplication([("/proxy", ProxyHandler)], debug=False)
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
# Canonical (upper-case) column names accepted in the header line of a
# flow-tools ASCII export; used by format0() and the -m option to
# validate field names.
mask_fields = [
    'TAG',
    'UNIX_SECS',
    'UNIX_NSECS',
    'SYSUPTIME',
    'EXADDR',
    'DPKTS',
    'DOCTETS',
    'FIRST',
    'LAST',
    'ENGINE_TYPE',
    'ENGINE_ID',
    'SRCADDR',
    'DSTADDR',
    'NEXTHOP',
    'INPUT',
    'OUTPUT',
    'SRCPORT',
    'DSTPORT',
    'PROT',
    'TOS',
    'TCP_FLAGS',
    'SRC_MASK',
    'DST_MASK',
    'SRC_AS',
    'DST_AS',
]
def format0(src,log):
infile = open(src, "r")
flog = flowd.FlowLog(log, "wb")
mask = infile.readline().strip().split(',')
for m in mask:
if m.startswith('#:'):
m = m.replace('#:','')
if m.upper() not in mask_fields:
print m.upper()
print >> sys.stderr, "Unrecognized field:"+m
lines = infile.readlines()
count = 0
for str in lines:
data = str.strip().split(',')
if len(mask) != len(data):
print >> sys.stderr, "Invalid count of data"
i=0
flow = flowd.Flow()
for flowfield in data:
if mask[i] in ('TAG', 'tag'):
flow.tag = int(flowfield)
if mask[i] in ('UNIX_SECS', 'unix_secs'):
flow.recv_sec = int(flowfield)
if mask[i] in ('UNIX_NSECS', 'unix_nsecs'):
flow.recv_usec = int(flowfield)
if mask[i] in ('SYSUPTIME', 'sysuptime'):
flow.sys_uptime_ms = int(flowfield)
if mask[i] in ('EXADDR', 'exaddr'):
flow.agent_addr = flowfield
if mask[i] in ('DPKTS', 'dpkts'):
flow.packets = int(flowfield)
if mask[i] in ('DOCTETS', 'doctets'):
flow.octets = int(flowfield)
if mask[i] in ('FIRST', 'first'):
flow.flow_start = int(flowfield)
if mask[i] in ('LAST', 'last'):
flow.flow_finish = int(flowfield)
if mask[i] in ('ENGINE_TYPE', 'engine_type'):
flow.engine_type = int(flowfield)
if mask[i] in ('ENGINE_ID', 'engine_id'):
flow.engine_id = int(flowfield)
if mask[i] in ('SRCADDR', 'srcaddr'):
flow.src_addr = flowfield
if mask[i] in ('DSTADDR', 'dstaddr'):
flow.dst_addr = flowfield
if mask[i] in ('NEXTHOP', 'nexthop'):
flow.gateway_addr = flowfield
if mask[i] in ('INPUT', 'input'):
flow.if_ndx_in = int(flowfield)
if mask[i] in ('OUTPUT', 'output'):
flow.if_ndx_out = int(flowfield)
if mask[i] in ('SRCPORT', 'srcport'):
flow.src_port = int(flowfield)
if mask[i] in ('DSTPORT', 'dst_port'):
flow.dst_port = int(flowfield)
if mask[i] in ('PROT', 'prot'):
flow.protocol = int(flowfield)
if mask[i] in ('TOS', 'tos'):
flow.tos = int(flowfield)
if mask[i] in ('TCP_FLAGS', 'tcp_flags'):
flow.tcp_flags = int(flowfield)
if mask[i] in ('SRC_MASK', 'src_mask'):
flow.src_mask = int(flowfield)
if mask[i] in ('DST_MASK', 'dst_mask'):
flow.dst_mask = int(flowfield)
if mask[i] in ('SRC_AS', 'src_as'):
flow.src_as = int(flowfield)
if mask[i] in ('DST_AS', 'dst_as'):
flow.dst_as = int(flowfield)
flow.fields = 1074164750
i += 1
flog.write_flow(flow)
count += 1
print "imported %d flows"%count
infile.close()
def usage():
    """Print import.py usage help to stderr and exit with status 1."""
    print >> sys.stderr, "import.py (flowd.py version %s)" % flowd.__version__
    print >> sys.stderr, "Usage: import.py [options] [source file] [flowd-store]"
    print >> sys.stderr, "Options:"
    print >> sys.stderr, " -h Display this help"
    print >> sys.stderr, " -f Format of output. 0 - ASCII"
    print >> sys.stderr, " -m mask_fields. Select fields from: %s. Default all fields" % ', '.join(mask_fields)
    sys.exit(1)
def main():
    """Parse command-line options and import an ASCII flow dump.

    Expects exactly two positional arguments: the ASCII source file and
    the flowd store to write.  Exits via usage() on any argument error.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'f:m:u:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    format = None
    mask = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        if o in ('-f', '--format'):
            format = a
        # NOTE: the original tested "o in ('-m')", which is a substring
        # test against the *string* '-m', not tuple membership; use a
        # plain equality check instead.
        if o == '-m':
            mask = a.split(',')
            # validate the requested field names up front
            # (mask is currently only validated, not passed to format0)
            for m in mask:
                if m not in mask_fields:
                    print >> sys.stderr, "Unrecognized field:"+m
                    sys.exit(1)
    # only format 0 (ASCII) is implemented
    if format is None:
        format = 0
    elif not format.isdigit() or int(format) > 0:
        print >> sys.stderr, "Invalid format"
        usage()
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    elif len(args) != 2:
        print >> sys.stderr, "Arguments must consist of ascii source file and logfile to store data"
        usage()
    format0(args[0], args[1])
if __name__ == '__main__': main() | Python |
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
import os
import ipcalc
# tag-action "type" values accepted in the tagfile; each names the flow
# attribute that a rule's match values are compared against (the mapping
# to flow attributes is done in whatflowfield()).
action_types = [
    "src_addr",
    "dst_addr",
    "src_prefix",
    "dst_prefix",
    "src_port",
    "dst_port",
    "exporter",
    "nexthop",
    "src_as",
    "dst_as",
    "protocol",
    "tos",
    "input_interface",
    "output_interface"
]
class tag_class:
    """Generic container used for tag-actions, type rules and
    tag-definitions alike: a name, a list of values, and a match flag
    (1 = positive match, 0 = negated 'match not')."""

    def __init__(self):
        self.name = ""    # identifier parsed from the tagfile
        self.values = []  # nested tag_class objects or raw match values
        self.match = 1    # 1 unless the rule is negated

    def set_name(self, name):
        self.name = name

    def set_match(self, val):
        self.match = val

    def add_value(self, value):
        self.values.append(value)

    def add_values(self, value):
        # NOTE: replaces the stored values wholesale; the extend()
        # behaviour was deliberately disabled upstream.
        self.values = value
def whatflowfield(field, flow):
    """Return the flow attribute that a tag-action type name refers to,
    or None for an unrecognized type name."""
    # tag-action type name -> flowd.Flow attribute name
    attr_by_field = {
        "src_addr": "src_addr",
        "src_prefix": "src_addr",
        "dst_addr": "dst_addr",
        "dst_prefix": "dst_addr",
        "src_port": "src_port",
        "dst_port": "dst_port",
        "src_as": "src_as",
        "dst_as": "dst_as",
        "protocol": "protocol",
        "tos": "tos",
        "exporter": "agent_addr",
        "nexthop": "gateway_addr",
        "input_interface": "if_ndx_in",
        "output_interface": "if_ndx_out",
    }
    attr = attr_by_field.get(field)
    if attr is None:
        return None
    return getattr(flow, attr)
def usage():
    """Print tag.py usage help to stderr and exit with status 1."""
    print >> sys.stderr, "tag.py (flowd.py version %s)" % flowd.__version__
    print >> sys.stderr, "Usage: tag.py [options] [flowd-store]"
    print >> sys.stderr, "Options:"
    print >> sys.stderr, " -h Display this help"
    print >> sys.stderr, " -t Path to tagfile"
    print >> sys.stderr, " -T Tag number. Hex (in format 0x0) or Decimal"
    sys.exit(1)
def main():
    """Entry point: tag flows in flowd stores according to a tagfile.

    Parses rules from the -t tagfile, selects the tag-definition whose id
    equals -T, then rewrites every flowd-store named in args, applying the
    tag to every flow that satisfies all actions of the definition.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 't:T:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    tagfilepath = None
    tagnum = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        # NOTE: ('-t') is a plain string, not a tuple; "in" still works
        # because o is exactly '-t'.
        if o in ('-t'):
            tagfilepath = a
        if o in ('-T'):
            tagnum = a
    if tagfilepath is None:
        print >> sys.stderr, "No tagfile specified"
        usage()
    if tagnum is None:
        print >> sys.stderr, "No tag specified"
        usage()
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    # Parse the tagfile: "tag-action <name>" sections containing
    # "type <field>" rules with "match [not] <value>" lines, followed by
    # "tag-definition <id>" sections listing action names.
    tagfile = open(tagfilepath, "r")
    #tagacthash = {}
    tagactions = []
    line = tagfile.readline()
    while line.strip().startswith("tag-action"):
        action = tag_class()
        tagactname = line.strip().split(" ")[1]
        action.set_name(tagactname)
        line = tagfile.readline()
        #tagactrules = []
        while line.strip().startswith("type"):
            tagacttype = line.strip().split(" ")[1]
            rule = tag_class()
            rule.set_name(tagacttype)
            line = tagfile.readline()
            while line.strip().startswith("match"):
                tagactval = line.strip().split(" ")[1]
                if tagactval == "not":
                    # "match not <value>": negate the rule and take the
                    # real value from the third token.
                    rule.set_match(0)
                    tagactval = line.strip().split(" ")[2]
                if tagacttype=="src_prefix" or tagacttype=="dst_prefix":
                    # Prefix rules install an ipcalc.Network (replacing any
                    # previous value) so "in" tests do CIDR matching.
                    rule.add_values(ipcalc.Network(tagactval))
                else:
                    rule.add_value(tagactval)
                line = tagfile.readline()
            action.add_value(rule)
        tagactions.append(action)
        #tagacthash[tagactname] = tagactrules
    while line.strip().startswith("tag-definition"):
        tagid = line.strip().split(" ")[1]
        tag = tag_class()
        tag.set_name(tagid)
        line = tagfile.readline()
        #tagactnames = []
        while line.strip().startswith("action"):
            for act in tagactions:
                if act.name == line.strip().split(" ")[1]:
                    tag.add_value(act)
            line = tagfile.readline()
        # Stop at the requested definition; 'tag' keeps its actions.
        # NOTE(review): if no definition matches -T (or none exists),
        # 'tag' below is the last/undefined one -- verify intended.
        if tagid == tagnum:
            break
    for act in tag.values:
        for rule in act.values:
            print rule.name, rule.values
    # Non-decimal tag ids are interpreted as hexadecimal.
    if not tag.name.isdigit():
        tag.set_name(long(tag.name,16))
    else:
        tag.set_name(long(tag.name))
    # Rewrite each store: stream flows into a "temp" log, tagging the ones
    # for which every action has at least one satisfied rule, then copy
    # temp back over the original.
    for ffile in args:
        flogr = flowd.FlowLog(ffile, "rb")
        flogw = flowd.FlowLog("temp", "wb")
        for flow in flogr:
            trueact = 0
            term = 1
            for act in tag.values:
                for rule in act.values:
                    if rule.match == 0:
                        # Negated rule: succeeds when the field value is
                        # absent from the rule's value set.
                        if str(whatflowfield(rule.name, flow)) not in rule.values:
                            trueact = 1
                            break
                        else:
                            trueact = 0
                    else:
                        if str(whatflowfield(rule.name, flow)) in rule.values:
                            trueact = 1
                            break
                    else:
                            trueact = 0
                # All actions must match (logical AND across actions).
                term = term * trueact
            if term == 1:
                if flow.tag == 0:
                    # Tag was previously unset; account for the new field.
                    flow.fields += 1
                flow.tag = tag.name
            flogw.write_flow(flow)
        # NOTE(review): in-place rewrite via a "temp" file in the CWD; a
        # crash between these steps can clobber the original store.
        file = open("temp", "rb")
        buff = file.read()
        file.close()
        file = open(ffile, "wb")
        file.write(buff)
        file.close()
        os.remove("temp")
# Run the tagger when executed as a script. (Removed trailing "| Python"
# extraction residue, which evaluated `main() | Python` and raised
# NameError at runtime.)
if __name__ == '__main__': main()
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
import os
import ipcalc
# Flow-record fields a filter-action "type" directive may reference; each
# maps onto one flowd flow attribute via whatflowfield() below.
action_types = [
    "src_addr",
    "dst_addr",
    "src_prefix",
    "dst_prefix",
    "src_port",
    "dst_port",
    "exporter",
    "nexthop",
    "src_as",
    "dst_as",
    "protocol",
    "tos",
    "input_interface",
    "output_interface"
]
class filter_action:
    """One "type" rule taken from a filter-action section.

    Carries the flow field name the rule inspects plus the values that
    satisfy it.
    """

    def __init__(self):
        self.field = ""
        self.values = []

    def set_field(self, name):
        """Set the flow field this rule applies to."""
        self.field = name

    def add_value(self, value):
        """Append one acceptable match value."""
        self.values.append(value)

    def set_value(self, value):
        """Replace the value container (used to install an ipcalc.Network
        so membership tests perform CIDR matching)."""
        self.values = value
def whatflowfield(field, flow):
    """Map a filter-rule field name onto the matching attribute of *flow*.

    Unknown field names yield None.
    """
    field_map = (
        (("src_addr", "src_prefix"), "src_addr"),
        (("dst_addr", "dst_prefix"), "dst_addr"),
        (("src_port",), "src_port"),
        (("dst_port",), "dst_port"),
        (("src_as",), "src_as"),
        (("dst_as",), "dst_as"),
        (("protocol",), "protocol"),
        (("tos",), "tos"),
        (("exporter",), "agent_addr"),
        (("nexthop",), "gateway_addr"),
        (("input_interface",), "if_ndx_in"),
        (("output_interface",), "if_ndx_out"),
    )
    for names, attr in field_map:
        if field in names:
            return getattr(flow, attr)
    return None
def usage():
    """Write filter.py usage text to stderr and terminate with status 1."""
    for text in (
        "filter.py (flowd.py version %s)" % flowd.__version__,
        "Usage: filter.py [options] [flowd-store]",
        "Options:",
        " -h Display this help",
        " -f Path to filterfile",
        " -F Filter name",
        " -o output file",
    ):
        print >> sys.stderr, text
    sys.exit(1)
def main():
    """Entry point: select flows from flowd stores by a named filter.

    Reads filter-action/filter-definition sections from the -f file,
    picks the definition named by -F, prints matching flows, appends
    their text form to a file called "filt" in the CWD and, when -o is
    given, also writes them to a new flowd store.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'f:F:o:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    filepath = None
    # NOTE(review): 'filter' and, later, 'file' shadow Python builtins.
    filter = None
    output_file = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        if o in ('-f'):
            filepath = a
        if o in ('-F'):
            filter = a
        if o in ('-o'):
            output_file = a
    # NOTE(review): messages below were copy-pasted from tag.py; they say
    # "tagfile"/"tag" although the missing options are -f/-F.
    if filepath is None:
        print >> sys.stderr, "No tagfile specified"
        usage()
    if filter is None:
        print >> sys.stderr, "No tag specified"
        usage()
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    #parsing tagfile
    tagfile = open(filepath, "r")
    tagacthash = {}
    line = tagfile.readline()
    # Parse "filter-action <name>" sections, each holding "type <field>"
    # rules with their "match <value>" lines.
    while line.strip().startswith("filter-action"):
        tagactname = line.strip().split(" ")[1]
        line = tagfile.readline()
        tagactrules = []
        while line.strip().startswith("type"):
            tagacttype = line.strip().split(" ")[1]
            tagact = filter_action()
            tagact.set_field(tagacttype)
            line = tagfile.readline()
            while line.strip().startswith("match"):
                tagactval = line.strip().split(" ")[1]
                if tagacttype=="src_prefix" or tagacttype=="dst_prefix":
                    # Prefix rules install an ipcalc.Network so "in"
                    # membership tests do CIDR matching.
                    tagact.set_value(ipcalc.Network(tagactval))
                else:
                    tagact.add_value(tagactval)
                line = tagfile.readline()
            tagactrules.append(tagact)
        tagacthash[tagactname] = tagactrules
    # Parse "filter-definition <name>" sections; stop at the one matching
    # the -F argument, keeping its list of action names.
    while line.strip().startswith("filter-definition"):
        tag = line.strip().split(" ")[1]
        line = tagfile.readline()
        tagactnames = []
        while line.strip().startswith("action"):
            tagactnames.append(line.strip().split(" ")[1])
            line = tagfile.readline()
        if tag == filter:
            break
    for tname in tagactnames:
        for rule in tagacthash[tname]:
            print rule.field, rule.values
    #print int(tagnum,16)
    #writing tags
    # NOTE(review): output text always goes to hard-coded "filt" in CWD.
    file = open("filt", "w")
    for ffile in args:
        flogr = flowd.FlowLog(ffile, "rb")
        if output_file is not None:
            flogw = flowd.FlowLog(output_file, "wb")
        for flow in flogr:
            trueact = 0
            term = 1
            for tname in tagactnames:
                for rule in tagacthash[tname]:
                    if str(whatflowfield(rule.field, flow)) in rule.values:
                        trueact = 1
                        break
                    else:
                        trueact = 0
                # Every action must have at least one satisfied rule.
                term = term * trueact
            if term == 1:
                print flow.format(mask = flowd.DISPLAY_BRIEF, utc = 1)
                # NOTE(review): no explicit newline is appended here --
                # presumably flow.format() supplies one; verify.
                file.write(flow.format(mask = flowd.DISPLAY_BRIEF, utc = 1))
                if output_file is not None:
                    flogw.write_flow(flow)
    file.close()
# Run the filter when executed as a script. (Removed trailing "| Python"
# extraction residue, which evaluated `main() | Python` and raised
# NameError at runtime.)
if __name__ == '__main__': main()
#!/usr/bin/env python
import flowd
import sys
import string
import math
import getopt
import time
import datetime
import pickle
import gzip
class flow_stat_count:
    """Accumulated totals (flows/octets/packets) for one statistics bucket."""

    def __init__(self):
        self.flows = 0
        self.octets = 0
        self.packets = 0

    def update(self, flow):
        """Fold one flow into the totals.

        Octet and packet counts are added only when the flow record
        actually carries those fields.
        """
        self.flows = self.flows + 1
        if flow.has_field(flowd.FIELD_OCTETS):
            self.octets = self.octets + flow.octets
        if flow.has_field(flowd.FIELD_PACKETS):
            self.packets = self.packets + flow.packets
class flow_statistic:
    """Per-key statistics table mapping a bucket key to flow_stat_count
    totals, plus a running count of distinct keys."""

    def __init__(self):
        # Number of distinct keys seen so far.
        self.num_unique = 0
        # key -> flow_stat_count
        self.counts = {}

    def update(self, what, flow):
        """Fold *flow* into the bucket keyed by *what*, creating the
        bucket on first use.

        Fix: catch only KeyError -- the original bare "except:" also
        swallowed unrelated errors (e.g. TypeError from an unhashable
        key) and silently miscounted them as new buckets.
        """
        try:
            count = self.counts[what]
        except KeyError:
            self.num_unique += 1
            count = flow_stat_count()
            self.counts[what] = count
        count.update(flow)
class flow_statistics:
def __init__(self):
self.first = None
self.last = None
self.start = None
self.finish = None
self.time = None
self.average_clockskew = None
self.average_clockskew_samples = 0
self.src_port = flow_statistic()
self.dst_port = flow_statistic()
self.port = flow_statistic()
self.src_addr = flow_statistic()
self.dst_addr = flow_statistic()
self.fromto = flow_statistic()
self.addr = flow_statistic()
self.gateway_addr = flow_statistic()
self.agent_addr = flow_statistic()
self.protocol = flow_statistic()
self.in_if = flow_statistic()
self.out_if = flow_statistic()
self.in_out_if = flow_statistic()
self.src_as = flow_statistic()
self.dst_as = flow_statistic()
self.src_dst_as = flow_statistic()
self.tos = flow_statistic()
self.engine_id = flow_statistic()
self.engine_type = flow_statistic()
self.tag = flow_statistic()
self.src_prefix = flow_statistic()
self.dst_prefix = flow_statistic()
self.src_dst_prefix = flow_statistic()
self.flows = 0;
self.octets = None;
self.packets = None;
self.packsize = flow_statistic()
self.ppf = flow_statistic() # packets per flow
self.opf = flow_statistic() # octets per flow
self.k = 0;
def update0(self, flow):
self.flows += 1
if flow.has_field(flowd.FIELD_RECV_TIME):
if self.first is None or \
flow.recv_sec < self.first:
self.first = flow.recv_sec
if self.last is None or \
flow.recv_sec > self.last:
self.last = flow.recv_sec
if self.start is None or \
flow.flow_start < self.start:
self.start = flow.flow_start
if self.finish is None or \
flow.flow_finish > self.finish:
self.finish = flow.flow_finish
if self.time is None:
self.time = flow.flow_finish - flow.flow_start
if flow.has_field(flowd.FIELD_FLOW_TIMES):
delta = flow.recv_sec - \
flow.flow_finish / 1000.0
if self.average_clockskew is None:
self.average_clockskew = delta
self.average_clockskew_samples += 1
new_offset = delta - self.average_clockskew
self.average_clockskew += new_offset / \
self.average_clockskew_samples
self.time += flow.flow_finish - flow.flow_start
if flow.has_field(flowd.FIELD_OCTETS):
if self.octets is None:
self.octets = 0
self.octets += flow.octets
if flow.has_field(flowd.FIELD_PACKETS):
if self.packets is None:
self.packets = 0
self.packets += flow.packets
def update1(self, flow):
if flow.has_field(flowd.FIELD_OCTETS) and \
flow.has_field(flowd.FIELD_PACKETS):
if flow.packets != 0:
self.packsize.update((flow.octets / flow.packets), flow)
def update2(self, flow):
if flow.has_field(flowd.FIELD_PACKETS):
if self.packets is None:
self.packets = 0
self.ppf.update(flow.packets, flow)
def update3(self, flow):
if flow.has_field(flowd.FIELD_OCTETS):
if self.octets is None:
self.octets = 0
self.octets += flow.octets
self.opf.update(flow.octets, flow)
def update5(self, flow):
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.dst_port.update(flow.dst_port, flow)
def update6(self, flow):
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.src_port.update(flow.src_port, flow)
def update7(self, flow):
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.port.update(flow.src_port, flow)
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.port.update(flow.dst_port, flow)
def update8(self, flow):
if flow.has_field(flowd.FIELD_DST_ADDR):
self.dst_addr.update(flow.dst_addr, flow)
def update9(self, flow):
if flow.has_field(flowd.FIELD_SRC_ADDR):
self.src_addr.update(flow.src_addr, flow)
def update10(self, flow):
if flow.has_field(flowd.FIELD_SRC_ADDR) and \
flow.has_field(flowd.FIELD_DST_ADDR):
fromto = flow.src_addr + " -> " + \
flow.dst_addr
self.fromto.update(fromto, flow)
self.src_dst_prefix.update(self.net(flow.src_addr, flow.src_mask)+"\t"+self.net(flow.dst_addr, flow.dst_mask), flow)
def update11(self, flow):
if flow.has_field(flowd.FIELD_SRC_ADDR):
self.addr.update(flow.src_addr, flow)
if flow.has_field(flowd.FIELD_DST_ADDR):
self.addr.update(flow.dst_addr, flow)
def update12(self, flow):
if flow.has_field(flowd.FIELD_PROTO_FLAGS_TOS):
self.protocol.update(flow.protocol, flow)
def update15(self, flow):
self.flows += 1
if flow.has_field(flowd.FIELD_OCTETS):
if self.octets is None:
self.octets = 0
self.octets += flow.octets
if flow.has_field(flowd.FIELD_PACKETS):
if self.packets is None:
self.packets = 0
self.packets += flow.packets
def update16(self, flow):
if flow.has_field(flowd.FIELD_GATEWAY_ADDR):
self.gateway_addr.update(flow.gateway_addr, flow)
def update17(self, flow):
if flow.has_field(flowd.FIELD_IF_INDICES):
self.in_if.update(flow.if_ndx_in, flow)
def update18(self, flow):
if flow.has_field(flowd.FIELD_IF_INDICES):
self.out_if.update(flow.if_ndx_out, flow)
def update19(self, flow):
if flow.has_field(flowd.FIELD_AS_INFO):
self.src_as.update(flow.src_as, flow)
def update20(self, flow):
if flow.has_field(flowd.FIELD_AS_INFO):
self.dst_as.update(flow.dst_as, flow)
def update21(self, flow):
if flow.has_field(flowd.FIELD_AS_INFO):
srcdstas = "%d\t%d" %(flow.src_as, flow.dst_as)
self.src_dst_as.update(srcdstas, flow)
def update22(self, flow):
if flow.has_field(flowd.FIELD_PROTO_FLAGS_TOS):
self.tos.update(flow.tos, flow)
def update23(self, flow):
if flow.has_field(flowd.FIELD_IF_INDICES):
inout = "%d\t%d" %(flow.if_ndx_in, flow.if_ndx_out)
self.in_out_if.update(inout, flow)
def update24(self, flow):
if flow.has_field(flowd.FIELD_SRC_ADDR):
self.src_prefix.update(self.net(flow.src_addr, flow.src_mask), flow)
def update25(self, flow):
if flow.has_field(flowd.FIELD_DST_ADDR):
self.dst_prefix.update(self.net(flow.dst_addr, flow.dst_mask), flow)
def update26(self, flow):
if flow.has_field(flowd.FIELD_SRC_ADDR) and \
flow.has_field(flowd.FIELD_DST_ADDR):
self.src_dst_prefix.update(self.net(flow.src_addr, flow.src_mask)+"\t"+self.net(flow.dst_addr, flow.dst_mask), flow)
def update27(self, flow):
if flow.has_field(flowd.FIELD_AGENT_ADDR):
self.agent_addr.update(flow.agent_addr, flow)
def update28(self, flow):
if flow.has_field(flowd.FIELD_FLOW_ENGINE_INFO):
self.engine_id.update(flow.engine_id, flow)
def update29(self, flow):
if flow.has_field(flowd.FIELD_FLOW_ENGINE_INFO):
self.engine_type.update(flow.engine_type, flow)
def update30(self, flow):
if flow.has_field(flowd.FIELD_TAG):
self.tag.update(flow.tag, flow)
def update(self, flow):
self.flows += 1
if flow.has_field(flowd.FIELD_RECV_TIME):
if self.first is None or \
flow.recv_sec < self.first:
self.first = flow.recv_sec
if self.last is None or \
flow.recv_sec > self.last:
self.last = flow.recv_sec
if self.start is None or \
flow.flow_start < self.start:
self.start = flow.flow_start
if self.finish is None or \
flow.flow_finish > self.finish:
self.finish = flow.flow_finish
if self.time is None:
self.time = flow.flow_finish - flow.flow_start
if flow.has_field(flowd.FIELD_FLOW_TIMES):
delta = flow.recv_sec - \
flow.flow_finish / 1000.0
if self.average_clockskew is None:
self.average_clockskew = delta
self.average_clockskew_samples += 1
new_offset = delta - self.average_clockskew
self.average_clockskew += new_offset / \
self.average_clockskew_samples
self.time += flow.flow_finish - flow.flow_start
if flow.has_field(flowd.FIELD_OCTETS):
if self.octets is None:
self.octets = 0
self.octets += flow.octets
self.opf.update(flow.octets, flow)
if flow.has_field(flowd.FIELD_PACKETS):
if self.packets is None:
self.packets = 0
self.packets += flow.packets
self.ppf.update(flow.packets, flow)
if flow.has_field(flowd.FIELD_OCTETS) and \
flow.has_field(flowd.FIELD_PACKETS):
if flow.packets != 0:
self.packsize.update((flow.octets / flow.packets), flow)
if flow.has_field(flowd.FIELD_FLOW_TIMES) and \
flow.has_field(flowd.FIELD_FLOW_TIMES):
duration = flow.flow_finish - \
flow.flow_start
duration = int(duration / 1000) # milliseconds
if flow.has_field(flowd.FIELD_SRC_ADDR):
self.src_addr.update(flow.src_addr, flow)
self.addr.update(flow.src_addr, flow)
self.src_prefix.update(self.net(flow.src_addr, flow.src_mask), flow)
if flow.has_field(flowd.FIELD_DST_ADDR):
self.dst_addr.update(flow.dst_addr, flow)
self.addr.update(flow.dst_addr, flow)
self.dst_prefix.update(self.net(flow.dst_addr, flow.dst_mask), flow)
if flow.has_field(flowd.FIELD_SRC_ADDR) and \
flow.has_field(flowd.FIELD_DST_ADDR):
fromto = flow.src_addr + " -> " + \
flow.dst_addr
self.fromto.update(fromto, flow)
self.src_dst_prefix.update(self.net(flow.src_addr, flow.src_mask)+"\t"+self.net(flow.dst_addr, flow.dst_mask), flow)
if flow.has_field(flowd.FIELD_GATEWAY_ADDR):
self.gateway_addr.update(flow.gateway_addr, flow)
if flow.has_field(flowd.FIELD_AGENT_ADDR):
self.agent_addr.update(flow.agent_addr, flow)
if flow.has_field(flowd.FIELD_IF_INDICES):
self.in_if.update(flow.if_ndx_in, flow)
self.out_if.update(flow.if_ndx_out, flow)
inout = "%d\t%d" %(flow.if_ndx_in, flow.if_ndx_out)
self.in_out_if.update(inout, flow)
if flow.has_field(flowd.FIELD_AS_INFO):
self.src_as.update(flow.src_as, flow)
self.dst_as.update(flow.dst_as, flow)
srcdstas = "%d\t%d" %(flow.src_as, flow.dst_as)
self.src_dst_as.update(srcdstas, flow)
if flow.has_field(flowd.FIELD_FLOW_ENGINE_INFO):
self.engine_id.update(flow.engine_id, flow)
self.engine_type.update(flow.engine_type, flow)
if flow.has_field(flowd.FIELD_TAG):
self.tag.update(flow.tag, flow)
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.src_port.update(flow.src_port, flow)
self.port.update(flow.src_port, flow)
if flow.has_field(flowd.FIELD_SRCDST_PORT):
self.dst_port.update(flow.dst_port, flow)
self.port.update(flow.dst_port, flow)
if flow.has_field(flowd.FIELD_PROTO_FLAGS_TOS):
self.protocol.update(flow.protocol, flow)
self.tos.update(flow.tos, flow)
def net(self, addr, mask):
str = ""
if addr.count('.') > 0:
s = addr.split(".")
if mask == 0:
m = 32
else:
m = mask
x = 32 - m
a = x / 8
b = x % 8
c = 256 - (2 ** b)
s[3-a] = int(s[3-a]) & c
i = 4-a
for d in s:
if i < 4:
s[i] = 0
i += 1
str = "%d.%d.%d.%d/%d" %(int(s[0]),int(s[1]),int(s[2]),int(s[3]),mask)
elif addr.count(':') > 0:
s = addr.split(":")
i = 0
for d in s:
if d == '':
s[i]='0'
k = i
i += 1
l = len(s)
i=k+1
while i <= (k+8-l):
s.insert(i, '0')
i += 1
if mask == 0:
m = 128
else:
m = mask
x = 128 - m
a = x / 16
b = x % 16
c = 65535 - (2 ** b)
s[7-a] = int(s[7-a],16) & c
s[7-a] = "%x"%s[7-a]
i = 8-a
for d in s:
if i < 8:
s[i] = '0'
i += 1
str = "%s:%s:%s:%s:%s:%s:%s:%s/%d" %(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],mask)
return str
def report0(self):
str = ""
str += "Total Flow : %d\n" %self.flows
str += "Total Octets : %d\n" %self.octets
str += "Total Packets : %d\n" %self.packets
str += "Total Time (flows) : %d\n" %self.time
str += "Duration of data (realtime) : %d\n" %(self.last-self.first)
str += "Duration of data (1/1000 secs) : %d\n" %(self.finish-self.start)
str += "Average flow time (1/1000 secs) : %f\n" %(self.time / float(self.flows))
str += "Average packet size (octets) : %.4f\n" %(float(self.octets) / self.packets)
str += "Average flow size (octets) : %.4f\n" %(float(self.octets) / self.flows)
str += "Average packets per flow : %.4f\n" %(float(self.packets) / self.flows)
str += "Average flows / second (flow) : %.4f\n" %(float(self.flows)/((self.finish-self.start)/1000))
str += "Average flows / second (real) : %.4f\n" %(float(self.flows)/(self.last-self.first))
str += "Average Kbits / second (flow) : %.4f\n" %((float(self.octets * 8)/1000)/((self.finish-self.start)/1000))
str += "Average Kbits / second (real) : %.4f\n" %((float(self.octets * 8)/1000)/(self.last-self.first))
return str
def report1(self):
str = "Pkt size \tFlows \t\tOctets \t\tPackets\n"
for pktsize in self.packsize.counts.keys():
flows = self.packsize.counts[pktsize].flows
octets = self.packsize.counts[pktsize].octets
packets = self.packsize.counts[pktsize].packets
str += "%d" %pktsize
if pktsize > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report2(self):
str = "Packets \tFlows \t\tOctets \t\tPackets\n"
for pkt in self.ppf.counts.keys():
flows = self.ppf.counts[pkt].flows
octets = self.ppf.counts[pkt].octets
packets = self.ppf.counts[pkt].packets
str += "%d" %pkt
if pkt > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report3(self):
str = "Octets \t\tFlows \t\tOctets \t\tPackets\n"
for oct in self.opf.counts.keys():
flows = self.opf.counts[oct].flows
octets = self.opf.counts[oct].octets
packets = self.opf.counts[oct].packets
str += "%d" %oct
if oct > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report4(self):
return "not implemented"
def report5(self):
str = "dst port \tFlows \t\tOctets \t\tPackets\n"
for port in self.dst_port.counts.keys():
flows = self.dst_port.counts[port].flows
octets = self.dst_port.counts[port].octets
packets = self.dst_port.counts[port].packets
str += "%d" %port
if port > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report6(self):
str = "src port \tFlows \t\tOctets \t\tPackets\n"
for port in self.src_port.counts.keys():
flows = self.src_port.counts[port].flows
octets = self.src_port.counts[port].octets
packets = self.src_port.counts[port].packets
str += "%d" %port
if port > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report7(self):
str = "port \t\tFlows \t\tOctets \t\tPackets\n"
for port in self.port.counts.keys():
flows = self.port.counts[port].flows
octets = self.port.counts[port].octets
packets = self.port.counts[port].packets
str += "%d" %port
if port > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report8(self):
str = "DST IP address \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.dst_addr.counts.keys():
flows = self.dst_addr.counts[addr].flows
octets = self.dst_addr.counts[addr].octets
packets = self.dst_addr.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t"
else:
if len(addr) < 16:
str +="\t\t\t"
else:
if len (addr) < 24:
str +="\t\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report9(self):
str = "SRC IP address \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.src_addr.counts.keys():
flows = self.src_addr.counts[addr].flows
octets = self.src_addr.counts[addr].octets
packets = self.src_addr.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t"
else:
if len(addr) < 16:
str +="\t\t\t"
else:
if len (addr) < 24:
str +="\t\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report10(self):
str = "SRC IP -> DST IP \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.fromto.counts.keys():
flows = self.fromto.counts[addr].flows
octets = self.fromto.counts[addr].octets
packets = self.fromto.counts[addr].packets
str += addr
if len(addr) < 24:
str +="\t\t\t"
else:
if len(addr) < 32:
str +="\t\t"
else:
if len (addr) < 40:
str +="\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report11(self):
str = "IP address \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.addr.counts.keys():
flows = self.addr.counts[addr].flows
octets = self.addr.counts[addr].octets
packets = self.addr.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t\t"
else:
if len(addr) < 16:
str +="\t\t\t"
else:
if len (addr) < 24:
str +="\t\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report12(self):
str = "protocol \tFlows \t\tOctets \t\tPackets\n"
for proto in self.protocol.counts.keys():
flows = self.protocol.counts[proto].flows
octets = self.protocol.counts[proto].octets
packets = self.protocol.counts[proto].packets
str += "%d" %proto
if proto > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report13(self, flow):
str = ""
if self.k == 0:
str += "start \t\toctets \nend \t\toctets\n"
self.k = 1
if flow.has_field(flowd.FIELD_FLOW_TIMES) and \
flow.has_field(flowd.FIELD_OCTETS):
str += "%d \t\t%d \n%d \t\t%d\n\n" %(flow.flow_start, flow.octets, flow.flow_finish, flow.octets)
return str
def report14(self, flow):
str = ""
if self.k == 0:
str += "start \t\tpackets \nend \t\tpackets\n"
self.k = 1
if flow.has_field(flowd.FIELD_FLOW_TIMES) and \
flow.has_field(flowd.FIELD_PACKETS):
str += "%d \t\t%d \n%d \t\t%d\n\n" %(flow.flow_start, flow.packets, flow.flow_finish, flow.packets)
return str
def report15(self):
str = "Octets \t\tPackets \t\tMBytes\n"
str += "%d \t" %self.octets
str += "%d \t\t" %self.packets
str += "%.3f" %(float(self.octets)/1000000)
return str
def report16(self):
str = "IP address \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.gateway_addr.counts.keys():
flows = self.gateway_addr.counts[addr].flows
octets = self.gateway_addr.counts[addr].octets
packets = self.gateway_addr.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t"
else:
if len(addr) < 16:
str +="\t\t\t"
else:
if len (addr) < 24:
str +="\t\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report17(self):
str = "interface \tFlows \t\tOctets \t\tPackets\n"
for iface in self.in_if.counts.keys():
flows = self.in_if.counts[iface].flows
octets = self.in_if.counts[iface].octets
packets = self.in_if.counts[iface].packets
str += "%d" %iface
if iface > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report18(self):
str = "interface \tFlows \t\tOctets \t\tPackets\n"
for iface in self.out_if.counts.keys():
flows = self.out_if.counts[iface].flows
octets = self.out_if.counts[iface].octets
packets = self.out_if.counts[iface].packets
str += "%d" %iface
if iface > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report23(self):
str = "in \tout \t\tFlows \t\tOctets \t\tPackets\n"
for iface in self.in_out_if.counts.keys():
flows = self.in_out_if.counts[iface].flows
octets = self.in_out_if.counts[iface].octets
packets = self.in_out_if.counts[iface].packets
str += "%s" %iface
if len(iface) > 7:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report19(self):
str = "src as \t\tFlows \t\tOctets \t\tPackets\n"
for asys in self.src_as.counts.keys():
flows = self.src_as.counts[asys].flows
octets = self.src_as.counts[asys].octets
packets = self.src_as.counts[asys].packets
str += "%d" %asys
if asys > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report20(self):
str = "dst as \t\tFlows \t\tOctets \t\tPackets\n"
for asys in self.dst_as.counts.keys():
flows = self.dst_as.counts[asys].flows
octets = self.dst_as.counts[asys].octets
packets = self.dst_as.counts[asys].packets
str += "%d" %asys
if asys > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report21(self):
str = "src as\tdst as \t\tFlows \t\tOctets \t\tPackets\n"
for asys in self.src_dst_as.counts.keys():
flows = self.src_dst_as.counts[asys].flows
octets = self.src_dst_as.counts[asys].octets
packets = self.src_dst_as.counts[asys].packets
str += "%s" %asys
if len(asys) > 7:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report22(self):
str = "ToS \t\tFlows \t\tOctets \t\tPackets\n"
for tos in self.tos.counts.keys():
flows = self.tos.counts[tos].flows
octets = self.tos.counts[tos].octets
packets = self.tos.counts[tos].packets
str += "%d" %tos
if tos > 9999999:
str +="\t"
else:
str += "\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report24(self):
str = "Source prefix \t\t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.src_prefix.counts.keys():
flows = self.src_prefix.counts[addr].flows
octets = self.src_prefix.counts[addr].octets
packets = self.src_prefix.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t\t"
elif len(addr) < 16:
str +="\t\t\t\t"
elif len (addr) < 24:
str +="\t\t\t"
elif len(addr) < 32:
str += "\t\t"
else:
str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report25(self):
str = "Destination prefix \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.dst_prefix.counts.keys():
flows = self.dst_prefix.counts[addr].flows
octets = self.dst_prefix.counts[addr].octets
packets = self.dst_prefix.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t\t"
elif len(addr) < 16:
str +="\t\t\t\t"
elif len (addr) < 24:
str +="\t\t\t"
elif len(addr) < 32:
str += "\t\t"
else:
str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report26(self):
str = "Source prefix \t\tDestination prefix \n\t\tFlows \t\tOctets \t\tPackets\n"
for addr in self.src_dst_prefix.counts.keys():
flows = self.src_dst_prefix.counts[addr].flows
octets = self.src_dst_prefix.counts[addr].octets
packets = self.src_dst_prefix.counts[addr].packets
str += addr+"\n\t\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report27(self):
str = "IP address \t\t\t\b Flows \t\t\b Octets \t\t\b Packets\n"
for addr in self.agent_addr.counts.keys():
flows = self.agent_addr.counts[addr].flows
octets = self.agent_addr.counts[addr].octets
packets = self.agent_addr.counts[addr].packets
str += addr
if len(addr) < 8:
str +="\t\t\t\t"
else:
if len(addr) < 16:
str +="\t\t\t"
else:
if len (addr) < 24:
str +="\t\t"
else: str += "\t"
str += "%d" %flows
if flows > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %octets
if octets > 9999999:
str += "\t"
else:
str += "\t\t"
str += "%d" %packets
str += "\n"
return str
def report28(self):
    """Return a flows/octets/packets report per NetFlow engine id.

    (Rewrote to stop shadowing the builtin ``str`` and to build the
    result with a single join instead of repeated +=.)
    """
    lines = ["engine_id \tFlows \t\tOctets \t\tPackets\n"]
    for engine, cnt in self.engine_id.counts.items():
        # Values wider than 7 digits get one tab, narrower ones two.
        lines.append("%d%s%d%s%d%s%d\n" % (
            engine, "\t" if engine > 9999999 else "\t\t",
            cnt.flows, "\t" if cnt.flows > 9999999 else "\t\t",
            cnt.octets, "\t" if cnt.octets > 9999999 else "\t\t",
            cnt.packets))
    return "".join(lines)
def report29(self):
    """Return a flows/octets/packets report per NetFlow engine type.

    (Rewrote to stop shadowing the builtin ``str`` and to build the
    result with a single join instead of repeated +=.)
    """
    lines = ["engine_type \tFlows \t\tOctets \t\tPackets\n"]
    for engine, cnt in self.engine_type.counts.items():
        # Values wider than 7 digits get one tab, narrower ones two.
        lines.append("%d%s%d%s%d%s%d\n" % (
            engine, "\t" if engine > 9999999 else "\t\t",
            cnt.flows, "\t" if cnt.flows > 9999999 else "\t\t",
            cnt.octets, "\t" if cnt.octets > 9999999 else "\t\t",
            cnt.packets))
    return "".join(lines)
def report30(self):
    """Return a flows/octets/packets report per flow tag value.

    (Rewrote to stop shadowing the builtin ``str`` and to build the
    result with a single join instead of repeated +=.)
    """
    lines = ["tag \t\tFlows \t\tOctets \t\tPackets\n"]
    for tag, cnt in self.tag.counts.items():
        # Values wider than 7 digits get one tab, narrower ones two.
        lines.append("%d%s%d%s%d%s%d\n" % (
            tag, "\t" if tag > 9999999 else "\t\t",
            cnt.flows, "\t" if cnt.flows > 9999999 else "\t\t",
            cnt.octets, "\t" if cnt.octets > 9999999 else "\t\t",
            cnt.packets))
    return "".join(lines)
def usage():
    """Print stat.py's command-line help to stderr and exit with status 1."""
    print >> sys.stderr, "stat.py (flowd.py version %s)" % \
        flowd.__version__
    print >> sys.stderr, "Usage: stats.py [options] [flowd-store]";
    print >> sys.stderr, "Options:";
    print >> sys.stderr, " -h Display this help";
    print >> sys.stderr, " -f Format of output (like flow-stat from flow-tools). Default 0";
    print >> sys.stderr, " -p pickle_file";
    print >> sys.stderr, " -o output_file";
    # The per-format legend below matches the report numbers accepted by -f;
    # it is kept as a comment rather than printed.
    # print >> sys.stderr, " Report format. Choose from the following:";
    # print >> sys.stderr, " 0 Overall Summary";
    # print >> sys.stderr, " 1 Average packet size distribution";
    # print >> sys.stderr, " 2 Packets per flow distribution";
    # print >> sys.stderr, " 3 Octets per flow distribution";
    # print >> sys.stderr, " 4 Bandwidth per flow distribution";
    # print >> sys.stderr, " 5 UDP/TCP destination port";
    # print >> sys.stderr, " 6 UDP/TCP source port";
    # print >> sys.stderr, " 7 UDP/TCP port";
    # print >> sys.stderr, " 8 Destination IP";
    # print >> sys.stderr, " 9 Source IP";
    # print >> sys.stderr, " 10 Source/Destination IP";
    # print >> sys.stderr, " 11 Source or Destination IP";
    # print >> sys.stderr, " 12 IP protocol";
    # print >> sys.stderr, " 13 octets for flow duration plot data";
    # print >> sys.stderr, " 14 packets for flow duration plot data";
    # print >> sys.stderr, " 15 short summary";
    # print >> sys.stderr, " 16 IP Next Hop";
    # print >> sys.stderr, " 17 Input interface";
    # print >> sys.stderr, " 18 Output interface";
    # print >> sys.stderr, " 19 Source AS";
    # print >> sys.stderr, " 20 Destination AS";
    # print >> sys.stderr, " 21 Source/Destination AS";
    # print >> sys.stderr, " 22 IP ToS";
    # print >> sys.stderr, " 23 Input/Output Interface";
    # print >> sys.stderr, " 24 Source Prefix";
    # print >> sys.stderr, " 25 Destination Prefix";
    # print >> sys.stderr, " 26 Source/Destination Prefix";
    # print >> sys.stderr, " 27 Exporter IP";
    # print >> sys.stderr, " 28 Engine Id";
    # print >> sys.stderr, " 29 Engine Type";
    # print >> sys.stderr, " 30 Tag";
    sys.exit(1);
def main():
stats = flow_statistics()
try:
opts, args = getopt.getopt(sys.argv[1:], 'f:p:o:h')
except getopt.GetoptError:
print >> sys.stderr, "Invalid commandline arguments"
usage()
outformat = None
pickle_file = None
output_file = None
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-f', '--format'):
outformat = a
if o in ('-p', '--pickle'):
pickle_file = a
if o in ('-o', '--output'):
output_file = a
if len(args) == 0:
print >> sys.stderr, "No logfiles specified"
usage()
if outformat is None:
outformat = 0
elif not outformat.isdigit() or int(outformat) > 30:
print >> sys.stderr, "Invalid format"
usage()
if output_file is not None:
out = open(output_file, "w")
print "report #"+outformat
#if output_file is not None:
# out.write("report #"+outformat)
#print dir(flowd)
for ffile in args:
if ffile == "-":
flog = flowd.FlowLog_fromfile(sys.stdin)
else:
flog = flowd.FlowLog(ffile, "rb")
for flow in flog:
#stats.update(flow)
if outformat == '0':
stats.update0(flow)
elif outformat == '1':
stats.update1(flow)
elif outformat == '2':
stats.update2(flow)
elif outformat == '3':
stats.update3(flow)
elif outformat == '5':
stats.update5(flow)
elif outformat == '6':
stats.update6(flow)
elif outformat == '7':
stats.update7(flow)
elif outformat == '8':
stats.update8(flow)
elif outformat == '9':
stats.update9(flow)
elif outformat == '10':
stats.update10(flow)
elif outformat == '11':
stats.update11(flow)
elif outformat == '12':
stats.update12(flow)
elif outformat == '13':
print stats.report13(flow)
if output_file is not None:
out.write(stats.report13(flow))
elif outformat == '14':
print stats.report14(flow)
if output_file is not None:
out.write(stats.report14(flow))
elif outformat == '15':
stats.update15(flow)
elif outformat == '16':
stats.update16(flow)
elif outformat == '17':
stats.update17(flow)
elif outformat == '18':
stats.update18(flow)
elif outformat == '19':
stats.update19(flow)
elif outformat == '20':
stats.update20(flow)
elif outformat == '21':
stats.update21(flow)
elif outformat == '22':
stats.update22(flow)
elif outformat == '23':
stats.update23(flow)
elif outformat == '24':
stats.update24(flow)
elif outformat == '25':
stats.update25(flow)
elif outformat == '26':
stats.update26(flow)
elif outformat == '27':
stats.update27(flow)
elif outformat == '28':
stats.update28(flow)
elif outformat == '29':
stats.update29(flow)
elif outformat == '30':
stats.update30(flow)
if outformat == '0':
print stats.report0()
if output_file is not None:
out.write(stats.report0())
elif outformat == '1':
print stats.report1()
if output_file is not None:
out.write(stats.report1())
elif outformat == '2':
print stats.report2()
if output_file is not None:
out.write(stats.report2())
elif outformat == '3':
print stats.report3()
if output_file is not None:
out.write(stats.report3())
elif outformat == '4':
print stats.report4()
if output_file is not None:
out.write(stats.report4())
elif outformat == '5':
print stats.report5()
if output_file is not None:
out.write(stats.report5())
elif outformat == '6':
print stats.report6()
if output_file is not None:
out.write(stats.report6())
elif outformat == '7':
print stats.report7()
if output_file is not None:
out.write(stats.report7())
elif outformat == '8':
print stats.report8()
if output_file is not None:
out.write(stats.report8())
elif outformat == '9':
print stats.report9()
if output_file is not None:
out.write(stats.report9())
elif outformat == '10':
print stats.report10()
if output_file is not None:
out.write(stats.report10())
elif outformat == '11':
print stats.report11()
if output_file is not None:
out.write(stats.report11())
elif outformat == '12':
print stats.report12()
if output_file is not None:
out.write(stats.report12())
elif outformat == '15':
print stats.report15()
if output_file is not None:
out.write(stats.report15())
elif outformat == '16':
print stats.report16()
if output_file is not None:
out.write(stats.report16())
elif outformat == '17':
print stats.report17()
if output_file is not None:
out.write(stats.report17())
elif outformat == '18':
print stats.report18()
if output_file is not None:
out.write(stats.report18())
elif outformat == '19':
print stats.report19()
if output_file is not None:
out.write(stats.report19())
elif outformat == '20':
print stats.report20()
if output_file is not None:
out.write(stats.report20())
elif outformat == '21':
print stats.report21()
if output_file is not None:
out.write(stats.report21())
elif outformat == '22':
print stats.report22()
if output_file is not None:
out.write(stats.report22())
elif outformat == '23':
print stats.report23()
if output_file is not None:
out.write(stats.report23())
elif outformat == '24':
print stats.report24()
if output_file is not None:
out.write(stats.report24())
elif outformat == '25':
print stats.report25()
if output_file is not None:
out.write(stats.report25())
elif outformat == '26':
print stats.report26()
if output_file is not None:
out.write(stats.report26())
elif outformat == '27':
print stats.report27()
if output_file is not None:
out.write(stats.report27())
elif outformat == '28':
print stats.report28()
if output_file is not None:
out.write(stats.report28())
elif outformat == '29':
print stats.report29()
if output_file is not None:
out.write(stats.report29())
elif outformat == '30':
print stats.report30()
if output_file is not None:
out.write(stats.report30())
if output_file is not None:
out.close()
print "Statistics stored to \"%s\"" % \
output_file
if pickle_file is not None:
out = open(pickle_file, "wb")
pickle.dump(stats, out)
out.close()
print >> sys.stderr, "Statistics pickled to \"%s\"" % \
pickle_file
if __name__ == '__main__': main()
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
import os
import ipcalc
# tag-action "type" values accepted in a tagfile; each name maps to one
# flow attribute (see whatflowfield()).
action_types = [
    "src_addr",
    "dst_addr",
    "src_prefix",
    "dst_prefix",
    "src_port",
    "dst_port",
    "exporter",
    "nexthop",
    "src_as",
    "dst_as",
    "protocol",
    "tos",
    "input_interface",
    "output_interface"
]
class tag_class:
    """Generic named container used for tag-actions, rules and tag
    definitions alike: a name, a list of child values, and a match flag
    (1 = positive match, 0 = negated "match not ...")."""

    def __init__(self):
        self.name = ""      # identifier of this action/rule/tag
        self.values = []    # child rules, resolved actions, or raw match values
        self.match = 1      # negation flag; only meaningful on rules

    def set_name(self, name):
        """Set the identifier."""
        self.name = name

    def set_match(self, val):
        """Set the match polarity (1 positive, 0 negated)."""
        self.match = val

    def add_value(self, value):
        """Append one child value."""
        self.values.append(value)

    def add_values(self, value):
        """Replace the whole value list (not an extend)."""
        self.values = value
def whatflowfield(field, flow):
    """Map a tag-action type name to the matching attribute of *flow*.

    src_prefix/dst_prefix share the src_addr/dst_addr attributes.
    Returns None for unrecognized field names.
    """
    attr_by_field = {
        "src_addr": "src_addr",
        "src_prefix": "src_addr",
        "dst_addr": "dst_addr",
        "dst_prefix": "dst_addr",
        "src_port": "src_port",
        "dst_port": "dst_port",
        "src_as": "src_as",
        "dst_as": "dst_as",
        "protocol": "protocol",
        "tos": "tos",
        "exporter": "agent_addr",
        "nexthop": "gateway_addr",
        "input_interface": "if_ndx_in",
        "output_interface": "if_ndx_out",
    }
    attr = attr_by_field.get(field)
    if attr is None:
        return None
    return getattr(flow, attr)
def usage():
    """Print tag.py's command-line help to stderr and exit with status 1."""
    print >> sys.stderr, "tag.py (flowd.py version %s)" % \
        flowd.__version__
    print >> sys.stderr, "Usage: tag.py [options] [flowd-store]";
    print >> sys.stderr, "Options:";
    print >> sys.stderr, " -h Display this help";
    print >> sys.stderr, " -t Path to tagfile";
    print >> sys.stderr, " -T Tag number. Hex (in format 0x0) or Decimal";
    # Example tagfile syntax, kept as a comment rather than printed.
    #print >> sys.stderr, "Example of tagfile:\n";
    #print >> sys.stderr, "tag-action tag\n type src_addr\n match 192.168.231.1\n match 192.168.231.2\ntag-action tag2\n type src_port\n match 138\ntag-definition 1\n action tag\n action tag2\ntag-definition 2\n action tag2\n";
    #print >> sys.stderr, " tag-action types: %s"% ', '.join(action_types);
    sys.exit(1);
def main():
    """Parse a flow-tools style tagfile and apply one tag-definition to
    every flow in the given flowd logs, rewriting each log in place.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 't:T:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    tagfilepath = None
    tagnum = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        # NOTE(review): "o in ('-t')" is substring matching against the
        # string '-t', not tuple membership; it happens to work for the
        # options getopt can produce here, but "('-t',)" would be safer.
        if o in ('-t'):
            tagfilepath = a
        if o in ('-T'):
            tagnum = a
    if tagfilepath is None:
        print >> sys.stderr, "No tagfile specified"
        usage()
    if tagnum is None:
        print >> sys.stderr, "No tag specified"
        usage()
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    #parsing tagfile
    tagfile = open(tagfilepath, "r")
    #tagacthash = {}
    tagactions = []
    line = tagfile.readline()
    # Pass 1: read every "tag-action" stanza.  Each action holds a list of
    # typed rules; each rule holds its match values.
    while line.strip().startswith("tag-action"):
        action = tag_class()
        tagactname = line.strip().split(" ")[1]
        action.set_name(tagactname)
        line = tagfile.readline()
        #tagactrules = []
        while line.strip().startswith("type"):
            tagacttype = line.strip().split(" ")[1]
            rule = tag_class()
            rule.set_name(tagacttype)
            line = tagfile.readline()
            while line.strip().startswith("match"):
                tagactval = line.strip().split(" ")[1]
                if tagactval == "not":
                    # "match not <value>" negates the whole rule.
                    rule.set_match(0)
                    tagactval = line.strip().split(" ")[2]
                if tagacttype=="src_prefix" or tagacttype=="dst_prefix":
                    # Prefix rules replace the value list with an
                    # ipcalc.Network so the later "in" test is a prefix
                    # containment check -- TODO confirm ipcalc semantics.
                    rule.add_values(ipcalc.Network(tagactval))
                else:
                    rule.add_value(tagactval)
                line = tagfile.readline()
            action.add_value(rule)
        tagactions.append(action)
        #tagacthash[tagactname] = tagactrules
    # Pass 2: read "tag-definition" stanzas until the requested tag number
    # is found; resolve its action names against the parsed actions.
    while line.strip().startswith("tag-definition"):
        tagid = line.strip().split(" ")[1]
        tag = tag_class()
        tag.set_name(tagid)
        line = tagfile.readline()
        #tagactnames = []
        while line.strip().startswith("action"):
            for act in tagactions:
                if act.name == line.strip().split(" ")[1]:
                    tag.add_value(act)
            line = tagfile.readline()
        if tagid == tagnum:
            break
    for act in tag.values:
        for rule in act.values:
            print rule.name, rule.values
    # The tag number may be hex ("0x...") or decimal.
    if not tag.name.isdigit():
        tag.set_name(long(tag.name,16))
    else:
        tag.set_name(long(tag.name))
    #writing tags
    for ffile in args:
        flogr = flowd.FlowLog(ffile, "rb")
        flogw = flowd.FlowLog("temp", "wb")
        for flow in flogr:
            trueact = 0
            term = 1
            # A tag applies only when every action matched (term is the
            # AND across actions); within an action, the first matching
            # rule wins (OR across rules).
            for act in tag.values:
                for rule in act.values:
                    if rule.match == 0:
                        if str(whatflowfield(rule.name, flow)) not in rule.values:
                            trueact = 1
                            break
                        else:
                            trueact = 0
                    else:
                        if str(whatflowfield(rule.name, flow)) in rule.values:
                            trueact = 1
                            break
                        else:
                            trueact = 0
                term = term * trueact
            if term == 1:
                if flow.tag == 0:
                    # presumably bumps the field-present counter when the
                    # tag field becomes set -- TODO confirm flowd semantics
                    flow.fields += 1
                flow.tag = tag.name
            flogw.write_flow(flow)
        # Replace the original log with the rewritten copy.
        # NOTE(review): not atomic -- a crash mid-copy loses the original.
        file = open("temp", "rb")
        buff = file.read()
        file.close()
        file = open(ffile, "wb")
        file.write(buff)
        file.close()
        os.remove("temp")
if __name__ == '__main__': main()
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
# Column names accepted in the "#:..." header line of an ASCII import
# file (flow-tools flow-export style field names).
mask_fields = [
    'TAG' ,
    'UNIX_SECS' ,
    'UNIX_NSECS' ,
    'SYSUPTIME' ,
    'EXADDR' ,
    'DPKTS' ,
    'DOCTETS' ,
    'FIRST' ,
    'LAST' ,
    'ENGINE_TYPE' ,
    'ENGINE_ID' ,
    'SRCADDR' ,
    'DSTADDR' ,
    'NEXTHOP' ,
    'INPUT' ,
    'OUTPUT' ,
    'SRCPORT' ,
    'DSTPORT' ,
    'PROT' ,
    'TOS' ,
    'TCP_FLAGS' ,
    'SRC_MASK' ,
    'DST_MASK' ,
    'SRC_AS' ,
    'DST_AS' ,
]
def format0(src,log):
infile = open(src, "r")
flog = flowd.FlowLog(log, "wb")
mask = infile.readline().strip().split(',')
for m in mask:
if m.startswith('#:'):
m = m.replace('#:','')
if m.upper() not in mask_fields:
print m.upper()
print >> sys.stderr, "Unrecognized field:"+m
lines = infile.readlines()
count = 0
for str in lines:
data = str.strip().split(',')
if len(mask) != len(data):
print >> sys.stderr, "Invalid count of data"
i=0
flow = flowd.Flow()
for flowfield in data:
if mask[i] in ('TAG', 'tag'):
flow.tag = int(flowfield)
if mask[i] in ('UNIX_SECS', 'unix_secs'):
flow.recv_sec = int(flowfield)
if mask[i] in ('UNIX_NSECS', 'unix_nsecs'):
flow.recv_usec = int(flowfield)
if mask[i] in ('SYSUPTIME', 'sysuptime'):
flow.sys_uptime_ms = int(flowfield)
if mask[i] in ('EXADDR', 'exaddr'):
flow.agent_addr = flowfield
if mask[i] in ('DPKTS', 'dpkts'):
flow.packets = int(flowfield)
if mask[i] in ('DOCTETS', 'doctets'):
flow.octets = int(flowfield)
if mask[i] in ('FIRST', 'first'):
flow.flow_start = int(flowfield)
if mask[i] in ('LAST', 'last'):
flow.flow_finish = int(flowfield)
if mask[i] in ('ENGINE_TYPE', 'engine_type'):
flow.engine_type = int(flowfield)
if mask[i] in ('ENGINE_ID', 'engine_id'):
flow.engine_id = int(flowfield)
if mask[i] in ('SRCADDR', 'srcaddr'):
flow.src_addr = flowfield
if mask[i] in ('DSTADDR', 'dstaddr'):
flow.dst_addr = flowfield
if mask[i] in ('NEXTHOP', 'nexthop'):
flow.gateway_addr = flowfield
if mask[i] in ('INPUT', 'input'):
flow.if_ndx_in = int(flowfield)
if mask[i] in ('OUTPUT', 'output'):
flow.if_ndx_out = int(flowfield)
if mask[i] in ('SRCPORT', 'srcport'):
flow.src_port = int(flowfield)
if mask[i] in ('DSTPORT', 'dst_port'):
flow.dst_port = int(flowfield)
if mask[i] in ('PROT', 'prot'):
flow.protocol = int(flowfield)
if mask[i] in ('TOS', 'tos'):
flow.tos = int(flowfield)
if mask[i] in ('TCP_FLAGS', 'tcp_flags'):
flow.tcp_flags = int(flowfield)
if mask[i] in ('SRC_MASK', 'src_mask'):
flow.src_mask = int(flowfield)
if mask[i] in ('DST_MASK', 'dst_mask'):
flow.dst_mask = int(flowfield)
if mask[i] in ('SRC_AS', 'src_as'):
flow.src_as = int(flowfield)
if mask[i] in ('DST_AS', 'dst_as'):
flow.dst_as = int(flowfield)
flow.fields = 1074164750
i += 1
flog.write_flow(flow)
count += 1
print "imported %d flows"%count
infile.close()
def usage():
    """Print import.py's command-line help to stderr and exit with status 1."""
    print >> sys.stderr, "import.py (flowd.py version %s)" % \
        flowd.__version__
    print >> sys.stderr, "Usage: import.py [options] [source file] [flowd-store]";
    print >> sys.stderr, "Options:";
    print >> sys.stderr, " -h Display this help";
    print >> sys.stderr, " -f Format of output. 0 - ASCII";
    print >> sys.stderr, " -m mask_fields. Select fields from: %s. Default all fields" % ', '.join(mask_fields);
    sys.exit(1);
def main():
    """Command-line entry point for import.py: validate options and hand
    the (source-file, logfile) pair to format0().
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'f:m:u:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    # NOTE(review): conn, outfile and uopt are never used in this script;
    # the parsed -m mask is validated but never passed to format0().
    format = None
    conn = None
    mask = None
    outfile = None
    uopt = None
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        if o in ('-f', '--format'):
            format = a
        if o in('-m'):
            mask = a.split(',')
            for m in mask:
                if m not in mask_fields:
                    print >> sys.stderr, "Unrecognized field:"+m
                    sys.exit(1);
    # Only format 0 (ASCII) exists; anything else is rejected.
    if format is None:
        format = 0
    elif not format.isdigit() or int(format) > 0:
        print >> sys.stderr, "Invalid format"
        usage()
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    elif len(args) != 2:
        print >> sys.stderr, "Arguments must consist of ascii source file and logfile to store data"
        usage()
    format0(args[0],args[1])
if __name__ == '__main__': main()
#!/usr/bin/env python
import flowd
import string
import getopt
import sys
import MySQLdb
#import psycopg
# Canonical column order for exports; get_mask_value() emits selected
# values in exactly this order regardless of the -m option's ordering.
mask_fields = [
    'TAG' ,
    'UNIX_SECS' ,
    'UNIX_NSECS' ,
    'SYSUPTIME' ,
    'EXADDR' ,
    'DPKTS' ,
    'DOCTETS' ,
    'FIRST' ,
    'LAST' ,
    'ENGINE_TYPE' ,
    'ENGINE_ID' ,
    'SRCADDR' ,
    'DSTADDR' ,
    'NEXTHOP' ,
    'INPUT' ,
    'OUTPUT' ,
    'SRCPORT' ,
    'DSTPORT' ,
    'PROT' ,
    'TOS' ,
    'TCP_FLAGS' ,
    'SRC_MASK' ,
    'DST_MASK' ,
    'SRC_AS' ,
    'DST_AS' ,
]
def usage():
    """Print export.py's command-line help to stderr and exit with status 1."""
    print >> sys.stderr, "export.py (flowd.py version %s)" % \
        flowd.__version__
    print >> sys.stderr, "Usage: export.py [options] [flowd-store]";
    print >> sys.stderr, "Options:";
    print >> sys.stderr, " -h Display this help";
    print >> sys.stderr, " -f Format of output. 0 - ASCII, 1 - MySQL, 2 - PostgreSQL. Default 0";
    print >> sys.stderr, " -m mask_fields. Select fields from: %s. Default all fields" % ', '.join(mask_fields);
    print >> sys.stderr, " -o output file for ASCII format";
    print >> sys.stderr, " -u user:password:host:port:name:table Configure MySQL or PostgresSQL access";
    sys.exit(1);
def get_mask_value(mask, flow):
    """Return the values of *flow* selected by *mask*, formatted as strings.

    Output follows the canonical mask_fields order (not the order of
    *mask*).  Address fields are passed through as-is; every numeric
    field is rendered with %d (TCP_FLAGS with %s, as before).
    """
    selectors = (
        ("TAG", lambda f: "%d" % f.tag),
        ("UNIX_SECS", lambda f: "%d" % f.recv_sec),
        ("UNIX_NSECS", lambda f: "%d" % f.recv_usec),
        ("SYSUPTIME", lambda f: "%d" % f.sys_uptime_ms),
        ("EXADDR", lambda f: f.agent_addr),
        ("DPKTS", lambda f: "%d" % f.packets),
        ("DOCTETS", lambda f: "%d" % f.octets),
        ("FIRST", lambda f: "%d" % f.flow_start),
        ("LAST", lambda f: "%d" % f.flow_finish),
        ("ENGINE_TYPE", lambda f: "%d" % f.engine_type),
        ("ENGINE_ID", lambda f: "%d" % f.engine_id),
        ("SRCADDR", lambda f: f.src_addr),
        ("DSTADDR", lambda f: f.dst_addr),
        ("NEXTHOP", lambda f: f.gateway_addr),
        ("INPUT", lambda f: "%d" % f.if_ndx_in),
        ("OUTPUT", lambda f: "%d" % f.if_ndx_out),
        ("SRCPORT", lambda f: "%d" % f.src_port),
        ("DSTPORT", lambda f: "%d" % f.dst_port),
        ("PROT", lambda f: "%d" % f.protocol),
        ("TOS", lambda f: "%d" % f.tos),
        ("TCP_FLAGS", lambda f: "%s" % f.tcp_flags),
        ("SRC_MASK", lambda f: "%d" % f.src_mask),
        ("DST_MASK", lambda f: "%d" % f.dst_mask),
        ("SRC_AS", lambda f: "%d" % f.src_as),
        ("DST_AS", lambda f: "%d" % f.dst_as),
    )
    return [fmt(flow) for name, fmt in selectors if name in mask]
def create_query(mask, dbtable):
    """Build a CREATE TABLE statement with one column per field in *mask*.

    Address-like fields become VARCHAR(64); everything else INTEGER.
    (Rewrote to stop shadowing the builtin ``str`` and to assemble the
    column list with a join instead of last-character inspection.)
    """
    columns = []
    for m in mask:
        if m in ("SRCADDR", "DSTADDR", "EXADDR", "NEXTHOP"):
            columns.append(m + " VARCHAR(64)")
        else:
            columns.append(m + " INTEGER")
    return "CREATE TABLE IF NOT EXISTS %s (%s);" % (dbtable, ", ".join(columns))
def format0(outfile, mask, flow):
    """Write one flow to *outfile* as a comma-separated ASCII line."""
    line = ",".join(get_mask_value(mask, flow))
    outfile.write(line + "\n")
def format1(conn, dbtable, mask, flow):
    """Insert one flow into *dbtable* through DB connection *conn*.

    The flow values are passed as query parameters instead of being
    spliced into the SQL text, so odd characters in flow fields cannot
    break (or inject into) the statement.  The table and column names
    are still interpolated, but they come from the validated mask_fields
    list, not from flow data.  Uses the "format" paramstyle (%s), which
    both MySQLdb and psycopg support.
    """
    cursor = conn.cursor()
    query = "INSERT INTO %s " % dbtable
    query += "(%s) " % ','.join(mask)
    query += "VALUES (%s)" % ','.join(['%s'] * len(mask))
    cursor.execute(query, get_mask_value(mask, flow))
def main():
    """Command-line entry point for export.py: export flowd logs to an
    ASCII file (format 0), MySQL (1) or PostgreSQL (2).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'f:m:u:o:h')
    except getopt.GetoptError:
        print >> sys.stderr, "Invalid commandline arguments"
        usage()
    format = None
    conn = None
    mask = None
    outfile = None
    uopt = None
    # NOTE(review): the -u and -o checks below compare against the format
    # seen so far, so option ORDER matters: -f must precede -u/-o on the
    # command line or they are rejected.
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        if o in ('-f', '--format'):
            format = a
        if o in('-m'):
            mask = a.split(',')
            for m in mask:
                if m not in mask_fields:
                    print >> sys.stderr, "Unrecognized field:"+m
                    sys.exit(1);
        if o in ('-u'):
            if format not in ('1','2'):
                print >> sys.stderr, "Option -u must be used only for MySQL or PostgresSQL format"
                usage()
            else:
                uopt = 1
                if len(a.split(':')) == 6:
                    dbuser, dbpswd, dbhost, dbport, dbname, dbtable = a.split(':')
                    dbport = int (dbport)
                else:
                    print >> sys.stderr, "Option -u need six values"
                    usage()
        if o in ('-o'):
            if format != '0':
                print >> sys.stderr, "Option -o must be used only for ASCII format"
                usage()
            else:
                outfile = open(a, "w")
    if len(args) == 0:
        print >> sys.stderr, "No logfile specified"
        usage()
    if format is None:
        format = '0'
    if mask is None:
        mask = mask_fields
    else:
        # Reorder the user's mask into canonical mask_fields order.
        mask_sort = [m for m in mask_fields if m in mask]
        mask = mask_sort
    if not format.isdigit() or int(format) > 2:
        print >> sys.stderr, "Invalid format"
        usage()
    else:
        if format == '0':
            # NOTE(review): crashes with AttributeError if -o was not
            # given (outfile is still None here).
            outfile.write("#:%s\n"%",".join(mask))
        elif format == '1':
            if not uopt:
                print >> sys.stderr, "No -u option specified"
                usage()
            try:
                conn = MySQLdb.Connect(host=dbhost, port=dbport, user=dbuser, passwd=dbpswd, db=dbname)
                cursor = conn.cursor()
                cursor.execute(create_query(mask, dbtable))
            except MySQLdb.Error, e:
                print("Error: unavailable connection with database %s" % dbname)
                sys.exit(-1)
        elif format == '2':
            if not uopt:
                print >> sys.stderr, "No -u option specified"
                usage()
            try:
                # NOTE(review): the psycopg import at the top of the file
                # is commented out, so this branch raises NameError; the
                # except clause also catches MySQLdb.Error, not psycopg's.
                #conn = psycopg.connect('dbname=%s host=%s port=%s user=%s password=%s'%(dbname,dbhost,dbport,dbuser,dbpswd))
                conn = psycopg.connect('dbname=%s user=%s'%(dbname,dbuser,))
                cursor = conn.cursor()
                # PostgreSQL (of that era) lacks IF NOT EXISTS, so it is
                # stripped from the CREATE statement here.
                cursor.execute(string.replace(create_query(mask, dbtable),'IF NOT EXISTS',''))
                #print string.replace(create_query(mask, dbtable),'IF NOT EXISTS','')
            except MySQLdb.Error, e:
                print("Error: unavailable connection with database %s" % dbname)
                sys.exit(-1)
    count = 0
    for ffile in args:
        flog = flowd.FlowLog(ffile, "rb")
        for flow in flog:
            count += 1
            if format == '0':
                format0(outfile, mask, flow)
            elif format in ('1','2'):
                format1(conn, dbtable, mask, flow)
    # NOTE(review): no conn.commit() for the DB formats -- inserts may be
    # lost if the driver is not in autocommit mode; verify before use.
    print "exported %d flows"%count
    if format == '0':
        outfile.close()
if __name__ == '__main__': main()
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.