// (extraction artifact — dataset table header, not part of the original source:)
// CombinedText stringlengths 4 3.42M |
// |---|
/******************************************************************************
* ogre_interface.di - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
module llcoi.ogre_interface;
extern(C):
// Scalar type used for angles/distances throughout the interface.
// Maps to Ogre::Real — presumably a single-precision build; confirm against llcoi.
alias float coiReal;
// Bitmask values for add_frame_listener()'s frame_event_type argument.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// From OgrePlatform.h
alias uint uint32;
alias ushort uint16;
alias ubyte uint8;
alias int int32;
alias short int16;
alias byte int8;
// OgreSceneManager.h
alias ushort SceneTypeMask;
// OgreColourValue.h — packed 32-bit colour layouts (presumably one byte per channel).
alias uint32 RGBA;
alias uint32 ARGB;
alias uint32 ABGR;
alias uint32 BGRA;
// Opaque handles to Ogre-side objects. Only the C layer ever dereferences these;
// D clients treat them as tokens obtained from and passed back to llcoi calls.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* RenderSystemListHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
alias void* LogManagerHandle;
alias void* LogHandle;
alias void* LogListenerHandle;
alias void* NameValuePairListHandle;
alias void* FrameListenerHandle;
alias void* PlaneHandle;
alias void* MeshHandle;
alias void* TimerHandle;
alias void* WindowListenerHandle;
// listener typedefs
// Frame callback — params are presumably (timeSinceLastEvent, timeSinceLastFrame,
// event type bitmask); meaning of the int return is not visible here — TODO confirm.
alias int function(float,float,int) FrameListenerEvent;
// Window event callback; receives the affected window.
alias void function(RenderWindowHandle) WindowListenerEvent;
// Log listener: mirrors the parameters of logmanager_log_message below.
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message) LogListenerEvent;
// As LogListenerEvent, plus a caller-supplied context pointer.
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message, void* userdata) LogListenerCtx;
// Frame callback with context pointer; receives the FrameEvent struct by ref.
alias int function(const ref FrameEvent evt, int frame_type, void* userdata) FrameListenerCtx;
// Quaternion with w stored first, matching Ogre::Quaternion's component order.
struct coiQuaternion
{
float w;
float x;
float y;
float z;
}
// 3-component float vector, interchangeable with Ogre::Vector3 across the C boundary.
struct coiVector3
{
float x;
float y;
float z;
}
/// Position/orientation pair (mirrors Ogre::ViewPoint).
/// Fix: dropped the stray `;` after the closing brace — an empty declaration is a
/// C-ism that D deprecates/rejects, and the rest of this file omits it inconsistently.
struct ViewPoint
{
    coiVector3 position;
    coiQuaternion orientation;
}
// Mirrors Ogre::FrameEvent; times are presumably in seconds — TODO confirm.
struct FrameEvent
{
coiReal timeSinceLastEvent;
coiReal timeSinceLastFrame;
}
// RGBA colour with float channels (Ogre::ColourValue; channels normally 0.0–1.0 —
// TODO confirm range against the C side).
struct ColourValue
{
float r;
float g;
float b;
float a;
}
/// Start-up options consumed by init_engine(); default_engine_options() presumably
/// pre-fills it — confirm before relying on uninitialised fields.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D; inconsistent with other declarations in this file).
struct engine_options
{
    const char* renderer_s;       // render system name (presumably, from the _s suffix pattern)
    const char* plugin_folder_s;  // folder containing Ogre plugins
    const char* window_title;
    const char* log_name;
    int width, height, auto_window; // auto_window is a C-style boolean
}
/// Per-window rendering statistics, filled by renderwindow_get_statistics().
/// Mirrors Ogre::RenderTarget::FrameStats.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
struct FrameStats
{
    float lastFPS;
    float avgFPS;
    float bestFPS;
    float worstFPS;
    ulong bestFrameTime;        // frame-time extremes; units not visible here — TODO confirm (likely ms)
    ulong worstFrameTime;
    size_t triangleCount;
    size_t batchCount;
}
/// Log verbosity passed to logmanager_set_log_detail() (Ogre::LoggingLevel).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum LoggingLevel
{
    LL_LOW    = 1,
    LL_NORMAL = 2,
    LL_BOREME = 3
}
/// Importance of a single log message (Ogre::LogMessageLevel);
/// see logmanager_log_message().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum LogMessageLevel
{
    LML_TRIVIAL  = 1,
    LML_NORMAL   = 2,
    LML_CRITICAL = 3
}
/// Bit flags selecting which overlay statistics to show; values are powers of two
/// and may be OR-ed together (SF_ALL selects everything).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum stat_flags
{
    SF_NONE           = 0,
    SF_FPS            = 1,
    SF_AVG_FPS        = 2,
    SF_BEST_FPS       = 4,
    SF_WORST_FPS      = 8,
    SF_TRIANGLE_COUNT = 16,
    SF_ALL            = 0xFFFF
}
/// Which buffer of a render target to address (Ogre::FrameBuffer).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum frame_buffer
{
    FB_FRONT,
    FB_BACK,
    FB_AUTO
}
/// Scene-manager classification bit flags (Ogre::SceneType); combinable into the
/// SceneTypeMask accepted by root_create_scene_manager_by_mask().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum scene_type
{
    ST_GENERIC           = 1,
    ST_EXTERIOR_CLOSE    = 2,
    ST_EXTERIOR_FAR      = 4,
    ST_EXTERIOR_REAL_FAR = 8,
    ST_INTERIOR          = 16
}
// Hardware buffer usage flags (Ogre::HardwareBuffer::Usage). The combined members
// are bit-ORs of the basic flags: 5 = STATIC|WRITE_ONLY, 6 = DYNAMIC|WRITE_ONLY,
// 14 = DYNAMIC|WRITE_ONLY|DISCARDABLE.
enum hardware_buffer_usage
{
HBU_STATIC = 1,
HBU_DYNAMIC = 2,
HBU_WRITE_ONLY = 4,
HBU_DISCARDABLE = 8,
HBU_STATIC_WRITE_ONLY = 5,
HBU_DYNAMIC_WRITE_ONLY = 6,
HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE = 14
}
/// Kind of light source (Ogre::Light::LightTypes); see light_set_type().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum light_types
{
    /// Point light sources give off light equally in all directions, so require only position not direction
    LT_POINT = 0,
    /// Directional lights simulate parallel light beams from a distant source, hence have direction but no position
    LT_DIRECTIONAL = 1,
    /// Spotlights simulate a cone of light from a source so require position and direction, plus extra values for falloff
    LT_SPOTLIGHT = 2
}
/// Coordinate space for node transforms (Ogre::Node::TransformSpace); used by the
/// scenenode_* rotate/translate functions' relative_to parameter.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum transform_space
{
    TS_LOCAL,
    TS_PARENT,
    TS_WORLD
}
// Root functions
void release_engine();
// Presumably pre-fills `options` with defaults before init_engine() — TODO confirm.
void default_engine_options(engine_options* options);
void init_engine(const engine_options options);
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
// When auto_create_window is non-zero a window is created and returned;
// behaviour otherwise is not visible here — TODO confirm (likely returns null).
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
TimerHandle root_get_timer();
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
// `hwnd` presumably embeds the render window into an existing native window.
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
// Ogre config file persistence (ogre.cfg); restore/show return C-style booleans.
void save_config();
int restore_config();
int show_config_dialog();
void load_ogre_plugin(const char * plugin);
// Doesn't use OgreManager. Can still throw if type_name doesn't exist.
SceneManagerHandle root_create_scene_manager(const char* type_name, const char* instance_name);
// Doesn't use OgreManager. If a specific scene manager is not found,
// the default implementation is always returned.
SceneManagerHandle root_create_scene_manager_by_mask(SceneTypeMask type_mask, const char* instance_name);
// Does use OgreManager.
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
// Meaning of the int return is not visible here — TODO confirm (likely continue/stop flag).
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
void render_loop();
void pump_messages();
// Convenience logging; see logmanager_log_message below for fine-grained control.
void log_message(const char* message);
RenderWindowHandle root_create_render_window(const char* name, uint width, uint height, int fullscreen, NameValuePairListHandle params);
// Caller presumably frees the returned list via destroy_render_system_list().
RenderSystemListHandle root_get_available_renderers();
// Ogre::SceneManager calls
EntityHandle scenemanager_create_entity(SceneManagerHandle handle, const char* name, const char* mesh_name, const char* group_name);
SceneNodeHandle scenemanager_get_root_scene_node(SceneManagerHandle handle);
LightHandle scenemanager_create_light(SceneManagerHandle handle, const char* name);
// Mirrors Ogre::SceneManager::setSkyBox; int parameters are C-style booleans.
void scenemanager_set_sky_box(SceneManagerHandle handle, int enable, const char* material_name, float distance,
int draw_first, const coiQuaternion* orientation,
const char* group_name);
// Mirrors Ogre::SceneManager::setSkyDome.
void scenemanager_set_sky_dome(SceneManagerHandle handle, int enable, const char* material_name, float curvature,
float tiling, float distance, int draw_first, const coiQuaternion* orientation,
int xsegments, int ysegments, int ysegments_keep, const char* group_name);
// RenderSystem functions
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
const(char*) render_system_get_name(RenderSystemHandle handle);
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
uint render_system_list_size(RenderSystemListHandle list_handle);
// `at` is presumably a 0-based index into the list — TODO confirm.
RenderSystemHandle render_system_list_get(RenderSystemListHandle list_handle, uint at);
void destroy_render_system_list(RenderSystemListHandle handle);
// SceneManager functions
// These take no SceneManager handle, so they presumably act on an implicit
// "current" scene manager tracked elsewhere — TODO confirm against the C side.
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
// Lookup variants: by index (_int) and by name.
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name);
int scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
// `cascade` non-zero presumably applies the change to child nodes as well.
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade);
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle);
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z);
// Quaternion components passed (w, x, y, z), matching coiQuaternion's layout.
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z);
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_get_position(SceneNodeHandle handle, ref coiVector3 pos);
void scenenode_set_derived_position(SceneNodeHandle handle, const coiVector3* pos);
void scenenode_get_derived_position(SceneNodeHandle handle, coiVector3* pos);
// Angle units: the _degree variant takes degrees; the others take radians
// (per the parameter names).
void scenenode_yaw_degree(SceneNodeHandle handle, coiReal angle, transform_space relative_to);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
SceneNodeHandle scenenode_create_child_scenenode(SceneNodeHandle handle, const char* name, const ref coiVector3 translate, const ref coiQuaternion rotate);
// Viewports
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b, float a);
void viewport_set_background_colour_cv(ViewportHandle viewport_handle, ref ColourValue cv);
void viewport_set_auto_updated(ViewportHandle handle, int autoupdate);
int viewport_is_auto_updated(ViewportHandle handle);
// Float getters are presumably relative (0..1) metrics, the `actual_*` int getters
// pixel metrics — TODO confirm against Ogre::Viewport.
float viewport_get_top(ViewportHandle handle);
float viewport_get_left(ViewportHandle handle);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
int viewport_get_actual_top(ViewportHandle handle);
int viewport_get_actual_left(ViewportHandle handle);
int viewport_get_actual_width(ViewportHandle handle);
int viewport_get_actual_height(ViewportHandle handle);
// Resource management
// Presumably parses a resources.cfg-style file and registers every location listed.
void setup_resources(const char* resources_cfg);
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Accessors for Ogre::ResourceGroupManager's well-known constants.
const(char*) resourcegroupmanager_DEFAULT_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_INTERNAL_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_AUTODETECT_RESOURCE_GROUP_NAME();
size_t resourcegroupmanager_RESOURCE_SYSTEM_NUM_REFERENCE_COUNTS();
// Camera
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
void camera_move(CameraHandle handle, const float x, const float y, const float z);
void camera_move_relative(CameraHandle handle, const float x, const float y, const float z);
void camera_set_direction(CameraHandle handle, const float x, const float y, const float z);
void camera_get_direction(CameraHandle handle, coiVector3* v3);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
// Aspect ratio given as width/height components, or directly via the _ex variant.
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h);
void camera_set_aspect_ratio_ex(CameraHandle handle, float ratio);
float camera_get_aspect_ratio(CameraHandle handle);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
void camera_set_fovy(CameraHandle camera_handle, float angle);
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_get_position(CameraHandle handle, ref coiVector3 result);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_roll(CameraHandle handle, coiReal angle);
void camera_yaw(CameraHandle handle, coiReal angle);
void camera_pitch(CameraHandle handle, coiReal angle);
void camera_rotate(CameraHandle handle, const coiVector3* axis, coiReal angle);
void camera_rotate_q(CameraHandle handle, const coiQuaternion* q);
// Entity
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
void entity_set_cast_shadows(EntityHandle handle, int enabled);
int entity_get_cast_shadows(EntityHandle handle);
int entity_get_receives_shadows(EntityHandle handle);
void entity_set_material_name(EntityHandle handle, const char* material_name, const char* group_name);
// Light
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
void destroy_light(LightHandle handle);
void light_set_type(LightHandle handle, light_types type);
void light_set_diffuse_colour(LightHandle handle, const ref ColourValue colour);
void light_set_specular_colour(LightHandle handle, const ref ColourValue colour);
// FrameListener
// frame_event_type is a bitmask of the EVENT_FRAME_* constants above.
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
// _ctx variants pass a caller-supplied context pointer through to the callback
// and identify the registration by the returned handle.
FrameListenerHandle add_frame_listener_ctx(FrameListenerCtx callback, void* userdata);
void remove_frame_listener_ctx(FrameListenerHandle handle);
// WindowListener
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
WindowListenerHandle add_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerEvent window_event, void* userdata);
void remove_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerHandle listener_handle);
// LogManager
LogManagerHandle create_log_manager();
// LogManager::getSingletonPtr
LogManagerHandle get_log_manager();
//LogManager::getLog
LogHandle logmanager_get_log(const char* name);
//LogManager::getDefaultLog
LogHandle logmanager_get_default_log();
//LogManager::setDefaultLog
// Return value is presumably the previous default log — TODO confirm.
LogHandle logmanager_set_default_log(LogHandle log_handle);
//LogManager::createLog
LogHandle logmanager_create_log(const char* name, int default_log, int debugger_output, int suppress_file_output);
// n.b., Allows for finer grained control over the log messages at the cost of
// having to supply all these variables. If you don't need this control,
// use log_message above.
//LogManager::logMessage
void logmanager_log_message(const char* message, LogMessageLevel lml, int maskDebug, const char* log_name, int skip_message);
//LogManager::setLogDetail
void logmanager_set_log_detail(LoggingLevel lvl);
//LogManager::destroyLog
void logmanager_destroy_log(const char* name);
//LogManager::destroyLog overload
void logmanager_destroy_log_by_handle(LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener(LogListenerEvent log_event, LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener_ctx(LogListenerCtx log_event, LogHandle log_handle, void* userdata);
//Log::removeListener
void remove_log_listener(LogListenerHandle llh, LogHandle log_handle);
//Log::removeListener
void remove_log_listener_ctx(LogListenerHandle llh, LogHandle log_handle);
// NameValuePairList
// String key/value map (e.g. for root_create_render_window's params);
// release with destroy_name_value_pair_list().
NameValuePairListHandle create_name_value_pair_list();
void add_pair(NameValuePairListHandle params, const char* name, const char* value);
void destroy_name_value_pair_list(NameValuePairListHandle params);
// RenderWindow
// left/top/width/height are presumably relative (0.0–1.0) coordinates, as in
// Ogre::RenderTarget::addViewport — TODO confirm.
ViewportHandle render_window_add_viewport(RenderWindowHandle window_handle, CameraHandle camera_handle, int zorder, float left, float top, float width, float height);
int render_window_is_closed(RenderWindowHandle handle);
void render_window_set_active(RenderWindowHandle handle, int state);
void render_window_swap_buffers(RenderWindowHandle handle, int wait_for_vsync);
void render_window_get_custom_attribute(RenderWindowHandle handle, const char* attribute, void* pdata);
uint render_window_get_width(RenderWindowHandle handle);
uint render_window_get_height(RenderWindowHandle handle);
void renderwindow_get_statistics(RenderWindowHandle handle, ref FrameStats stats);
void renderwindow_get_statistics_ex(RenderWindowHandle handle, ref float lastFPS, ref float avgFPS, ref float bestFPS, ref float worstFPS);
// ColourValue
// Each fills `c` with the named preset colour.
void colourvalue_zero(ref ColourValue c);
void colourvalue_black(ref ColourValue c);
void colourvalue_white(ref ColourValue c);
void colourvalue_red(ref ColourValue c);
void colourvalue_green(ref ColourValue c);
void colourvalue_blue(ref ColourValue c);
// Vector3
// Comparison results are C-style booleans (non-zero = true).
//Vector3::operator !=
int vector3_notequals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator ==
int vector3_equals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +
coiVector3 vector3_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +=
// NOTE(review): lhs is passed by value, so an in-place "update" cannot reach the
// caller's vector — verify the intended semantics against the C implementation.
void vector3_update_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -=
// NOTE(review): same by-value concern as vector3_update_add_vector3.
void vector3_update_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_negate(coiVector3 v3);
// Vector3::operator/
coiVector3 vector3_divide_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::operator*
coiVector3 vector3_multiply_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::isNaN
int vector3_is_nan(coiVector3 v3);
//Vector3::primaryAxis
coiVector3 vector3_primary_axis(coiVector3);
// Vector3::ZERO
// The following return Ogre's named constant vectors by value.
coiVector3 vector3_ZERO();
coiVector3 vector3_UNIT_X();
coiVector3 vector3_UNIT_Y();
coiVector3 vector3_UNIT_Z();
coiVector3 vector3_NEGATIVE_UNIT_X();
coiVector3 vector3_NEGATIVE_UNIT_Y();
coiVector3 vector3_NEGATIVE_UNIT_Z();
coiVector3 vector3_UNIT_SCALE();
// Plane
PlaneHandle plane_create_plane();
// Plane with normal (x, y, z) at `distance` from the origin; caller releases the
// handle with plane_destroy_plane().
PlaneHandle plane_create_plane_normal(float x, float y, float z, float distance);
void plane_destroy_plane(PlaneHandle handle);
void plane_get_normal(PlaneHandle handle, coiVector3* normal);
void plane_set_normal(PlaneHandle handle, const coiVector3* normal);
coiReal plane_get_d(PlaneHandle handle);
void plane_set_d(PlaneHandle handle, coiReal d);
// MeshManager
// Mirrors Ogre::MeshManager::createPlane; `normals` and the *_shadow_buffer
// arguments are C-style booleans.
MeshHandle meshmanager_create_plane(const char* name, const char* group_name,
PlaneHandle plane, float width,
float height, int xsegments, int ysegments,
int normals, ushort num_tex_coord_sets,
float utile, float vtile, ref coiVector3 up_vector,
hardware_buffer_usage vertex_buffer_usage,
hardware_buffer_usage index_buffer_usage,
int vertex_shadow_buffer, int index_shadow_buffer);
// Ogre::Timer
int timer_set_option(TimerHandle handle, const char* key, void* value);
ulong timer_get_milliseconds(TimerHandle handle);
ulong timer_get_microseconds(TimerHandle handle);
ulong timer_get_milliseconds_cpu(TimerHandle handle);
ulong timer_get_microseconds_cpu(TimerHandle handle);
void timer_reset(TimerHandle handle);
// missed one  (extraction artifact: what follows is a second, duplicated copy of
// ogre_interface.di concatenated onto the first — deduplicate before compiling)
/******************************************************************************
* ogre_interface.di - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
// NOTE(review): from here down the file repeats the declarations above, including
// a second `module` declaration — a D module may contain only one; deduplicate.
module llcoi.ogre_interface;
extern(C):
// Scalar type used for angles/distances (maps to Ogre::Real; presumably single precision).
alias float coiReal;
// Bitmask values for add_frame_listener()'s frame_event_type argument.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// From OgrePlatform.h
alias uint uint32;
alias ushort uint16;
alias ubyte uint8;
alias int int32;
alias short int16;
alias byte int8;
// OgreSceneManager.h
alias ushort SceneTypeMask;
// OgreColourValue.h — packed 32-bit colour layouts (presumably one byte per channel).
alias uint32 RGBA;
alias uint32 ARGB;
alias uint32 ABGR;
alias uint32 BGRA;
// Opaque handles to Ogre-side objects; D clients treat them as tokens.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* RenderSystemListHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
alias void* LogManagerHandle;
alias void* LogHandle;
alias void* LogListenerHandle;
alias void* NameValuePairListHandle;
alias void* FrameListenerHandle;
alias void* PlaneHandle;
alias void* MeshHandle;
alias void* TimerHandle;
alias void* WindowListenerHandle;
// Present only in this copy of the interface (not declared in the copy above).
alias void* AxisAlignedBoxHandle;
// listener typedefs
// Frame callback — params presumably (timeSinceLastEvent, timeSinceLastFrame,
// event type bitmask) — TODO confirm.
alias int function(float,float,int) FrameListenerEvent;
alias void function(RenderWindowHandle) WindowListenerEvent;
// Log listener: mirrors the parameters of logmanager_log_message.
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message) LogListenerEvent;
// As LogListenerEvent, plus a caller-supplied context pointer.
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message, void* userdata) LogListenerCtx;
// Frame callback with context pointer; receives the FrameEvent struct by ref.
alias int function(const ref FrameEvent evt, int frame_type, void* userdata) FrameListenerCtx;
// Quaternion with w stored first, matching Ogre::Quaternion's component order.
struct coiQuaternion
{
float w;
float x;
float y;
float z;
}
// 3-component float vector, interchangeable with Ogre::Vector3 across the C boundary.
struct coiVector3
{
float x;
float y;
float z;
}
/// Position/orientation pair (mirrors Ogre::ViewPoint).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
struct ViewPoint
{
    coiVector3 position;
    coiQuaternion orientation;
}
// Mirrors Ogre::FrameEvent; times are presumably in seconds — TODO confirm.
struct FrameEvent
{
coiReal timeSinceLastEvent;
coiReal timeSinceLastFrame;
}
// RGBA colour with float channels (Ogre::ColourValue; channels normally 0.0–1.0 —
// TODO confirm range against the C side).
struct ColourValue
{
float r;
float g;
float b;
float a;
}
/// Start-up options consumed by init_engine(); default_engine_options() presumably
/// pre-fills it — confirm before relying on uninitialised fields.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
struct engine_options
{
    const char* renderer_s;       // render system name (presumably, from the _s suffix pattern)
    const char* plugin_folder_s;  // folder containing Ogre plugins
    const char* window_title;
    const char* log_name;
    int width, height, auto_window; // auto_window is a C-style boolean
}
/// Per-window rendering statistics, filled by renderwindow_get_statistics().
/// Mirrors Ogre::RenderTarget::FrameStats.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
struct FrameStats
{
    float lastFPS;
    float avgFPS;
    float bestFPS;
    float worstFPS;
    ulong bestFrameTime;        // frame-time extremes; units not visible here — TODO confirm (likely ms)
    ulong worstFrameTime;
    size_t triangleCount;
    size_t batchCount;
}
/// Log verbosity passed to logmanager_set_log_detail() (Ogre::LoggingLevel).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum LoggingLevel
{
    LL_LOW    = 1,
    LL_NORMAL = 2,
    LL_BOREME = 3
}
/// Importance of a single log message (Ogre::LogMessageLevel);
/// see logmanager_log_message().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum LogMessageLevel
{
    LML_TRIVIAL  = 1,
    LML_NORMAL   = 2,
    LML_CRITICAL = 3
}
/// Bit flags selecting which overlay statistics to show; values are powers of two
/// and may be OR-ed together (SF_ALL selects everything).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum stat_flags
{
    SF_NONE           = 0,
    SF_FPS            = 1,
    SF_AVG_FPS        = 2,
    SF_BEST_FPS       = 4,
    SF_WORST_FPS      = 8,
    SF_TRIANGLE_COUNT = 16,
    SF_ALL            = 0xFFFF
}
/// Which buffer of a render target to address (Ogre::FrameBuffer).
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum frame_buffer
{
    FB_FRONT,
    FB_BACK,
    FB_AUTO
}
/// Scene-manager classification bit flags (Ogre::SceneType); combinable into the
/// SceneTypeMask accepted by root_create_scene_manager_by_mask().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum scene_type
{
    ST_GENERIC           = 1,
    ST_EXTERIOR_CLOSE    = 2,
    ST_EXTERIOR_FAR      = 4,
    ST_EXTERIOR_REAL_FAR = 8,
    ST_INTERIOR          = 16
}
// Hardware buffer usage flags (Ogre::HardwareBuffer::Usage). The combined members
// are bit-ORs of the basic flags: 5 = STATIC|WRITE_ONLY, 6 = DYNAMIC|WRITE_ONLY,
// 14 = DYNAMIC|WRITE_ONLY|DISCARDABLE.
enum hardware_buffer_usage
{
HBU_STATIC = 1,
HBU_DYNAMIC = 2,
HBU_WRITE_ONLY = 4,
HBU_DISCARDABLE = 8,
HBU_STATIC_WRITE_ONLY = 5,
HBU_DYNAMIC_WRITE_ONLY = 6,
HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE = 14
}
/// Kind of light source (Ogre::Light::LightTypes); see light_set_type().
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum light_types
{
    /// Point light sources give off light equally in all directions, so require only position not direction
    LT_POINT = 0,
    /// Directional lights simulate parallel light beams from a distant source, hence have direction but no position
    LT_DIRECTIONAL = 1,
    /// Spotlights simulate a cone of light from a source so require position and direction, plus extra values for falloff
    LT_SPOTLIGHT = 2
}
/// Coordinate space for node transforms (Ogre::Node::TransformSpace); used by the
/// scenenode_* rotate/translate functions' relative_to parameter.
/// Fix: dropped the stray `;` after the closing brace (invalid/deprecated empty
/// declaration in D).
enum transform_space
{
    TS_LOCAL,
    TS_PARENT,
    TS_WORLD
}
// Root functions
void release_engine();
void default_engine_options(engine_options* options);
void init_engine(const engine_options options);
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
TimerHandle root_get_timer();
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
void save_config();
int restore_config();
int show_config_dialog();
void load_ogre_plugin(const char * plugin);
// Doesn't use OgreManager. Can still throw if type_name doesn't exist.
SceneManagerHandle root_create_scene_manager(const char* type_name, const char* instance_name);
// Doesn't use OgreManager. If a specific scene manager is not found,
// the default implementation is always returned.
SceneManagerHandle root_create_scene_manager_by_mask(SceneTypeMask type_mask, const char* instance_name);
// Does use OgreManager.
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
void render_loop();
void pump_messages();
void log_message(const char* message);
RenderWindowHandle root_create_render_window(const char* name, uint width, uint height, int fullscreen, NameValuePairListHandle params);
RenderSystemListHandle root_get_available_renderers();
// Ogre::SceneManager calls
EntityHandle scenemanager_create_entity(SceneManagerHandle handle, const char* name, const char* mesh_name, const char* group_name);
SceneNodeHandle scenemanager_get_root_scene_node(SceneManagerHandle handle);
LightHandle scenemanager_create_light(SceneManagerHandle handle, const char* name);
void scenemanager_set_sky_box(SceneManagerHandle handle, int enable, const char* material_name, float distance,
int draw_first, const coiQuaternion* orientation,
const char* group_name);
void scenemanager_set_sky_dome(SceneManagerHandle handle, int enable, const char* material_name, float curvature,
float tiling, float distance, int draw_first, const coiQuaternion* orientation,
int xsegments, int ysegments, int ysegments_keep, const char* group_name);
// RenderSystem functions
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
const(char*) render_system_get_name(RenderSystemHandle handle);
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
uint render_system_list_size(RenderSystemListHandle list_handle);
RenderSystemHandle render_system_list_get(RenderSystemListHandle list_handle, uint at);
void destroy_render_system_list(RenderSystemListHandle handle);
// SceneManager functions
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes.
// Notes on conventions used throughout this section:
//  - `int` parameters/returns named like booleans (visible, cascade, enabled, ...) are C bools: 0 = false, nonzero = true.
//  - `transform_space` selects the coordinate space for rotations/translations (see the transform_space enum).
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
// Fetch an attached entity either by index or by name.
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name);
int scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
// Detach an entity by index, by handle, or by name; _detach_all_objects clears everything.
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
// Visibility control; the _ex variants additionally take `cascade` to apply to child nodes.
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade);
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
// Returns the SceneManager that created this node.
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle);
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z);
// Orientation is a quaternion passed component-wise (w first).
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z);
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_get_position(SceneNodeHandle handle, ref coiVector3 pos);
// NOTE(review): derived-position accessors use pointers while get_position uses `ref` -- inconsistent but ABI-equivalent.
void scenenode_set_derived_position(SceneNodeHandle handle, const coiVector3* pos);
void scenenode_get_derived_position(SceneNodeHandle handle, coiVector3* pos);
// Rotation helpers: _yaw_degree takes degrees, the others take radians (per parameter names).
void scenenode_yaw_degree(SceneNodeHandle handle, coiReal angle, transform_space relative_to);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
// Creates a named child node with an initial translation and rotation.
SceneNodeHandle scenenode_create_child_scenenode(SceneNodeHandle handle, const char* name, const ref coiVector3 translate, const ref coiQuaternion rotate);
// Viewports.
// Background colour components are floats; [0..1] range assumed per Ogre convention -- TODO confirm.
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b, float a);
void viewport_set_background_colour_cv(ViewportHandle viewport_handle, ref ColourValue cv);
// Auto-update flag: 0 = manual updates only, nonzero = updated automatically each frame.
void viewport_set_auto_updated(ViewportHandle handle, int autoupdate);
int viewport_is_auto_updated(ViewportHandle handle);
// Relative dimensions (fractions of the render target, per Ogre::Viewport).
float viewport_get_top(ViewportHandle handle);
float viewport_get_left(ViewportHandle handle);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
// Actual dimensions in pixels.
int viewport_get_actual_top(ViewportHandle handle);
int viewport_get_actual_left(ViewportHandle handle);
int viewport_get_actual_width(ViewportHandle handle);
int viewport_get_actual_height(ViewportHandle handle);
// Resource management.
// Parses a resources.cfg-style file and registers the locations it lists.
void setup_resources(const char* resources_cfg);
// Registers a single resource location (e.g. type "FileSystem" or "Zip") under a resource group.
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Accessors for Ogre::ResourceGroupManager's well-known constants.
const(char*) resourcegroupmanager_DEFAULT_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_INTERNAL_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_AUTODETECT_RESOURCE_GROUP_NAME();
size_t resourcegroupmanager_RESOURCE_SYSTEM_NUM_REFERENCE_COUNTS();
// Camera.
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
// Translate the camera in world space / relative to its own axes respectively.
void camera_move(CameraHandle handle, const float x, const float y, const float z);
void camera_move_relative(CameraHandle handle, const float x, const float y, const float z);
void camera_set_direction(CameraHandle handle, const float x, const float y, const float z);
void camera_get_direction(CameraHandle handle, coiVector3* v3);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
// Two-argument form presumably computes the ratio from width/height; _ex takes the ratio directly -- TODO confirm.
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h);
void camera_set_aspect_ratio_ex(CameraHandle handle, float ratio);
float camera_get_aspect_ratio(CameraHandle handle);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
// Vertical field of view; units (radians vs degrees) not visible here -- TODO confirm against the C side.
void camera_set_fovy(CameraHandle camera_handle, float angle);
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_get_position(CameraHandle handle, ref coiVector3 result);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
// Rotations about the camera's local axes; `angle` is a coiReal (unit not visible here).
void camera_roll(CameraHandle handle, coiReal angle);
void camera_yaw(CameraHandle handle, coiReal angle);
void camera_pitch(CameraHandle handle, coiReal angle);
// Rotate about an arbitrary axis, or by a quaternion.
void camera_rotate(CameraHandle handle, const coiVector3* axis, coiReal angle);
void camera_rotate_q(CameraHandle handle, const coiQuaternion* q);
// Entity.
// Creates an entity from a .mesh resource; the mesh must be loadable from a registered resource location.
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
void entity_set_cast_shadows(EntityHandle handle, int enabled);
int entity_get_cast_shadows(EntityHandle handle);
int entity_get_receives_shadows(EntityHandle handle);
void entity_set_material_name(EntityHandle handle, const char* material_name, const char* group_name);
//Ogre::Entity::getBoundingBox() const
AxisAlignedBoxHandle entity_get_bounding_box(EntityHandle handle);
// Light.
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
void destroy_light(LightHandle handle);
// `type` is one of the light_types enum values (point / directional / spotlight).
void light_set_type(LightHandle handle, light_types type);
void light_set_diffuse_colour(LightHandle handle, const ref ColourValue colour);
void light_set_specular_colour(LightHandle handle, const ref ColourValue colour);
// FrameListener.
// `frame_event_type` is a bitmask of the EVENT_FRAME_* constants selecting which frame phases fire the callback.
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
// _ctx variants carry an opaque `userdata` pointer back into the callback; remove via the returned handle.
FrameListenerHandle add_frame_listener_ctx(FrameListenerCtx callback, void* userdata);
void remove_frame_listener_ctx(FrameListenerHandle handle);
// WindowListener.
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
WindowListenerHandle add_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerEvent window_event, void* userdata);
void remove_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerHandle listener_handle);
// LogManager.
LogManagerHandle create_log_manager();
// LogManager::getSingletonPtr
LogManagerHandle get_log_manager();
//LogManager::getLog
LogHandle logmanager_get_log(const char* name);
//LogManager::getDefaultLog
LogHandle logmanager_get_default_log();
//LogManager::setDefaultLog -- returns the previous default log (assumed from the return type; TODO confirm).
LogHandle logmanager_set_default_log(LogHandle log_handle);
//LogManager::createLog -- the int parameters are C bools (0/nonzero).
LogHandle logmanager_create_log(const char* name, int default_log, int debugger_output, int suppress_file_output);
// n.b., Allows for finer grained control over the log messages at the cost of
// having to supply all these variables. If you don't need this control,
// use log_message above.
//LogManager::logMessage
void logmanager_log_message(const char* message, LogMessageLevel lml, int maskDebug, const char* log_name, int skip_message);
//LogManager::setLogDetail
void logmanager_set_log_detail(LoggingLevel lvl);
//LogManager::destroyLog
void logmanager_destroy_log(const char* name);
//LogManager::destroyLog overload
void logmanager_destroy_log_by_handle(LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener(LogListenerEvent log_event, LogHandle log_handle);
//Log::addListener (with opaque userdata passed back to the callback)
LogListenerHandle add_log_listener_ctx(LogListenerCtx log_event, LogHandle log_handle, void* userdata);
//Log::removeListener
void remove_log_listener(LogListenerHandle llh, LogHandle log_handle);
//Log::removeListener
void remove_log_listener_ctx(LogListenerHandle llh, LogHandle log_handle);
// NameValuePairList: string key/value map passed to window/render-target creation (mirrors Ogre::NameValuePairList).
NameValuePairListHandle create_name_value_pair_list();
void add_pair(NameValuePairListHandle params, const char* name, const char* value);
// Caller must destroy lists it created with create_name_value_pair_list().
void destroy_name_value_pair_list(NameValuePairListHandle params);
// RenderWindow.
// left/top/width/height are relative (fractional) viewport dimensions; zorder orders overlapping viewports.
ViewportHandle render_window_add_viewport(RenderWindowHandle window_handle, CameraHandle camera_handle, int zorder, float left, float top, float width, float height);
int render_window_is_closed(RenderWindowHandle handle);
void render_window_set_active(RenderWindowHandle handle, int state);
void render_window_swap_buffers(RenderWindowHandle handle, int wait_for_vsync);
// Platform-specific attribute query (e.g. "WINDOW" for the native handle); writes into *pdata.
void render_window_get_custom_attribute(RenderWindowHandle handle, const char* attribute, void* pdata);
uint render_window_get_width(RenderWindowHandle handle);
uint render_window_get_height(RenderWindowHandle handle);
// Frame statistics, either as a struct or as individual out-parameters.
void renderwindow_get_statistics(RenderWindowHandle handle, ref FrameStats stats);
void renderwindow_get_statistics_ex(RenderWindowHandle handle, ref float lastFPS, ref float avgFPS, ref float bestFPS, ref float worstFPS);
// ColourValue: fill `c` with the named preset colour (mirrors Ogre::ColourValue's static constants).
void colourvalue_zero(ref ColourValue c);
void colourvalue_black(ref ColourValue c);
void colourvalue_white(ref ColourValue c);
void colourvalue_red(ref ColourValue c);
void colourvalue_green(ref ColourValue c);
void colourvalue_blue(ref ColourValue c);
// Vector3: C wrappers around Ogre::Vector3 operators; vectors are passed and returned by value.
//Vector3::operator != (returns C bool: 0/nonzero)
int vector3_notequals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator ==
int vector3_equals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +
coiVector3 vector3_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator += -- NOTE(review): lhs is passed by value, so the update cannot reach the caller; verify the C signature.
void vector3_update_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -= (same by-value caveat as += above)
void vector3_update_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator - (unary negation)
coiVector3 vector3_negate(coiVector3 v3);
// Vector3::operator/ (component-wise, per Ogre::Vector3)
coiVector3 vector3_divide_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::operator* (component-wise, per Ogre::Vector3)
coiVector3 vector3_multiply_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::isNaN
int vector3_is_nan(coiVector3 v3);
//Vector3::primaryAxis
coiVector3 vector3_primary_axis(coiVector3);
// Accessors for Ogre::Vector3's static unit-vector constants.
// Vector3::ZERO
coiVector3 vector3_ZERO();
coiVector3 vector3_UNIT_X();
coiVector3 vector3_UNIT_Y();
coiVector3 vector3_UNIT_Z();
coiVector3 vector3_NEGATIVE_UNIT_X();
coiVector3 vector3_NEGATIVE_UNIT_Y();
coiVector3 vector3_NEGATIVE_UNIT_Z();
coiVector3 vector3_UNIT_SCALE();
// Plane (mirrors Ogre::Plane: unit normal plus signed distance d).
PlaneHandle plane_create_plane();
PlaneHandle plane_create_plane_normal(float x, float y, float z, float distance);
// Caller owns planes it creates and must destroy them here.
void plane_destroy_plane(PlaneHandle handle);
void plane_get_normal(PlaneHandle handle, coiVector3* normal);
void plane_set_normal(PlaneHandle handle, const coiVector3* normal);
coiReal plane_get_d(PlaneHandle handle);
void plane_set_d(PlaneHandle handle, coiReal d);
// MeshManager.
// Creates a plane mesh (mirrors Ogre::MeshManager::createPlane); the int parameters
// `normals`, `vertex_shadow_buffer` and `index_shadow_buffer` are C bools (0/nonzero).
MeshHandle meshmanager_create_plane(const char* name, const char* group_name,
                                    PlaneHandle plane, float width,
                                    float height, int xsegments, int ysegments,
                                    int normals, ushort num_tex_coord_sets,
                                    float utile, float vtile, ref coiVector3 up_vector,
                                    hardware_buffer_usage vertex_buffer_usage,
                                    hardware_buffer_usage index_buffer_usage,
                                    int vertex_shadow_buffer, int index_shadow_buffer);
// Ogre::Timer.
// Platform-specific timer option (mirrors Ogre::Timer::setOption); returns C bool success.
int timer_set_option(TimerHandle handle, const char* key, void* value);
// Elapsed time since the timer was created/reset; the _cpu variants measure CPU time.
ulong timer_get_milliseconds(TimerHandle handle);
ulong timer_get_microseconds(TimerHandle handle);
ulong timer_get_milliseconds_cpu(TimerHandle handle);
ulong timer_get_microseconds_cpu(TimerHandle handle);
void timer_reset(TimerHandle handle);
// Ogre::AxisAlignedBox.
AxisAlignedBoxHandle create_axis_aligned_box();
void destroy_axis_aligned_box(AxisAlignedBoxHandle handle);
|
profile.di
<?xml version="1.0" encoding="ASCII"?>
<di:SashWindowsMngr xmi:version="2.0" xmlns:xmi="http://www.omg.org/XMI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.eclipse.org/papyrus/0.7.0/sashdi">
<pageList>
<availablePage>
<emfPageIdentifier href="zca.profile.notation#_gTe5YL-VEeCXSLIizrgdww"/>
</availablePage>
</pageList>
<sashModel currentSelection="//@sashModel/@windows.0/@children.0">
<windows>
<children xsi:type="di:TabFolder">
<children>
<emfPageIdentifier href="zca.profile.notation#_gTe5YL-VEeCXSLIizrgdww"/>
</children>
</children>
</windows>
</sashModel>
</di:SashWindowsMngr>
|
/******************************************************************************
* ogre_interface.d - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
module llcoi.ogre_interface;
// Everything below is declared with C linkage: these are bindings to the llcoi C library.
extern(C):
// Ogre's configurable real type; llcoi builds against single-precision Ogre here.
alias float coiReal;
// Bitmask values for add_frame_listener's frame_event_type parameter.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// From OgrePlatform.h
alias uint uint32;
alias ushort uint16;
alias ubyte uint8;
alias int int32;
alias short int16;
alias byte int8;
// OgreSceneManager.h
alias ushort SceneTypeMask;
// OgreColourValue.h -- packed 32-bit colour layouts.
alias uint32 RGBA;
alias uint32 ARGB;
alias uint32 ABGR;
alias uint32 BGRA;
// Opaque handles: each is a raw pointer to the corresponding Ogre object,
// created and destroyed only through the C API below.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* RenderSystemListHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
alias void* LogManagerHandle;
alias void* LogHandle;
alias void* LogListenerHandle;
alias void* NameValuePairListHandle;
alias void* FrameListenerHandle;
alias void* PlaneHandle;
alias void* PlaneListHandle;
alias void* PlaneBoundedVolumeHandle;
alias void* MeshHandle;
alias void* TimerHandle;
alias void* WindowListenerHandle;
alias void* AxisAlignedBoxHandle;
alias void* RayHandle;
alias void* SphereHandle;
alias void* SceneQueryHandle;
alias void* RaySceneQueryHandle;
alias void* RaySceneQueryResultHandle;
alias void* SceneQueryListenerHandle;
alias void* RaySceneQueryListenerHandle;
alias void* SceneQueryResultHandle;
alias void* MovableObjectHandle;
alias void* RenderOperationHandle;
// listener typedefs -- callback function pointers invoked from the C side.
// Frame listener: (time_since_last_event, time_since_last_frame, event_type) -> continue flag (C bool).
alias int function(float,float,int) FrameListenerEvent;
alias void function(RenderWindowHandle) WindowListenerEvent;
// Log listeners mirror logmanager_log_message's parameters; the Ctx variant adds opaque userdata.
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message) LogListenerEvent;
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message, void* userdata) LogListenerCtx;
alias int function(const ref FrameEvent evt, int frame_type, void* userdata) FrameListenerCtx;
// Scene-query result callbacks; return value is presumably a continue flag (C bool) -- TODO confirm.
alias int function(const ref world_fragment frag, void* userdata) SceneQueryFragmentResult;
alias int function(MovableObjectHandle handle, void* userdata) SceneQueryObjectResult;
alias int function(const ref world_fragment frag, coiReal distance, void* userdata) RaySceneQueryFragmentResult;
alias int function(MovableObjectHandle handle, coiReal distance, void* userdata) RaySceneQueryObjectResult;
// Plain-data mirror of Ogre::Quaternion (w first, matching Ogre's member order).
struct coiQuaternion
{
    float w;
    float x;
    float y;
    float z;
}
// Plain-data mirror of Ogre::Vector3.
struct coiVector3
{
    float x;
    float y;
    float z;
}
// Position + orientation pair (mirrors Ogre::ViewPoint).
struct ViewPoint
{
    coiVector3 position;
    coiQuaternion orientation;
};
// Mirrors Ogre::FrameEvent; times are in seconds (assumed per Ogre convention -- TODO confirm).
struct FrameEvent
{
    coiReal timeSinceLastEvent;
    coiReal timeSinceLastFrame;
}
// Ray intersection result: `intersects` is a C bool; `distance` is valid when it is nonzero.
struct ray_pair
{
    int intersects;
    coiReal distance;
}
// Plain-data mirror of Ogre::ColourValue (components are floats).
struct ColourValue
{
    float r;
    float g;
    float b;
    float a;
}
// Startup options consumed by init_engine(); see default_engine_options() for defaults.
struct engine_options
{
    const char* renderer_s;       // render system name
    const char* plugin_folder_s;  // directory containing Ogre plugins
    const char* window_title;
    const char* log_name;
    int width, height, auto_window;  // auto_window is a C bool
};
// Mirrors Ogre::RenderTarget::FrameStats.
struct FrameStats
{
    float lastFPS;
    float avgFPS;
    float bestFPS;
    float worstFPS;
    ulong bestFrameTime;
    ulong worstFrameTime;
    size_t triangleCount;
    size_t batchCount;
};
// Mirrors Ogre::SceneQuery::WorldFragment; which member is valid depends on fragment_type.
struct world_fragment
{
    world_fragment_type fragment_type;
    coiVector3 single_intersection;   // valid for WFT_SINGLE_INTERSECTION
    PlaneListHandle planes;           // valid for WFT_PLANE_BOUNDED_REGION
    void* geometry;                   // valid for WFT_CUSTOM_GEOMETRY
    RenderOperationHandle render_op;  // valid for WFT_RENDER_OPERATION
}
// One hit from a ray scene query (mirrors Ogre::RaySceneQueryResultEntry).
struct rayscenequery_result_entry
{
    coiReal distance;
    MovableObjectHandle movable;
    world_fragment* fragment;
}
// Mirrors Ogre::LoggingLevel (detail level passed to logmanager_set_log_detail).
enum LoggingLevel
{
    LL_LOW = 1,
    LL_NORMAL = 2,
    LL_BOREME = 3
};
// Mirrors Ogre::LogMessageLevel (severity of an individual message).
enum LogMessageLevel
{
    LML_TRIVIAL = 1,
    LML_NORMAL = 2,
    LML_CRITICAL = 3
};
// Bitmask of which statistics to display (mirrors Ogre's StatFlags).
enum stat_flags
{
    SF_NONE = 0,
    SF_FPS = 1,
    SF_AVG_FPS = 2,
    SF_BEST_FPS = 4,
    SF_WORST_FPS = 8,
    SF_TRIANGLE_COUNT = 16,
    SF_ALL = 0xFFFF
};
// Mirrors Ogre::RenderTarget::FrameBuffer.
enum frame_buffer
{
    FB_FRONT,
    FB_BACK,
    FB_AUTO
};
// Mirrors Ogre::SceneType; values are bit flags combinable into a SceneTypeMask.
enum scene_type
{
    ST_GENERIC = 1,
    ST_EXTERIOR_CLOSE = 2,
    ST_EXTERIOR_FAR = 4,
    ST_EXTERIOR_REAL_FAR = 8,
    ST_INTERIOR = 16
};
// Mirrors Ogre::HardwareBuffer::Usage; the combined values are ORs of the basic flags.
enum hardware_buffer_usage
{
    HBU_STATIC = 1,
    HBU_DYNAMIC = 2,
    HBU_WRITE_ONLY = 4,
    HBU_DISCARDABLE = 8,
    HBU_STATIC_WRITE_ONLY = 5,                // HBU_STATIC | HBU_WRITE_ONLY
    HBU_DYNAMIC_WRITE_ONLY = 6,               // HBU_DYNAMIC | HBU_WRITE_ONLY
    HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE = 14   // HBU_DYNAMIC | HBU_WRITE_ONLY | HBU_DISCARDABLE
}
// Mirrors Ogre::Light::LightTypes.
enum light_types
{
    /// Point light sources give off light equally in all directions, so require only position not direction
    LT_POINT = 0,
    /// Directional lights simulate parallel light beams from a distant source, hence have direction but no position
    LT_DIRECTIONAL = 1,
    /// Spotlights simulate a cone of light from a source so require position and direction, plus extra values for falloff
    LT_SPOTLIGHT = 2
};
// Mirrors Ogre::Node::TransformSpace: the coordinate space a transform is applied in.
enum transform_space
{
    TS_LOCAL,
    TS_PARENT,
    TS_WORLD
};
// Mirrors Ogre::AxisAlignedBox::Extent.
enum Extent
{
    EXTENT_NULL,
    EXTENT_FINITE,
    EXTENT_INFINITE
};
// Mirrors Ogre::AxisAlignedBox::CornerEnum; values match Ogre's corner numbering.
enum CornerEnum
{
    FAR_LEFT_BOTTOM = 0,
    FAR_LEFT_TOP = 1,
    FAR_RIGHT_TOP = 2,
    FAR_RIGHT_BOTTOM = 3,
    NEAR_RIGHT_BOTTOM = 7,
    NEAR_LEFT_BOTTOM = 6,
    NEAR_LEFT_TOP = 5,
    NEAR_RIGHT_TOP = 4
};
// Mirrors Ogre::Plane::Side: which side of a plane an object lies on.
enum plane_side
{
    NO_SIDE,
    POSITIVE_SIDE,
    NEGATIVE_SIDE,
    BOTH_SIDE
};
// Mirrors Ogre::SceneQuery::WorldFragmentType: selects which world_fragment member is populated.
enum world_fragment_type
{
    /// Return no world geometry hits at all
    WFT_NONE,
    /// Return pointers to convex plane-bounded regions
    WFT_PLANE_BOUNDED_REGION,
    /// Return a single intersection point (typically RaySceneQuery only)
    WFT_SINGLE_INTERSECTION,
    /// Custom geometry as defined by the SceneManager
    WFT_CUSTOM_GEOMETRY,
    /// General RenderOperation structure
    WFT_RENDER_OPERATION
};
// Root functions -- engine lifetime and top-level services.
void release_engine();
// Fills `options` with sensible defaults; callers tweak the struct and pass it to init_engine().
void default_engine_options(engine_options* options);
void init_engine(const engine_options options);
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
// `auto_create_window` is a C bool; returns the created window or presumably null when 0 -- TODO confirm.
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
TimerHandle root_get_timer();
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
// Creates a window embedded in an existing native window (`hwnd`).
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
// Operates on the C layer's notion of the "current" window.
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
// Ogre config persistence (ogre.cfg); restore/show return C bool success.
void save_config();
int restore_config();
int show_config_dialog();
void load_ogre_plugin(const char * plugin);
// Doesn't use OgreManager. Can still throw if type_name doesn't exist.
SceneManagerHandle root_create_scene_manager(const char* type_name, const char* instance_name);
// Doesn't use OgreManager. If a specific scene manager is not found,
// the default implementation is always returned.
SceneManagerHandle root_create_scene_manager_by_mask(SceneTypeMask type_mask, const char* instance_name);
// Does use OgreManager.
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
// Render one frame; return value is a C bool (continue rendering).
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
// Blocking render loop; pump_messages processes OS events without rendering.
void render_loop();
void pump_messages();
void log_message(const char* message);
RenderWindowHandle root_create_render_window(const char* name, uint width, uint height, int fullscreen, NameValuePairListHandle params);
// Returned list must be released with destroy_render_system_list().
RenderSystemListHandle root_get_available_renderers();
// Ogre::SceneManager calls (explicit-handle variants).
EntityHandle scenemanager_create_entity(SceneManagerHandle handle, const char* name, const char* mesh_name, const char* group_name);
SceneNodeHandle scenemanager_get_root_scene_node(SceneManagerHandle handle);
LightHandle scenemanager_create_light(SceneManagerHandle handle, const char* name);
// Sky box / sky dome setup; `enable` and `draw_first` are C bools, `orientation` may describe the sky rotation.
void scenemanager_set_sky_box(SceneManagerHandle handle, int enable, const char* material_name, float distance,
                              int draw_first, const coiQuaternion* orientation,
                              const char* group_name);
void scenemanager_set_sky_dome(SceneManagerHandle handle, int enable, const char* material_name, float curvature,
                               float tiling, float distance, int draw_first, const coiQuaternion* orientation,
                               int xsegments, int ysegments, int ysegments_keep, const char* group_name);
const(char*) scenemanager_get_name(SceneManagerHandle handle);
//void SceneManager::destroyQuery(Ogre::SceneQuery* query);
void scenemanager_destroy_scenequery(SceneManagerHandle handle, SceneQueryHandle query);
// Ogre::SceneManager::createRayQuery(Ogre::Ray const&, unsigned long)
// NOTE(review): first parameter is typed SceneQueryHandle but by the comment above it is the scene manager -- verify.
RaySceneQueryHandle scenemanager_create_rayquery(SceneQueryHandle handle, RayHandle ray_handle, ulong mask);
// RenderSystem functions.
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
const(char*) render_system_get_name(RenderSystemHandle handle);
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
// RenderSystemList iteration helpers; lists come from root_get_available_renderers().
uint render_system_list_size(RenderSystemListHandle list_handle);
RenderSystemHandle render_system_list_get(RenderSystemListHandle list_handle, uint at);
void destroy_render_system_list(RenderSystemListHandle handle);
// SceneManager functions (implicit/current scene manager).
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes.
// Conventions: `int` parameters named like booleans are C bools (0/nonzero);
// `transform_space` selects the coordinate space for rotations/translations.
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
// Fetch an attached entity either by index or by name.
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name);
ushort scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
// Detach an entity by index, by handle, or by name; _detach_all_objects clears everything.
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
// Visibility control; the _ex variants additionally take `cascade` to apply to child nodes.
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade);
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
// Returns the SceneManager that created this node.
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle);
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
// Orientation is a quaternion passed component-wise (w first).
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z);
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_get_position(SceneNodeHandle handle, ref coiVector3 pos);
void scenenode_set_derived_position(SceneNodeHandle handle, const ref coiVector3 pos);
void scenenode_get_derived_position(SceneNodeHandle handle, ref coiVector3 pos);
// Rotation helpers: _yaw_degree takes degrees, the others take radians (per parameter names).
void scenenode_yaw_degree(SceneNodeHandle handle, coiReal angle, transform_space relative_to);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
// Creates a named child node with an initial translation and rotation.
SceneNodeHandle scenenode_create_child_scenenode(SceneNodeHandle handle, const char* name, const ref coiVector3 translate, const ref coiQuaternion rotate);
// Viewports.
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b, float a);
void viewport_set_background_colour_cv(ViewportHandle viewport_handle, ref ColourValue cv);
// Auto-update flag: 0 = manual updates only, nonzero = updated automatically each frame.
void viewport_set_auto_updated(ViewportHandle handle, int autoupdate);
int viewport_is_auto_updated(ViewportHandle handle);
// Relative dimensions (fractions of the render target, per Ogre::Viewport).
float viewport_get_top(ViewportHandle handle);
float viewport_get_left(ViewportHandle handle);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
// Actual dimensions in pixels.
int viewport_get_actual_top(ViewportHandle handle);
int viewport_get_actual_left(ViewportHandle handle);
int viewport_get_actual_width(ViewportHandle handle);
int viewport_get_actual_height(ViewportHandle handle);
//Ogre::Viewport::setDimensions(float, float, float, float) -- relative (fractional) coordinates.
void viewport_set_dimensions(ViewportHandle handle, coiReal left, coiReal top, coiReal width, coiReal height);
//Ogre::Viewport::getActualDimensions(int&, int&, int&, int&) const -- pixel coordinates via out-parameters.
void viewport_get_actual_dimensions(ViewportHandle handle, ref int left, ref int top, ref int width, ref int height);
//Ogre::Viewport::getBackgroundColour() const
void viewport_get_background_colour(ViewportHandle handle, ref ColourValue cv);
// Resource management.
// Parses a resources.cfg-style file and registers the locations it lists.
void setup_resources(const char* resources_cfg);
// Registers a single resource location (e.g. type "FileSystem" or "Zip") under a resource group.
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Accessors for Ogre::ResourceGroupManager's well-known constants.
const(char*) resourcegroupmanager_DEFAULT_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_INTERNAL_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_AUTODETECT_RESOURCE_GROUP_NAME();
size_t resourcegroupmanager_RESOURCE_SYSTEM_NUM_REFERENCE_COUNTS();
// Camera.
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
// Translate the camera in world space / relative to its own axes respectively.
void camera_move(CameraHandle handle, const float x, const float y, const float z);
void camera_move_relative(CameraHandle handle, const float x, const float y, const float z);
void camera_set_direction(CameraHandle handle, const float x, const float y, const float z, transform_space relative_to);
// Basis-vector queries, written into the out-parameter.
void camera_get_direction(CameraHandle handle, ref coiVector3 v3);
void camera_get_up(CameraHandle handle, ref coiVector3 up);
void camera_get_right(CameraHandle handle, ref coiVector3 right);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
// Two-argument form presumably computes the ratio from width/height; _ex takes the ratio directly -- TODO confirm.
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h);
void camera_set_aspect_ratio_ex(CameraHandle handle, float ratio);
float camera_get_aspect_ratio(CameraHandle handle);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
// Vertical field of view; units (radians vs degrees) not visible here -- TODO confirm against the C side.
void camera_set_fovy(CameraHandle camera_handle, float angle);
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_get_position(CameraHandle handle, ref coiVector3 result);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
// Rotations about the camera's local axes.
void camera_roll(CameraHandle handle, coiReal angle);
void camera_yaw(CameraHandle handle, coiReal angle);
void camera_pitch(CameraHandle handle, coiReal angle);
// Rotate about an arbitrary axis, or by a quaternion.
void camera_rotate(CameraHandle handle, const coiVector3* axis, coiReal angle);
void camera_rotate_q(CameraHandle handle, const coiQuaternion* q);
//Ogre::Camera::setFixedYawAxis(bool, Ogre::Vector3 const&)
void camera_set_fixed_yaw_axis(CameraHandle handle, int on, const ref coiVector3 fixed_axis);
//Ogre::Camera::getOrientation() const
void camera_get_orientation(CameraHandle handle, ref coiQuaternion orientation);
//Ogre::Camera::setOrientation(Ogre::Quaternion const&)
void camera_set_orientation(CameraHandle handle, const ref coiQuaternion orientation);
//Ogre::Camera::getDerivedOrientation() const
void camera_get_derived_orientation(CameraHandle handle, ref coiQuaternion orientation);
//Ogre::Camera::getDerivedPosition() const
void camera_get_derived_position(CameraHandle handle, ref coiVector3 position);
//Ogre::Camera::getDerivedDirection() const
void camera_get_derived_direction(CameraHandle handle, ref coiVector3 direction);
//Ogre::Camera::getDerivedUp() const
void camera_get_derived_up(CameraHandle handle, ref coiVector3 up);
//Ogre::Camera::getDerivedRight() const
void camera_get_derived_right(CameraHandle handle, ref coiVector3 right);
//Ogre::Camera::setAutoTracking(bool, Ogre::SceneNode*, Ogre::Vector3 const&)
void camera_set_autotracking(CameraHandle handle, int on, SceneNodeHandle sn_handle, const ref coiVector3 offset);
//Ogre::Camera::setLodBias(float)
void camera_set_lod_bias(CameraHandle handle, coiReal factor);
//Ogre::Camera::getLodBias() const
coiReal camera_get_lod_bias(CameraHandle handle);
//Ogre::Camera::getCameraToViewportRay(float, float, Ogre::Ray*) const
// screenx/screeny are normalized screen coordinates; the result is written into the caller-supplied `ray`.
void camera_get_camera_to_viewport_ray(CameraHandle handle, coiReal screenx, coiReal screeny, RayHandle ray);
//Ogre::Camera::setWindow(float, float, float, float)
void camera_set_window(CameraHandle handle, coiReal left, coiReal top, coiReal right, coiReal bottom);
SceneManagerHandle camera_get_scenemanager(CameraHandle handle);
// Entity
// Ogre::Entity bindings. create_entity presumably uses an implicit/default
// scene manager (no handle parameter) -- see scenemanager_create_entity for
// the explicit variant.
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
void entity_set_cast_shadows(EntityHandle handle, int enabled); // int as 0/1 boolean
int entity_get_cast_shadows(EntityHandle handle);
int entity_get_receives_shadows(EntityHandle handle);
void entity_set_material_name(EntityHandle handle, const char* material_name, const char* group_name);
//Ogre::Entity::getBoundingBox() const
// Ownership of the returned box handle is not stated here -- TODO confirm
// whether the caller must call destroy_axis_aligned_box on it.
AxisAlignedBoxHandle entity_get_bounding_box(EntityHandle handle);
//Ogre::Entity::getBoundingRadius() const
coiReal entity_get_bounding_radius(EntityHandle handle);
// Light
// Ogre::Light bindings. Colour components are passed via the ColourValue
// struct declared later in this file.
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
void destroy_light(LightHandle handle);
void light_set_type(LightHandle handle, light_types type); // see enum light_types (point/directional/spot)
void light_set_diffuse_colour(LightHandle handle, const ref ColourValue colour);
void light_set_specular_colour(LightHandle handle, const ref ColourValue colour);
// FrameListener
// frame_event_type is presumably a bitmask of the EVENT_FRAME_* constants
// declared at the top of this module -- TODO confirm.
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
// _ctx variants carry an opaque userdata pointer back to the callback.
FrameListenerHandle add_frame_listener_ctx(FrameListenerCtx callback, void* userdata);
void remove_frame_listener_ctx(FrameListenerHandle handle);
// WindowListener
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
WindowListenerHandle add_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerEvent window_event, void* userdata);
void remove_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerHandle listener_handle);
// LogManager
// Ogre::LogManager / Ogre::Log bindings. Log names are C strings; boolean
// options (default_log, debugger_output, ...) are ints.
LogManagerHandle create_log_manager();
// LogManager::getSingletonPtr
LogManagerHandle get_log_manager();
//LogManager::getLog
LogHandle logmanager_get_log(const char* name);
//LogManager::getDefaultLog
LogHandle logmanager_get_default_log();
//LogManager::setDefaultLog
// Returns a log handle -- presumably the previous default log, mirroring
// Ogre::LogManager::setDefaultLog -- TODO confirm.
LogHandle logmanager_set_default_log(LogHandle log_handle);
//LogManager::createLog
LogHandle logmanager_create_log(const char* name, int default_log, int debugger_output, int suppress_file_output);
// n.b., Allows for finer grained control over the log messages at the cost of
// having to supply all these variables. If you don't need this control,
// use log_message above.
//LogManager::logMessage
void logmanager_log_message(const char* message, LogMessageLevel lml, int maskDebug, const char* log_name, int skip_message);
//LogManager::setLogDetail
void logmanager_set_log_detail(LoggingLevel lvl);
//LogManager::destroyLog
void logmanager_destroy_log(const char* name);
//LogManager::destroyLog overload
void logmanager_destroy_log_by_handle(LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener(LogListenerEvent log_event, LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener_ctx(LogListenerCtx log_event, LogHandle log_handle, void* userdata);
//Log::removeListener
void remove_log_listener(LogListenerHandle llh, LogHandle log_handle);
//Log::removeListener
void remove_log_listener_ctx(LogListenerHandle llh, LogHandle log_handle);
// NameValuePairList
// Opaque wrapper over Ogre::NameValuePairList (string-to-string map), used
// e.g. as the params argument of root_create_render_window.
NameValuePairListHandle create_name_value_pair_list();
void add_pair(NameValuePairListHandle params, const char* name, const char* value);
void destroy_name_value_pair_list(NameValuePairListHandle params);
// RenderWindow
// left/top/width/height are presumably relative viewport dimensions (0..1),
// matching Ogre::RenderTarget::addViewport -- TODO confirm.
ViewportHandle render_window_add_viewport(RenderWindowHandle window_handle, CameraHandle camera_handle, int zorder, float left, float top, float width, float height);
int render_window_is_closed(RenderWindowHandle handle);
void render_window_set_active(RenderWindowHandle handle, int state);
void render_window_swap_buffers(RenderWindowHandle handle, int wait_for_vsync);
// pdata receives the attribute value; its required type depends on the
// attribute string (see Ogre::RenderWindow::getCustomAttribute).
void render_window_get_custom_attribute(RenderWindowHandle handle, const char* attribute, void* pdata);
uint render_window_get_width(RenderWindowHandle handle);
uint render_window_get_height(RenderWindowHandle handle);
void renderwindow_get_statistics(RenderWindowHandle handle, ref FrameStats stats); // out: see struct FrameStats
void renderwindow_get_statistics_ex(RenderWindowHandle handle, ref float lastFPS, ref float avgFPS, ref float bestFPS, ref float worstFPS);
// ColourValue
// Each fills `c` with the corresponding Ogre::ColourValue preset constant.
void colourvalue_zero(ref ColourValue c);
void colourvalue_black(ref ColourValue c);
void colourvalue_white(ref ColourValue c);
void colourvalue_red(ref ColourValue c);
void colourvalue_green(ref ColourValue c);
void colourvalue_blue(ref ColourValue c);
// Vector3
// Ogre::Vector3 operators exposed as free functions over the POD coiVector3.
// Comparisons return int as 0/1 boolean.
//Vector3::operator !=
int vector3_notequals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator ==
int vector3_equals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +
coiVector3 vector3_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +=
// NOTE(review): lhs is passed by value here, so an in-place update cannot be
// observed by the caller -- presumably it should be `ref`; confirm against
// the C implementation.
void vector3_update_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -=
// NOTE(review): same by-value concern as vector3_update_add_vector3.
void vector3_update_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_negate(coiVector3 v3);
// Vector3::operator/
coiVector3 vector3_divide_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::operator*
coiVector3 vector3_multiply_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::isNaN
int vector3_is_nan(coiVector3 v3);
//Vector3::primaryAxis
coiVector3 vector3_primary_axis(coiVector3);
// Vector3::ZERO
// The remaining functions return Ogre's Vector3 named constants.
coiVector3 vector3_ZERO();
coiVector3 vector3_UNIT_X();
coiVector3 vector3_UNIT_Y();
coiVector3 vector3_UNIT_Z();
coiVector3 vector3_NEGATIVE_UNIT_X();
coiVector3 vector3_NEGATIVE_UNIT_Y();
coiVector3 vector3_NEGATIVE_UNIT_Z();
coiVector3 vector3_UNIT_SCALE();
// Plane
// Ogre::Plane bindings. Planes created here must be released with
// plane_destroy_plane.
PlaneHandle plane_create_plane();
PlaneHandle plane_create_plane_normal(float x, float y, float z, float distance); // normal (x,y,z) + distance d
void plane_destroy_plane(PlaneHandle handle);
void plane_get_normal(PlaneHandle handle, coiVector3* normal); // out: plane normal
void plane_set_normal(PlaneHandle handle, const coiVector3* normal);
coiReal plane_get_d(PlaneHandle handle); // d term of the plane equation
void plane_set_d(PlaneHandle handle, coiReal d);
// PlaneList (typedef vector<Plane>::type PlaneList)
PlaneListHandle create_planelist();
void destroy_planelist(PlaneListHandle handle);
// PlaneBoundedVolume
PlaneBoundedVolumeHandle create_planeboundedvolume(plane_side the_outside);
void destroy_planeboundedvolume(PlaneBoundedVolumeHandle handle);
// bool intersects(const AxisAlignedBox&) const
int planeboundedvolume_intersects_axisalignedbox(PlaneBoundedVolumeHandle handle, AxisAlignedBoxHandle query);
// bool intersects(const Sphere&) const
int planeboundedvolume_intersects_sphere(PlaneBoundedVolumeHandle handle, SphereHandle query);
// std::pair<bool, Real> intersects(const Ray&) const
// result receives the (hit?, distance) pair -- see struct ray_pair.
void planeboundedvolume_intersects_ray(PlaneBoundedVolumeHandle handle, RayHandle query, ref ray_pair result);
// MeshManager
// Wraps Ogre::MeshManager::createPlane: builds a plane mesh resource in the
// given resource group. `normals` is an int boolean; buffer usage flags come
// from enum hardware_buffer_usage below.
MeshHandle meshmanager_create_plane(const char* name, const char* group_name,
PlaneHandle plane, float width,
float height, int xsegments, int ysegments,
int normals, ushort num_tex_coord_sets,
float utile, float vtile, ref coiVector3 up_vector,
hardware_buffer_usage vertex_buffer_usage,
hardware_buffer_usage index_buffer_usage,
int vertex_shadow_buffer, int index_shadow_buffer);
// Ogre::Timer
// Obtain a handle via root_get_timer (declared later in this file).
int timer_set_option(TimerHandle handle, const char* key, void* value);
ulong timer_get_milliseconds(TimerHandle handle); // wall clock since reset
ulong timer_get_microseconds(TimerHandle handle);
ulong timer_get_milliseconds_cpu(TimerHandle handle); // CPU time variants
ulong timer_get_microseconds_cpu(TimerHandle handle);
void timer_reset(TimerHandle handle);
// Ogre::AxisAlignedBox
// Boxes created by these factories must be released with
// destroy_axis_aligned_box.
AxisAlignedBoxHandle create_axis_aligned_box();
AxisAlignedBoxHandle create_axis_aligned_box_ex(Extent e); // see enum Extent (null/finite/infinite)
AxisAlignedBoxHandle create_axis_aligned_box_v3(const ref coiVector3 min, const ref coiVector3 max);
void destroy_axis_aligned_box(AxisAlignedBoxHandle handle);
void axisalignedbox_get_size(AxisAlignedBoxHandle handle, ref coiVector3 size); // out: max - min
void axisalignedbox_get_minimum(AxisAlignedBoxHandle handle, ref coiVector3 minimum);
void axisalignedbox_get_maximum(AxisAlignedBoxHandle handle, ref coiVector3 maximum);
void axisalignedbox_set_minimum_x(AxisAlignedBoxHandle handle, coiReal x);
void axisalignedbox_set_minimum_y(AxisAlignedBoxHandle handle, coiReal y);
void axisalignedbox_set_minimum_z(AxisAlignedBoxHandle handle, coiReal z);
void axisalignedbox_set_minimum(AxisAlignedBoxHandle handle, const ref coiVector3 min);
void axisalignedbox_set_maximum(AxisAlignedBoxHandle handle, const ref coiVector3 max);
void axisalignedbox_set_maximum_x(AxisAlignedBoxHandle handle, coiReal x);
void axisalignedbox_set_maximum_y(AxisAlignedBoxHandle handle, coiReal y);
void axisalignedbox_set_maximum_z(AxisAlignedBoxHandle handle, coiReal z);
void axisalignedbox_set_extents(AxisAlignedBoxHandle handle, const ref coiVector3 min, const ref coiVector3 max);
void axisalignedbox_get_corner(AxisAlignedBoxHandle handle, CornerEnum e, ref coiVector3 corner); // see enum CornerEnum
//Ogre::Ray
RayHandle create_ray(const ref coiVector3 origin, const ref coiVector3 direction);
void destroy_ray(RayHandle handle);
//Ray::setOrigin
void ray_set_origin(RayHandle handle, const ref coiVector3 origin);
//Ray::getOrigin
void ray_get_origin(RayHandle handle, ref coiVector3 origin);
//Ray::setDirection
void ray_set_direction(RayHandle handle, const ref coiVector3 direction);
//Ray::getDirection
void ray_get_direction(RayHandle handle, ref coiVector3 direction);
//Ray::getPoint
// point = origin + units * direction
void ray_get_point(RayHandle handle, coiReal units, ref coiVector3 point);
//Ray::intersects(Plane)
// result receives (hit?, distance along ray) -- see struct ray_pair.
void ray_intersects_plane(RayHandle handle, PlaneHandle plane_handle, ref ray_pair result);
//Ray::intersects(AxisAlignedBox)
void ray_intersects_axisalignedbox(RayHandle handle, AxisAlignedBoxHandle query_handle, ref ray_pair result);
//Ray::intersects(Sphere)
void ray_intersects_sphere(RayHandle handle, SphereHandle query_handle, ref ray_pair result);
// Ogre::Sphere
SphereHandle create_sphere(const ref coiVector3 center, coiReal radius);
void destroy_sphere(SphereHandle handle);
//void setRadius(Real)
void sphere_set_radius(SphereHandle handle, coiReal radius);
//Real getRadius(void) const
coiReal sphere_get_radius(SphereHandle handle);
//void setCenter(Vector3)
void sphere_set_center(SphereHandle handle, const ref coiVector3 center);
//Real getCenter(void) const
void sphere_get_center(SphereHandle handle, ref coiVector3 center); // out: center point
// bool intersects(const Sphere&) const
int sphere_intersects_sphere(SphereHandle handle, SphereHandle query);
// bool intersects(const AxisAlignedBox&) const
int sphere_intersects_axisalignedbox(SphereHandle handle, AxisAlignedBoxHandle query);
// bool intersects(const Plane&) const
int sphere_intersects_plane(SphereHandle handle, PlaneHandle query);
// bool intersects(const Vector3&) const
int sphere_intersects_vector3(SphereHandle handle, const ref coiVector3 query);
// void merge(const Sphere&)
void sphere_merge(SphereHandle handle, SphereHandle other_sphere);
// Ogre::SceneQuery
// SceneQuery::setQueryMask(uint32 mask)
void scenequery_set_query_mask(SceneQueryHandle handle, uint32 mask);
//uint32 SceneQuery::getQueryMask(void) const
uint32 scenequery_get_query_mask(SceneQueryHandle handle);
//void SceneQuery::setWorldFragmentType(enum WorldFragmentType wft);
void scenequery_set_world_fragment_type(SceneQueryHandle handle, world_fragment_type wft);
//WorldFragmentType SceneQuery::getWorldFragmentType(void) const;
world_fragment_type scenequery_get_world_fragment_type(SceneQueryHandle handle);
// SceneQueryListener
// Callbacks receive the opaque userdata pointer supplied here.
SceneQueryListenerHandle create_scenequerylistener(SceneQueryFragmentResult fragment_callback, SceneQueryObjectResult object_callback, void* userdata);
void destroy_scenequerylistener(SceneQueryListenerHandle handle);
size_t scenequeryresult_movables_count(SceneQueryResultHandle handle);
MovableObjectHandle scenequeryresult_movables_at(SceneQueryResultHandle handle, int index);
// NOTE(review): the `index` parameter on a *_count function looks suspicious
// -- presumably a copy/paste leftover; confirm against the C implementation.
size_t scenequeryresult_worldfragments_count(SceneQueryResultHandle handle, int index);
void scenequeryresult_worldfragments_at(SceneQueryResultHandle handle, int index, ref world_fragment result);
RaySceneQueryListenerHandle create_rayscenequerylistener(RaySceneQueryFragmentResult fragment_callback, RaySceneQueryObjectResult object_callback, void* userdata);
void destroy_rayscenequerylistener(RaySceneQueryListenerHandle handle);
//setRay
void rayscenequery_set_ray(RaySceneQueryHandle handle, RayHandle ray_handle);
//getRay
RayHandle rayscenequery_get_ray(RaySceneQueryHandle handle);
//void setSortByDistance(bool sort, ushort maxresults = 0);
void rayscenequery_set_sort_by_distance(RaySceneQueryHandle handle, int on, ushort maxresults);
//bool getSortByDistance(void) const;
int rayscenequery_get_sort_by_distance(RaySceneQueryHandle handle);
//ushort getMaxResults(void) const;
ushort rayscenequery_get_max_results(RaySceneQueryHandle handle);
// typedef vector<RaySceneQueryResultEntry>::type RaySceneQueryResult;
size_t rayscenequeryresult_count(RaySceneQueryResultHandle handle);
void rayscenequeryresult_at(RaySceneQueryResultHandle handle, int index, ref rayscenequery_result_entry result); // out: see struct rayscenequery_result_entry
// NOTE(review): a stray `ditto` token sat here -- it is invalid at module
// scope in D and looks like a concatenation artifact; everything below this
// line is a second copy of this same interface file (duplicate license
// header, module declaration, and declarations) and should be deduplicated.
/******************************************************************************
* ogre_interface.d - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
// NOTE(review): duplicate `module` declaration -- this file already opens
// with `module llcoi.ogre_interface;`; a second module statement will not
// compile. The whole section below repeats the first copy of the file.
module llcoi.ogre_interface;
extern(C):
// Ogre::Real is configured as float in this binding.
alias float coiReal;
// Bit flags selecting which frame events a FrameListenerEvent receives.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// From OgrePlatform.h
// Fixed-width integer aliases matching Ogre's typedefs.
alias uint uint32;
alias ushort uint16;
alias ubyte uint8;
alias int int32;
alias short int16;
alias byte int8;
// OgreSceneManager.h
alias ushort SceneTypeMask;
// OgreColourValue.h
// Packed 32-bit colour layouts, distinguished by channel order.
alias uint32 RGBA;
alias uint32 ARGB;
alias uint32 ABGR;
alias uint32 BGRA;
// Opaque handle aliases: every wrapped Ogre object crosses the C boundary
// as an untyped pointer. D gains no type safety from these -- all aliases
// are interchangeable void* at the language level.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* RenderSystemListHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
alias void* LogManagerHandle;
alias void* LogHandle;
alias void* LogListenerHandle;
alias void* NameValuePairListHandle;
alias void* FrameListenerHandle;
alias void* PlaneHandle;
alias void* PlaneListHandle;
alias void* PlaneBoundedVolumeHandle;
alias void* MeshHandle;
alias void* TimerHandle;
alias void* WindowListenerHandle;
alias void* AxisAlignedBoxHandle;
alias void* RayHandle;
alias void* SphereHandle;
alias void* SceneQueryHandle;
alias void* RaySceneQueryHandle;
alias void* RaySceneQueryResultHandle;
alias void* SceneQueryListenerHandle;
alias void* RaySceneQueryListenerHandle;
alias void* SceneQueryResultHandle;
alias void* MovableObjectHandle;
alias void* RenderOperationHandle;
// listener typedefs
// Callback signatures invoked from the C side (extern(C) linkage applies).
// Int returns on query callbacks presumably mean continue/stop iteration,
// mirroring Ogre's SceneQueryListener -- TODO confirm.
alias int function(float,float,int) FrameListenerEvent;
alias void function(RenderWindowHandle) WindowListenerEvent;
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message) LogListenerEvent;
alias void function(const char* message, int lml, int maskDebug, const char* log_name, int skip_message, void* userdata) LogListenerCtx;
// Context-carrying variant: frame_type is one of the EVENT_FRAME_* flags.
alias int function(const ref FrameEvent evt, int frame_type, void* userdata) FrameListenerCtx;
alias int function(const ref world_fragment frag, void* userdata) SceneQueryFragmentResult;
alias int function(MovableObjectHandle handle, void* userdata) SceneQueryObjectResult;
alias int function(const ref world_fragment frag, coiReal distance, void* userdata) RaySceneQueryFragmentResult;
alias int function(MovableObjectHandle handle, coiReal distance, void* userdata) RaySceneQueryObjectResult;
// POD mirror of Ogre::Quaternion; note w comes first, matching Ogre's layout.
struct coiQuaternion
{
float w;
float x;
float y;
float z;
}
// POD mirror of Ogre::Vector3.
struct coiVector3
{
float x;
float y;
float z;
}
// Mirror of Ogre::ViewPoint (position + orientation pair).
struct ViewPoint
{
coiVector3 position;
coiQuaternion orientation;
};
// Mirror of Ogre::FrameEvent: elapsed times in seconds between events/frames.
struct FrameEvent
{
coiReal timeSinceLastEvent;
coiReal timeSinceLastFrame;
}
// C-friendly stand-in for std::pair<bool, Real> returned by the various
// intersects() wrappers: intersects is 0/1, distance is along the ray.
struct ray_pair
{
int intersects;
coiReal distance;
}
// Mirror of Ogre::ColourValue; channels presumably in 0..1 -- TODO confirm.
struct ColourValue
{
float r;
float g;
float b;
float a;
}
// Startup options consumed by init_engine; fill via default_engine_options
// first, then override fields as needed.
// NOTE(review): the trailing ';' after some struct braces is C-style and may
// be rejected as an empty declaration by some D compilers -- confirm.
struct engine_options
{
const char* renderer_s;
const char* plugin_folder_s;
const char* window_title;
const char* log_name;
int width, height, auto_window;
};
// Mirror of Ogre::RenderTarget::FrameStats, filled by
// renderwindow_get_statistics.
struct FrameStats
{
float lastFPS;
float avgFPS;
float bestFPS;
float worstFPS;
ulong bestFrameTime;
ulong worstFrameTime;
size_t triangleCount;
size_t batchCount;
};
// Mirror of Ogre::SceneQuery::WorldFragment; which members are valid depends
// on fragment_type (see enum world_fragment_type).
struct world_fragment
{
world_fragment_type fragment_type;
coiVector3 single_intersection;
PlaneListHandle planes;
void* geometry;
RenderOperationHandle render_op;
}
// Mirror of Ogre::RaySceneQueryResultEntry: one hit from a ray scene query.
struct rayscenequery_result_entry
{
coiReal distance;
MovableObjectHandle movable;
world_fragment* fragment; // null unless world geometry was hit -- TODO confirm
}
// Mirror of Ogre::LoggingLevel: how much detail the log records.
enum LoggingLevel
{
LL_LOW = 1,
LL_NORMAL = 2,
LL_BOREME = 3
};
// Mirror of Ogre::LogMessageLevel: importance of a single message.
enum LogMessageLevel
{
LML_TRIVIAL = 1,
LML_NORMAL = 2,
LML_CRITICAL = 3
};
// Bit flags selecting which statistics to display/collect.
enum stat_flags
{
SF_NONE = 0,
SF_FPS = 1,
SF_AVG_FPS = 2,
SF_BEST_FPS = 4,
SF_WORST_FPS = 8,
SF_TRIANGLE_COUNT = 16,
SF_ALL = 0xFFFF
};
// Mirror of Ogre::FrameBuffer: which buffer of a render target to address.
enum frame_buffer
{
FB_FRONT,
FB_BACK,
FB_AUTO
};
// Mirror of Ogre::SceneType (bit flags); combinable into a SceneTypeMask.
enum scene_type
{
ST_GENERIC = 1,
ST_EXTERIOR_CLOSE = 2,
ST_EXTERIOR_FAR = 4,
ST_EXTERIOR_REAL_FAR = 8,
ST_INTERIOR = 16
};
// Mirror of Ogre::HardwareBuffer::Usage; combined values are bitwise ORs of
// the basic flags (e.g. HBU_STATIC_WRITE_ONLY = STATIC | WRITE_ONLY).
enum hardware_buffer_usage
{
HBU_STATIC = 1,
HBU_DYNAMIC = 2,
HBU_WRITE_ONLY = 4,
HBU_DISCARDABLE = 8,
HBU_STATIC_WRITE_ONLY = 5,
HBU_DYNAMIC_WRITE_ONLY = 6,
HBU_DYNAMIC_WRITE_ONLY_DISCARDABLE = 14
}
// Mirror of Ogre::Light::LightTypes.
enum light_types
{
/// Point light sources give off light equally in all directions, so require only position not direction
LT_POINT = 0,
/// Directional lights simulate parallel light beams from a distant source, hence have direction but no position
LT_DIRECTIONAL = 1,
/// Spotlights simulate a cone of light from a source so require position and direction, plus extra values for falloff
LT_SPOTLIGHT = 2
};
// Mirror of Ogre::Node::TransformSpace: coordinate space for node transforms.
enum transform_space
{
TS_LOCAL,
TS_PARENT,
TS_WORLD
};
// Mirror of Ogre::AxisAlignedBox::Extent.
enum Extent
{
EXTENT_NULL,
EXTENT_FINITE,
EXTENT_INFINITE
};
// Mirror of Ogre::AxisAlignedBox::CornerEnum; note the deliberately
// non-sequential values, matching Ogre's corner numbering.
enum CornerEnum
{
FAR_LEFT_BOTTOM = 0,
FAR_LEFT_TOP = 1,
FAR_RIGHT_TOP = 2,
FAR_RIGHT_BOTTOM = 3,
NEAR_RIGHT_BOTTOM = 7,
NEAR_LEFT_BOTTOM = 6,
NEAR_LEFT_TOP = 5,
NEAR_RIGHT_TOP = 4
};
// Mirror of Ogre::Plane::Side: which side of a plane a point/volume lies on.
enum plane_side
{
NO_SIDE,
POSITIVE_SIDE,
NEGATIVE_SIDE,
BOTH_SIDE
};
// Mirror of Ogre::SceneQuery::WorldFragmentType: what form world-geometry
// query results take (selects the valid member of struct world_fragment).
enum world_fragment_type
{
/// Return no world geometry hits at all
WFT_NONE,
/// Return pointers to convex plane-bounded regions
WFT_PLANE_BOUNDED_REGION,
/// Return a single intersection point (typically RaySceneQuery only)
WFT_SINGLE_INTERSECTION,
/// Custom geometry as defined by the SceneManager
WFT_CUSTOM_GEOMETRY,
/// General RenderOperation structure
WFT_RENDER_OPERATION
};
// Root functions
// Engine lifecycle: default_engine_options -> init_engine -> render loop ->
// release_engine. The create_root path is the lower-level alternative.
void release_engine();
void default_engine_options(engine_options* options); // fills options with defaults
void init_engine(const engine_options options); // NOTE: struct passed by value
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
TimerHandle root_get_timer();
// full_screen is an int boolean.
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
// Embeds the render window in an existing native window (hwnd).
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
// NOTE(review): returns uint although hwnd is taken as ulong above -- a
// 64-bit handle would be truncated; confirm against the C side.
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
void save_config();
int restore_config();
int show_config_dialog();
void load_ogre_plugin(const char * plugin);
// Doesn't use OgreManager. Can still throw if type_name doesn't exist.
SceneManagerHandle root_create_scene_manager(const char* type_name, const char* instance_name);
// Doesn't use OgreManager. If a specific scene manager is not found,
// the default implementation is always returned.
SceneManagerHandle root_create_scene_manager_by_mask(SceneTypeMask type_mask, const char* instance_name);
// Does use OgreManager.
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
void render_loop(); // blocks, presumably until the window closes -- TODO confirm
void pump_messages();
void log_message(const char* message); // simple logging; see logmanager_log_message for fine control
RenderWindowHandle root_create_render_window(const char* name, uint width, uint height, int fullscreen, NameValuePairListHandle params);
RenderSystemListHandle root_get_available_renderers(); // release with destroy_render_system_list
// Ogre::SceneManager calls
EntityHandle scenemanager_create_entity(SceneManagerHandle handle, const char* name, const char* mesh_name, const char* group_name);
SceneNodeHandle scenemanager_get_root_scene_node(SceneManagerHandle handle);
LightHandle scenemanager_create_light(SceneManagerHandle handle, const char* name);
// Wraps Ogre::SceneManager::setSkyBox / setSkyDome; orientation may
// presumably be null for the default -- TODO confirm.
void scenemanager_set_sky_box(SceneManagerHandle handle, int enable, const char* material_name, float distance,
int draw_first, const coiQuaternion* orientation,
const char* group_name);
void scenemanager_set_sky_dome(SceneManagerHandle handle, int enable, const char* material_name, float curvature,
float tiling, float distance, int draw_first, const coiQuaternion* orientation,
int xsegments, int ysegments, int ysegments_keep, const char* group_name);
const(char*) scenemanager_get_name(SceneManagerHandle handle);
//void SceneManager::destroyQuery(Ogre::SceneQuery* query);
void scenemanager_destroy_scenequery(SceneManagerHandle handle, SceneQueryHandle query);
// Ogre::SceneManager::createRayQuery(Ogre::Ray const&, unsigned long)
// Creates a ray scene query on the given scene manager; release it with
// scenemanager_destroy_scenequery above.
// FIX: first parameter was declared SceneQueryHandle; by parallel with
// scenemanager_destroy_scenequery and Ogre's SceneManager::createRayQuery it
// is the owning scene manager. Both aliases are void*, so this is ABI- and
// caller-compatible.
RaySceneQueryHandle scenemanager_create_rayquery(SceneManagerHandle handle, RayHandle ray_handle, ulong mask);
// RenderSystem functions
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
const(char*) render_system_get_name(RenderSystemHandle handle);
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
// Accessors for the list returned by root_get_available_renderers.
uint render_system_list_size(RenderSystemListHandle list_handle);
RenderSystemHandle render_system_list_get(RenderSystemListHandle list_handle, uint at);
void destroy_render_system_list(RenderSystemListHandle handle);
// SceneManager functions
// These operate on an implicit/default scene manager (no handle parameter).
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes
// Ogre::SceneNode bindings. Angles: *_degree variants take degrees; the
// plain roll/pitch/yaw variants take radians (per their parameter names).
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index); // lookup by index
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name); // lookup by name
ushort scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade); // cascade: also apply to children
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle); // owning SceneManager
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z); // quaternion, w first
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_get_position(SceneNodeHandle handle, ref coiVector3 pos); // out: local position
void scenenode_set_derived_position(SceneNodeHandle handle, const ref coiVector3 pos); // world-space
void scenenode_get_derived_position(SceneNodeHandle handle, ref coiVector3 pos); // out: world-space position
void scenenode_yaw_degree(SceneNodeHandle handle, coiReal angle, transform_space relative_to);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z); // absolute scale
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z); // relative (multiplies current)
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z, transform_space relative_to);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians, transform_space relative_to);
SceneNodeHandle scenenode_create_child_scenenode(SceneNodeHandle handle, const char* name, const ref coiVector3 translate, const ref coiQuaternion rotate);
// Viewports
// get_top/left/width/height return relative (0..1) dimensions; the
// *_actual_* variants return pixel values.
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b, float a);
void viewport_set_background_colour_cv(ViewportHandle viewport_handle, ref ColourValue cv);
void viewport_set_auto_updated(ViewportHandle handle, int autoupdate);
int viewport_is_auto_updated(ViewportHandle handle);
float viewport_get_top(ViewportHandle handle);
float viewport_get_left(ViewportHandle handle);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
int viewport_get_actual_top(ViewportHandle handle);
int viewport_get_actual_left(ViewportHandle handle);
int viewport_get_actual_width(ViewportHandle handle);
int viewport_get_actual_height(ViewportHandle handle);
//Ogre::Viewport::setDimensions(float, float, float, float)
void viewport_set_dimensions(ViewportHandle handle, coiReal left, coiReal top, coiReal width, coiReal height);
//Ogre::Viewport::getActualDimensions(int&, int&, int&, int&) const
void viewport_get_actual_dimensions(ViewportHandle handle, ref int left, ref int top, ref int width, ref int height);
//Ogre::Viewport::getBackgroundColour() const
void viewport_get_background_colour(ViewportHandle handle, ref ColourValue cv); // out: current background colour
// Resource management
// Wraps Ogre::ResourceGroupManager; resources_cfg is a resources.cfg path.
void setup_resources(const char* resources_cfg);
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Accessors for Ogre::ResourceGroupManager's named group constants.
const(char*) resourcegroupmanager_DEFAULT_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_INTERNAL_RESOURCE_GROUP_NAME();
const(char*) resourcegroupmanager_AUTODETECT_RESOURCE_GROUP_NAME();
size_t resourcegroupmanager_RESOURCE_SYSTEM_NUM_REFERENCE_COUNTS();
// Camera
// NOTE(review): this section belongs to the second, duplicated copy of the
// file (see the repeated license header above). It re-declares the camera
// API; camera_rotate / camera_rotate_q here use `const ref` where the first
// copy used pointers -- only one copy should survive deduplication.
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
void camera_move(CameraHandle handle, const float x, const float y, const float z);
void camera_move_relative(CameraHandle handle, const float x, const float y, const float z);
void camera_set_direction(CameraHandle handle, const float x, const float y, const float z, transform_space relative_to);
void camera_get_direction(CameraHandle handle, ref coiVector3 v3); // out: facing direction
void camera_get_up(CameraHandle handle, ref coiVector3 up);
void camera_get_right(CameraHandle handle, ref coiVector3 right);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h); // ratio = w / h
void camera_set_aspect_ratio_ex(CameraHandle handle, float ratio);
float camera_get_aspect_ratio(CameraHandle handle);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
void camera_set_fovy(CameraHandle camera_handle, float angle); // vertical field of view
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_get_position(CameraHandle handle, ref coiVector3 result);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_roll(CameraHandle handle, coiReal angle);
void camera_yaw(CameraHandle handle, coiReal angle);
void camera_pitch(CameraHandle handle, coiReal angle);
void camera_rotate(CameraHandle handle, const ref coiVector3 axis, coiReal angle);
void camera_rotate_q(CameraHandle handle, const ref coiQuaternion q);
//Ogre::Camera::setFixedYawAxis(bool, Ogre::Vector3 const&)
void camera_set_fixed_yaw_axis(CameraHandle handle, int on, const ref coiVector3 fixed_axis);
//Ogre::Camera::getOrientation() const
void camera_get_orientation(CameraHandle handle, ref coiQuaternion orientation);
//Ogre::Camera::setOrientation(Ogre::Quaternion const&)
void camera_set_orientation(CameraHandle handle, const ref coiQuaternion orientation);
//Ogre::Camera::getDerivedOrientation() const
void camera_get_derived_orientation(CameraHandle handle, ref coiQuaternion orientation);
//Ogre::Camera::getDerivedPosition() const
void camera_get_derived_position(CameraHandle handle, ref coiVector3 position);
//Ogre::Camera::getDerivedDirection() const
void camera_get_derived_direction(CameraHandle handle, ref coiVector3 direction);
//Ogre::Camera::getDerivedUp() const
void camera_get_derived_up(CameraHandle handle, ref coiVector3 up);
//Ogre::Camera::getDerivedRight() const
void camera_get_derived_right(CameraHandle handle, ref coiVector3 right);
//Ogre::Camera::setAutoTracking(bool, Ogre::SceneNode*, Ogre::Vector3 const&)
void camera_set_autotracking(CameraHandle handle, int on, SceneNodeHandle sn_handle, const ref coiVector3 offset);
//Ogre::Camera::setLodBias(float)
void camera_set_lod_bias(CameraHandle handle, coiReal factor);
//Ogre::Camera::getLodBias() const
coiReal camera_get_lod_bias(CameraHandle handle);
//Ogre::Camera::getCameraToViewportRay(float, float, Ogre::Ray*) const
void camera_get_camera_to_viewport_ray(CameraHandle handle, coiReal screenx, coiReal screeny, RayHandle ray);
//Ogre::Camera::setWindow(float, float, float, float)
void camera_set_window(CameraHandle handle, coiReal left, coiReal top, coiReal right, coiReal bottom);
SceneManagerHandle camera_get_scenemanager(CameraHandle handle);
// Entity — wrappers over Ogre::Entity; handles are opaque pointers.
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
void entity_set_cast_shadows(EntityHandle handle, int enabled);
int entity_get_cast_shadows(EntityHandle handle);
int entity_get_receives_shadows(EntityHandle handle);
void entity_set_material_name(EntityHandle handle, const char* material_name, const char* group_name);
//Ogre::Entity::getBoundingBox() const
AxisAlignedBoxHandle entity_get_bounding_box(EntityHandle handle);
//Ogre::Entity::getBoundingRadius() const
coiReal entity_get_bounding_radius(EntityHandle handle);
// Light — wrappers over Ogre::Light; destroy_light releases the handle.
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
void destroy_light(LightHandle handle);
void light_set_type(LightHandle handle, light_types type);
void light_set_diffuse_colour(LightHandle handle, const ref ColourValue colour);
void light_set_specular_colour(LightHandle handle, const ref ColourValue colour);
// FrameListener — the _ctx variants carry an opaque userdata pointer and
// return a handle used for later removal; the plain variants are keyed on
// the callback itself.
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
FrameListenerHandle add_frame_listener_ctx(FrameListenerCtx callback, void* userdata);
void remove_frame_listener_ctx(FrameListenerHandle handle);
// WindowListener — same plain/_ctx split as the frame listeners above.
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
WindowListenerHandle add_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerEvent window_event, void* userdata);
void remove_window_listener_ctx(RenderWindowHandle window_handle, WindowListenerHandle listener_handle);
// LogManager — wrappers over Ogre::LogManager (a singleton on the C++ side,
// per the getSingletonPtr comment below).
LogManagerHandle create_log_manager();
// LogManager::getSingletonPtr
LogManagerHandle get_log_manager();
//LogManager::getLog
LogHandle logmanager_get_log(const char* name);
//LogManager::getDefaultLog
LogHandle logmanager_get_default_log();
//LogManager::setDefaultLog
LogHandle logmanager_set_default_log(LogHandle log_handle);
//LogManager::createLog
LogHandle logmanager_create_log(const char* name, int default_log, int debugger_output, int suppress_file_output);
// n.b., Allows for finer grained control over the log messages at the cost of
// having to supply all these variables. If you don't need this control,
// use log_message above.
//LogManager::logMessage
void logmanager_log_message(const char* message, LogMessageLevel lml, int maskDebug, const char* log_name, int skip_message);
//LogManager::setLogDetail
void logmanager_set_log_detail(LoggingLevel lvl);
//LogManager::destroyLog
void logmanager_destroy_log(const char* name);
//LogManager::destroyLog overload
void logmanager_destroy_log_by_handle(LogHandle log_handle);
// Log listeners: _ctx variant carries opaque userdata for the callback.
//Log::addListener
LogListenerHandle add_log_listener(LogListenerEvent log_event, LogHandle log_handle);
//Log::addListener
LogListenerHandle add_log_listener_ctx(LogListenerCtx log_event, LogHandle log_handle, void* userdata);
//Log::removeListener
void remove_log_listener(LogListenerHandle llh, LogHandle log_handle);
//Log::removeListener
void remove_log_listener_ctx(LogListenerHandle llh, LogHandle log_handle);
// NameValuePairList — string key/value parameter list; create/destroy pair
// brackets the handle's lifetime.
NameValuePairListHandle create_name_value_pair_list();
void add_pair(NameValuePairListHandle params, const char* name, const char* value);
void destroy_name_value_pair_list(NameValuePairListHandle params);
// RenderWindow — wrappers over Ogre::RenderWindow.
// Viewport rectangle (left/top/width/height) is float — presumably relative
// (0..1) coordinates as in Ogre::RenderTarget::addViewport; confirm in impl.
ViewportHandle render_window_add_viewport(RenderWindowHandle window_handle, CameraHandle camera_handle, int zorder, float left, float top, float width, float height);
int render_window_is_closed(RenderWindowHandle handle);
void render_window_set_active(RenderWindowHandle handle, int state);
void render_window_swap_buffers(RenderWindowHandle handle, int wait_for_vsync);
// Writes the named attribute's value through pdata (caller supplies storage).
void render_window_get_custom_attribute(RenderWindowHandle handle, const char* attribute, void* pdata);
uint render_window_get_width(RenderWindowHandle handle);
uint render_window_get_height(RenderWindowHandle handle);
// Frame statistics: full struct, or the four FPS figures via ref out-params.
void renderwindow_get_statistics(RenderWindowHandle handle, ref FrameStats stats);
void renderwindow_get_statistics_ex(RenderWindowHandle handle, ref float lastFPS, ref float avgFPS, ref float bestFPS, ref float worstFPS);
// ColourValue — each fills `c` with the named Ogre::ColourValue constant.
void colourvalue_zero(ref ColourValue c);
void colourvalue_black(ref ColourValue c);
void colourvalue_white(ref ColourValue c);
void colourvalue_red(ref ColourValue c);
void colourvalue_green(ref ColourValue c);
void colourvalue_blue(ref ColourValue c);
// Vector3 — value-type wrappers over Ogre::Vector3 operators. All take and
// return coiVector3 by value.
//Vector3::operator !=
int vector3_notequals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator ==
int vector3_equals_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +
coiVector3 vector3_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator +=
// NOTE(review): lhs is passed by value, so an in-place "+=" cannot reach the
// caller from this declaration — verify the C header (likely takes a pointer).
void vector3_update_add_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -=
// NOTE(review): same by-value concern as vector3_update_add_vector3 above.
void vector3_update_subtract_vector3(coiVector3 lhs, coiVector3 rhs);
//Vector3::operator -
coiVector3 vector3_negate(coiVector3 v3);
// Vector3::operator/
coiVector3 vector3_divide_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::operator*
coiVector3 vector3_multiply_vector3(coiVector3 lhs, coiVector3 rhs);
// Vector3::isNaN
int vector3_is_nan(coiVector3 v3);
//Vector3::primaryAxis
coiVector3 vector3_primary_axis(coiVector3);
// Vector3::ZERO
// Constant accessors — each returns the named Ogre::Vector3 static constant.
coiVector3 vector3_ZERO();
coiVector3 vector3_UNIT_X();
coiVector3 vector3_UNIT_Y();
coiVector3 vector3_UNIT_Z();
coiVector3 vector3_NEGATIVE_UNIT_X();
coiVector3 vector3_NEGATIVE_UNIT_Y();
coiVector3 vector3_NEGATIVE_UNIT_Z();
coiVector3 vector3_UNIT_SCALE();
// Plane — wrappers over Ogre::Plane; create/destroy bracket the handle's life.
PlaneHandle plane_create_plane();
PlaneHandle plane_create_plane_normal(float x, float y, float z, float distance);
void plane_destroy_plane(PlaneHandle handle);
void plane_get_normal(PlaneHandle handle, ref coiVector3 normal);
void plane_set_normal(PlaneHandle handle, const ref coiVector3 normal);
// d: the plane's distance term, as in Ogre::Plane::d.
coiReal plane_get_d(PlaneHandle handle);
void plane_set_d(PlaneHandle handle, coiReal d);
// PlaneList (typedef vector<Plane>::type PlaneList)
PlaneListHandle create_planelist();
void destroy_planelist(PlaneListHandle handle);
// PlaneBoundedVolume
PlaneBoundedVolumeHandle create_planeboundedvolume(plane_side the_outside);
void destroy_planeboundedvolume(PlaneBoundedVolumeHandle handle);
// bool intersects(const AxisAlignedBox&) const
int planeboundedvolume_intersects_axisalignedbox(PlaneBoundedVolumeHandle handle, AxisAlignedBoxHandle query);
// bool intersects(const Sphere&) const
int planeboundedvolume_intersects_sphere(PlaneBoundedVolumeHandle handle, SphereHandle query);
// std::pair<bool, Real> intersects(const Ray&) const
// C++ pair result is returned through the ref ray_pair out-parameter.
void planeboundedvolume_intersects_ray(PlaneBoundedVolumeHandle handle, RayHandle query, ref ray_pair result);
// MeshManager
// Maps Ogre::MeshManager::createPlane; C++ bool parameters appear here as
// ints (normals, vertex_shadow_buffer, index_shadow_buffer).
MeshHandle meshmanager_create_plane(const char* name, const char* group_name,
                                    PlaneHandle plane, float width,
                                    float height, int xsegments, int ysegments,
                                    int normals, ushort num_tex_coord_sets,
                                    float utile, float vtile, ref coiVector3 up_vector,
                                    hardware_buffer_usage vertex_buffer_usage,
                                    hardware_buffer_usage index_buffer_usage,
                                    int vertex_shadow_buffer, int index_shadow_buffer);
// Ogre::Timer
int timer_set_option(TimerHandle handle, const char* key, void* value);
ulong timer_get_milliseconds(TimerHandle handle);
ulong timer_get_microseconds(TimerHandle handle);
ulong timer_get_milliseconds_cpu(TimerHandle handle);
ulong timer_get_microseconds_cpu(TimerHandle handle);
void timer_reset(TimerHandle handle);
// Ogre::AxisAlignedBox — axis-aligned bounding-box wrappers; min/max corners
// are read/written via coiVector3 or per-component setters.
AxisAlignedBoxHandle create_axis_aligned_box();
AxisAlignedBoxHandle create_axis_aligned_box_ex(Extent e);
AxisAlignedBoxHandle create_axis_aligned_box_v3(const ref coiVector3 min, const ref coiVector3 max);
void destroy_axis_aligned_box(AxisAlignedBoxHandle handle);
void axisalignedbox_get_size(AxisAlignedBoxHandle handle, ref coiVector3 size);
void axisalignedbox_get_minimum(AxisAlignedBoxHandle handle, ref coiVector3 minimum);
void axisalignedbox_get_maximum(AxisAlignedBoxHandle handle, ref coiVector3 maximum);
void axisalignedbox_set_minimum_x(AxisAlignedBoxHandle handle, coiReal x);
void axisalignedbox_set_minimum_y(AxisAlignedBoxHandle handle, coiReal y);
void axisalignedbox_set_minimum_z(AxisAlignedBoxHandle handle, coiReal z);
void axisalignedbox_set_minimum(AxisAlignedBoxHandle handle, const ref coiVector3 min);
void axisalignedbox_set_maximum(AxisAlignedBoxHandle handle, const ref coiVector3 max);
void axisalignedbox_set_maximum_x(AxisAlignedBoxHandle handle, coiReal x);
void axisalignedbox_set_maximum_y(AxisAlignedBoxHandle handle, coiReal y);
void axisalignedbox_set_maximum_z(AxisAlignedBoxHandle handle, coiReal z);
void axisalignedbox_set_extents(AxisAlignedBoxHandle handle, const ref coiVector3 min, const ref coiVector3 max);
void axisalignedbox_get_corner(AxisAlignedBoxHandle handle, CornerEnum e, ref coiVector3 corner);
//Ogre::Ray
// Ray wrappers; intersection results land in the ref ray_pair out-parameter
// (mirrors the C++ std::pair<bool, Real> return).
RayHandle create_ray(const ref coiVector3 origin, const ref coiVector3 direction);
void destroy_ray(RayHandle handle);
//Ray::setOrigin
void ray_set_origin(RayHandle handle, const ref coiVector3 origin);
//Ray::getOrigin
void ray_get_origin(RayHandle handle, ref coiVector3 origin);
//Ray::setDirection
void ray_set_direction(RayHandle handle, const ref coiVector3 direction);
//Ray::getDirection
void ray_get_direction(RayHandle handle, ref coiVector3 direction);
//Ray::getPoint
void ray_get_point(RayHandle handle, coiReal units, ref coiVector3 point);
//Ray::intersects(Plane)
void ray_intersects_plane(RayHandle handle, PlaneHandle plane_handle, ref ray_pair result);
//Ray::intersects(AxisAlignedBox)
void ray_intersects_axisalignedbox(RayHandle handle, AxisAlignedBoxHandle query_handle, ref ray_pair result);
//Ray::intersects(Sphere)
void ray_intersects_sphere(RayHandle handle, SphereHandle query_handle, ref ray_pair result);
// Ogre::Sphere — sphere wrappers; intersects_* return C-style booleans.
SphereHandle create_sphere(const ref coiVector3 center, coiReal radius);
void destroy_sphere(SphereHandle handle);
//void setRadius(Real)
void sphere_set_radius(SphereHandle handle, coiReal radius);
//Real getRadius(void) const
coiReal sphere_get_radius(SphereHandle handle);
//void setCenter(Vector3)
void sphere_set_center(SphereHandle handle, const ref coiVector3 center);
//Real getCenter(void) const
// (Center is returned through the ref out-parameter despite the C++ comment.)
void sphere_get_center(SphereHandle handle, ref coiVector3 center);
// bool intersects(const Sphere&) const
int sphere_intersects_sphere(SphereHandle handle, SphereHandle query);
// bool intersects(const AxisAlignedBox&) const
int sphere_intersects_axisalignedbox(SphereHandle handle, AxisAlignedBoxHandle query);
// bool intersects(const Plane&) const
int sphere_intersects_plane(SphereHandle handle, PlaneHandle query);
// bool intersects(const Vector3&) const
int sphere_intersects_vector3(SphereHandle handle, const ref coiVector3 query);
// void merge(const Sphere&)
void sphere_merge(SphereHandle handle, SphereHandle other_sphere);
// Ogre::SceneQuery — query-mask and world-fragment configuration.
// SceneQuery::setQueryMask(uint32 mask)
void scenequery_set_query_mask(SceneQueryHandle handle, uint32 mask);
//uint32 SceneQuery::getQueryMask(void) const
uint32 scenequery_get_query_mask(SceneQueryHandle handle);
//void SceneQuery::setWorldFragmentType(enum WorldFragmentType wft);
void scenequery_set_world_fragment_type(SceneQueryHandle handle, world_fragment_type wft);
//WorldFragmentType SceneQuery::getWorldFragmentType(void) const;
world_fragment_type scenequery_get_world_fragment_type(SceneQueryHandle handle);
// SceneQueryListener — callback pair plus opaque userdata; results are read
// back via the _count/_at accessors below.
SceneQueryListenerHandle create_scenequerylistener(SceneQueryFragmentResult fragment_callback, SceneQueryObjectResult object_callback, void* userdata);
void destroy_scenequerylistener(SceneQueryListenerHandle handle);
size_t scenequeryresult_movables_count(SceneQueryResultHandle handle);
MovableObjectHandle scenequeryresult_movables_at(SceneQueryResultHandle handle, int index);
// NOTE(review): _count here takes an extra `index` parameter unlike the
// movables variant — looks suspicious; confirm against the C header.
size_t scenequeryresult_worldfragments_count(SceneQueryResultHandle handle, int index);
void scenequeryresult_worldfragments_at(SceneQueryResultHandle handle, int index, ref world_fragment result);
RaySceneQueryListenerHandle create_rayscenequerylistener(RaySceneQueryFragmentResult fragment_callback, RaySceneQueryObjectResult object_callback, void* userdata);
void destroy_rayscenequerylistener(RaySceneQueryListenerHandle handle);
//setRay
void rayscenequery_set_ray(RaySceneQueryHandle handle, RayHandle ray_handle);
//getRay
RayHandle rayscenequery_get_ray(RaySceneQueryHandle handle);
//void setSortByDistance(bool sort, ushort maxresults = 0);
void rayscenequery_set_sort_by_distance(RaySceneQueryHandle handle, int on, ushort maxresults);
//bool getSortByDistance(void) const;
int rayscenequery_get_sort_by_distance(RaySceneQueryHandle handle);
//ushort getMaxResults(void) const;
ushort rayscenequery_get_max_results(RaySceneQueryHandle handle);
// typedef vector<RaySceneQueryResultEntry>::type RaySceneQueryResult;
size_t rayscenequeryresult_count(RaySceneQueryResultHandle handle);
void rayscenequeryresult_at(RaySceneQueryResultHandle handle, int index, ref rayscenequery_result_entry result);
|
<?xml version="1.0" encoding="UTF-8"?>
<xmi:XMI xmi:version="2.0" xmlns:xmi="http://www.omg.org/XMI"/>
Delete mapping.di
|
/******************************************************************************
* ogre_interface.di - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
module ogre_interface;
// Everything below uses C linkage to match the llcoi C library.
extern(C):
// Ogre::Real equivalent (single-precision float build).
alias float coiReal;
// Bit flags selecting which FrameListener events a callback receives.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// Opaque handles: raw pointers to C++ objects owned by the C library.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
// listener typedefs
// FrameListenerEvent presumably receives the frame timings plus the event
// type flag; confirm parameter meaning against the C header.
alias int function(float,float,int) FrameListenerEvent;
alias void function(RenderWindowHandle) WindowListenerEvent;
// Value types marshalled across the C boundary; field layout must match the
// C side exactly (plain floats / ints / C strings).
// Fix: dropped the stray `;` after each closing `}` — that C-ism is an empty
// declaration at module scope in D, which newer compilers reject/deprecate.

// Quaternion components in (w, x, y, z) order — presumably matching
// Ogre::Quaternion's layout; confirm against the C header.
struct coiQuaternion
{
    float w;
    float x;
    float y;
    float z;
}

// 3-component vector used for positions, directions and scales.
struct coiVector3
{
    float x;
    float y;
    float z;
}

// Start-up options filled by default_engine_options() and consumed by
// init_engine().
struct engine_options
{
    const char* renderer_s;       // presumably the render system name — confirm
    const char* plugin_folder_s;  // presumably the Ogre plugin directory — confirm
    const char* window_title;
    const char* log_name;
    int width, height, auto_window; // auto_window: C-style bool (0/1)
}
// Root functions
// Engine lifecycle: init_engine()/release_engine() bracket the engine, or use
// create_root() + root_initialise() for the step-by-step path.
void release_engine();
// Fills `options` with defaults; caller may then tweak before init_engine().
void default_engine_options(engine_options* options);
void init_engine(const engine_options options);
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
// Attaches to an existing native window via its HWND (Windows-specific).
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
// Ogre config-file persistence and the built-in config dialog.
void save_config();
int restore_config();
int show_config_dialog();
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
void load_ogre_plugin(const char * plugin);
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
// Frame rendering: single frame (optionally with explicit dt), a blocking
// render loop, or manual OS message pumping.
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
void render_loop();
void pump_messages();
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
void log_message(const char* message);
// SceneManager functions
// These operate on an implicit "current" scene manager (no handle argument).
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes
// Wrappers over Ogre::SceneNode; entities attach/detach by handle, index or
// name, and int parameters act as C-style booleans.
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name);
int scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade);
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle);
// Transform setters; yaw/roll/pitch take radians per the parameter names.
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z);
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians);
// Viewports
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
// Resource management
// Loads a resources.cfg-style file, or registers locations individually.
void setup_resources(const char* resources_cfg);
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Camera
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
void camera_set_fovy(CameraHandle camera_handle, float angle);
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
// Entity
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
// Light
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
// FrameListener
// frame_event_type is a bitmask of the EVENT_FRAME_* flags declared above.
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
// WindowListener
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
Added new functionality to the D2 interface file.
/******************************************************************************
* ogre_interface.di - main interface file for D clients
******************************************************************************
* This file is part of
* __ __ _
* / // /_____ ____ (_)
* / // // ___// __ \ / /
* / // // /__ / /_/ // /
* /_//_/ \___/ \____//_/
*
* Low Level C Ogre Interface (llcoi)
*
* See http://code.google.com/p/llcoi/ for more information.
*
* Copyright (c) 2011, Llcoi Team
*
* License: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
module ogre_interface;
// Everything below uses C linkage to match the llcoi C library.
extern(C):
// Ogre::Real equivalent (single-precision float build).
alias float coiReal;
// Bit flags selecting which FrameListener events a callback receives.
const int EVENT_FRAME_STARTED = 1;
const int EVENT_FRAME_RENDERING_QUEUED = 2;
const int EVENT_FRAME_ENDED = 4;
// Opaque handles: raw pointers to C++ objects owned by the C library.
alias void* CameraHandle;
alias void* EntityHandle;
alias void* SceneNodeHandle;
alias void* LightHandle;
alias void* RenderWindowHandle;
alias void* RootHandle;
alias void* RenderSystemHandle;
alias void* SceneManagerHandle;
alias void* ViewportHandle;
alias void* LogManagerHandle;
alias void* LogHandle;
// listener typedefs
// FrameListenerEvent presumably receives the frame timings plus the event
// type flag; confirm parameter meaning against the C header.
alias int function(float,float,int) FrameListenerEvent;
alias void function(RenderWindowHandle) WindowListenerEvent;
// Value types and enums marshalled across the C boundary; layouts/values must
// match the C side exactly.
// Fix: dropped the stray `;` after each closing `}` — that C-ism is an empty
// declaration at module scope in D, which newer compilers reject/deprecate.

// Quaternion components in (w, x, y, z) order — presumably matching
// Ogre::Quaternion's layout; confirm against the C header.
struct coiQuaternion
{
    float w;
    float x;
    float y;
    float z;
}

// 3-component vector used for positions, directions and scales.
struct coiVector3
{
    float x;
    float y;
    float z;
}

// Start-up options filled by default_engine_options() and consumed by
// init_engine().
struct engine_options
{
    const char* renderer_s;       // presumably the render system name — confirm
    const char* plugin_folder_s;  // presumably the Ogre plugin directory — confirm
    const char* window_title;
    const char* log_name;
    int width, height, auto_window; // auto_window: C-style bool (0/1)
}

// Log verbosity, mirroring Ogre::LoggingLevel values.
enum logging_level
{
    LL_LOW = 1,
    LL_NORMAL = 2,
    LL_BOREME = 3
}

// Per-message importance, mirroring Ogre::LogMessageLevel values.
enum log_message_level
{
    LML_TRIVIAL = 1,
    LML_NORMAL = 2,
    LML_CRITICAL = 3
}
// Log listener callback: receives the message text, its level, the debug
// mask flag, the originating log's name, and the skip flag.
alias void function(const char* message, log_message_level lml, int maskDebug, const char* log_name, int skip_message) LogListenerEvent;
// Root functions
// Engine lifecycle: init_engine()/release_engine() bracket the engine, or use
// create_root() + root_initialise() for the step-by-step path.
void release_engine();
// Fills `options` with defaults; caller may then tweak before init_engine().
void default_engine_options(engine_options* options);
void init_engine(const engine_options options);
RootHandle create_root(const char* pluginFileName, const char* configFileName, const char* logFileName);
RenderWindowHandle root_initialise(int auto_create_window, const char* render_window_title);
RenderWindowHandle create_render_window(const char* name, const int width, const int height, const int full_screen);
RenderWindowHandle create_render_window_gl_context(const char* name, const int width, const int height, const int full_screen);
// Attaches to an existing native window via its HWND (Windows-specific).
RenderWindowHandle create_render_window_hwnd(const char* name, const int width, const int height, const int full_screen, ulong hwnd);
uint render_window_get_hwnd(RenderWindowHandle window_handle);
void render_window_set_visible(RenderWindowHandle window_handle, int visible);
void render_window_update(RenderWindowHandle window_handle, int swap_buffers);
void current_window_update(int swap_buffers);
void render_window_resize(uint width, uint height);
void render_window_moved_or_resized();
int render_window_closed();
int root_is_initialised();
// Ogre config-file persistence and the built-in config dialog.
void save_config();
int restore_config();
int show_config_dialog();
void add_render_system(RenderSystemHandle render_system);
void set_render_system(RenderSystemHandle render_system);
RenderSystemHandle get_render_system();
RenderSystemHandle get_render_system_by_name(const char* render_system_name);
void load_ogre_plugin(const char * plugin);
SceneManagerHandle create_scene_manager(const char* type_name, const char* instance_name);
SceneManagerHandle get_scene_manager();
SceneManagerHandle get_scene_manager_by_name(const char* scene_manager_instance_name);
// Frame rendering: single frame (optionally with explicit dt), a blocking
// render loop, or manual OS message pumping.
int render_one_frame();
int render_one_frame_ex(float time_since_last_frame);
void render_loop();
void pump_messages();
void render_system_set_config_option(RenderSystemHandle render_system_handle, const char* option, const char* value);
void log_message(const char* message);
// SceneManager functions
// These operate on an implicit "current" scene manager (no handle argument).
void set_default_num_mipmaps(int number);
void set_ambient_light_rgba(const float r, const float g, const float b, const float a);
void set_ambient_light_rgb(const float r, const float g, const float b);
ViewportHandle add_viewport(CameraHandle camera_handle);
void scene_manager_log_name();
// Scene nodes
// Wrappers over Ogre::SceneNode; entities attach/detach by handle, index or
// name, and int parameters act as C-style booleans.
SceneNodeHandle create_child_scenenode(const char* node_name);
void attach_entity_to_scenenode(EntityHandle entity_handle, SceneNodeHandle scenenode_handle);
void scenenode_update(SceneNodeHandle scenenode_handle, int update_children, int parent_has_changed);
void scenenode_update_bounds(SceneNodeHandle scenenode_handle);
EntityHandle scenenode_get_attached_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
EntityHandle scenenode_get_attached_entity(SceneNodeHandle scenenode_handle, const char* entity_name);
int scenenode_num_attached_objects(SceneNodeHandle scenenode_handle);
void scenenode_detach_entity_int(SceneNodeHandle scenenode_handle, int entity_index);
void scenenode_detach_entity(SceneNodeHandle scenenode_handle, EntityHandle entity_handle);
void scenenode_detach_entity_string(SceneNodeHandle scenenode_handle, const char* entity_name);
void scenenode_detach_all_objects(SceneNodeHandle scenenode_handle);
int scenenode_is_in_scenegraph(SceneNodeHandle scenenode_handle);
void scenenode_notify_rootnode(SceneNodeHandle scenenode_handle);
void scenenode_show_boundingbox(SceneNodeHandle scenenode_handle, int show_boundingbox);
void scenenode_hide_boundingbox(SceneNodeHandle scenenode_handle, int hide_boundingbox);
int scenenode_get_show_boundingbox(SceneNodeHandle scenenode_handle);
SceneNodeHandle scenenode_get_parent_scenenode(SceneNodeHandle scenenode_handle);
void scenenode_set_visible(SceneNodeHandle scenenode_handle, int visible);
void scenenode_set_visible_ex(SceneNodeHandle scenenode_handle, int visible, int cascade);
void scenenode_flip_visibility(SceneNodeHandle scenenode_handle);
void scenenode_flip_visibility_ex(SceneNodeHandle scenenode_handle, int cascade);
void scenenode_set_debug_display_enabled(SceneNodeHandle scenenode_handle, int enabled);
void scenenode_set_debug_display_enabled_ex(SceneNodeHandle scenenode_handle, int enabled, int cascade);
SceneManagerHandle scenenode_get_creator(SceneNodeHandle scenenode_handle);
// Transform setters; yaw/roll/pitch take radians per the parameter names.
void scenenode_set_direction(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_set_orientation(SceneNodeHandle scenenode_handle, float w, float x, float y, float z);
void scenenode_set_position(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_yaw(SceneNodeHandle scenenode_handle, coiReal radians);
void scenenode_set_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_scale(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_translate(SceneNodeHandle scenenode_handle, float x, float y, float z);
void scenenode_roll(SceneNodeHandle scenenode_handle, coiReal radians);
void scenenode_pitch(SceneNodeHandle scenenode_handle, coiReal radians);
// Viewports
void viewport_set_background_colour(ViewportHandle viewport_handle, float r, float g, float b);
float viewport_get_width(ViewportHandle viewport_handle);
float viewport_get_height(ViewportHandle viewport_handle);
// Resource management
void setup_resources(const char* resources_cfg);
void add_resource_location(const char* location, const char* type, const char* group);
void initialise_all_resourcegroups();
// Camera
CameraHandle create_camera(const char* camera_name);
CameraHandle get_camera(const char* camera_name);
void camera_set_near_clip_distance(CameraHandle camera_handle, float d);
void camera_set_far_clip_distance(CameraHandle camera_handle, float d);
void camera_set_aspect_ratio(CameraHandle camera_handle, float w, float h);
void camera_set_auto_aspect_ratio(CameraHandle camera_handle, int on);
void camera_set_fovy(CameraHandle camera_handle, float angle);
void camera_set_frustum_offset(CameraHandle camera_handle, const int offset_x, const int offset_y);
void camera_set_focal_length(CameraHandle camera_handle, float fl);
void camera_set_position(CameraHandle camera_handle, const float x, const float y, const float z);
void camera_lookat(CameraHandle camera_handle, const float x, const float y, const float z);
// Entity
EntityHandle create_entity(const char* entity_name, const char* mesh_file);
// Light
LightHandle create_light(const char* light_name);
void light_set_position(LightHandle light_handle, const float x, const float y, const float z);
// FrameListener
void add_frame_listener(FrameListenerEvent frame_event,const int frame_event_type);
void remove_frame_listener(FrameListenerEvent frame_event);
// WindowListener
void add_window_listener(RenderWindowHandle window_handle, WindowListenerEvent window_event);
void remove_window_listener(RenderWindowHandle window_handle);
// LogManager
LogManagerHandle create_log_manager();
// LogManager::getSingletonPtr
LogManagerHandle get_log_manager();
//LogManager::getLog
LogHandle logmanager_get_log(const char* name);
//LogManager::getDefaultLog
LogHandle logmanager_get_default_log();
//LogManager::setDefaultLog
LogHandle logmanager_set_default_log(LogHandle log_handle);
//LogManager::createLog
LogHandle logmanager_create_log(const char* name, int default_log, int debugger_output, int suppress_file_output);
// n.b., Allows for finer grained control over the log messages at the cost of
// having to supply all these variables. If you don't need this control,
// use log_message above.
//LogManager::logMessage
void logmanager_log_message(const char* message, log_message_level lml, int maskDebug, const char* log_name, int skip_message);
//LogManager::destroyLog
void logmanager_set_log_detail(logging_level lvl);
// XXX: How should we handle functions with multiple overloads?
// e.g., this can take either an Ogre::String or a Log*
//LogManager::destroyLog
void logmanager_destroy_log(const char* name);
void logmanager_destroy_log_by_handle(LogHandle log_handle);
//Log::addListener
void add_log_listener(LogListenerEvent log_event, LogHandle log_handle);
//Log::removeListener
void remove_log_listener(LogListenerEvent log_event, LogHandle log_handle);
|
// Copyright (c) 2013-2014 Conformal Systems <info@conformal.com>
//
// This file originated from: http://opensource.conformal.com/
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// Go bindings for GTK+ 3. Supports version 3.6 and later.
//
// Functions use the same names as the native C function calls, but use
// CamelCase. In cases where native GTK uses pointers to values to
// simulate multiple return values, Go's native multiple return values
// are used instead. Whenever a native GTK call could return an
// unexpected NULL pointer, an additional error is returned in the Go
// binding.
//
// GTK's C API documentation can be very useful for understanding how the
// functions in this package work and what each type is for. This
// documentation can be found at https://developer.gnome.org/gtk3/.
//
// In addition to Go versions of the C GTK functions, every struct type
// includes a method named Native (either by direct implementation, or
// by means of struct embedding). These methods return a uintptr of the
// native C object the binding type represents. These pointers may be
// type switched to a native C pointer using unsafe and used with cgo
// function calls outside this package.
//
// Memory management is handled in proper Go fashion, using runtime
// finalizers to properly free memory when it is no longer needed. Each
// time a Go type is created with a pointer to a GObject, a reference is
// added for Go, sinking the floating reference when necessary. After
// going out of scope and the next time Go's garbage collector is run, a
// finalizer is run to remove Go's reference to the GObject. When this
// reference count hits zero (when neither Go nor GTK holds ownership)
// the object will be freed internally by GTK.
package gtk
// #cgo pkg-config: gtk+-3.0
// #include <gtk/gtk.h>
// #include "gtk.go.h"
import "C"
import (
"errors"
"fmt"
"runtime"
"unsafe"
"github.com/conformal/gotk3/cairo"
"github.com/conformal/gotk3/gdk"
"github.com/conformal/gotk3/glib"
"github.com/conformal/gotk3/pango"
)
// init registers a GValue-to-Go marshaler for every GTK type wrapped by this
// package, so that glib can convert GValues carrying these types (e.g. in
// signal callbacks) into the corresponding Go binding types.
func init() {
	tm := []glib.TypeMarshaler{
		// Enums
		{glib.Type(C.gtk_align_get_type()), marshalAlign},
		{glib.Type(C.gtk_accel_flags_get_type()), marshalAccelFlags},
		{glib.Type(C.gtk_arrow_placement_get_type()), marshalArrowPlacement},
		{glib.Type(C.gtk_arrow_type_get_type()), marshalArrowType},
		{glib.Type(C.gtk_assistant_page_type_get_type()), marshalAssistantPageType},
		{glib.Type(C.gtk_buttons_type_get_type()), marshalButtonsType},
		{glib.Type(C.gtk_calendar_display_options_get_type()), marshalCalendarDisplayOptions},
		{glib.Type(C.gtk_dialog_flags_get_type()), marshalDialogFlags},
		{glib.Type(C.gtk_entry_icon_position_get_type()), marshalEntryIconPosition},
		{glib.Type(C.gtk_file_chooser_action_get_type()), marshalFileChooserAction},
		{glib.Type(C.gtk_icon_size_get_type()), marshalIconSize},
		{glib.Type(C.gtk_image_type_get_type()), marshalImageType},
		{glib.Type(C.gtk_input_hints_get_type()), marshalInputHints},
		{glib.Type(C.gtk_input_purpose_get_type()), marshalInputPurpose},
		{glib.Type(C.gtk_justification_get_type()), marshalJustification},
		{glib.Type(C.gtk_license_get_type()), marshalLicense},
		{glib.Type(C.gtk_message_type_get_type()), marshalMessageType},
		{glib.Type(C.gtk_orientation_get_type()), marshalOrientation},
		{glib.Type(C.gtk_pack_type_get_type()), marshalPackType},
		{glib.Type(C.gtk_path_type_get_type()), marshalPathType},
		{glib.Type(C.gtk_policy_type_get_type()), marshalPolicyType},
		{glib.Type(C.gtk_position_type_get_type()), marshalPositionType},
		{glib.Type(C.gtk_relief_style_get_type()), marshalReliefStyle},
		{glib.Type(C.gtk_response_type_get_type()), marshalResponseType},
		{glib.Type(C.gtk_selection_mode_get_type()), marshalSelectionMode},
		{glib.Type(C.gtk_shadow_type_get_type()), marshalShadowType},
		{glib.Type(C.gtk_state_flags_get_type()), marshalStateFlags},
		{glib.Type(C.gtk_toolbar_style_get_type()), marshalToolbarStyle},
		{glib.Type(C.gtk_tree_model_flags_get_type()), marshalTreeModelFlags},
		{glib.Type(C.gtk_window_position_get_type()), marshalWindowPosition},
		{glib.Type(C.gtk_window_type_get_type()), marshalWindowType},
		{glib.Type(C.gtk_wrap_mode_get_type()), marshalWrapMode},

		// Objects/Interfaces
		{glib.Type(C.gtk_about_dialog_get_type()), marshalAboutDialog},
		{glib.Type(C.gtk_adjustment_get_type()), marshalAdjustment},
		{glib.Type(C.gtk_alignment_get_type()), marshalAlignment},
		{glib.Type(C.gtk_arrow_get_type()), marshalArrow},
		{glib.Type(C.gtk_assistant_get_type()), marshalAssistant},
		{glib.Type(C.gtk_bin_get_type()), marshalBin},
		{glib.Type(C.gtk_builder_get_type()), marshalBuilder},
		{glib.Type(C.gtk_button_get_type()), marshalButton},
		{glib.Type(C.gtk_box_get_type()), marshalBox},
		{glib.Type(C.gtk_calendar_get_type()), marshalCalendar},
		{glib.Type(C.gtk_cell_layout_get_type()), marshalCellLayout},
		{glib.Type(C.gtk_cell_renderer_get_type()), marshalCellRenderer},
		{glib.Type(C.gtk_cell_renderer_text_get_type()), marshalCellRendererText},
		{glib.Type(C.gtk_cell_renderer_toggle_get_type()), marshalCellRendererToggle},
		{glib.Type(C.gtk_check_button_get_type()), marshalCheckButton},
		{glib.Type(C.gtk_check_menu_item_get_type()), marshalCheckMenuItem},
		{glib.Type(C.gtk_clipboard_get_type()), marshalClipboard},
		{glib.Type(C.gtk_combo_box_get_type()), marshalComboBox},
		{glib.Type(C.gtk_container_get_type()), marshalContainer},
		{glib.Type(C.gtk_dialog_get_type()), marshalDialog},
		{glib.Type(C.gtk_drawing_area_get_type()), marshalDrawingArea},
		{glib.Type(C.gtk_editable_get_type()), marshalEditable},
		{glib.Type(C.gtk_entry_get_type()), marshalEntry},
		{glib.Type(C.gtk_entry_buffer_get_type()), marshalEntryBuffer},
		{glib.Type(C.gtk_entry_completion_get_type()), marshalEntryCompletion},
		{glib.Type(C.gtk_event_box_get_type()), marshalEventBox},
		{glib.Type(C.gtk_file_chooser_get_type()), marshalFileChooser},
		{glib.Type(C.gtk_file_chooser_button_get_type()), marshalFileChooserButton},
		{glib.Type(C.gtk_file_chooser_widget_get_type()), marshalFileChooserWidget},
		{glib.Type(C.gtk_frame_get_type()), marshalFrame},
		{glib.Type(C.gtk_grid_get_type()), marshalGrid},
		{glib.Type(C.gtk_image_get_type()), marshalImage},
		{glib.Type(C.gtk_label_get_type()), marshalLabel},
		{glib.Type(C.gtk_list_store_get_type()), marshalListStore},
		{glib.Type(C.gtk_menu_get_type()), marshalMenu},
		{glib.Type(C.gtk_menu_bar_get_type()), marshalMenuBar},
		{glib.Type(C.gtk_menu_button_get_type()), marshalMenuButton},
		{glib.Type(C.gtk_menu_item_get_type()), marshalMenuItem},
		{glib.Type(C.gtk_menu_shell_get_type()), marshalMenuShell},
		{glib.Type(C.gtk_message_dialog_get_type()), marshalMessageDialog},
		{glib.Type(C.gtk_misc_get_type()), marshalMisc},
		{glib.Type(C.gtk_notebook_get_type()), marshalNotebook},
		{glib.Type(C.gtk_offscreen_window_get_type()), marshalOffscreenWindow},
		{glib.Type(C.gtk_orientable_get_type()), marshalOrientable},
		{glib.Type(C.gtk_progress_bar_get_type()), marshalProgressBar},
		{glib.Type(C.gtk_radio_button_get_type()), marshalRadioButton},
		{glib.Type(C.gtk_radio_menu_item_get_type()), marshalRadioMenuItem},
		{glib.Type(C.gtk_range_get_type()), marshalRange},
		{glib.Type(C.gtk_scrollbar_get_type()), marshalScrollbar},
		{glib.Type(C.gtk_scrolled_window_get_type()), marshalScrolledWindow},
		{glib.Type(C.gtk_search_entry_get_type()), marshalSearchEntry},
		{glib.Type(C.gtk_separator_get_type()), marshalSeparator},
		{glib.Type(C.gtk_separator_menu_item_get_type()), marshalSeparatorMenuItem},
		{glib.Type(C.gtk_separator_tool_item_get_type()), marshalSeparatorToolItem},
		{glib.Type(C.gtk_spin_button_get_type()), marshalSpinButton},
		{glib.Type(C.gtk_spinner_get_type()), marshalSpinner},
		{glib.Type(C.gtk_statusbar_get_type()), marshalStatusbar},
		{glib.Type(C.gtk_switch_get_type()), marshalSwitch},
		{glib.Type(C.gtk_text_view_get_type()), marshalTextView},
		{glib.Type(C.gtk_text_tag_table_get_type()), marshalTextTagTable},
		{glib.Type(C.gtk_text_buffer_get_type()), marshalTextBuffer},
		{glib.Type(C.gtk_toggle_button_get_type()), marshalToggleButton},
		{glib.Type(C.gtk_toolbar_get_type()), marshalToolbar},
		{glib.Type(C.gtk_tool_button_get_type()), marshalToolButton},
		{glib.Type(C.gtk_tool_item_get_type()), marshalToolItem},
		{glib.Type(C.gtk_tree_model_get_type()), marshalTreeModel},
		{glib.Type(C.gtk_tree_selection_get_type()), marshalTreeSelection},
		{glib.Type(C.gtk_tree_view_get_type()), marshalTreeView},
		{glib.Type(C.gtk_tree_view_column_get_type()), marshalTreeViewColumn},
		{glib.Type(C.gtk_widget_get_type()), marshalWidget},
		{glib.Type(C.gtk_window_get_type()), marshalWindow},

		// Boxed
		{glib.Type(C.gtk_text_iter_get_type()), marshalTextIter},
		{glib.Type(C.gtk_tree_iter_get_type()), marshalTreeIter},
		{glib.Type(C.gtk_tree_path_get_type()), marshalTreePath},
	}
	glib.RegisterGValueMarshalers(tm)
}
/*
* Type conversions
*/
// gbool converts a Go bool to a C gboolean (TRUE = 1, FALSE = 0).
func gbool(b bool) C.gboolean {
	var result C.gboolean // zero value is FALSE
	if b {
		result = 1
	}
	return result
}
// gobool converts a C gboolean to a Go bool (any nonzero value is true).
func gobool(b C.gboolean) bool {
	return b != 0
}
// Wrapper function for TestBoolConvs since cgo can't be used with
// testing package
func testBoolConvs() error {
	// Round-trip true through C and back.
	if gobool(gbool(true)) != true {
		return errors.New("Unexpected bool conversion result")
	}
	// Round-trip FALSE through Go and back.
	if cb := gbool(gobool(C.gboolean(0))); cb != C.gboolean(0) {
		return errors.New("Unexpected bool conversion result")
	}
	return nil
}
/*
 * Unexported vars
 */

// nilPtrErr is returned by constructors and getters when a cgo call
// unexpectedly yields a nil pointer.
var nilPtrErr = errors.New("cgo returned unexpected nil pointer")
/*
* Constants
*/
// Align is a representation of GTK's GtkAlign.
type Align int

const (
	ALIGN_FILL   Align = C.GTK_ALIGN_FILL
	ALIGN_START  Align = C.GTK_ALIGN_START
	ALIGN_END    Align = C.GTK_ALIGN_END
	ALIGN_CENTER Align = C.GTK_ALIGN_CENTER
)

// marshalAlign extracts a GtkAlign enum from a GValue.
func marshalAlign(ptr uintptr) (interface{}, error) {
	return Align(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// AccelFlags is a representation of GTK's GtkAccelFlags.
type AccelFlags int

const (
	ACCEL_VISIBLE AccelFlags = C.GTK_ACCEL_VISIBLE
	ACCEL_LOCKED  AccelFlags = C.GTK_ACCEL_LOCKED
	ACCEL_MASK    AccelFlags = C.GTK_ACCEL_MASK
)

// marshalAccelFlags extracts a GtkAccelFlags enum from a GValue.
func marshalAccelFlags(ptr uintptr) (interface{}, error) {
	return AccelFlags(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ArrowPlacement is a representation of GTK's GtkArrowPlacement.
type ArrowPlacement int

const (
	ARROWS_BOTH  ArrowPlacement = C.GTK_ARROWS_BOTH
	ARROWS_START ArrowPlacement = C.GTK_ARROWS_START
	ARROWS_END   ArrowPlacement = C.GTK_ARROWS_END
)

// marshalArrowPlacement extracts a GtkArrowPlacement enum from a GValue.
func marshalArrowPlacement(ptr uintptr) (interface{}, error) {
	return ArrowPlacement(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ArrowType is a representation of GTK's GtkArrowType.
type ArrowType int

const (
	ARROW_UP    ArrowType = C.GTK_ARROW_UP
	ARROW_DOWN  ArrowType = C.GTK_ARROW_DOWN
	ARROW_LEFT  ArrowType = C.GTK_ARROW_LEFT
	ARROW_RIGHT ArrowType = C.GTK_ARROW_RIGHT
	ARROW_NONE  ArrowType = C.GTK_ARROW_NONE
)

// marshalArrowType extracts a GtkArrowType enum from a GValue.
func marshalArrowType(ptr uintptr) (interface{}, error) {
	return ArrowType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// AssistantPageType is a representation of GTK's GtkAssistantPageType.
type AssistantPageType int

const (
	ASSISTANT_PAGE_CONTENT  AssistantPageType = C.GTK_ASSISTANT_PAGE_CONTENT
	ASSISTANT_PAGE_INTRO    AssistantPageType = C.GTK_ASSISTANT_PAGE_INTRO
	ASSISTANT_PAGE_CONFIRM  AssistantPageType = C.GTK_ASSISTANT_PAGE_CONFIRM
	ASSISTANT_PAGE_SUMMARY  AssistantPageType = C.GTK_ASSISTANT_PAGE_SUMMARY
	ASSISTANT_PAGE_PROGRESS AssistantPageType = C.GTK_ASSISTANT_PAGE_PROGRESS
	ASSISTANT_PAGE_CUSTOM   AssistantPageType = C.GTK_ASSISTANT_PAGE_CUSTOM
)

// marshalAssistantPageType extracts a GtkAssistantPageType enum from a GValue.
func marshalAssistantPageType(ptr uintptr) (interface{}, error) {
	return AssistantPageType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ButtonsType is a representation of GTK's GtkButtonsType.
type ButtonsType int

const (
	BUTTONS_NONE      ButtonsType = C.GTK_BUTTONS_NONE
	BUTTONS_OK        ButtonsType = C.GTK_BUTTONS_OK
	BUTTONS_CLOSE     ButtonsType = C.GTK_BUTTONS_CLOSE
	BUTTONS_CANCEL    ButtonsType = C.GTK_BUTTONS_CANCEL
	BUTTONS_YES_NO    ButtonsType = C.GTK_BUTTONS_YES_NO
	BUTTONS_OK_CANCEL ButtonsType = C.GTK_BUTTONS_OK_CANCEL
)

// marshalButtonsType extracts a GtkButtonsType enum from a GValue.
func marshalButtonsType(ptr uintptr) (interface{}, error) {
	return ButtonsType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// CalendarDisplayOptions is a representation of GTK's GtkCalendarDisplayOptions.
type CalendarDisplayOptions int

const (
	CALENDAR_SHOW_HEADING      CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_HEADING
	CALENDAR_SHOW_DAY_NAMES    CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_DAY_NAMES
	CALENDAR_NO_MONTH_CHANGE   CalendarDisplayOptions = C.GTK_CALENDAR_NO_MONTH_CHANGE
	CALENDAR_SHOW_WEEK_NUMBERS CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_WEEK_NUMBERS
	CALENDAR_SHOW_DETAILS      CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_DETAILS
)

// marshalCalendarDisplayOptions extracts a GtkCalendarDisplayOptions enum from a GValue.
func marshalCalendarDisplayOptions(ptr uintptr) (interface{}, error) {
	return CalendarDisplayOptions(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// DialogFlags is a representation of GTK's GtkDialogFlags.
type DialogFlags int

const (
	DIALOG_MODAL               DialogFlags = C.GTK_DIALOG_MODAL
	DIALOG_DESTROY_WITH_PARENT DialogFlags = C.GTK_DIALOG_DESTROY_WITH_PARENT
)

// marshalDialogFlags extracts a GtkDialogFlags enum from a GValue.
func marshalDialogFlags(ptr uintptr) (interface{}, error) {
	return DialogFlags(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// EntryIconPosition is a representation of GTK's GtkEntryIconPosition.
type EntryIconPosition int

const (
	ENTRY_ICON_PRIMARY   EntryIconPosition = C.GTK_ENTRY_ICON_PRIMARY
	ENTRY_ICON_SECONDARY EntryIconPosition = C.GTK_ENTRY_ICON_SECONDARY
)

// marshalEntryIconPosition extracts a GtkEntryIconPosition enum from a GValue.
func marshalEntryIconPosition(ptr uintptr) (interface{}, error) {
	return EntryIconPosition(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// FileChooserAction is a representation of GTK's GtkFileChooserAction.
type FileChooserAction int

const (
	FILE_CHOOSER_ACTION_OPEN          FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_OPEN
	FILE_CHOOSER_ACTION_SAVE          FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_SAVE
	FILE_CHOOSER_ACTION_SELECT_FOLDER FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_SELECT_FOLDER
	FILE_CHOOSER_ACTION_CREATE_FOLDER FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_CREATE_FOLDER
)

// marshalFileChooserAction extracts a GtkFileChooserAction enum from a GValue.
func marshalFileChooserAction(ptr uintptr) (interface{}, error) {
	return FileChooserAction(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// IconSize is a representation of GTK's GtkIconSize.
type IconSize int

const (
	ICON_SIZE_INVALID       IconSize = C.GTK_ICON_SIZE_INVALID
	ICON_SIZE_MENU          IconSize = C.GTK_ICON_SIZE_MENU
	ICON_SIZE_SMALL_TOOLBAR IconSize = C.GTK_ICON_SIZE_SMALL_TOOLBAR
	ICON_SIZE_LARGE_TOOLBAR IconSize = C.GTK_ICON_SIZE_LARGE_TOOLBAR
	ICON_SIZE_BUTTON        IconSize = C.GTK_ICON_SIZE_BUTTON
	ICON_SIZE_DND           IconSize = C.GTK_ICON_SIZE_DND
	ICON_SIZE_DIALOG        IconSize = C.GTK_ICON_SIZE_DIALOG
)

// marshalIconSize extracts a GtkIconSize enum from a GValue.
func marshalIconSize(ptr uintptr) (interface{}, error) {
	return IconSize(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ImageType is a representation of GTK's GtkImageType.
type ImageType int

const (
	IMAGE_EMPTY     ImageType = C.GTK_IMAGE_EMPTY
	IMAGE_PIXBUF    ImageType = C.GTK_IMAGE_PIXBUF
	IMAGE_STOCK     ImageType = C.GTK_IMAGE_STOCK
	IMAGE_ICON_SET  ImageType = C.GTK_IMAGE_ICON_SET
	IMAGE_ANIMATION ImageType = C.GTK_IMAGE_ANIMATION
	IMAGE_ICON_NAME ImageType = C.GTK_IMAGE_ICON_NAME
	IMAGE_GICON     ImageType = C.GTK_IMAGE_GICON
)

// marshalImageType extracts a GtkImageType enum from a GValue.
func marshalImageType(ptr uintptr) (interface{}, error) {
	return ImageType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// InputHints is a representation of GTK's GtkInputHints.
type InputHints int

const (
	INPUT_HINT_NONE                InputHints = C.GTK_INPUT_HINT_NONE
	INPUT_HINT_SPELLCHECK          InputHints = C.GTK_INPUT_HINT_SPELLCHECK
	INPUT_HINT_NO_SPELLCHECK       InputHints = C.GTK_INPUT_HINT_NO_SPELLCHECK
	INPUT_HINT_WORD_COMPLETION     InputHints = C.GTK_INPUT_HINT_WORD_COMPLETION
	INPUT_HINT_LOWERCASE           InputHints = C.GTK_INPUT_HINT_LOWERCASE
	INPUT_HINT_UPPERCASE_CHARS     InputHints = C.GTK_INPUT_HINT_UPPERCASE_CHARS
	INPUT_HINT_UPPERCASE_WORDS     InputHints = C.GTK_INPUT_HINT_UPPERCASE_WORDS
	INPUT_HINT_UPPERCASE_SENTENCES InputHints = C.GTK_INPUT_HINT_UPPERCASE_SENTENCES
	INPUT_HINT_INHIBIT_OSK         InputHints = C.GTK_INPUT_HINT_INHIBIT_OSK
)

// marshalInputHints extracts a GtkInputHints enum from a GValue.
func marshalInputHints(ptr uintptr) (interface{}, error) {
	return InputHints(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// InputPurpose is a representation of GTK's GtkInputPurpose.
type InputPurpose int

const (
	INPUT_PURPOSE_FREE_FORM InputPurpose = C.GTK_INPUT_PURPOSE_FREE_FORM
	INPUT_PURPOSE_ALPHA     InputPurpose = C.GTK_INPUT_PURPOSE_ALPHA
	INPUT_PURPOSE_DIGITS    InputPurpose = C.GTK_INPUT_PURPOSE_DIGITS
	INPUT_PURPOSE_NUMBER    InputPurpose = C.GTK_INPUT_PURPOSE_NUMBER
	INPUT_PURPOSE_PHONE     InputPurpose = C.GTK_INPUT_PURPOSE_PHONE
	INPUT_PURPOSE_URL       InputPurpose = C.GTK_INPUT_PURPOSE_URL
	INPUT_PURPOSE_EMAIL     InputPurpose = C.GTK_INPUT_PURPOSE_EMAIL
	INPUT_PURPOSE_NAME      InputPurpose = C.GTK_INPUT_PURPOSE_NAME
	INPUT_PURPOSE_PASSWORD  InputPurpose = C.GTK_INPUT_PURPOSE_PASSWORD
	INPUT_PURPOSE_PIN       InputPurpose = C.GTK_INPUT_PURPOSE_PIN
)

// marshalInputPurpose extracts a GtkInputPurpose enum from a GValue.
func marshalInputPurpose(ptr uintptr) (interface{}, error) {
	return InputPurpose(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// Justification is a representation of GTK's GtkJustification.
type Justification int

const (
	JUSTIFY_LEFT   Justification = C.GTK_JUSTIFY_LEFT
	JUSTIFY_RIGHT  Justification = C.GTK_JUSTIFY_RIGHT
	JUSTIFY_CENTER Justification = C.GTK_JUSTIFY_CENTER
	JUSTIFY_FILL   Justification = C.GTK_JUSTIFY_FILL
)

// marshalJustification extracts a GtkJustification enum from a GValue.
func marshalJustification(ptr uintptr) (interface{}, error) {
	return Justification(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// License is a representation of GTK's GtkLicense.
type License int

const (
	LICENSE_UNKNOWN      License = C.GTK_LICENSE_UNKNOWN
	LICENSE_CUSTOM       License = C.GTK_LICENSE_CUSTOM
	LICENSE_GPL_2_0      License = C.GTK_LICENSE_GPL_2_0
	LICENSE_GPL_3_0      License = C.GTK_LICENSE_GPL_3_0
	LICENSE_LGPL_2_1     License = C.GTK_LICENSE_LGPL_2_1
	LICENSE_LGPL_3_0     License = C.GTK_LICENSE_LGPL_3_0
	LICENSE_BSD          License = C.GTK_LICENSE_BSD
	LICENSE_MIT_X11      License = C.GTK_LICENSE_MIT_X11
	LICENSE_GTK_ARTISTIC License = C.GTK_LICENSE_ARTISTIC
)

// marshalLicense extracts a GtkLicense enum from a GValue.
func marshalLicense(ptr uintptr) (interface{}, error) {
	return License(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// MessageType is a representation of GTK's GtkMessageType.
type MessageType int

const (
	MESSAGE_INFO     MessageType = C.GTK_MESSAGE_INFO
	MESSAGE_WARNING  MessageType = C.GTK_MESSAGE_WARNING
	MESSAGE_QUESTION MessageType = C.GTK_MESSAGE_QUESTION
	MESSAGE_ERROR    MessageType = C.GTK_MESSAGE_ERROR
	MESSAGE_OTHER    MessageType = C.GTK_MESSAGE_OTHER
)

// marshalMessageType extracts a GtkMessageType enum from a GValue.
func marshalMessageType(ptr uintptr) (interface{}, error) {
	return MessageType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// Orientation is a representation of GTK's GtkOrientation.
type Orientation int

const (
	ORIENTATION_HORIZONTAL Orientation = C.GTK_ORIENTATION_HORIZONTAL
	ORIENTATION_VERTICAL   Orientation = C.GTK_ORIENTATION_VERTICAL
)

// marshalOrientation extracts a GtkOrientation enum from a GValue.
func marshalOrientation(ptr uintptr) (interface{}, error) {
	return Orientation(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// PackType is a representation of GTK's GtkPackType.
type PackType int

const (
	PACK_START PackType = C.GTK_PACK_START
	PACK_END   PackType = C.GTK_PACK_END
)

// marshalPackType extracts a GtkPackType enum from a GValue.
func marshalPackType(ptr uintptr) (interface{}, error) {
	return PackType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// PathType is a representation of GTK's GtkPathType.
type PathType int

const (
	PATH_WIDGET       PathType = C.GTK_PATH_WIDGET
	PATH_WIDGET_CLASS PathType = C.GTK_PATH_WIDGET_CLASS
	PATH_CLASS        PathType = C.GTK_PATH_CLASS
)

// marshalPathType extracts a GtkPathType enum from a GValue.
func marshalPathType(ptr uintptr) (interface{}, error) {
	return PathType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// PolicyType is a representation of GTK's GtkPolicyType.
type PolicyType int

const (
	POLICY_ALWAYS    PolicyType = C.GTK_POLICY_ALWAYS
	POLICY_AUTOMATIC PolicyType = C.GTK_POLICY_AUTOMATIC
	POLICY_NEVER     PolicyType = C.GTK_POLICY_NEVER
)

// marshalPolicyType extracts a GtkPolicyType enum from a GValue.
func marshalPolicyType(ptr uintptr) (interface{}, error) {
	return PolicyType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// PositionType is a representation of GTK's GtkPositionType.
type PositionType int

const (
	POS_LEFT   PositionType = C.GTK_POS_LEFT
	POS_RIGHT  PositionType = C.GTK_POS_RIGHT
	POS_TOP    PositionType = C.GTK_POS_TOP
	POS_BOTTOM PositionType = C.GTK_POS_BOTTOM
)

// marshalPositionType extracts a GtkPositionType enum from a GValue.
func marshalPositionType(ptr uintptr) (interface{}, error) {
	return PositionType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ReliefStyle is a representation of GTK's GtkReliefStyle.
type ReliefStyle int

const (
	RELIEF_NORMAL ReliefStyle = C.GTK_RELIEF_NORMAL
	RELIEF_HALF   ReliefStyle = C.GTK_RELIEF_HALF
	RELIEF_NONE   ReliefStyle = C.GTK_RELIEF_NONE
)

// marshalReliefStyle extracts a GtkReliefStyle enum from a GValue.
func marshalReliefStyle(ptr uintptr) (interface{}, error) {
	return ReliefStyle(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ResponseType is a representation of GTK's GtkResponseType.
type ResponseType int

const (
	RESPONSE_NONE         ResponseType = C.GTK_RESPONSE_NONE
	RESPONSE_REJECT       ResponseType = C.GTK_RESPONSE_REJECT
	RESPONSE_ACCEPT       ResponseType = C.GTK_RESPONSE_ACCEPT
	RESPONSE_DELETE_EVENT ResponseType = C.GTK_RESPONSE_DELETE_EVENT
	RESPONSE_OK           ResponseType = C.GTK_RESPONSE_OK
	RESPONSE_CANCEL       ResponseType = C.GTK_RESPONSE_CANCEL
	RESPONSE_CLOSE        ResponseType = C.GTK_RESPONSE_CLOSE
	RESPONSE_YES          ResponseType = C.GTK_RESPONSE_YES
	RESPONSE_NO           ResponseType = C.GTK_RESPONSE_NO
	RESPONSE_APPLY        ResponseType = C.GTK_RESPONSE_APPLY
	RESPONSE_HELP         ResponseType = C.GTK_RESPONSE_HELP
)

// marshalResponseType extracts a GtkResponseType enum from a GValue.
func marshalResponseType(ptr uintptr) (interface{}, error) {
	return ResponseType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// SelectionMode is a representation of GTK's GtkSelectionMode.
type SelectionMode int

const (
	SELECTION_NONE     SelectionMode = C.GTK_SELECTION_NONE
	SELECTION_SINGLE   SelectionMode = C.GTK_SELECTION_SINGLE
	SELECTION_BROWSE   SelectionMode = C.GTK_SELECTION_BROWSE
	SELECTION_MULTIPLE SelectionMode = C.GTK_SELECTION_MULTIPLE
)

// marshalSelectionMode extracts a GtkSelectionMode enum from a GValue.
func marshalSelectionMode(ptr uintptr) (interface{}, error) {
	return SelectionMode(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ShadowType is a representation of GTK's GtkShadowType.
type ShadowType int

const (
	SHADOW_NONE       ShadowType = C.GTK_SHADOW_NONE
	SHADOW_IN         ShadowType = C.GTK_SHADOW_IN
	SHADOW_OUT        ShadowType = C.GTK_SHADOW_OUT
	SHADOW_ETCHED_IN  ShadowType = C.GTK_SHADOW_ETCHED_IN
	SHADOW_ETCHED_OUT ShadowType = C.GTK_SHADOW_ETCHED_OUT
)

// marshalShadowType extracts a GtkShadowType enum from a GValue.
func marshalShadowType(ptr uintptr) (interface{}, error) {
	return ShadowType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// StateFlags is a representation of GTK's GtkStateFlags.
type StateFlags int

const (
	STATE_FLAG_NORMAL       StateFlags = C.GTK_STATE_FLAG_NORMAL
	STATE_FLAG_ACTIVE       StateFlags = C.GTK_STATE_FLAG_ACTIVE
	STATE_FLAG_PRELIGHT     StateFlags = C.GTK_STATE_FLAG_PRELIGHT
	STATE_FLAG_SELECTED     StateFlags = C.GTK_STATE_FLAG_SELECTED
	STATE_FLAG_INSENSITIVE  StateFlags = C.GTK_STATE_FLAG_INSENSITIVE
	STATE_FLAG_INCONSISTENT StateFlags = C.GTK_STATE_FLAG_INCONSISTENT
	STATE_FLAG_FOCUSED      StateFlags = C.GTK_STATE_FLAG_FOCUSED
	STATE_FLAG_BACKDROP     StateFlags = C.GTK_STATE_FLAG_BACKDROP
)

// marshalStateFlags extracts a GtkStateFlags enum from a GValue.
func marshalStateFlags(ptr uintptr) (interface{}, error) {
	return StateFlags(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// ToolbarStyle is a representation of GTK's GtkToolbarStyle.
type ToolbarStyle int

const (
	TOOLBAR_ICONS      ToolbarStyle = C.GTK_TOOLBAR_ICONS
	TOOLBAR_TEXT       ToolbarStyle = C.GTK_TOOLBAR_TEXT
	TOOLBAR_BOTH       ToolbarStyle = C.GTK_TOOLBAR_BOTH
	TOOLBAR_BOTH_HORIZ ToolbarStyle = C.GTK_TOOLBAR_BOTH_HORIZ
)

// marshalToolbarStyle extracts a GtkToolbarStyle enum from a GValue.
func marshalToolbarStyle(ptr uintptr) (interface{}, error) {
	return ToolbarStyle(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// TreeModelFlags is a representation of GTK's GtkTreeModelFlags.
type TreeModelFlags int

const (
	TREE_MODEL_ITERS_PERSIST TreeModelFlags = C.GTK_TREE_MODEL_ITERS_PERSIST
	TREE_MODEL_LIST_ONLY     TreeModelFlags = C.GTK_TREE_MODEL_LIST_ONLY
)

// marshalTreeModelFlags extracts a GtkTreeModelFlags enum from a GValue.
func marshalTreeModelFlags(ptr uintptr) (interface{}, error) {
	return TreeModelFlags(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// WindowPosition is a representation of GTK's GtkWindowPosition.
type WindowPosition int

const (
	WIN_POS_NONE             WindowPosition = C.GTK_WIN_POS_NONE
	WIN_POS_CENTER           WindowPosition = C.GTK_WIN_POS_CENTER
	WIN_POS_MOUSE            WindowPosition = C.GTK_WIN_POS_MOUSE
	WIN_POS_CENTER_ALWAYS    WindowPosition = C.GTK_WIN_POS_CENTER_ALWAYS
	WIN_POS_CENTER_ON_PARENT WindowPosition = C.GTK_WIN_POS_CENTER_ON_PARENT
)

// marshalWindowPosition extracts a GtkWindowPosition enum from a GValue.
func marshalWindowPosition(ptr uintptr) (interface{}, error) {
	return WindowPosition(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// WindowType is a representation of GTK's GtkWindowType.
type WindowType int

const (
	WINDOW_TOPLEVEL WindowType = C.GTK_WINDOW_TOPLEVEL
	WINDOW_POPUP    WindowType = C.GTK_WINDOW_POPUP
)

// marshalWindowType extracts a GtkWindowType enum from a GValue.
func marshalWindowType(ptr uintptr) (interface{}, error) {
	return WindowType(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
// WrapMode is a representation of GTK's GtkWrapMode.
type WrapMode int

const (
	WRAP_NONE      WrapMode = C.GTK_WRAP_NONE
	WRAP_CHAR      WrapMode = C.GTK_WRAP_CHAR
	WRAP_WORD      WrapMode = C.GTK_WRAP_WORD
	WRAP_WORD_CHAR WrapMode = C.GTK_WRAP_WORD_CHAR
)

// marshalWrapMode extracts a GtkWrapMode enum from a GValue.
func marshalWrapMode(ptr uintptr) (interface{}, error) {
	return WrapMode(C.g_value_get_enum((*C.GValue)(unsafe.Pointer(ptr)))), nil
}
/*
* Init and main event loop
*/
/*
Init() is a wrapper around gtk_init() and must be called before any
other GTK calls and is used to initialize everything necessary.

In addition to setting up GTK for usage, a pointer to a slice of
strings may be passed in to parse standard GTK command line arguments.
args will be modified to remove any flags that were handled.
Alternatively, nil may be passed in to not perform any command line
parsing.
*/
func Init(args *[]string) {
	if args != nil {
		argc := C.int(len(*args))
		// Copy each Go arg into a C string for gtk_init.
		argv := make([]*C.char, argc)
		for i, arg := range *args {
			argv[i] = C.CString(arg)
		}
		// gtk_init may rewrite argc and the argv pointer array to strip
		// flags it recognized.
		C.gtk_init((*C.int)(unsafe.Pointer(&argc)),
			(***C.char)(unsafe.Pointer(&argv)))
		// Copy the surviving args back out and free their C strings.
		// NOTE(review): only the first (updated) argc strings are freed
		// here; C strings for args consumed by gtk_init appear to leak —
		// confirm whether GTK retains them before changing this.
		unhandled := make([]string, argc)
		for i := 0; i < int(argc); i++ {
			unhandled[i] = C.GoString(argv[i])
			C.free(unsafe.Pointer(argv[i]))
		}
		*args = unhandled
	} else {
		C.gtk_init(nil, nil)
	}
}
// Main() is a wrapper around gtk_main() and runs the GTK main loop,
// blocking until MainQuit() is called.
func Main() {
	C.gtk_main()
}
// MainQuit() is a wrapper around gtk_main_quit() and is used to terminate
// the GTK main loop (started by Main()).
func MainQuit() {
	C.gtk_main_quit()
}
/*
 * GtkAboutDialog
 */

// AboutDialog is a representation of GTK's GtkAboutDialog.
// It embeds Dialog and therefore inherits the full Dialog/Window/Widget API.
type AboutDialog struct {
	Dialog
}
// native returns a pointer to the underlying GtkAboutDialog,
// or nil if the receiver or its GObject is nil.
func (v *AboutDialog) native() *C.GtkAboutDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkAboutDialog(unsafe.Pointer(v.GObject))
}
// marshalAboutDialog converts a GValue holding a GtkAboutDialog into
// the Go binding type.
func marshalAboutDialog(ptr uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(ptr)))
	return wrapAboutDialog(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}
// wrapAboutDialog builds the embedded-struct chain for an AboutDialog
// around an existing glib.Object.
func wrapAboutDialog(obj *glib.Object) *AboutDialog {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &AboutDialog{Dialog{Window{Bin{Container{widget}}}}}
}
// AboutDialogNew is a wrapper around gtk_about_dialog_new().
// It returns nilPtrErr if GTK hands back a nil pointer.
func AboutDialogNew() (*AboutDialog, error) {
	c := C.gtk_about_dialog_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	dialog := wrapAboutDialog(obj)
	// Take ownership for Go and free via finalizer when unreferenced.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return dialog, nil
}
// GetComments is a wrapper around gtk_about_dialog_get_comments().
func (v *AboutDialog) GetComments() string {
	return C.GoString((*C.char)(C.gtk_about_dialog_get_comments(v.native())))
}
// SetComments is a wrapper around gtk_about_dialog_set_comments().
func (v *AboutDialog) SetComments(comments string) {
	cs := C.CString(comments)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_about_dialog_set_comments(v.native(), (*C.gchar)(cs))
}
// GetCopyright is a wrapper around gtk_about_dialog_get_copyright().
func (v *AboutDialog) GetCopyright() string {
	return C.GoString((*C.char)(C.gtk_about_dialog_get_copyright(v.native())))
}
// SetCopyright is a wrapper around gtk_about_dialog_set_copyright().
func (v *AboutDialog) SetCopyright(copyright string) {
	cs := C.CString(copyright)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_about_dialog_set_copyright(v.native(), (*C.gchar)(cs))
}
// GetLicense is a wrapper around gtk_about_dialog_get_license().
func (v *AboutDialog) GetLicense() string {
	return C.GoString((*C.char)(C.gtk_about_dialog_get_license(v.native())))
}
// SetLicense is a wrapper around gtk_about_dialog_set_license().
func (v *AboutDialog) SetLicense(license string) {
cstr := C.CString(license)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_license(v.native(), (*C.gchar)(cstr))
}
// GetLicenseType is a wrapper around gtk_about_dialog_get_license_type().
func (v *AboutDialog) GetLicenseType() License {
c := C.gtk_about_dialog_get_license_type(v.native())
return License(c)
}
// SetLicenseType is a wrapper around gtk_about_dialog_set_license_type().
func (v *AboutDialog) SetLicenseType(license License) {
C.gtk_about_dialog_set_license_type(v.native(), C.GtkLicense(license))
}
// GetLogoIconName is a wrapper around gtk_about_dialog_get_logo_icon_name().
func (v *AboutDialog) GetLogoIconName() string {
c := C.gtk_about_dialog_get_logo_icon_name(v.native())
return C.GoString((*C.char)(c))
}
// SetLogoIconName is a wrapper around gtk_about_dialog_set_logo_icon_name().
func (v *AboutDialog) SetLogoIconName(name string) {
cstr := C.CString(name)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_logo_icon_name(v.native(), (*C.gchar)(cstr))
}
// GetProgramName is a wrapper around gtk_about_dialog_get_program_name().
func (v *AboutDialog) GetProgramName() string {
c := C.gtk_about_dialog_get_program_name(v.native())
return C.GoString((*C.char)(c))
}
// SetProgramName is a wrapper around gtk_about_dialog_set_program_name().
func (v *AboutDialog) SetProgramName(name string) {
cstr := C.CString(name)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_program_name(v.native(), (*C.gchar)(cstr))
}
// GetTranslatorCredits is a wrapper around gtk_about_dialog_get_translator_credits().
func (v *AboutDialog) GetTranslatorCredits() string {
c := C.gtk_about_dialog_get_translator_credits(v.native())
return C.GoString((*C.char)(c))
}
// SetTranslatorCredits is a wrapper around gtk_about_dialog_set_translator_credits().
func (v *AboutDialog) SetTranslatorCredits(translatorCredits string) {
cstr := C.CString(translatorCredits)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_translator_credits(v.native(), (*C.gchar)(cstr))
}
// GetVersion is a wrapper around gtk_about_dialog_get_version().
func (v *AboutDialog) GetVersion() string {
c := C.gtk_about_dialog_get_version(v.native())
return C.GoString((*C.char)(c))
}
// SetVersion is a wrapper around gtk_about_dialog_set_version().
func (v *AboutDialog) SetVersion(version string) {
cstr := C.CString(version)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_version(v.native(), (*C.gchar)(cstr))
}
// GetWebsite is a wrapper around gtk_about_dialog_get_website().
func (v *AboutDialog) GetWebsite() string {
c := C.gtk_about_dialog_get_website(v.native())
return C.GoString((*C.char)(c))
}
// SetWebsite is a wrapper around gtk_about_dialog_set_website().
func (v *AboutDialog) SetWebsite(website string) {
cstr := C.CString(website)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_website(v.native(), (*C.gchar)(cstr))
}
// GetWebsiteLabel is a wrapper around gtk_about_dialog_get_website_label().
func (v *AboutDialog) GetWebsiteLabel() string {
c := C.gtk_about_dialog_get_website_label(v.native())
return C.GoString((*C.char)(c))
}
// SetWebsiteLabel is a wrapper around gtk_about_dialog_set_website_label().
func (v *AboutDialog) SetWebsiteLabel(websiteLabel string) {
cstr := C.CString(websiteLabel)
defer C.free(unsafe.Pointer(cstr))
C.gtk_about_dialog_set_website_label(v.native(), (*C.gchar)(cstr))
}
// GetWrapLicense is a wrapper around gtk_about_dialog_get_wrap_license().
func (v *AboutDialog) GetWrapLicense() bool {
return gobool(C.gtk_about_dialog_get_wrap_license(v.native()))
}
// SetWrapLicense is a wrapper around gtk_about_dialog_set_wrap_license().
func (v *AboutDialog) SetWrapLicense(wrapLicense bool) {
C.gtk_about_dialog_set_wrap_license(v.native(), gbool(wrapLicense))
}
/*
* GtkAdjustment
*/
// Adjustment is a representation of GTK's GtkAdjustment.
type Adjustment struct {
	glib.InitiallyUnowned
}

// native returns a pointer to the underlying GtkAdjustment.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Adjustment) native() *C.GtkAdjustment {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkAdjustment(p)
}

// marshalAdjustment converts a raw pointer to a GValue holding a
// GtkAdjustment into a wrapped *Adjustment.
func marshalAdjustment(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapAdjustment(obj), nil
}

// wrapAdjustment builds the Go wrapper around obj.
func wrapAdjustment(obj *glib.Object) *Adjustment {
	return &Adjustment{glib.InitiallyUnowned{obj}}
}
/*
* GtkAlignment
*/
// Alignment is a representation of GTK's GtkAlignment.
type Alignment struct {
	Bin
}

// native returns a pointer to the underlying GtkAlignment.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Alignment) native() *C.GtkAlignment {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkAlignment(p)
}

// marshalAlignment converts a raw pointer to a GValue holding a
// GtkAlignment into a wrapped *Alignment.
func marshalAlignment(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapAlignment(obj), nil
}

// wrapAlignment builds the full Go wrapper hierarchy around obj.
func wrapAlignment(obj *glib.Object) *Alignment {
	return &Alignment{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// AlignmentNew is a wrapper around gtk_alignment_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func AlignmentNew(xalign, yalign, xscale, yscale float32) (*Alignment, error) {
	c := C.gtk_alignment_new(C.gfloat(xalign), C.gfloat(yalign), C.gfloat(xscale),
		C.gfloat(yscale))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAlignment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}

// Set is a wrapper around gtk_alignment_set().
func (v *Alignment) Set(xalign, yalign, xscale, yscale float32) {
	C.gtk_alignment_set(v.native(), C.gfloat(xalign), C.gfloat(yalign),
		C.gfloat(xscale), C.gfloat(yscale))
}

// GetPadding is a wrapper around gtk_alignment_get_padding().
func (v *Alignment) GetPadding() (top, bottom, left, right uint) {
	var ctop, cbottom, cleft, cright C.guint
	C.gtk_alignment_get_padding(v.native(), &ctop, &cbottom, &cleft,
		&cright)
	return uint(ctop), uint(cbottom), uint(cleft), uint(cright)
}

// SetPadding is a wrapper around gtk_alignment_set_padding().
func (v *Alignment) SetPadding(top, bottom, left, right uint) {
	C.gtk_alignment_set_padding(v.native(), C.guint(top), C.guint(bottom),
		C.guint(left), C.guint(right))
}
/*
* GtkArrow
*/
// Arrow is a representation of GTK's GtkArrow.
type Arrow struct {
	Misc
}

// native returns a pointer to the underlying GtkArrow.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Arrow) native() *C.GtkArrow {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkArrow(p)
}

// marshalArrow converts a raw pointer to a GValue holding a GtkArrow
// into a wrapped *Arrow.
func marshalArrow(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapArrow(obj), nil
}

// wrapArrow builds the full Go wrapper hierarchy around obj.
func wrapArrow(obj *glib.Object) *Arrow {
	return &Arrow{Misc{Widget{glib.InitiallyUnowned{obj}}}}
}

// ArrowNew is a wrapper around gtk_arrow_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func ArrowNew(arrowType ArrowType, shadowType ShadowType) (*Arrow, error) {
	c := C.gtk_arrow_new(C.GtkArrowType(arrowType),
		C.GtkShadowType(shadowType))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapArrow(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}

// Set is a wrapper around gtk_arrow_set().
func (v *Arrow) Set(arrowType ArrowType, shadowType ShadowType) {
	C.gtk_arrow_set(v.native(), C.GtkArrowType(arrowType), C.GtkShadowType(shadowType))
}
/*
* GtkAssistant
*/
// Assistant is a representation of GTK's GtkAssistant.
type Assistant struct {
	Window
}

// native returns a pointer to the underlying GtkAssistant.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Assistant) native() *C.GtkAssistant {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkAssistant(p)
}

// marshalAssistant converts a raw pointer to a GValue holding a
// GtkAssistant into a wrapped *Assistant.
func marshalAssistant(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapAssistant(obj), nil
}

// wrapAssistant builds the full Go wrapper hierarchy around obj.
func wrapAssistant(obj *glib.Object) *Assistant {
	return &Assistant{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}
}

// AssistantNew is a wrapper around gtk_assistant_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func AssistantNew() (*Assistant, error) {
	c := C.gtk_assistant_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAssistant(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}

// GetCurrentPage is a wrapper around gtk_assistant_get_current_page().
func (v *Assistant) GetCurrentPage() int {
	c := C.gtk_assistant_get_current_page(v.native())
	return int(c)
}

// SetCurrentPage is a wrapper around gtk_assistant_set_current_page().
func (v *Assistant) SetCurrentPage(pageNum int) {
	C.gtk_assistant_set_current_page(v.native(), C.gint(pageNum))
}

// GetNPages is a wrapper around gtk_assistant_get_n_pages().
func (v *Assistant) GetNPages() int {
	c := C.gtk_assistant_get_n_pages(v.native())
	return int(c)
}

// GetNthPage is a wrapper around gtk_assistant_get_nth_page().
// It returns nil if pageNum is out of bounds. The returned widget is
// additionally referenced and unreffed by a finalizer.
func (v *Assistant) GetNthPage(pageNum int) *Widget {
	c := C.gtk_assistant_get_nth_page(v.native(), C.gint(pageNum))
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}

// PrependPage is a wrapper around gtk_assistant_prepend_page().
func (v *Assistant) PrependPage(page IWidget) int {
	c := C.gtk_assistant_prepend_page(v.native(), page.toWidget())
	return int(c)
}

// AppendPage is a wrapper around gtk_assistant_append_page().
func (v *Assistant) AppendPage(page IWidget) int {
	c := C.gtk_assistant_append_page(v.native(), page.toWidget())
	return int(c)
}

// InsertPage is a wrapper around gtk_assistant_insert_page().
func (v *Assistant) InsertPage(page IWidget, position int) int {
	c := C.gtk_assistant_insert_page(v.native(), page.toWidget(),
		C.gint(position))
	return int(c)
}

// RemovePage is a wrapper around gtk_assistant_remove_page().
func (v *Assistant) RemovePage(pageNum int) {
	C.gtk_assistant_remove_page(v.native(), C.gint(pageNum))
}

// TODO: gtk_assistant_set_forward_page_func

// SetPageType is a wrapper around gtk_assistant_set_page_type().
func (v *Assistant) SetPageType(page IWidget, ptype AssistantPageType) {
	C.gtk_assistant_set_page_type(v.native(), page.toWidget(),
		C.GtkAssistantPageType(ptype))
}

// GetPageType is a wrapper around gtk_assistant_get_page_type().
func (v *Assistant) GetPageType(page IWidget) AssistantPageType {
	c := C.gtk_assistant_get_page_type(v.native(), page.toWidget())
	return AssistantPageType(c)
}

// SetPageTitle is a wrapper around gtk_assistant_set_page_title().
func (v *Assistant) SetPageTitle(page IWidget, title string) {
	cstr := C.CString(title)
	defer C.free(unsafe.Pointer(cstr)) // GTK copies the string; free our copy
	C.gtk_assistant_set_page_title(v.native(), page.toWidget(),
		(*C.gchar)(cstr))
}

// GetPageTitle is a wrapper around gtk_assistant_get_page_title().
func (v *Assistant) GetPageTitle(page IWidget) string {
	c := C.gtk_assistant_get_page_title(v.native(), page.toWidget())
	return C.GoString((*C.char)(c))
}

// SetPageComplete is a wrapper around gtk_assistant_set_page_complete().
func (v *Assistant) SetPageComplete(page IWidget, complete bool) {
	C.gtk_assistant_set_page_complete(v.native(), page.toWidget(),
		gbool(complete))
}

// GetPageComplete is a wrapper around gtk_assistant_get_page_complete().
func (v *Assistant) GetPageComplete(page IWidget) bool {
	c := C.gtk_assistant_get_page_complete(v.native(), page.toWidget())
	return gobool(c)
}

// AddActionWidget is a wrapper around gtk_assistant_add_action_widget().
func (v *Assistant) AddActionWidget(child IWidget) {
	C.gtk_assistant_add_action_widget(v.native(), child.toWidget())
}

// RemoveActionWidget is a wrapper around gtk_assistant_remove_action_widget().
func (v *Assistant) RemoveActionWidget(child IWidget) {
	C.gtk_assistant_remove_action_widget(v.native(), child.toWidget())
}

// UpdateButtonsState is a wrapper around gtk_assistant_update_buttons_state().
func (v *Assistant) UpdateButtonsState() {
	C.gtk_assistant_update_buttons_state(v.native())
}

// Commit is a wrapper around gtk_assistant_commit().
func (v *Assistant) Commit() {
	C.gtk_assistant_commit(v.native())
}

// NextPage is a wrapper around gtk_assistant_next_page().
func (v *Assistant) NextPage() {
	C.gtk_assistant_next_page(v.native())
}

// PreviousPage is a wrapper around gtk_assistant_previous_page().
func (v *Assistant) PreviousPage() {
	C.gtk_assistant_previous_page(v.native())
}
/*
* GtkBin
*/
// Bin is a representation of GTK's GtkBin.
type Bin struct {
	Container
}

// native returns a pointer to the underlying GtkBin.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Bin) native() *C.GtkBin {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkBin(p)
}

// marshalBin converts a raw pointer to a GValue holding a GtkBin into a
// wrapped *Bin.
func marshalBin(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapBin(obj), nil
}

// wrapBin builds the full Go wrapper hierarchy around obj.
func wrapBin(obj *glib.Object) *Bin {
	return &Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// GetChild is a wrapper around gtk_bin_get_child(). It returns nilPtrErr
// if the bin has no child. The returned widget is additionally referenced
// and unreffed by a finalizer.
func (v *Bin) GetChild() (*Widget, error) {
	c := C.gtk_bin_get_child(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
/*
* GtkBuilder
*/
// Builder is a representation of GTK's GtkBuilder.
type Builder struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkBuilder.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (b *Builder) native() *C.GtkBuilder {
	if b == nil || b.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(b.GObject)
	return C.toGtkBuilder(p)
}

// marshalBuilder converts a raw pointer to a GValue holding a GtkBuilder
// into a wrapped *Builder.
func marshalBuilder(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return &Builder{obj}, nil
}

// BuilderNew is a wrapper around gtk_builder_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func BuilderNew() (*Builder, error) {
	c := C.gtk_builder_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := &Builder{obj}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// AddFromFile is a wrapper around gtk_builder_add_from_file().
// On failure the GError message is returned as a Go error and the GError
// itself is freed.
func (b *Builder) AddFromFile(filename string) error {
	cstr := C.CString(filename)
	defer C.free(unsafe.Pointer(cstr))
	var err *C.GError = nil
	res := C.gtk_builder_add_from_file(b.native(), (*C.gchar)(cstr), &err)
	if res == 0 { // gboolean FALSE means failure; err is set
		defer C.g_error_free(err)
		return errors.New(C.GoString((*C.char)(C.error_get_message(err))))
	}
	return nil
}

// AddFromResource is a wrapper around gtk_builder_add_from_resource().
// On failure the GError message is returned as a Go error and the GError
// itself is freed.
func (b *Builder) AddFromResource(path string) error {
	cstr := C.CString(path)
	defer C.free(unsafe.Pointer(cstr))
	var err *C.GError = nil
	res := C.gtk_builder_add_from_resource(b.native(), (*C.gchar)(cstr), &err)
	if res == 0 {
		defer C.g_error_free(err)
		return errors.New(C.GoString((*C.char)(C.error_get_message(err))))
	}
	return nil
}

// AddFromString is a wrapper around gtk_builder_add_from_string().
// On failure the GError message is returned as a Go error and the GError
// itself is freed.
func (b *Builder) AddFromString(str string) error {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	length := (C.gsize)(len(str)) // byte length, matching the C string copy
	var err *C.GError = nil
	res := C.gtk_builder_add_from_string(b.native(), (*C.gchar)(cstr), length, &err)
	if res == 0 {
		defer C.g_error_free(err)
		return errors.New(C.GoString((*C.char)(C.error_get_message(err))))
	}
	return nil
}

// GetObject is a wrapper around gtk_builder_get_object(). The returned result
// is an IObject, so it will need to be type-asserted to the appropriate type before
// being used. For example, to get an object and type assert it as a window:
//
//   obj, err := builder.GetObject("window")
//   if err != nil {
//       // object not found
//       return
//   }
//   if w, ok := obj.(*gtk.Window); ok {
//       // do stuff with w here
//   } else {
//       // not a *gtk.Window
//   }
//
func (b *Builder) GetObject(name string) (glib.IObject, error) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_builder_get_object(b.native(), (*C.gchar)(cstr))
	if c == nil {
		return nil, errors.New("object '" + name + "' not found")
	}
	// cast maps the GObject to the concrete wrapper type for its GType.
	obj, err := cast(c)
	if err != nil {
		return nil, err
	}
	return obj, nil
}
/*
* GtkButton
*/
// Button is a representation of GTK's GtkButton.
type Button struct {
	Bin
}

// native returns a pointer to the underlying GtkButton.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Button) native() *C.GtkButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkButton(p)
}

// marshalButton converts a raw pointer to a GValue holding a GtkButton
// into a wrapped *Button.
func marshalButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapButton(obj), nil
}

// wrapButton builds the full Go wrapper hierarchy around obj.
func wrapButton(obj *glib.Object) *Button {
	return &Button{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// ButtonNew() is a wrapper around gtk_button_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func ButtonNew() (*Button, error) {
	c := C.gtk_button_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// ButtonNewWithLabel() is a wrapper around gtk_button_new_with_label().
func ButtonNewWithLabel(label string) (*Button, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_button_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// ButtonNewWithMnemonic() is a wrapper around gtk_button_new_with_mnemonic().
func ButtonNewWithMnemonic(label string) (*Button, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_button_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// Clicked() is a wrapper around gtk_button_clicked().
func (v *Button) Clicked() {
	C.gtk_button_clicked(v.native())
}

// SetRelief() is a wrapper around gtk_button_set_relief().
func (v *Button) SetRelief(newStyle ReliefStyle) {
	C.gtk_button_set_relief(v.native(), C.GtkReliefStyle(newStyle))
}

// GetRelief() is a wrapper around gtk_button_get_relief().
func (v *Button) GetRelief() ReliefStyle {
	c := C.gtk_button_get_relief(v.native())
	return ReliefStyle(c)
}

// SetLabel() is a wrapper around gtk_button_set_label().
func (v *Button) SetLabel(label string) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr)) // GTK copies the string; free our copy
	C.gtk_button_set_label(v.native(), (*C.gchar)(cstr))
}

// GetLabel() is a wrapper around gtk_button_get_label().
// It returns nilPtrErr if the button has no label.
func (v *Button) GetLabel() (string, error) {
	c := C.gtk_button_get_label(v.native())
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}

// SetUseUnderline() is a wrapper around gtk_button_set_use_underline().
func (v *Button) SetUseUnderline(useUnderline bool) {
	C.gtk_button_set_use_underline(v.native(), gbool(useUnderline))
}

// GetUseUnderline() is a wrapper around gtk_button_get_use_underline().
func (v *Button) GetUseUnderline() bool {
	c := C.gtk_button_get_use_underline(v.native())
	return gobool(c)
}

// SetFocusOnClick() is a wrapper around gtk_button_set_focus_on_click().
func (v *Button) SetFocusOnClick(focusOnClick bool) {
	C.gtk_button_set_focus_on_click(v.native(), gbool(focusOnClick))
}

// GetFocusOnClick() is a wrapper around gtk_button_get_focus_on_click().
func (v *Button) GetFocusOnClick() bool {
	c := C.gtk_button_get_focus_on_click(v.native())
	return gobool(c)
}

// SetAlignment() is a wrapper around gtk_button_set_alignment().
func (v *Button) SetAlignment(xalign, yalign float32) {
	C.gtk_button_set_alignment(v.native(), (C.gfloat)(xalign),
		(C.gfloat)(yalign))
}

// GetAlignment() is a wrapper around gtk_button_get_alignment().
func (v *Button) GetAlignment() (xalign, yalign float32) {
	var x, y C.gfloat
	C.gtk_button_get_alignment(v.native(), &x, &y)
	return float32(x), float32(y)
}

// SetImage() is a wrapper around gtk_button_set_image().
func (v *Button) SetImage(image IWidget) {
	C.gtk_button_set_image(v.native(), image.toWidget())
}

// GetImage() is a wrapper around gtk_button_get_image().
// It returns nilPtrErr if no image is set. The returned widget is
// additionally referenced and unreffed by a finalizer.
func (v *Button) GetImage() (*Widget, error) {
	c := C.gtk_button_get_image(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// SetImagePosition() is a wrapper around gtk_button_set_image_position().
func (v *Button) SetImagePosition(position PositionType) {
	C.gtk_button_set_image_position(v.native(), C.GtkPositionType(position))
}

// GetImagePosition() is a wrapper around gtk_button_get_image_position().
func (v *Button) GetImagePosition() PositionType {
	c := C.gtk_button_get_image_position(v.native())
	return PositionType(c)
}

// SetAlwaysShowImage() is a wrapper around gtk_button_set_always_show_image().
func (v *Button) SetAlwaysShowImage(alwaysShow bool) {
	C.gtk_button_set_always_show_image(v.native(), gbool(alwaysShow))
}

// GetAlwaysShowImage() is a wrapper around gtk_button_get_always_show_image().
func (v *Button) GetAlwaysShowImage() bool {
	c := C.gtk_button_get_always_show_image(v.native())
	return gobool(c)
}

// GetEventWindow() is a wrapper around gtk_button_get_event_window().
// It returns nilPtrErr if the button has no event window. An extra
// reference is taken on the window (it is owned by the widget) and
// released by a finalizer.
func (v *Button) GetEventWindow() (*gdk.Window, error) {
	c := C.gtk_button_get_event_window(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := &gdk.Window{obj}
	w.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
/*
* GtkBox
*/
// Box is a representation of GTK's GtkBox.
type Box struct {
	Container
}

// native returns a pointer to the underlying GtkBox.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Box) native() *C.GtkBox {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkBox(p)
}

// marshalBox converts a raw pointer to a GValue holding a GtkBox into a
// wrapped *Box.
func marshalBox(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapBox(obj), nil
}

// wrapBox builds the full Go wrapper hierarchy around obj.
func wrapBox(obj *glib.Object) *Box {
	return &Box{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// BoxNew() is a wrapper around gtk_box_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func BoxNew(orientation Orientation, spacing int) (*Box, error) {
	c := C.gtk_box_new(C.GtkOrientation(orientation), C.gint(spacing))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := wrapBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// PackStart() is a wrapper around gtk_box_pack_start().
func (v *Box) PackStart(child IWidget, expand, fill bool, padding uint) {
	C.gtk_box_pack_start(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding))
}

// PackEnd() is a wrapper around gtk_box_pack_end().
func (v *Box) PackEnd(child IWidget, expand, fill bool, padding uint) {
	C.gtk_box_pack_end(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding))
}

// GetHomogeneous() is a wrapper around gtk_box_get_homogeneous().
func (v *Box) GetHomogeneous() bool {
	c := C.gtk_box_get_homogeneous(v.native())
	return gobool(c)
}

// SetHomogeneous() is a wrapper around gtk_box_set_homogeneous().
func (v *Box) SetHomogeneous(homogeneous bool) {
	C.gtk_box_set_homogeneous(v.native(), gbool(homogeneous))
}

// GetSpacing() is a wrapper around gtk_box_get_spacing().
func (v *Box) GetSpacing() int {
	c := C.gtk_box_get_spacing(v.native())
	return int(c)
}

// SetSpacing() is a wrapper around gtk_box_set_spacing().
func (v *Box) SetSpacing(spacing int) {
	C.gtk_box_set_spacing(v.native(), C.gint(spacing))
}

// ReorderChild() is a wrapper around gtk_box_reorder_child().
func (v *Box) ReorderChild(child IWidget, position int) {
	C.gtk_box_reorder_child(v.native(), child.toWidget(), C.gint(position))
}

// QueryChildPacking() is a wrapper around gtk_box_query_child_packing().
func (v *Box) QueryChildPacking(child IWidget) (expand, fill bool, padding uint, packType PackType) {
	var cexpand, cfill C.gboolean
	var cpadding C.guint
	var cpackType C.GtkPackType
	C.gtk_box_query_child_packing(v.native(), child.toWidget(), &cexpand,
		&cfill, &cpadding, &cpackType)
	return gobool(cexpand), gobool(cfill), uint(cpadding), PackType(cpackType)
}

// SetChildPacking() is a wrapper around gtk_box_set_child_packing().
func (v *Box) SetChildPacking(child IWidget, expand, fill bool, padding uint, packType PackType) {
	C.gtk_box_set_child_packing(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding), C.GtkPackType(packType))
}
/*
* GtkCalendar
*/
// Calendar is a representation of GTK's GtkCalendar.
type Calendar struct {
	Widget
}

// native returns a pointer to the underlying GtkCalendar.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *Calendar) native() *C.GtkCalendar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCalendar(p)
}

// marshalCalendar converts a raw pointer to a GValue holding a
// GtkCalendar into a wrapped *Calendar.
func marshalCalendar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCalendar(obj), nil
}

// wrapCalendar builds the Go wrapper hierarchy around obj.
func wrapCalendar(obj *glib.Object) *Calendar {
	return &Calendar{Widget{glib.InitiallyUnowned{obj}}}
}

// CalendarNew is a wrapper around gtk_calendar_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func CalendarNew() (*Calendar, error) {
	c := C.gtk_calendar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapCalendar(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}

// SelectMonth is a wrapper around gtk_calendar_select_month().
func (v *Calendar) SelectMonth(month, year uint) {
	C.gtk_calendar_select_month(v.native(), C.guint(month), C.guint(year))
}

// SelectDay is a wrapper around gtk_calendar_select_day().
func (v *Calendar) SelectDay(day uint) {
	C.gtk_calendar_select_day(v.native(), C.guint(day))
}

// MarkDay is a wrapper around gtk_calendar_mark_day().
func (v *Calendar) MarkDay(day uint) {
	C.gtk_calendar_mark_day(v.native(), C.guint(day))
}

// UnmarkDay is a wrapper around gtk_calendar_unmark_day().
func (v *Calendar) UnmarkDay(day uint) {
	C.gtk_calendar_unmark_day(v.native(), C.guint(day))
}

// GetDayIsMarked is a wrapper around gtk_calendar_get_day_is_marked().
func (v *Calendar) GetDayIsMarked(day uint) bool {
	c := C.gtk_calendar_get_day_is_marked(v.native(), C.guint(day))
	return gobool(c)
}

// ClearMarks is a wrapper around gtk_calendar_clear_marks().
func (v *Calendar) ClearMarks() {
	C.gtk_calendar_clear_marks(v.native())
}

// GetDisplayOptions is a wrapper around gtk_calendar_get_display_options().
func (v *Calendar) GetDisplayOptions() CalendarDisplayOptions {
	c := C.gtk_calendar_get_display_options(v.native())
	return CalendarDisplayOptions(c)
}

// SetDisplayOptions is a wrapper around gtk_calendar_set_display_options().
func (v *Calendar) SetDisplayOptions(flags CalendarDisplayOptions) {
	C.gtk_calendar_set_display_options(v.native(),
		C.GtkCalendarDisplayOptions(flags))
}

// GetDate is a wrapper around gtk_calendar_get_date().
func (v *Calendar) GetDate() (year, month, day uint) {
	var cyear, cmonth, cday C.guint
	C.gtk_calendar_get_date(v.native(), &cyear, &cmonth, &cday)
	return uint(cyear), uint(cmonth), uint(cday)
}

// TODO gtk_calendar_set_detail_func

// GetDetailWidthChars is a wrapper around gtk_calendar_get_detail_width_chars().
func (v *Calendar) GetDetailWidthChars() int {
	c := C.gtk_calendar_get_detail_width_chars(v.native())
	return int(c)
}

// SetDetailWidthChars is a wrapper around gtk_calendar_set_detail_width_chars().
func (v *Calendar) SetDetailWidthChars(chars int) {
	C.gtk_calendar_set_detail_width_chars(v.native(), C.gint(chars))
}

// GetDetailHeightRows is a wrapper around gtk_calendar_get_detail_height_rows().
func (v *Calendar) GetDetailHeightRows() int {
	c := C.gtk_calendar_get_detail_height_rows(v.native())
	return int(c)
}

// SetDetailHeightRows is a wrapper around gtk_calendar_set_detail_height_rows().
func (v *Calendar) SetDetailHeightRows(rows int) {
	C.gtk_calendar_set_detail_height_rows(v.native(), C.gint(rows))
}
/*
* GtkCellLayout
*/
// CellLayout is a representation of GTK's GtkCellLayout GInterface.
type CellLayout struct {
	*glib.Object
}

// ICellLayout is an interface type implemented by all structs
// embedding a CellLayout. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkCellLayout.
type ICellLayout interface {
	toCellLayout() *C.GtkCellLayout
}

// native returns a pointer to the underlying GObject as a GtkCellLayout.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *CellLayout) native() *C.GtkCellLayout {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCellLayout(p)
}

// marshalCellLayout converts a raw pointer to a GValue holding a
// GtkCellLayout into a wrapped *CellLayout.
func marshalCellLayout(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCellLayout(obj), nil
}

// wrapCellLayout builds the Go wrapper around obj.
func wrapCellLayout(obj *glib.Object) *CellLayout {
	return &CellLayout{obj}
}

// toCellLayout satisfies ICellLayout; nil-safe.
func (v *CellLayout) toCellLayout() *C.GtkCellLayout {
	if v == nil {
		return nil
	}
	return v.native()
}

// PackStart() is a wrapper around gtk_cell_layout_pack_start().
func (v *CellLayout) PackStart(cell ICellRenderer, expand bool) {
	C.gtk_cell_layout_pack_start(v.native(), cell.toCellRenderer(),
		gbool(expand))
}

// AddAttribute() is a wrapper around gtk_cell_layout_add_attribute().
func (v *CellLayout) AddAttribute(cell ICellRenderer, attribute string, column int) {
	cstr := C.CString(attribute)
	defer C.free(unsafe.Pointer(cstr)) // GTK copies the string; free our copy
	C.gtk_cell_layout_add_attribute(v.native(), cell.toCellRenderer(),
		(*C.gchar)(cstr), C.gint(column))
}
/*
* GtkCellRenderer
*/
// CellRenderer is a representation of GTK's GtkCellRenderer.
type CellRenderer struct {
	glib.InitiallyUnowned
}

// ICellRenderer is an interface type implemented by all structs
// embedding a CellRenderer. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkCellRenderer.
type ICellRenderer interface {
	toCellRenderer() *C.GtkCellRenderer
}

// native returns a pointer to the underlying GtkCellRenderer.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *CellRenderer) native() *C.GtkCellRenderer {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCellRenderer(p)
}

// toCellRenderer satisfies ICellRenderer; nil-safe.
func (v *CellRenderer) toCellRenderer() *C.GtkCellRenderer {
	if v == nil {
		return nil
	}
	return v.native()
}

// marshalCellRenderer converts a raw pointer to a GValue holding a
// GtkCellRenderer into a wrapped *CellRenderer.
func marshalCellRenderer(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCellRenderer(obj), nil
}

// wrapCellRenderer builds the Go wrapper around obj.
func wrapCellRenderer(obj *glib.Object) *CellRenderer {
	return &CellRenderer{glib.InitiallyUnowned{obj}}
}
/*
* GtkCellRendererText
*/
// CellRendererText is a representation of GTK's GtkCellRendererText.
type CellRendererText struct {
	CellRenderer
}

// native returns a pointer to the underlying GtkCellRendererText.
// It is nil-safe: a nil receiver or nil GObject yields nil.
func (v *CellRendererText) native() *C.GtkCellRendererText {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCellRendererText(p)
}

// marshalCellRendererText converts a raw pointer to a GValue holding a
// GtkCellRendererText into a wrapped *CellRendererText.
func marshalCellRendererText(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCellRendererText(obj), nil
}

// wrapCellRendererText builds the Go wrapper hierarchy around obj.
func wrapCellRendererText(obj *glib.Object) *CellRendererText {
	return &CellRendererText{CellRenderer{glib.InitiallyUnowned{obj}}}
}

// CellRendererTextNew is a wrapper around gtk_cell_renderer_text_new().
// The new object's floating reference is sunk and a finalizer unrefs it
// when the Go wrapper is garbage collected.
func CellRendererTextNew() (*CellRendererText, error) {
	c := C.gtk_cell_renderer_text_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	crt := wrapCellRendererText(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return crt, nil
}
/*
 * GtkCellRendererToggle
 */

// CellRendererToggle is a representation of GTK's GtkCellRendererToggle.
type CellRendererToggle struct {
	CellRenderer
}

// native returns a pointer to the underlying GtkCellRendererToggle.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *CellRendererToggle) native() *C.GtkCellRendererToggle {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCellRendererToggle(p)
}

// toCellRenderer satisfies ICellRenderer; safe on a nil receiver.
func (v *CellRendererToggle) toCellRenderer() *C.GtkCellRenderer {
	if v == nil {
		return nil
	}
	return v.CellRenderer.native()
}

// marshalCellRendererToggle converts a GValue (given by its raw
// address) into a *CellRendererToggle.
func marshalCellRendererToggle(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCellRendererToggle(obj), nil
}

// wrapCellRendererToggle wraps an existing glib.Object in a
// *CellRendererToggle.
func wrapCellRendererToggle(obj *glib.Object) *CellRendererToggle {
	return &CellRendererToggle{CellRenderer{glib.InitiallyUnowned{obj}}}
}

// CellRendererToggleNew is a wrapper around gtk_cell_renderer_toggle_new().
// The floating reference is sunk and a finalizer unrefs the object when
// the Go wrapper is garbage collected.
func CellRendererToggleNew() (*CellRendererToggle, error) {
	c := C.gtk_cell_renderer_toggle_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	crt := wrapCellRendererToggle(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return crt, nil
}
// SetRadio is a wrapper around gtk_cell_renderer_toggle_set_radio().
func (v *CellRendererToggle) SetRadio(set bool) {
	C.gtk_cell_renderer_toggle_set_radio(v.native(), gbool(set))
}

// GetRadio is a wrapper around gtk_cell_renderer_toggle_get_radio().
func (v *CellRendererToggle) GetRadio() bool {
	c := C.gtk_cell_renderer_toggle_get_radio(v.native())
	return gobool(c)
}

// SetActive is a wrapper around gtk_cell_renderer_toggle_set_active().
func (v *CellRendererToggle) SetActive(active bool) {
	C.gtk_cell_renderer_toggle_set_active(v.native(), gbool(active))
}

// GetActive is a wrapper around gtk_cell_renderer_toggle_get_active().
func (v *CellRendererToggle) GetActive() bool {
	c := C.gtk_cell_renderer_toggle_get_active(v.native())
	return gobool(c)
}

// SetActivatable is a wrapper around
// gtk_cell_renderer_toggle_set_activatable().
func (v *CellRendererToggle) SetActivatable(activatable bool) {
	C.gtk_cell_renderer_toggle_set_activatable(v.native(),
		gbool(activatable))
}

// GetActivatable is a wrapper around
// gtk_cell_renderer_toggle_get_activatable().
func (v *CellRendererToggle) GetActivatable() bool {
	c := C.gtk_cell_renderer_toggle_get_activatable(v.native())
	return gobool(c)
}
/*
 * GtkCheckButton
 */

// CheckButton is a wrapper around GTK's GtkCheckButton.
type CheckButton struct {
	ToggleButton
}

// native returns a pointer to the underlying GtkCheckButton.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *CheckButton) native() *C.GtkCheckButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCheckButton(p)
}

// marshalCheckButton converts a GValue (given by its raw address) into
// a *CheckButton.
func marshalCheckButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCheckButton(obj), nil
}

// wrapCheckButton wraps an existing glib.Object in a *CheckButton,
// building the full embedded widget hierarchy.
func wrapCheckButton(obj *glib.Object) *CheckButton {
	return &CheckButton{ToggleButton{Button{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}}
}

// CheckButtonNew is a wrapper around gtk_check_button_new().
func CheckButtonNew() (*CheckButton, error) {
	c := C.gtk_check_button_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// CheckButtonNewWithLabel is a wrapper around
// gtk_check_button_new_with_label().
func CheckButtonNewWithLabel(label string) (*CheckButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_button_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// CheckButtonNewWithMnemonic is a wrapper around
// gtk_check_button_new_with_mnemonic().
func CheckButtonNewWithMnemonic(label string) (*CheckButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_button_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}
/*
 * GtkCheckMenuItem
 */

// CheckMenuItem is a wrapper around GTK's GtkCheckMenuItem.
type CheckMenuItem struct {
	MenuItem
}

// native returns a pointer to the underlying GtkCheckMenuItem.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *CheckMenuItem) native() *C.GtkCheckMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCheckMenuItem(p)
}

// marshalCheckMenuItem converts a GValue (given by its raw address)
// into a *CheckMenuItem.
func marshalCheckMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCheckMenuItem(obj), nil
}

// wrapCheckMenuItem wraps an existing glib.Object in a *CheckMenuItem,
// building the full embedded widget hierarchy.
func wrapCheckMenuItem(obj *glib.Object) *CheckMenuItem {
	return &CheckMenuItem{MenuItem{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}

// CheckMenuItemNew is a wrapper around gtk_check_menu_item_new().
func CheckMenuItemNew() (*CheckMenuItem, error) {
	c := C.gtk_check_menu_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// CheckMenuItemNewWithLabel is a wrapper around
// gtk_check_menu_item_new_with_label().
func CheckMenuItemNewWithLabel(label string) (*CheckMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_menu_item_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// CheckMenuItemNewWithMnemonic is a wrapper around
// gtk_check_menu_item_new_with_mnemonic().
func CheckMenuItemNewWithMnemonic(label string) (*CheckMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_menu_item_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// GetActive is a wrapper around gtk_check_menu_item_get_active().
func (v *CheckMenuItem) GetActive() bool {
	c := C.gtk_check_menu_item_get_active(v.native())
	return gobool(c)
}

// SetActive is a wrapper around gtk_check_menu_item_set_active().
func (v *CheckMenuItem) SetActive(isActive bool) {
	C.gtk_check_menu_item_set_active(v.native(), gbool(isActive))
}

// Toggled is a wrapper around gtk_check_menu_item_toggled().
func (v *CheckMenuItem) Toggled() {
	C.gtk_check_menu_item_toggled(v.native())
}

// GetInconsistent is a wrapper around gtk_check_menu_item_get_inconsistent().
func (v *CheckMenuItem) GetInconsistent() bool {
	c := C.gtk_check_menu_item_get_inconsistent(v.native())
	return gobool(c)
}

// SetInconsistent is a wrapper around gtk_check_menu_item_set_inconsistent().
func (v *CheckMenuItem) SetInconsistent(setting bool) {
	C.gtk_check_menu_item_set_inconsistent(v.native(), gbool(setting))
}

// SetDrawAsRadio is a wrapper around gtk_check_menu_item_set_draw_as_radio().
func (v *CheckMenuItem) SetDrawAsRadio(drawAsRadio bool) {
	C.gtk_check_menu_item_set_draw_as_radio(v.native(), gbool(drawAsRadio))
}

// GetDrawAsRadio is a wrapper around gtk_check_menu_item_get_draw_as_radio().
func (v *CheckMenuItem) GetDrawAsRadio() bool {
	c := C.gtk_check_menu_item_get_draw_as_radio(v.native())
	return gobool(c)
}
/*
 * GtkClipboard
 */

// Clipboard is a wrapper around GTK's GtkClipboard.
type Clipboard struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkClipboard.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *Clipboard) native() *C.GtkClipboard {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkClipboard(p)
}

// marshalClipboard converts a GValue (given by its raw address) into a
// *Clipboard.
func marshalClipboard(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapClipboard(obj), nil
}

// wrapClipboard wraps an existing glib.Object in a *Clipboard.
func wrapClipboard(obj *glib.Object) *Clipboard {
	return &Clipboard{obj}
}

// ClipboardGet() is a wrapper around gtk_clipboard_get().
// Note: a plain Ref() (not RefSink()) is taken here, unlike the widget
// constructors above — the returned clipboard is not a floating
// reference.
func ClipboardGet(atom gdk.Atom) (*Clipboard, error) {
	c := C.gtk_clipboard_get(C.GdkAtom(unsafe.Pointer(atom)))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := &Clipboard{obj}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ClipboardGetForDisplay() is a wrapper around gtk_clipboard_get_for_display().
func ClipboardGetForDisplay(display *gdk.Display, atom gdk.Atom) (*Clipboard, error) {
	displayPtr := (*C.GdkDisplay)(unsafe.Pointer(display.Native()))
	c := C.gtk_clipboard_get_for_display(displayPtr,
		C.GdkAtom(unsafe.Pointer(atom)))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := &Clipboard{obj}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// SetText() is a wrapper around gtk_clipboard_set_text().
func (v *Clipboard) SetText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_clipboard_set_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(text)))
}
/*
 * GtkComboBox
 */

// ComboBox is a representation of GTK's GtkComboBox.
type ComboBox struct {
	Bin

	// Interfaces
	CellLayout
}

// native returns a pointer to the underlying GtkComboBox.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *ComboBox) native() *C.GtkComboBox {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkComboBox(p)
}

// toCellLayout exposes the GtkCellLayout interface of the combo box;
// safe on a nil receiver.
func (v *ComboBox) toCellLayout() *C.GtkCellLayout {
	if v == nil {
		return nil
	}
	return C.toGtkCellLayout(unsafe.Pointer(v.GObject))
}

// marshalComboBox converts a GValue (given by its raw address) into a
// *ComboBox.
func marshalComboBox(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapComboBox(obj), nil
}

// wrapComboBox wraps an existing glib.Object in a *ComboBox, sharing
// the same object between the Bin hierarchy and the CellLayout
// interface wrapper.
func wrapComboBox(obj *glib.Object) *ComboBox {
	cl := wrapCellLayout(obj)
	return &ComboBox{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}, *cl}
}

// ComboBoxNew() is a wrapper around gtk_combo_box_new().
func ComboBoxNew() (*ComboBox, error) {
	c := C.gtk_combo_box_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ComboBoxNewWithEntry() is a wrapper around gtk_combo_box_new_with_entry().
func ComboBoxNewWithEntry() (*ComboBox, error) {
	c := C.gtk_combo_box_new_with_entry()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ComboBoxNewWithModel() is a wrapper around gtk_combo_box_new_with_model().
func ComboBoxNewWithModel(model ITreeModel) (*ComboBox, error) {
	c := C.gtk_combo_box_new_with_model(model.toTreeModel())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// GetActive() is a wrapper around gtk_combo_box_get_active().
func (v *ComboBox) GetActive() int {
	c := C.gtk_combo_box_get_active(v.native())
	return int(c)
}

// SetActive() is a wrapper around gtk_combo_box_set_active().
func (v *ComboBox) SetActive(index int) {
	C.gtk_combo_box_set_active(v.native(), C.gint(index))
}
/*
 * GtkContainer
 */

// Container is a representation of GTK's GtkContainer.
type Container struct {
	Widget
}

// native returns a pointer to the underlying GtkContainer.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *Container) native() *C.GtkContainer {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkContainer(p)
}

// marshalContainer converts a GValue (given by its raw address) into a
// *Container.
func marshalContainer(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapContainer(obj), nil
}

// wrapContainer wraps an existing glib.Object in a *Container.
func wrapContainer(obj *glib.Object) *Container {
	return &Container{Widget{glib.InitiallyUnowned{obj}}}
}

// Add is a wrapper around gtk_container_add().
func (v *Container) Add(w IWidget) {
	C.gtk_container_add(v.native(), w.toWidget())
}

// Remove is a wrapper around gtk_container_remove().
func (v *Container) Remove(w IWidget) {
	C.gtk_container_remove(v.native(), w.toWidget())
}

// TODO: gtk_container_add_with_properties

// CheckResize is a wrapper around gtk_container_check_resize().
func (v *Container) CheckResize() {
	C.gtk_container_check_resize(v.native())
}

// TODO: gtk_container_foreach
// TODO: gtk_container_get_children
// TODO: gtk_container_get_path_for_child

// SetReallocateRedraws is a wrapper around
// gtk_container_set_reallocate_redraws().
func (v *Container) SetReallocateRedraws(needsRedraws bool) {
	C.gtk_container_set_reallocate_redraws(v.native(), gbool(needsRedraws))
}

// GetFocusChild is a wrapper around gtk_container_get_focus_child().
// Returns nil if no focus child is set.
func (v *Container) GetFocusChild() *Widget {
	c := C.gtk_container_get_focus_child(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}

// SetFocusChild is a wrapper around gtk_container_set_focus_child().
func (v *Container) SetFocusChild(child IWidget) {
	C.gtk_container_set_focus_child(v.native(), child.toWidget())
}

// GetFocusVAdjustment is a wrapper around
// gtk_container_get_focus_vadjustment().  Returns nil if no vertical
// focus adjustment is set.
func (v *Container) GetFocusVAdjustment() *Adjustment {
	c := C.gtk_container_get_focus_vadjustment(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAdjustment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a
}

// SetFocusVAdjustment is a wrapper around
// gtk_container_set_focus_vadjustment().
func (v *Container) SetFocusVAdjustment(adjustment *Adjustment) {
	C.gtk_container_set_focus_vadjustment(v.native(), adjustment.native())
}

// GetFocusHAdjustment is a wrapper around
// gtk_container_get_focus_hadjustment().  Returns nil if no horizontal
// focus adjustment is set.
func (v *Container) GetFocusHAdjustment() *Adjustment {
	c := C.gtk_container_get_focus_hadjustment(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAdjustment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a
}

// SetFocusHAdjustment is a wrapper around
// gtk_container_set_focus_hadjustment().
func (v *Container) SetFocusHAdjustment(adjustment *Adjustment) {
	C.gtk_container_set_focus_hadjustment(v.native(), adjustment.native())
}

// ChildType is a wrapper around gtk_container_child_type().
func (v *Container) ChildType() glib.Type {
	c := C.gtk_container_child_type(v.native())
	return glib.Type(c)
}

// TODO: gtk_container_child_get_valist
// TODO: gtk_container_child_set_valist

// ChildNotify is a wrapper around gtk_container_child_notify().
func (v *Container) ChildNotify(child IWidget, childProperty string) {
	cstr := C.CString(childProperty)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_container_child_notify(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}

// TODO: gtk_container_forall

// GetBorderWidth is a wrapper around gtk_container_get_border_width().
func (v *Container) GetBorderWidth() uint {
	c := C.gtk_container_get_border_width(v.native())
	return uint(c)
}

// SetBorderWidth is a wrapper around gtk_container_set_border_width().
func (v *Container) SetBorderWidth(borderWidth uint) {
	C.gtk_container_set_border_width(v.native(), C.guint(borderWidth))
}

// PropagateDraw is a wrapper around gtk_container_propagate_draw().
func (v *Container) PropagateDraw(child IWidget, cr *cairo.Context) {
	context := (*C.cairo_t)(unsafe.Pointer(cr.Native()))
	C.gtk_container_propagate_draw(v.native(), child.toWidget(), context)
}
// GetFocusChain is a wrapper around gtk_container_get_focus_chain().
// It returns the widgets in the container's focus chain and a bool
// reporting whether an explicit focus chain has been set.
//
// Fix: the previous loop condition read wlist.Data without first
// checking wlist for nil, which panicked both when no focus chain was
// set (the C call leaves the out-list NULL) and when iteration ran off
// the end of the list (Next == nil).  The wlist != nil guard makes the
// walk safe in both cases.
func (v *Container) GetFocusChain() ([]*Widget, bool) {
	var cwlist *C.struct__GList
	c := C.gtk_container_get_focus_chain(v.native(), &cwlist)

	var widgets []*Widget
	wlist := (*glib.List)(unsafe.Pointer(cwlist))
	for ; wlist != nil && wlist.Data != uintptr(unsafe.Pointer(nil)); wlist = wlist.Next {
		obj := &glib.Object{glib.ToGObject(unsafe.Pointer(wlist.Data))}
		w := wrapWidget(obj)
		obj.RefSink()
		runtime.SetFinalizer(obj, (*glib.Object).Unref)
		widgets = append(widgets, w)
	}
	return widgets, gobool(c)
}
// SetFocusChain is a wrapper around gtk_container_set_focus_chain().
// A GList is built from the widgets' native pointers and handed to GTK.
// NOTE(review): gtk_container_set_focus_chain copies the list, so the
// list built here appears to be leaked — confirm whether glib.List
// ownership is reclaimed elsewhere.
func (v *Container) SetFocusChain(focusableWidgets []IWidget) {
	var list *glib.List
	for _, w := range focusableWidgets {
		data := uintptr(unsafe.Pointer(w.toWidget()))
		list = list.Append(data)
	}
	glist := (*C.GList)(unsafe.Pointer(list))
	C.gtk_container_set_focus_chain(v.native(), glist)
}
/*
 * GtkDialog
 */

// Dialog is a representation of GTK's GtkDialog.
type Dialog struct {
	Window
}

// native returns a pointer to the underlying GtkDialog.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *Dialog) native() *C.GtkDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkDialog(p)
}

// marshalDialog converts a GValue (given by its raw address) into a
// *Dialog.
func marshalDialog(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapDialog(obj), nil
}

// wrapDialog wraps an existing glib.Object in a *Dialog, building the
// full embedded widget hierarchy.
func wrapDialog(obj *glib.Object) *Dialog {
	return &Dialog{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}
}

// DialogNew() is a wrapper around gtk_dialog_new().
func DialogNew() (*Dialog, error) {
	c := C.gtk_dialog_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	d := wrapDialog(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return d, nil
}

// Run() is a wrapper around gtk_dialog_run().  Blocks in a recursive
// main loop until the dialog emits a response.
func (v *Dialog) Run() int {
	c := C.gtk_dialog_run(v.native())
	return int(c)
}

// Response() is a wrapper around gtk_dialog_response().
func (v *Dialog) Response(response ResponseType) {
	C.gtk_dialog_response(v.native(), C.gint(response))
}

// AddButton() is a wrapper around gtk_dialog_add_button(). text may
// be either the literal button text, or if using GTK 3.8 or earlier, a
// Stock type converted to a string.
func (v *Dialog) AddButton(text string, id ResponseType) (*Button, error) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_dialog_add_button(v.native(), (*C.gchar)(cstr), C.gint(id))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := &Button{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// AddActionWidget() is a wrapper around gtk_dialog_add_action_widget().
func (v *Dialog) AddActionWidget(child IWidget, id ResponseType) {
	C.gtk_dialog_add_action_widget(v.native(), child.toWidget(), C.gint(id))
}

// SetDefaultResponse() is a wrapper around gtk_dialog_set_default_response().
func (v *Dialog) SetDefaultResponse(id ResponseType) {
	C.gtk_dialog_set_default_response(v.native(), C.gint(id))
}

// SetResponseSensitive() is a wrapper around
// gtk_dialog_set_response_sensitive().
func (v *Dialog) SetResponseSensitive(id ResponseType, setting bool) {
	C.gtk_dialog_set_response_sensitive(v.native(), C.gint(id),
		gbool(setting))
}

// GetResponseForWidget() is a wrapper around
// gtk_dialog_get_response_for_widget().
func (v *Dialog) GetResponseForWidget(widget IWidget) ResponseType {
	c := C.gtk_dialog_get_response_for_widget(v.native(), widget.toWidget())
	return ResponseType(c)
}

// GetWidgetForResponse() is a wrapper around
// gtk_dialog_get_widget_for_response().
func (v *Dialog) GetWidgetForResponse(id ResponseType) (*Widget, error) {
	c := C.gtk_dialog_get_widget_for_response(v.native(), C.gint(id))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// GetContentArea() is a wrapper around gtk_dialog_get_content_area().
func (v *Dialog) GetContentArea() (*Box, error) {
	c := C.gtk_dialog_get_content_area(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := &Box{Container{Widget{glib.InitiallyUnowned{obj}}}}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// TODO(jrick)
/*
func (v *gdk.Screen) AlternativeDialogButtonOrder() bool {
	c := C.gtk_alternative_dialog_button_order(v.native())
	return gobool(c)
}
*/

// TODO(jrick)
/*
func SetAlternativeButtonOrder(ids ...ResponseType) {
}
*/
/*
 * GtkDrawingArea
 */

// DrawingArea is a representation of GTK's GtkDrawingArea.
type DrawingArea struct {
	Widget
}

// native returns a pointer to the underlying GtkDrawingArea.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *DrawingArea) native() *C.GtkDrawingArea {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkDrawingArea(p)
}

// marshalDrawingArea converts a GValue (given by its raw address) into
// a *DrawingArea.
func marshalDrawingArea(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapDrawingArea(obj), nil
}

// wrapDrawingArea wraps an existing glib.Object in a *DrawingArea.
func wrapDrawingArea(obj *glib.Object) *DrawingArea {
	return &DrawingArea{Widget{glib.InitiallyUnowned{obj}}}
}

// DrawingAreaNew is a wrapper around gtk_drawing_area_new().
func DrawingAreaNew() (*DrawingArea, error) {
	c := C.gtk_drawing_area_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	d := wrapDrawingArea(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return d, nil
}
/*
 * GtkEditable
 */

// Editable is a representation of GTK's GtkEditable GInterface.
type Editable struct {
	*glib.Object
}

// IEditable is an interface type implemented by all structs
// embedding an Editable. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkEditable.
type IEditable interface {
	toEditable() *C.GtkEditable
}

// native() returns a pointer to the underlying GObject as a GtkEditable.
// Returns nil for a nil receiver or a receiver with no GObject.
func (v *Editable) native() *C.GtkEditable {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEditable(p)
}

// marshalEditable converts a GValue (given by its raw address) into an
// *Editable.
func marshalEditable(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEditable(obj), nil
}

// wrapEditable wraps an existing glib.Object in an *Editable.
func wrapEditable(obj *glib.Object) *Editable {
	return &Editable{obj}
}

// toEditable satisfies IEditable; safe on a nil receiver.
func (v *Editable) toEditable() *C.GtkEditable {
	if v == nil {
		return nil
	}
	return v.native()
}

// SelectRegion is a wrapper around gtk_editable_select_region().
func (v *Editable) SelectRegion(startPos, endPos int) {
	C.gtk_editable_select_region(v.native(), C.gint(startPos),
		C.gint(endPos))
}

// GetSelectionBounds is a wrapper around gtk_editable_get_selection_bounds().
func (v *Editable) GetSelectionBounds() (start, end int, nonEmpty bool) {
	var cstart, cend C.gint
	c := C.gtk_editable_get_selection_bounds(v.native(), &cstart, &cend)
	return int(cstart), int(cend), gobool(c)
}

// InsertText is a wrapper around gtk_editable_insert_text(). The returned
// int is the position after the inserted text.
func (v *Editable) InsertText(newText string, position int) int {
	cstr := C.CString(newText)
	defer C.free(unsafe.Pointer(cstr))
	pos := new(C.gint)
	*pos = C.gint(position)
	C.gtk_editable_insert_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(newText)), pos)
	return int(*pos)
}

// DeleteText is a wrapper around gtk_editable_delete_text().
func (v *Editable) DeleteText(startPos, endPos int) {
	C.gtk_editable_delete_text(v.native(), C.gint(startPos), C.gint(endPos))
}

// GetChars is a wrapper around gtk_editable_get_chars().  The C string
// returned by GTK is copied into a Go string and freed.
func (v *Editable) GetChars(startPos, endPos int) string {
	c := C.gtk_editable_get_chars(v.native(), C.gint(startPos),
		C.gint(endPos))
	defer C.free(unsafe.Pointer(c))
	return C.GoString((*C.char)(c))
}

// CutClipboard is a wrapper around gtk_editable_cut_clipboard().
func (v *Editable) CutClipboard() {
	C.gtk_editable_cut_clipboard(v.native())
}

// CopyClipboard is a wrapper around gtk_editable_copy_clipboard().
func (v *Editable) CopyClipboard() {
	C.gtk_editable_copy_clipboard(v.native())
}

// PasteClipboard is a wrapper around gtk_editable_paste_clipboard().
func (v *Editable) PasteClipboard() {
	C.gtk_editable_paste_clipboard(v.native())
}

// DeleteSelection is a wrapper around gtk_editable_delete_selection().
func (v *Editable) DeleteSelection() {
	C.gtk_editable_delete_selection(v.native())
}

// SetPosition is a wrapper around gtk_editable_set_position().
func (v *Editable) SetPosition(position int) {
	C.gtk_editable_set_position(v.native(), C.gint(position))
}

// GetPosition is a wrapper around gtk_editable_get_position().
func (v *Editable) GetPosition() int {
	c := C.gtk_editable_get_position(v.native())
	return int(c)
}

// SetEditable is a wrapper around gtk_editable_set_editable().
func (v *Editable) SetEditable(isEditable bool) {
	C.gtk_editable_set_editable(v.native(), gbool(isEditable))
}

// GetEditable is a wrapper around gtk_editable_get_editable().
func (v *Editable) GetEditable() bool {
	c := C.gtk_editable_get_editable(v.native())
	return gobool(c)
}
/*
 * GtkEntry
 */

// Entry is a representation of GTK's GtkEntry.
type Entry struct {
	Widget

	// Interfaces
	Editable
}

// IEntry is an interface type implemented by all structs embedding an
// Entry, for wrapper functions taking a GtkEntry.
type IEntry interface {
	toEntry() *C.GtkEntry
}
// toEntry satisfies IEntry by returning the underlying *C.GtkEntry.
// A nil receiver yields nil, matching the other toXxx helpers in this
// file (CellRenderer.toCellRenderer, Editable.toEditable) instead of
// relying on native() being called through a nil pointer.
func (v *Entry) toEntry() *C.GtkEntry {
	if v == nil {
		return nil
	}
	return v.native()
}
// native returns a pointer to the underlying GtkEntry.
func (v *Entry) native() *C.GtkEntry {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkEntry(p)
}
func marshalEntry(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapEntry(obj), nil
}
func wrapEntry(obj *glib.Object) *Entry {
e := wrapEditable(obj)
return &Entry{Widget{glib.InitiallyUnowned{obj}}, *e}
}
// EntryNew() is a wrapper around gtk_entry_new().
func EntryNew() (*Entry, error) {
c := C.gtk_entry_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
e := wrapEntry(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return e, nil
}
// EntryNewWithBuffer() is a wrapper around gtk_entry_new_with_buffer().
func EntryNewWithBuffer(buffer *EntryBuffer) (*Entry, error) {
c := C.gtk_entry_new_with_buffer(buffer.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
e := wrapEntry(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return e, nil
}
// GetBuffer() is a wrapper around gtk_entry_get_buffer().
func (v *Entry) GetBuffer() (*EntryBuffer, error) {
c := C.gtk_entry_get_buffer(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
e := &EntryBuffer{obj}
obj.Ref()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return e, nil
}
// SetBuffer() is a wrapper around gtk_entry_set_buffer().
func (v *Entry) SetBuffer(buffer *EntryBuffer) {
C.gtk_entry_set_buffer(v.native(), buffer.native())
}
// SetText() is a wrapper around gtk_entry_set_text().
func (v *Entry) SetText(text string) {
cstr := C.CString(text)
defer C.free(unsafe.Pointer(cstr))
C.gtk_entry_set_text(v.native(), (*C.gchar)(cstr))
}
// GetText() is a wrapper around gtk_entry_get_text().
func (v *Entry) GetText() (string, error) {
c := C.gtk_entry_get_text(v.native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// GetTextLength() is a wrapper around gtk_entry_get_text_length().
func (v *Entry) GetTextLength() uint16 {
c := C.gtk_entry_get_text_length(v.native())
return uint16(c)
}
// TODO(jrick) GdkRectangle
/*
func (v *Entry) GetTextArea() {
}
*/
// SetVisibility() is a wrapper around gtk_entry_set_visibility().
func (v *Entry) SetVisibility(visible bool) {
C.gtk_entry_set_visibility(v.native(), gbool(visible))
}
// SetInvisibleChar() is a wrapper around gtk_entry_set_invisible_char().
func (v *Entry) SetInvisibleChar(ch rune) {
C.gtk_entry_set_invisible_char(v.native(), C.gunichar(ch))
}
// UnsetInvisibleChar() is a wrapper around gtk_entry_unset_invisible_char().
func (v *Entry) UnsetInvisibleChar() {
C.gtk_entry_unset_invisible_char(v.native())
}
// SetMaxLength() is a wrapper around gtk_entry_set_max_length().
func (v *Entry) SetMaxLength(len int) {
C.gtk_entry_set_max_length(v.native(), C.gint(len))
}
// GetActivatesDefault() is a wrapper around gtk_entry_get_activates_default().
func (v *Entry) GetActivatesDefault() bool {
c := C.gtk_entry_get_activates_default(v.native())
return gobool(c)
}
// GetHasFrame() is a wrapper around gtk_entry_get_has_frame().
func (v *Entry) GetHasFrame() bool {
c := C.gtk_entry_get_has_frame(v.native())
return gobool(c)
}
// GetWidthChars() is a wrapper around gtk_entry_get_width_chars().
func (v *Entry) GetWidthChars() int {
c := C.gtk_entry_get_width_chars(v.native())
return int(c)
}
// SetActivatesDefault() is a wrapper around gtk_entry_set_activates_default().
func (v *Entry) SetActivatesDefault(setting bool) {
C.gtk_entry_set_activates_default(v.native(), gbool(setting))
}
// SetHasFrame() is a wrapper around gtk_entry_set_has_frame().
func (v *Entry) SetHasFrame(setting bool) {
C.gtk_entry_set_has_frame(v.native(), gbool(setting))
}
// SetWidthChars() is a wrapper around gtk_entry_set_width_chars().
func (v *Entry) SetWidthChars(nChars int) {
C.gtk_entry_set_width_chars(v.native(), C.gint(nChars))
}
// GetInvisibleChar() is a wrapper around gtk_entry_get_invisible_char().
func (v *Entry) GetInvisibleChar() rune {
c := C.gtk_entry_get_invisible_char(v.native())
return rune(c)
}
// SetAlignment() is a wrapper around gtk_entry_set_alignment().
func (v *Entry) SetAlignment(xalign float32) {
C.gtk_entry_set_alignment(v.native(), C.gfloat(xalign))
}
// GetAlignment() is a wrapper around gtk_entry_get_alignment().
func (v *Entry) GetAlignment() float32 {
c := C.gtk_entry_get_alignment(v.native())
return float32(c)
}
// SetPlaceholderText() is a wrapper around gtk_entry_set_placeholder_text().
func (v *Entry) SetPlaceholderText(text string) {
cstr := C.CString(text)
defer C.free(unsafe.Pointer(cstr))
C.gtk_entry_set_placeholder_text(v.native(), (*C.gchar)(cstr))
}
// GetPlaceholderText() is a wrapper around gtk_entry_get_placeholder_text().
func (v *Entry) GetPlaceholderText() (string, error) {
c := C.gtk_entry_get_placeholder_text(v.native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// SetOverwriteMode() is a wrapper around gtk_entry_set_overwrite_mode().
func (v *Entry) SetOverwriteMode(overwrite bool) {
C.gtk_entry_set_overwrite_mode(v.native(), gbool(overwrite))
}
// GetOverwriteMode() is a wrapper around gtk_entry_get_overwrite_mode().
func (v *Entry) GetOverwriteMode() bool {
c := C.gtk_entry_get_overwrite_mode(v.native())
return gobool(c)
}
// TODO(jrick) Pangolayout
/*
func (v *Entry) GetLayout() {
}
*/
// GetLayoutOffsets() is a wrapper around gtk_entry_get_layout_offsets().
func (v *Entry) GetLayoutOffsets() (x, y int) {
	var cx, cy C.gint
	C.gtk_entry_get_layout_offsets(v.native(), &cx, &cy)
	return int(cx), int(cy)
}

// LayoutIndexToTextIndex() is a wrapper around
// gtk_entry_layout_index_to_text_index().
func (v *Entry) LayoutIndexToTextIndex(layoutIndex int) int {
	return int(C.gtk_entry_layout_index_to_text_index(v.native(),
		C.gint(layoutIndex)))
}

// TextIndexToLayoutIndex() is a wrapper around
// gtk_entry_text_index_to_layout_index().
func (v *Entry) TextIndexToLayoutIndex(textIndex int) int {
	return int(C.gtk_entry_text_index_to_layout_index(v.native(),
		C.gint(textIndex)))
}

// TODO(jrick) PangoAttrList
/*
func (v *Entry) SetAttributes() {
}
*/

// TODO(jrick) PangoAttrList
/*
func (v *Entry) GetAttributes() {
}
*/
// GetMaxLength() is a wrapper around gtk_entry_get_max_length().
func (v *Entry) GetMaxLength() int {
	return int(C.gtk_entry_get_max_length(v.native()))
}

// GetVisibility() is a wrapper around gtk_entry_get_visibility().
func (v *Entry) GetVisibility() bool {
	return gobool(C.gtk_entry_get_visibility(v.native()))
}

// SetCompletion() is a wrapper around gtk_entry_set_completion().
func (v *Entry) SetCompletion(completion *EntryCompletion) {
	C.gtk_entry_set_completion(v.native(), completion.native())
}

// GetCompletion() is a wrapper around gtk_entry_get_completion().
// The returned wrapper takes its own reference on the completion object and
// releases it via a finalizer.
func (v *Entry) GetCompletion() (*EntryCompletion, error) {
	ptr := C.gtk_entry_get_completion(v.native())
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return &EntryCompletion{obj}, nil
}
// SetCursorHAdjustment() is a wrapper around
// gtk_entry_set_cursor_hadjustment().
func (v *Entry) SetCursorHAdjustment(adjustment *Adjustment) {
	C.gtk_entry_set_cursor_hadjustment(v.native(), adjustment.native())
}

// GetCursorHAdjustment() is a wrapper around
// gtk_entry_get_cursor_hadjustment().
// The returned wrapper sinks a reference on the adjustment and releases it
// via a finalizer.
func (v *Entry) GetCursorHAdjustment() (*Adjustment, error) {
	ptr := C.gtk_entry_get_cursor_hadjustment(v.native())
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return &Adjustment{glib.InitiallyUnowned{obj}}, nil
}
// SetProgressFraction() is a wrapper around gtk_entry_set_progress_fraction().
func (v *Entry) SetProgressFraction(fraction float64) {
	C.gtk_entry_set_progress_fraction(v.native(), C.gdouble(fraction))
}

// GetProgressFraction() is a wrapper around gtk_entry_get_progress_fraction().
func (v *Entry) GetProgressFraction() float64 {
	return float64(C.gtk_entry_get_progress_fraction(v.native()))
}

// SetProgressPulseStep() is a wrapper around
// gtk_entry_set_progress_pulse_step().
func (v *Entry) SetProgressPulseStep(fraction float64) {
	C.gtk_entry_set_progress_pulse_step(v.native(), C.gdouble(fraction))
}

// GetProgressPulseStep() is a wrapper around
// gtk_entry_get_progress_pulse_step().
func (v *Entry) GetProgressPulseStep() float64 {
	return float64(C.gtk_entry_get_progress_pulse_step(v.native()))
}

// ProgressPulse() is a wrapper around gtk_entry_progress_pulse().
func (v *Entry) ProgressPulse() {
	C.gtk_entry_progress_pulse(v.native())
}

// TODO(jrick) GdkEventKey
/*
func (v *Entry) IMContextFilterKeypress() {
}
*/

// ResetIMContext() is a wrapper around gtk_entry_reset_im_context().
func (v *Entry) ResetIMContext() {
	C.gtk_entry_reset_im_context(v.native())
}

// TODO(jrick) GdkPixbuf
/*
func (v *Entry) SetIconFromPixbuf() {
}
*/
// SetIconFromIconName() is a wrapper around
// gtk_entry_set_icon_from_icon_name().
func (v *Entry) SetIconFromIconName(iconPos EntryIconPosition, name string) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_icon_from_icon_name(v.native(),
		C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}

// TODO(jrick) GIcon
/*
func (v *Entry) SetIconFromGIcon() {
}
*/

// GetIconStorageType() is a wrapper around gtk_entry_get_icon_storage_type().
func (v *Entry) GetIconStorageType(iconPos EntryIconPosition) ImageType {
	return ImageType(C.gtk_entry_get_icon_storage_type(v.native(),
		C.GtkEntryIconPosition(iconPos)))
}

// TODO(jrick) GdkPixbuf
/*
func (v *Entry) GetIconPixbuf() {
}
*/

// GetIconName() is a wrapper around gtk_entry_get_icon_name().
// A non-nil error is returned when the underlying call yields NULL.
func (v *Entry) GetIconName(iconPos EntryIconPosition) (string, error) {
	ret := C.gtk_entry_get_icon_name(v.native(),
		C.GtkEntryIconPosition(iconPos))
	if ret == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(ret)), nil
}

// TODO(jrick) GIcon
/*
func (v *Entry) GetIconGIcon() {
}
*/

// SetIconActivatable() is a wrapper around gtk_entry_set_icon_activatable().
func (v *Entry) SetIconActivatable(iconPos EntryIconPosition, activatable bool) {
	C.gtk_entry_set_icon_activatable(v.native(),
		C.GtkEntryIconPosition(iconPos), gbool(activatable))
}

// GetIconActivatable() is a wrapper around gtk_entry_get_icon_activatable().
func (v *Entry) GetIconActivatable(iconPos EntryIconPosition) bool {
	return gobool(C.gtk_entry_get_icon_activatable(v.native(),
		C.GtkEntryIconPosition(iconPos)))
}

// SetIconSensitive() is a wrapper around gtk_entry_set_icon_sensitive().
func (v *Entry) SetIconSensitive(iconPos EntryIconPosition, sensitive bool) {
	C.gtk_entry_set_icon_sensitive(v.native(),
		C.GtkEntryIconPosition(iconPos), gbool(sensitive))
}

// GetIconSensitive() is a wrapper around gtk_entry_get_icon_sensitive().
func (v *Entry) GetIconSensitive(iconPos EntryIconPosition) bool {
	return gobool(C.gtk_entry_get_icon_sensitive(v.native(),
		C.GtkEntryIconPosition(iconPos)))
}

// GetIconAtPos() is a wrapper around gtk_entry_get_icon_at_pos().
func (v *Entry) GetIconAtPos(x, y int) int {
	return int(C.gtk_entry_get_icon_at_pos(v.native(), C.gint(x), C.gint(y)))
}
// SetIconTooltipText() is a wrapper around gtk_entry_set_icon_tooltip_text().
func (v *Entry) SetIconTooltipText(iconPos EntryIconPosition, tooltip string) {
	cstr := C.CString(tooltip)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_icon_tooltip_text(v.native(),
		C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}

// GetIconTooltipText() is a wrapper around gtk_entry_get_icon_tooltip_text().
// A non-nil error is returned when the underlying call yields NULL.
func (v *Entry) GetIconTooltipText(iconPos EntryIconPosition) (string, error) {
	ret := C.gtk_entry_get_icon_tooltip_text(v.native(),
		C.GtkEntryIconPosition(iconPos))
	if ret == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(ret)), nil
}

// SetIconTooltipMarkup() is a wrapper around
// gtk_entry_set_icon_tooltip_markup().
func (v *Entry) SetIconTooltipMarkup(iconPos EntryIconPosition, tooltip string) {
	cstr := C.CString(tooltip)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_icon_tooltip_markup(v.native(),
		C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}

// GetIconTooltipMarkup() is a wrapper around
// gtk_entry_get_icon_tooltip_markup().
// A non-nil error is returned when the underlying call yields NULL.
func (v *Entry) GetIconTooltipMarkup(iconPos EntryIconPosition) (string, error) {
	ret := C.gtk_entry_get_icon_tooltip_markup(v.native(),
		C.GtkEntryIconPosition(iconPos))
	if ret == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(ret)), nil
}

// TODO(jrick) GdkDragAction
/*
func (v *Entry) SetIconDragSource() {
}
*/

// GetCurrentIconDragSource() is a wrapper around
// gtk_entry_get_current_icon_drag_source().
func (v *Entry) GetCurrentIconDragSource() int {
	return int(C.gtk_entry_get_current_icon_drag_source(v.native()))
}

// TODO(jrick) GdkRectangle
/*
func (v *Entry) GetIconArea() {
}
*/
// SetInputPurpose() is a wrapper around gtk_entry_set_input_purpose().
func (v *Entry) SetInputPurpose(purpose InputPurpose) {
	C.gtk_entry_set_input_purpose(v.native(), C.GtkInputPurpose(purpose))
}

// GetInputPurpose() is a wrapper around gtk_entry_get_input_purpose().
func (v *Entry) GetInputPurpose() InputPurpose {
	return InputPurpose(C.gtk_entry_get_input_purpose(v.native()))
}

// SetInputHints() is a wrapper around gtk_entry_set_input_hints().
func (v *Entry) SetInputHints(hints InputHints) {
	C.gtk_entry_set_input_hints(v.native(), C.GtkInputHints(hints))
}

// GetInputHints() is a wrapper around gtk_entry_get_input_hints().
func (v *Entry) GetInputHints() InputHints {
	return InputHints(C.gtk_entry_get_input_hints(v.native()))
}
/*
 * GtkEntryBuffer
 */

// EntryBuffer is a representation of GTK's GtkEntryBuffer.
type EntryBuffer struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkEntryBuffer.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *EntryBuffer) native() *C.GtkEntryBuffer {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEntryBuffer(p)
}

// marshalEntryBuffer converts a GValue (passed as a raw pointer) holding a
// GtkEntryBuffer into an *EntryBuffer for the glib marshalling machinery.
func marshalEntryBuffer(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEntryBuffer(obj), nil
}

// wrapEntryBuffer wraps an existing glib.Object in an EntryBuffer.
func wrapEntryBuffer(obj *glib.Object) *EntryBuffer {
	return &EntryBuffer{obj}
}
// EntryBufferNew() is a wrapper around gtk_entry_buffer_new().
// The new wrapper takes its own reference on the buffer and releases it via
// a finalizer.
func EntryBufferNew(initialChars string, nInitialChars int) (*EntryBuffer, error) {
	cstr := C.CString(initialChars)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_entry_buffer_new((*C.gchar)(cstr), C.gint(nInitialChars))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapEntryBuffer(obj), nil
}
// GetText() is a wrapper around gtk_entry_buffer_get_text(). A
// non-nil error is returned in the case that gtk_entry_buffer_get_text
// returns NULL to differentiate between NULL and an empty string.
func (v *EntryBuffer) GetText() (string, error) {
	ret := C.gtk_entry_buffer_get_text(v.native())
	if ret == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(ret)), nil
}

// SetText() is a wrapper around gtk_entry_buffer_set_text().
func (v *EntryBuffer) SetText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_buffer_set_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(text)))
}

// GetBytes() is a wrapper around gtk_entry_buffer_get_bytes().
func (v *EntryBuffer) GetBytes() uint {
	return uint(C.gtk_entry_buffer_get_bytes(v.native()))
}

// GetLength() is a wrapper around gtk_entry_buffer_get_length().
func (v *EntryBuffer) GetLength() uint {
	return uint(C.gtk_entry_buffer_get_length(v.native()))
}

// GetMaxLength() is a wrapper around gtk_entry_buffer_get_max_length().
func (v *EntryBuffer) GetMaxLength() int {
	return int(C.gtk_entry_buffer_get_max_length(v.native()))
}

// SetMaxLength() is a wrapper around gtk_entry_buffer_set_max_length().
func (v *EntryBuffer) SetMaxLength(maxLength int) {
	C.gtk_entry_buffer_set_max_length(v.native(), C.gint(maxLength))
}

// InsertText() is a wrapper around gtk_entry_buffer_insert_text().
// It returns the number of characters actually inserted.
func (v *EntryBuffer) InsertText(position uint, text string) uint {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	inserted := C.gtk_entry_buffer_insert_text(v.native(), C.guint(position),
		(*C.gchar)(cstr), C.gint(len(text)))
	return uint(inserted)
}

// DeleteText() is a wrapper around gtk_entry_buffer_delete_text().
// It returns the number of characters actually deleted.
func (v *EntryBuffer) DeleteText(position uint, nChars int) uint {
	deleted := C.gtk_entry_buffer_delete_text(v.native(), C.guint(position),
		C.gint(nChars))
	return uint(deleted)
}

// EmitDeletedText() is a wrapper around gtk_entry_buffer_emit_deleted_text().
func (v *EntryBuffer) EmitDeletedText(pos, nChars uint) {
	C.gtk_entry_buffer_emit_deleted_text(v.native(), C.guint(pos),
		C.guint(nChars))
}

// EmitInsertedText() is a wrapper around gtk_entry_buffer_emit_inserted_text().
func (v *EntryBuffer) EmitInsertedText(pos uint, text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_buffer_emit_inserted_text(v.native(), C.guint(pos),
		(*C.gchar)(cstr), C.guint(len(text)))
}
/*
 * GtkEntryCompletion
 */

// EntryCompletion is a representation of GTK's GtkEntryCompletion.
type EntryCompletion struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkEntryCompletion.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *EntryCompletion) native() *C.GtkEntryCompletion {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEntryCompletion(p)
}

// marshalEntryCompletion converts a GValue (passed as a raw pointer) holding
// a GtkEntryCompletion into an *EntryCompletion.
func marshalEntryCompletion(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEntryCompletion(obj), nil
}

// wrapEntryCompletion wraps an existing glib.Object in an EntryCompletion.
func wrapEntryCompletion(obj *glib.Object) *EntryCompletion {
	return &EntryCompletion{obj}
}
/*
 * GtkEventBox
 */

// EventBox is a representation of GTK's GtkEventBox.
type EventBox struct {
	Bin
}

// native returns a pointer to the underlying GtkEventBox.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *EventBox) native() *C.GtkEventBox {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEventBox(p)
}

// marshalEventBox converts a GValue (passed as a raw pointer) holding a
// GtkEventBox into an *EventBox.
func marshalEventBox(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEventBox(obj), nil
}

// wrapEventBox wraps an existing glib.Object in an EventBox, building the
// full embedded widget hierarchy (Bin -> Container -> Widget).
func wrapEventBox(obj *glib.Object) *EventBox {
	return &EventBox{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// EventBoxNew is a wrapper around gtk_event_box_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func EventBoxNew() (*EventBox, error) {
	ptr := C.gtk_event_box_new()
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapEventBox(obj), nil
}

// SetAboveChild is a wrapper around gtk_event_box_set_above_child().
func (v *EventBox) SetAboveChild(aboveChild bool) {
	C.gtk_event_box_set_above_child(v.native(), gbool(aboveChild))
}

// GetAboveChild is a wrapper around gtk_event_box_get_above_child().
func (v *EventBox) GetAboveChild() bool {
	return gobool(C.gtk_event_box_get_above_child(v.native()))
}

// SetVisibleWindow is a wrapper around gtk_event_box_set_visible_window().
func (v *EventBox) SetVisibleWindow(visibleWindow bool) {
	C.gtk_event_box_set_visible_window(v.native(), gbool(visibleWindow))
}

// GetVisibleWindow is a wrapper around gtk_event_box_get_visible_window().
func (v *EventBox) GetVisibleWindow() bool {
	return gobool(C.gtk_event_box_get_visible_window(v.native()))
}
/*
 * GtkFileChooser
 */

// FileChooser is a representation of GTK's GtkFileChooser GInterface.
type FileChooser struct {
	*glib.Object
}

// native returns a pointer to the underlying GObject as a GtkFileChooser.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *FileChooser) native() *C.GtkFileChooser {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkFileChooser(p)
}

// marshalFileChooser converts a GValue (passed as a raw pointer) holding a
// GtkFileChooser into a *FileChooser.
func marshalFileChooser(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapFileChooser(obj), nil
}

// wrapFileChooser wraps an existing glib.Object in a FileChooser.
func wrapFileChooser(obj *glib.Object) *FileChooser {
	return &FileChooser{obj}
}
// GetFilename is a wrapper around gtk_file_chooser_get_filename().
// The C-allocated filename is freed with g_free after conversion.
func (v *FileChooser) GetFilename() string {
	cfn := C.gtk_file_chooser_get_filename(v.native())
	defer C.g_free((C.gpointer)(cfn))
	return C.GoString((*C.char)(cfn))
}
/*
 * GtkFileChooserButton
 */

// FileChooserButton is a representation of GTK's GtkFileChooserButton.
type FileChooserButton struct {
	Box
	// Interfaces
	FileChooser
}

// native returns a pointer to the underlying GtkFileChooserButton.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *FileChooserButton) native() *C.GtkFileChooserButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkFileChooserButton(p)
}

// marshalFileChooserButton converts a GValue (passed as a raw pointer)
// holding a GtkFileChooserButton into a *FileChooserButton.
func marshalFileChooserButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapFileChooserButton(obj), nil
}

// wrapFileChooserButton wraps an existing glib.Object in a
// FileChooserButton, sharing the same GObject between the Box hierarchy and
// the embedded FileChooser interface value.
func wrapFileChooserButton(obj *glib.Object) *FileChooserButton {
	fc := wrapFileChooser(obj)
	return &FileChooserButton{Box{Container{Widget{glib.InitiallyUnowned{obj}}}}, *fc}
}
// FileChooserButtonNew is a wrapper around gtk_file_chooser_button_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func FileChooserButtonNew(title string, action FileChooserAction) (*FileChooserButton, error) {
	cstr := C.CString(title)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_file_chooser_button_new((*C.gchar)(cstr),
		(C.GtkFileChooserAction)(action))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapFileChooserButton(obj), nil
}
/*
 * GtkFileChooserWidget
 */

// FileChooserWidget is a representation of GTK's GtkFileChooserWidget.
type FileChooserWidget struct {
	Box
	// Interfaces
	FileChooser
}

// native returns a pointer to the underlying GtkFileChooserWidget.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *FileChooserWidget) native() *C.GtkFileChooserWidget {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkFileChooserWidget(p)
}

// marshalFileChooserWidget converts a GValue (passed as a raw pointer)
// holding a GtkFileChooserWidget into a *FileChooserWidget.
func marshalFileChooserWidget(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapFileChooserWidget(obj), nil
}

// wrapFileChooserWidget wraps an existing glib.Object in a
// FileChooserWidget, sharing the same GObject between the Box hierarchy and
// the embedded FileChooser interface value.
func wrapFileChooserWidget(obj *glib.Object) *FileChooserWidget {
	fc := wrapFileChooser(obj)
	return &FileChooserWidget{Box{Container{Widget{glib.InitiallyUnowned{obj}}}}, *fc}
}
// FileChooserWidgetNew is a wrapper around gtk_file_chooser_widget_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func FileChooserWidgetNew(action FileChooserAction) (*FileChooserWidget, error) {
	ptr := C.gtk_file_chooser_widget_new((C.GtkFileChooserAction)(action))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapFileChooserWidget(obj), nil
}
/*
 * GtkFrame
 */

// Frame is a representation of GTK's GtkFrame.
type Frame struct {
	Bin
}

// native returns a pointer to the underlying GtkFrame.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *Frame) native() *C.GtkFrame {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkFrame(p)
}

// marshalFrame converts a GValue (passed as a raw pointer) holding a
// GtkFrame into a *Frame.
func marshalFrame(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapFrame(obj), nil
}

// wrapFrame wraps an existing glib.Object in a Frame.
func wrapFrame(obj *glib.Object) *Frame {
	return &Frame{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// FrameNew is a wrapper around gtk_frame_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func FrameNew(label string) (*Frame, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_frame_new((*C.gchar)(cstr))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapFrame(obj), nil
}
// SetLabel is a wrapper around gtk_frame_set_label().
func (v *Frame) SetLabel(label string) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_frame_set_label(v.native(), (*C.gchar)(cstr))
}

// SetLabelWidget is a wrapper around gtk_frame_set_label_widget().
func (v *Frame) SetLabelWidget(labelWidget IWidget) {
	C.gtk_frame_set_label_widget(v.native(), labelWidget.toWidget())
}

// SetLabelAlign is a wrapper around gtk_frame_set_label_align().
func (v *Frame) SetLabelAlign(xAlign, yAlign float32) {
	C.gtk_frame_set_label_align(v.native(), C.gfloat(xAlign),
		C.gfloat(yAlign))
}

// SetShadowType is a wrapper around gtk_frame_set_shadow_type().
func (v *Frame) SetShadowType(t ShadowType) {
	C.gtk_frame_set_shadow_type(v.native(), C.GtkShadowType(t))
}

// GetLabel is a wrapper around gtk_frame_get_label().
func (v *Frame) GetLabel() string {
	return C.GoString((*C.char)(C.gtk_frame_get_label(v.native())))
}

// GetLabelAlign is a wrapper around gtk_frame_get_label_align().
func (v *Frame) GetLabelAlign() (xAlign, yAlign float32) {
	var cx, cy C.gfloat
	C.gtk_frame_get_label_align(v.native(), &cx, &cy)
	return float32(cx), float32(cy)
}

// GetLabelWidget is a wrapper around gtk_frame_get_label_widget().
// The returned wrapper sinks a reference on the widget and releases it via
// a finalizer.
func (v *Frame) GetLabelWidget() (*Widget, error) {
	ptr := C.gtk_frame_get_label_widget(v.native())
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapWidget(obj), nil
}

// GetShadowType is a wrapper around gtk_frame_get_shadow_type().
func (v *Frame) GetShadowType() ShadowType {
	return ShadowType(C.gtk_frame_get_shadow_type(v.native()))
}
/*
 * GtkGrid
 */

// Grid is a representation of GTK's GtkGrid.
type Grid struct {
	Container
	// Interfaces
	Orientable
}

// native returns a pointer to the underlying GtkGrid.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *Grid) native() *C.GtkGrid {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkGrid(p)
}

// toOrientable returns the underlying GObject cast to a GtkOrientable.
// Fix: also guard against a nil v.GObject (the original only checked
// v == nil), matching the nil-safety contract of native() and the other
// cast helpers in this file.
func (v *Grid) toOrientable() *C.GtkOrientable {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkOrientable(unsafe.Pointer(v.GObject))
}

// marshalGrid converts a GValue (passed as a raw pointer) holding a GtkGrid
// into a *Grid.
func marshalGrid(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapGrid(obj), nil
}

// wrapGrid wraps an existing glib.Object in a Grid, sharing the same
// GObject between the Container hierarchy and the embedded Orientable
// interface value.
func wrapGrid(obj *glib.Object) *Grid {
	o := wrapOrientable(obj)
	return &Grid{Container{Widget{glib.InitiallyUnowned{obj}}}, *o}
}
// GridNew() is a wrapper around gtk_grid_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func GridNew() (*Grid, error) {
	ptr := C.gtk_grid_new()
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapGrid(obj), nil
}
// Attach() is a wrapper around gtk_grid_attach().
func (v *Grid) Attach(child IWidget, left, top, width, height int) {
	C.gtk_grid_attach(v.native(), child.toWidget(), C.gint(left),
		C.gint(top), C.gint(width), C.gint(height))
}

// AttachNextTo() is a wrapper around gtk_grid_attach_next_to().
func (v *Grid) AttachNextTo(child, sibling IWidget, side PositionType, width, height int) {
	C.gtk_grid_attach_next_to(v.native(), child.toWidget(),
		sibling.toWidget(), C.GtkPositionType(side), C.gint(width),
		C.gint(height))
}

// GetChildAt() is a wrapper around gtk_grid_get_child_at().
// The returned wrapper sinks a reference on the widget and releases it via
// a finalizer.
func (v *Grid) GetChildAt(left, top int) (*Widget, error) {
	ptr := C.gtk_grid_get_child_at(v.native(), C.gint(left), C.gint(top))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapWidget(obj), nil
}

// InsertRow() is a wrapper around gtk_grid_insert_row().
func (v *Grid) InsertRow(position int) {
	C.gtk_grid_insert_row(v.native(), C.gint(position))
}

// InsertColumn() is a wrapper around gtk_grid_insert_column().
func (v *Grid) InsertColumn(position int) {
	C.gtk_grid_insert_column(v.native(), C.gint(position))
}

// InsertNextTo() is a wrapper around gtk_grid_insert_next_to()
func (v *Grid) InsertNextTo(sibling IWidget, side PositionType) {
	C.gtk_grid_insert_next_to(v.native(), sibling.toWidget(),
		C.GtkPositionType(side))
}
// SetRowHomogeneous() is a wrapper around gtk_grid_set_row_homogeneous().
func (v *Grid) SetRowHomogeneous(homogeneous bool) {
	C.gtk_grid_set_row_homogeneous(v.native(), gbool(homogeneous))
}

// GetRowHomogeneous() is a wrapper around gtk_grid_get_row_homogeneous().
func (v *Grid) GetRowHomogeneous() bool {
	return gobool(C.gtk_grid_get_row_homogeneous(v.native()))
}

// SetRowSpacing() is a wrapper around gtk_grid_set_row_spacing().
func (v *Grid) SetRowSpacing(spacing uint) {
	C.gtk_grid_set_row_spacing(v.native(), C.guint(spacing))
}

// GetRowSpacing() is a wrapper around gtk_grid_get_row_spacing().
func (v *Grid) GetRowSpacing() uint {
	return uint(C.gtk_grid_get_row_spacing(v.native()))
}

// SetColumnHomogeneous() is a wrapper around gtk_grid_set_column_homogeneous().
func (v *Grid) SetColumnHomogeneous(homogeneous bool) {
	C.gtk_grid_set_column_homogeneous(v.native(), gbool(homogeneous))
}

// GetColumnHomogeneous() is a wrapper around gtk_grid_get_column_homogeneous().
func (v *Grid) GetColumnHomogeneous() bool {
	return gobool(C.gtk_grid_get_column_homogeneous(v.native()))
}

// SetColumnSpacing() is a wrapper around gtk_grid_set_column_spacing().
func (v *Grid) SetColumnSpacing(spacing uint) {
	C.gtk_grid_set_column_spacing(v.native(), C.guint(spacing))
}

// GetColumnSpacing() is a wrapper around gtk_grid_get_column_spacing().
func (v *Grid) GetColumnSpacing() uint {
	return uint(C.gtk_grid_get_column_spacing(v.native()))
}
/*
 * GtkImage
 */

// Image is a representation of GTK's GtkImage.
type Image struct {
	Misc
}

// native returns a pointer to the underlying GtkImage.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *Image) native() *C.GtkImage {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkImage(p)
}

// marshalImage converts a GValue (passed as a raw pointer) holding a
// GtkImage into an *Image.
func marshalImage(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapImage(obj), nil
}

// wrapImage wraps an existing glib.Object in an Image.
func wrapImage(obj *glib.Object) *Image {
	return &Image{Misc{Widget{glib.InitiallyUnowned{obj}}}}
}
// ImageNew() is a wrapper around gtk_image_new().
func ImageNew() (*Image, error) {
	ptr := C.gtk_image_new()
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapImage(obj), nil
}

// ImageNewFromFile() is a wrapper around gtk_image_new_from_file().
func ImageNewFromFile(filename string) (*Image, error) {
	cstr := C.CString(filename)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_image_new_from_file((*C.gchar)(cstr))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapImage(obj), nil
}

// ImageNewFromResource() is a wrapper around gtk_image_new_from_resource().
func ImageNewFromResource(resourcePath string) (*Image, error) {
	cstr := C.CString(resourcePath)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_image_new_from_resource((*C.gchar)(cstr))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapImage(obj), nil
}

// ImageNewFromPixbuf is a wrapper around gtk_image_new_from_pixbuf().
func ImageNewFromPixbuf(pixbuf *gdk.Pixbuf) (*Image, error) {
	pb := (*C.GdkPixbuf)(unsafe.Pointer(pixbuf.Native()))
	ptr := C.gtk_image_new_from_pixbuf(pb)
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapImage(obj), nil
}
// TODO(jrick) GtkIconSet
/*
func ImageNewFromIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func ImageNewFromAnimation() {
}
*/

// ImageNewFromIconName() is a wrapper around gtk_image_new_from_icon_name().
func ImageNewFromIconName(iconName string, size IconSize) (*Image, error) {
	cstr := C.CString(iconName)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_image_new_from_icon_name((*C.gchar)(cstr),
		C.GtkIconSize(size))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapImage(obj), nil
}

// TODO(jrick) GIcon
/*
func ImageNewFromGIcon() {
}
*/
// Clear() is a wrapper around gtk_image_clear().
func (v *Image) Clear() {
	C.gtk_image_clear(v.native())
}

// SetFromFile() is a wrapper around gtk_image_set_from_file().
func (v *Image) SetFromFile(filename string) {
	cstr := C.CString(filename)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_image_set_from_file(v.native(), (*C.gchar)(cstr))
}

// SetFromResource() is a wrapper around gtk_image_set_from_resource().
func (v *Image) SetFromResource(resourcePath string) {
	cstr := C.CString(resourcePath)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_image_set_from_resource(v.native(), (*C.gchar)(cstr))
}

// SetFromPixbuf is a wrapper around gtk_image_set_from_pixbuf().
func (v *Image) SetFromPixbuf(pixbuf *gdk.Pixbuf) {
	pb := (*C.GdkPixbuf)(unsafe.Pointer(pixbuf.Native()))
	C.gtk_image_set_from_pixbuf(v.native(), pb)
}

// TODO(jrick) GtkIconSet
/*
func (v *Image) SetFromIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func (v *Image) SetFromAnimation() {
}
*/

// SetFromIconName() is a wrapper around gtk_image_set_from_icon_name().
func (v *Image) SetFromIconName(iconName string, size IconSize) {
	cstr := C.CString(iconName)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_image_set_from_icon_name(v.native(), (*C.gchar)(cstr),
		C.GtkIconSize(size))
}

// TODO(jrick) GIcon
/*
func (v *Image) SetFromGIcon() {
}
*/
// SetPixelSize() is a wrapper around gtk_image_set_pixel_size().
func (v *Image) SetPixelSize(pixelSize int) {
	C.gtk_image_set_pixel_size(v.native(), C.gint(pixelSize))
}

// GetStorageType() is a wrapper around gtk_image_get_storage_type().
func (v *Image) GetStorageType() ImageType {
	return ImageType(C.gtk_image_get_storage_type(v.native()))
}

// GetPixbuf() is a wrapper around gtk_image_get_pixbuf().
// It returns nil if the image holds no pixbuf; otherwise the wrapper takes
// its own reference on the pixbuf and releases it via a finalizer.
func (v *Image) GetPixbuf() *gdk.Pixbuf {
	ptr := C.gtk_image_get_pixbuf(v.native())
	if ptr == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return &gdk.Pixbuf{obj}
}

// TODO(jrick) GtkIconSet
/*
func (v *Image) GetIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func (v *Image) GetAnimation() {
}
*/

// GetIconName() is a wrapper around gtk_image_get_icon_name().
func (v *Image) GetIconName() (string, IconSize) {
	var iconName *C.gchar
	var size C.GtkIconSize
	C.gtk_image_get_icon_name(v.native(), &iconName, &size)
	return C.GoString((*C.char)(iconName)), IconSize(size)
}

// TODO(jrick) GIcon
/*
func (v *Image) GetGIcon() {
}
*/

// GetPixelSize() is a wrapper around gtk_image_get_pixel_size().
func (v *Image) GetPixelSize() int {
	return int(C.gtk_image_get_pixel_size(v.native()))
}
/*
 * GtkLabel
 */

// Label is a representation of GTK's GtkLabel.
type Label struct {
	Misc
}

// native returns a pointer to the underlying GtkLabel.
// A nil receiver or nil wrapped GObject yields a nil C pointer.
func (v *Label) native() *C.GtkLabel {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkLabel(p)
}

// marshalLabel converts a GValue (passed as a raw pointer) holding a
// GtkLabel into a *Label.
func marshalLabel(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapLabel(obj), nil
}

// wrapLabel wraps an existing glib.Object in a Label.
func wrapLabel(obj *glib.Object) *Label {
	return &Label{Misc{Widget{glib.InitiallyUnowned{obj}}}}
}
// LabelNew is a wrapper around gtk_label_new().
// The floating reference of the new widget is sunk and released via a
// finalizer.
func LabelNew(str string) (*Label, error) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))

	ptr := C.gtk_label_new((*C.gchar)(cstr))
	if ptr == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapLabel(obj), nil
}
// SetText is a wrapper around gtk_label_set_text().
func (v *Label) SetText(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_text(v.native(), (*C.gchar)(cstr))
}

// SetMarkup is a wrapper around gtk_label_set_markup().
func (v *Label) SetMarkup(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_markup(v.native(), (*C.gchar)(cstr))
}

// SetMarkupWithMnemonic is a wrapper around
// gtk_label_set_markup_with_mnemonic().
func (v *Label) SetMarkupWithMnemonic(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_markup_with_mnemonic(v.native(), (*C.gchar)(cstr))
}

// SetPattern is a wrapper around gtk_label_set_pattern().
func (v *Label) SetPattern(pattern string) {
	cstr := C.CString(pattern)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_pattern(v.native(), (*C.gchar)(cstr))
}

// SetJustify is a wrapper around gtk_label_set_justify().
func (v *Label) SetJustify(jtype Justification) {
	C.gtk_label_set_justify(v.native(), C.GtkJustification(jtype))
}

// SetEllipsize is a wrapper around gtk_label_set_ellipsize().
func (v *Label) SetEllipsize(mode pango.EllipsizeMode) {
	C.gtk_label_set_ellipsize(v.native(), C.PangoEllipsizeMode(mode))
}
// GetWidthChars is a wrapper around gtk_label_get_width_chars().
func (v *Label) GetWidthChars() int {
	return int(C.gtk_label_get_width_chars(v.native()))
}

// SetWidthChars is a wrapper around gtk_label_set_width_chars().
func (v *Label) SetWidthChars(nChars int) {
	C.gtk_label_set_width_chars(v.native(), C.gint(nChars))
}

// GetMaxWidthChars is a wrapper around gtk_label_get_max_width_chars().
func (v *Label) GetMaxWidthChars() int {
	return int(C.gtk_label_get_max_width_chars(v.native()))
}

// SetMaxWidthChars is a wrapper around gtk_label_set_max_width_chars().
func (v *Label) SetMaxWidthChars(nChars int) {
	C.gtk_label_set_max_width_chars(v.native(), C.gint(nChars))
}

// GetLineWrap is a wrapper around gtk_label_get_line_wrap().
func (v *Label) GetLineWrap() bool {
	return gobool(C.gtk_label_get_line_wrap(v.native()))
}

// SetLineWrap is a wrapper around gtk_label_set_line_wrap().
func (v *Label) SetLineWrap(wrap bool) {
	C.gtk_label_set_line_wrap(v.native(), gbool(wrap))
}

// SetLineWrapMode is a wrapper around gtk_label_set_line_wrap_mode().
func (v *Label) SetLineWrapMode(wrapMode pango.WrapMode) {
	C.gtk_label_set_line_wrap_mode(v.native(), C.PangoWrapMode(wrapMode))
}

// GetSelectable is a wrapper around gtk_label_get_selectable().
func (v *Label) GetSelectable() bool {
	return gobool(C.gtk_label_get_selectable(v.native()))
}

// GetText is a wrapper around gtk_label_get_text().
// A non-nil error is returned when the underlying call yields NULL.
func (v *Label) GetText() (string, error) {
	ret := C.gtk_label_get_text(v.native())
	if ret == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(ret)), nil
}
// GetJustify is a wrapper around gtk_label_get_justify().
func (v *Label) GetJustify() Justification {
c := C.gtk_label_get_justify(v.native())
return Justification(c)
}
// GetEllipsize is a wrapper around gtk_label_get_ellipsize().
func (v *Label) GetEllipsize() pango.EllipsizeMode {
c := C.gtk_label_get_ellipsize(v.native())
return pango.EllipsizeMode(c)
}
// GetCurrentUri is a wrapper around gtk_label_get_current_uri().
func (v *Label) GetCurrentUri() string {
c := C.gtk_label_get_current_uri(v.native())
return C.GoString((*C.char)(c))
}
// GetTrackVisitedLinks is a wrapper around gtk_label_get_track_visited_links().
func (v *Label) GetTrackVisitedLinks() bool {
c := C.gtk_label_get_track_visited_links(v.native())
return gobool(c)
}
// SetTrackVisitedLinks is a wrapper around gtk_label_set_track_visited_links().
func (v *Label) SetTrackVisitedLinks(trackLinks bool) {
C.gtk_label_set_track_visited_links(v.native(), gbool(trackLinks))
}
// GetAngle is a wrapper around gtk_label_get_angle().
func (v *Label) GetAngle() float64 {
c := C.gtk_label_get_angle(v.native())
return float64(c)
}
// SetAngle is a wrapper around gtk_label_set_angle().
func (v *Label) SetAngle(angle float64) {
C.gtk_label_set_angle(v.native(), C.gdouble(angle))
}
// GetSelectionBounds is a wrapper around gtk_label_get_selection_bounds().
// start and end are character offsets of the selection; nonEmpty reports
// whether a selection exists.
func (v *Label) GetSelectionBounds() (start, end int, nonEmpty bool) {
	var s, e C.gint
	ok := C.gtk_label_get_selection_bounds(v.native(), &s, &e)
	start = int(s)
	end = int(e)
	nonEmpty = gobool(ok)
	return start, end, nonEmpty
}
// GetSingleLineMode is a wrapper around gtk_label_get_single_line_mode().
func (v *Label) GetSingleLineMode() bool {
	c := C.gtk_label_get_single_line_mode(v.native())
	return gobool(c)
}

// SetSingleLineMode is a wrapper around gtk_label_set_single_line_mode().
func (v *Label) SetSingleLineMode(mode bool) {
	C.gtk_label_set_single_line_mode(v.native(), gbool(mode))
}

// GetUseMarkup is a wrapper around gtk_label_get_use_markup().
func (v *Label) GetUseMarkup() bool {
	c := C.gtk_label_get_use_markup(v.native())
	return gobool(c)
}

// SetUseMarkup is a wrapper around gtk_label_set_use_markup().
func (v *Label) SetUseMarkup(use bool) {
	C.gtk_label_set_use_markup(v.native(), gbool(use))
}

// GetUseUnderline is a wrapper around gtk_label_get_use_underline().
func (v *Label) GetUseUnderline() bool {
	c := C.gtk_label_get_use_underline(v.native())
	return gobool(c)
}

// SetUseUnderline is a wrapper around gtk_label_set_use_underline().
func (v *Label) SetUseUnderline(use bool) {
	C.gtk_label_set_use_underline(v.native(), gbool(use))
}
// LabelNewWithMnemonic is a wrapper around gtk_label_new_with_mnemonic().
// An underscore in str marks the mnemonic character.
func LabelNewWithMnemonic(str string) (*Label, error) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))

	c := C.gtk_label_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}

	// Sink the floating reference and tie its lifetime to the Go wrapper.
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	label := wrapLabel(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return label, nil
}
// SelectRegion is a wrapper around gtk_label_select_region().
// Offsets are in characters, not bytes.
func (v *Label) SelectRegion(startOffset, endOffset int) {
	C.gtk_label_select_region(v.native(), C.gint(startOffset),
		C.gint(endOffset))
}

// SetSelectable is a wrapper around gtk_label_set_selectable().
func (v *Label) SetSelectable(setting bool) {
	C.gtk_label_set_selectable(v.native(), gbool(setting))
}

// SetLabel is a wrapper around gtk_label_set_label().
func (v *Label) SetLabel(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_label(v.native(), (*C.gchar)(cstr))
}
/*
 * GtkListStore
 */

// ListStore is a representation of GTK's GtkListStore.
type ListStore struct {
	*glib.Object

	// Interfaces
	TreeModel
}

// native returns a pointer to the underlying GtkListStore.
// Returns nil when the receiver or its GObject is nil.
func (v *ListStore) native() *C.GtkListStore {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkListStore(p)
}

// marshalListStore extracts a GtkListStore from a GValue for the
// glib marshaling machinery.
func marshalListStore(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapListStore(obj), nil
}

// wrapListStore builds the Go wrapper, embedding the TreeModel
// interface wrapper around the same GObject.
func wrapListStore(obj *glib.Object) *ListStore {
	tm := wrapTreeModel(obj)
	return &ListStore{obj, *tm}
}

// toTreeModel returns the GtkTreeModel interface pointer for this store.
func (v *ListStore) toTreeModel() *C.GtkTreeModel {
	if v == nil {
		return nil
	}
	return C.toGtkTreeModel(unsafe.Pointer(v.GObject))
}
// ListStoreNew is a wrapper around gtk_list_store_newv().
// One glib.Type is given per column of the store.
func ListStoreNew(types ...glib.Type) (*ListStore, error) {
	// Build a C array of GTypes for the variadic column types.
	gtypes := C.alloc_types(C.int(len(types)))
	for n, val := range types {
		C.set_type(gtypes, C.int(n), C.GType(val))
	}
	defer C.g_free(C.gpointer(gtypes))
	c := C.gtk_list_store_newv(C.gint(len(types)), gtypes)
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	ls := wrapListStore(obj)
	// NOTE(review): unlike the widget constructors in this file (which
	// RefSink a floating reference), this takes an additional strong Ref
	// on an object gtk_list_store_newv already handed us owned; it looks
	// like one reference may be leaked — confirm against glib.Object
	// ownership rules before changing.
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return ls, nil
}
// Remove is a wrapper around gtk_list_store_remove(). It reports whether
// iter is still valid (i.e. pointed at the row after the removed one).
func (v *ListStore) Remove(iter *TreeIter) bool {
	ok := C.gtk_list_store_remove(v.native(), iter.native())
	return gobool(ok)
}
// TODO(jrick)
/*
func (v *ListStore) SetColumnTypes(types ...glib.Type) {
}
*/
// Set() is a wrapper around gtk_list_store_set_value() but provides
// a function similar to gtk_list_store_set() in that multiple columns
// may be set by one call. The length of columns and values slices must
// match, or Set() will return a non-nil error.
//
// As an example, a call to:
//  store.Set(iter, []int{0, 1}, []interface{}{"Foo", "Bar"})
// is functionally equivalent to calling the native C GTK function:
//  gtk_list_store_set(store, iter, 0, "Foo", 1, "Bar", -1);
func (v *ListStore) Set(iter *TreeIter, columns []int, values []interface{}) error {
	if len(columns) != len(values) {
		return errors.New("columns and values lengths do not match")
	}
	for i, val := range values {
		// Box each Go value into a GValue, then store it per column.
		gv, err := glib.GValue(val)
		if err != nil {
			return err
		}
		C.gtk_list_store_set_value(v.native(), iter.native(),
			C.gint(columns[i]),
			(*C.GValue)(unsafe.Pointer(gv.Native())))
	}
	return nil
}
// TODO(jrick)
/*
func (v *ListStore) InsertWithValues(iter *TreeIter, position int, columns []int, values []glib.Value) {
var ccolumns *C.gint
var cvalues *C.GValue
C.gtk_list_store_insert_with_values(v.native(), iter.native(),
C.gint(position), columns, values, C.gint(len(values)))
}
*/
// InsertBefore() is a wrapper around gtk_list_store_insert_before().
// A new row is inserted before sibling; if sibling is nil, the row is
// appended to the end of the list (matching the GTK NULL-sibling
// semantics; previously a nil sibling caused a nil-pointer panic).
func (v *ListStore) InsertBefore(sibling *TreeIter) *TreeIter {
	var cSibling *C.GtkTreeIter
	if sibling != nil {
		cSibling = sibling.native()
	}
	var ti C.GtkTreeIter
	C.gtk_list_store_insert_before(v.native(), &ti, cSibling)
	iter := &TreeIter{ti}
	return iter
}
// InsertAfter() is a wrapper around gtk_list_store_insert_after().
// A new row is inserted after sibling; if sibling is nil, the row is
// prepended to the start of the list (matching the GTK NULL-sibling
// semantics; previously a nil sibling caused a nil-pointer panic).
func (v *ListStore) InsertAfter(sibling *TreeIter) *TreeIter {
	var cSibling *C.GtkTreeIter
	if sibling != nil {
		cSibling = sibling.native()
	}
	var ti C.GtkTreeIter
	C.gtk_list_store_insert_after(v.native(), &ti, cSibling)
	iter := &TreeIter{ti}
	return iter
}
// Prepend() is a wrapper around gtk_list_store_prepend(). It returns an
// iterator pointing at the newly inserted first row.
func (v *ListStore) Prepend() *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_prepend(v.native(), &ti)
	return &TreeIter{ti}
}

// Append() is a wrapper around gtk_list_store_append(). It returns an
// iterator pointing at the newly inserted last row.
func (v *ListStore) Append() *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_append(v.native(), &ti)
	return &TreeIter{ti}
}
// Clear() is a wrapper around gtk_list_store_clear(). It removes all
// rows from the store.
func (v *ListStore) Clear() {
	C.gtk_list_store_clear(v.native())
}

// IterIsValid() is a wrapper around gtk_list_store_iter_is_valid().
func (v *ListStore) IterIsValid(iter *TreeIter) bool {
	c := C.gtk_list_store_iter_is_valid(v.native(), iter.native())
	return gobool(c)
}
// TODO(jrick)
/*
func (v *ListStore) Reorder(newOrder []int) {
}
*/
// Swap() is a wrapper around gtk_list_store_swap(). It exchanges the
// positions of rows a and b.
func (v *ListStore) Swap(a, b *TreeIter) {
	C.gtk_list_store_swap(v.native(), a.native(), b.native())
}

// MoveBefore() is a wrapper around gtk_list_store_move_before().
func (v *ListStore) MoveBefore(iter, position *TreeIter) {
	C.gtk_list_store_move_before(v.native(), iter.native(),
		position.native())
}

// MoveAfter() is a wrapper around gtk_list_store_move_after().
func (v *ListStore) MoveAfter(iter, position *TreeIter) {
	C.gtk_list_store_move_after(v.native(), iter.native(),
		position.native())
}
/*
 * GtkMenu
 */

// Menu is a representation of GTK's GtkMenu.
type Menu struct {
	MenuShell
}

// IMenu is an interface type implemented by all structs embedding
// a Menu. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a
// GtkMenu.
type IMenu interface {
	toMenu() *C.GtkMenu
	toWidget() *C.GtkWidget
}

// native() returns a pointer to the underlying GtkMenu.
// Returns nil when the receiver or its GObject is nil.
func (v *Menu) native() *C.GtkMenu {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenu(p)
}

// toMenu satisfies the IMenu interface.
func (v *Menu) toMenu() *C.GtkMenu {
	if v == nil {
		return nil
	}
	return v.native()
}

// marshalMenu extracts a GtkMenu from a GValue for the glib
// marshaling machinery.
func marshalMenu(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenu(obj), nil
}

// wrapMenu builds the Go wrapper chain down to InitiallyUnowned.
func wrapMenu(obj *glib.Object) *Menu {
	return &Menu{MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// MenuNew() is a wrapper around gtk_menu_new().
func MenuNew() (*Menu, error) {
	c := C.gtk_menu_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenu(obj)
	// Sink the floating reference; the finalizer drops it when the Go
	// wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
/*
 * GtkMenuBar
 */

// MenuBar is a representation of GTK's GtkMenuBar.
type MenuBar struct {
	MenuShell
}

// native() returns a pointer to the underlying GtkMenuBar.
// Returns nil when the receiver or its GObject is nil.
func (v *MenuBar) native() *C.GtkMenuBar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuBar(p)
}

// marshalMenuBar extracts a GtkMenuBar from a GValue for the glib
// marshaling machinery.
func marshalMenuBar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuBar(obj), nil
}

// wrapMenuBar builds the Go wrapper chain down to InitiallyUnowned.
func wrapMenuBar(obj *glib.Object) *MenuBar {
	return &MenuBar{MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// MenuBarNew() is a wrapper around gtk_menu_bar_new().
func MenuBarNew() (*MenuBar, error) {
	c := C.gtk_menu_bar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuBar(obj)
	// Sink the floating reference; finalizer releases it.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
/*
 * GtkMenuButton
 */

// MenuButton is a representation of GTK's GtkMenuButton.
type MenuButton struct {
	ToggleButton
}

// native returns a pointer to the underlying GtkMenuButton.
// Returns nil when the receiver or its GObject is nil.
func (v *MenuButton) native() *C.GtkMenuButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuButton(p)
}

// marshalMenuButton extracts a GtkMenuButton from a GValue for the
// glib marshaling machinery.
func marshalMenuButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuButton(obj), nil
}

// wrapMenuButton builds the Go wrapper chain down to InitiallyUnowned.
func wrapMenuButton(obj *glib.Object) *MenuButton {
	return &MenuButton{ToggleButton{Button{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}}
}

// MenuButtonNew is a wrapper around gtk_menu_button_new().
func MenuButtonNew() (*MenuButton, error) {
	c := C.gtk_menu_button_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuButton(obj)
	// Sink the floating reference; finalizer releases it.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// SetPopup is a wrapper around gtk_menu_button_set_popup().
// Passing a nil menu dissociates any previously set popup (GTK accepts
// NULL here; previously a nil menu caused a nil-interface panic).
func (v *MenuButton) SetPopup(menu IMenu) {
	var w *C.GtkWidget
	if menu != nil {
		w = menu.toWidget()
	}
	C.gtk_menu_button_set_popup(v.native(), w)
}
// GetPopup is a wrapper around gtk_menu_button_get_popup().
// Returns nil if no popup menu has been set.
func (v *MenuButton) GetPopup() *Menu {
	c := C.gtk_menu_button_get_popup(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenu(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}

// TODO: gtk_menu_button_set_menu_model
// TODO: gtk_menu_button_get_menu_model

// SetDirection is a wrapper around gtk_menu_button_set_direction().
func (v *MenuButton) SetDirection(direction ArrowType) {
	C.gtk_menu_button_set_direction(v.native(), C.GtkArrowType(direction))
}

// GetDirection is a wrapper around gtk_menu_button_get_direction().
func (v *MenuButton) GetDirection() ArrowType {
	c := C.gtk_menu_button_get_direction(v.native())
	return ArrowType(c)
}

// SetAlignWidget is a wrapper around gtk_menu_button_set_align_widget().
func (v *MenuButton) SetAlignWidget(alignWidget IWidget) {
	C.gtk_menu_button_set_align_widget(v.native(), alignWidget.toWidget())
}

// GetAlignWidget is a wrapper around gtk_menu_button_get_align_widget().
// Returns nil if no align widget has been set.
func (v *MenuButton) GetAlignWidget() *Widget {
	c := C.gtk_menu_button_get_align_widget(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}
/*
 * GtkMenuItem
 */

// MenuItem is a representation of GTK's GtkMenuItem.
type MenuItem struct {
	Bin
}

// IMenuItem is an interface type implemented by all structs
// embedding a MenuItem. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkMenuItem.
type IMenuItem interface {
	toMenuItem() *C.GtkMenuItem
	toWidget() *C.GtkWidget
}

// native returns a pointer to the underlying GtkMenuItem.
// Returns nil when the receiver or its GObject is nil.
func (v *MenuItem) native() *C.GtkMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuItem(p)
}

// toMenuItem satisfies the IMenuItem interface.
func (v *MenuItem) toMenuItem() *C.GtkMenuItem {
	if v == nil {
		return nil
	}
	return v.native()
}

// marshalMenuItem extracts a GtkMenuItem from a GValue for the glib
// marshaling machinery.
func marshalMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuItem(obj), nil
}

// wrapMenuItem builds the Go wrapper chain down to InitiallyUnowned.
func wrapMenuItem(obj *glib.Object) *MenuItem {
	return &MenuItem{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// MenuItemNew() is a wrapper around gtk_menu_item_new().
func MenuItemNew() (*MenuItem, error) {
	c := C.gtk_menu_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuItem(obj)
	// Sink the floating reference; finalizer releases it.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// MenuItemNewWithLabel() is a wrapper around gtk_menu_item_new_with_label().
func MenuItemNewWithLabel(label string) (*MenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))

	c := C.gtk_menu_item_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}

	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	item := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return item, nil
}

// MenuItemNewWithMnemonic() is a wrapper around
// gtk_menu_item_new_with_mnemonic(). An underscore in label marks the
// mnemonic character.
func MenuItemNewWithMnemonic(label string) (*MenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))

	c := C.gtk_menu_item_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}

	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	item := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return item, nil
}
// SetSubmenu() is a wrapper around gtk_menu_item_set_submenu().
// Passing a nil submenu removes the current submenu (GTK accepts NULL
// here; previously a nil submenu caused a nil-interface panic).
func (v *MenuItem) SetSubmenu(submenu IWidget) {
	var w *C.GtkWidget
	if submenu != nil {
		w = submenu.toWidget()
	}
	C.gtk_menu_item_set_submenu(v.native(), w)
}
/*
 * GtkMenuShell
 */

// MenuShell is a representation of GTK's GtkMenuShell.
type MenuShell struct {
	Container
}

// native returns a pointer to the underlying GtkMenuShell.
// Returns nil when the receiver or its GObject is nil.
func (v *MenuShell) native() *C.GtkMenuShell {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuShell(p)
}

// marshalMenuShell extracts a GtkMenuShell from a GValue for the glib
// marshaling machinery.
func marshalMenuShell(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuShell(obj), nil
}

// wrapMenuShell builds the Go wrapper chain down to InitiallyUnowned.
func wrapMenuShell(obj *glib.Object) *MenuShell {
	return &MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// Append is a wrapper around gtk_menu_shell_append(). It adds the menu
// item to the end of the shell.
func (v *MenuShell) Append(child IMenuItem) {
	C.gtk_menu_shell_append(v.native(), child.toWidget())
}
/*
 * GtkMessageDialog
 */

// MessageDialog is a representation of GTK's GtkMessageDialog.
type MessageDialog struct {
	Dialog
}

// native returns a pointer to the underlying GtkMessageDialog.
// Returns nil when the receiver or its GObject is nil.
func (v *MessageDialog) native() *C.GtkMessageDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMessageDialog(p)
}

// marshalMessageDialog extracts a GtkMessageDialog from a GValue for
// the glib marshaling machinery.
func marshalMessageDialog(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMessageDialog(obj), nil
}

// wrapMessageDialog builds the Go wrapper chain down to InitiallyUnowned.
func wrapMessageDialog(obj *glib.Object) *MessageDialog {
	return &MessageDialog{Dialog{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}}
}

// MessageDialogNew() is a wrapper around gtk_message_dialog_new().
// The text is created and formatted by the format specifier and any
// additional arguments. parent may be nil for a dialog with no
// transient parent.
func MessageDialogNew(parent IWindow, flags DialogFlags, mType MessageType, buttons ButtonsType, format string, a ...interface{}) *MessageDialog {
	// Formatting is done in Go; the C shim receives a plain string.
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	var w *C.GtkWindow = nil
	if parent != nil {
		w = parent.toWindow()
	}
	c := C._gtk_message_dialog_new(w,
		C.GtkDialogFlags(flags), C.GtkMessageType(mType),
		C.GtkButtonsType(buttons), cstr)
	// NOTE(review): unlike the (*X, error) constructors in this file, c is
	// not checked for nil before wrapping — confirm the C shim cannot
	// return NULL.
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMessageDialog(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}

// MessageDialogNewWithMarkup is a wrapper around
// gtk_message_dialog_new_with_markup(). The formatted text is parsed
// as Pango markup. parent may be nil.
func MessageDialogNewWithMarkup(parent IWindow, flags DialogFlags, mType MessageType, buttons ButtonsType, format string, a ...interface{}) *MessageDialog {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	var w *C.GtkWindow = nil
	if parent != nil {
		w = parent.toWindow()
	}
	c := C._gtk_message_dialog_new_with_markup(w,
		C.GtkDialogFlags(flags), C.GtkMessageType(mType),
		C.GtkButtonsType(buttons), cstr)
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMessageDialog(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}

// SetMarkup is a wrapper around gtk_message_dialog_set_markup().
func (v *MessageDialog) SetMarkup(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_message_dialog_set_markup(v.native(), (*C.gchar)(cstr))
}

// FormatSecondaryText is a wrapper around
// gtk_message_dialog_format_secondary_text(). Formatting is done in Go.
func (v *MessageDialog) FormatSecondaryText(format string, a ...interface{}) {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	C._gtk_message_dialog_format_secondary_text(v.native(),
		(*C.gchar)(cstr))
}
// FormatSecondaryMarkup is a wrapper around
// gtk_message_dialog_format_secondary_markup(). Formatting is done in
// Go; the result is parsed as Pango markup.
// (Previous comment incorrectly referenced ..._format_secondary_text.)
func (v *MessageDialog) FormatSecondaryMarkup(format string, a ...interface{}) {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	C._gtk_message_dialog_format_secondary_markup(v.native(),
		(*C.gchar)(cstr))
}
// GetMessageArea is intentionally unimplemented. It returns a GtkVBox, which
// is deprecated since GTK 3.2 and for which gotk3 has no bindings.

/*
 * GtkMisc
 */

// Misc is a representation of GTK's GtkMisc.
type Misc struct {
	Widget
}

// native returns a pointer to the underlying GtkMisc.
// Returns nil when the receiver or its GObject is nil.
func (v *Misc) native() *C.GtkMisc {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMisc(p)
}

// marshalMisc extracts a GtkMisc from a GValue for the glib marshaling
// machinery.
func marshalMisc(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMisc(obj), nil
}

// wrapMisc builds the Go wrapper chain down to InitiallyUnowned.
func wrapMisc(obj *glib.Object) *Misc {
	return &Misc{Widget{glib.InitiallyUnowned{obj}}}
}

// GetAlignment is a wrapper around gtk_misc_get_alignment().
func (v *Misc) GetAlignment() (xAlign, yAlign float32) {
	var x, y C.gfloat
	C.gtk_misc_get_alignment(v.native(), &x, &y)
	return float32(x), float32(y)
}

// SetAlignment is a wrapper around gtk_misc_set_alignment().
func (v *Misc) SetAlignment(xAlign, yAlign float32) {
	C.gtk_misc_set_alignment(v.native(), C.gfloat(xAlign), C.gfloat(yAlign))
}

// GetPadding is a wrapper around gtk_misc_get_padding().
func (v *Misc) GetPadding() (xpad, ypad int) {
	var x, y C.gint
	C.gtk_misc_get_padding(v.native(), &x, &y)
	return int(x), int(y)
}

// SetPadding is a wrapper around gtk_misc_set_padding().
func (v *Misc) SetPadding(xPad, yPad int) {
	C.gtk_misc_set_padding(v.native(), C.gint(xPad), C.gint(yPad))
}
/*
 * GtkNotebook
 */

// Notebook is a representation of GTK's GtkNotebook.
type Notebook struct {
	Container
}

// native returns a pointer to the underlying GtkNotebook.
// Returns nil when the receiver or its GObject is nil.
func (v *Notebook) native() *C.GtkNotebook {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkNotebook(p)
}

// marshalNotebook extracts a GtkNotebook from a GValue for the glib
// marshaling machinery.
func marshalNotebook(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapNotebook(obj), nil
}

// wrapNotebook builds the Go wrapper chain down to InitiallyUnowned.
func wrapNotebook(obj *glib.Object) *Notebook {
	return &Notebook{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// NotebookNew() is a wrapper around gtk_notebook_new().
func NotebookNew() (*Notebook, error) {
	c := C.gtk_notebook_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	n := wrapNotebook(obj)
	// Sink the floating reference; finalizer releases it.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return n, nil
}

// AppendPage() is a wrapper around gtk_notebook_append_page().
// tabLabel may be nil for a default tab label. Returns the index of
// the new page, or -1 on failure.
func (v *Notebook) AppendPage(child IWidget, tabLabel IWidget) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_append_page(v.native(), child.toWidget(), cTabLabel)
	return int(c)
}
// AppendPageMenu() is a wrapper around gtk_notebook_append_page_menu().
// tabLabel and menuLabel may be nil for default labels (GTK accepts
// NULL for both; previously nil arguments caused a nil-interface panic,
// inconsistent with AppendPage). Returns the index of the new page, or
// -1 on failure.
func (v *Notebook) AppendPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget) int {
	var cTabLabel, cMenuLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	if menuLabel != nil {
		cMenuLabel = menuLabel.toWidget()
	}
	c := C.gtk_notebook_append_page_menu(v.native(), child.toWidget(),
		cTabLabel, cMenuLabel)
	return int(c)
}
// PrependPage() is a wrapper around gtk_notebook_prepend_page().
// tabLabel may be nil for a default tab label. Returns the index of
// the new page, or -1 on failure.
func (v *Notebook) PrependPage(child IWidget, tabLabel IWidget) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_prepend_page(v.native(), child.toWidget(), cTabLabel)
	return int(c)
}
// PrependPageMenu() is a wrapper around gtk_notebook_prepend_page_menu().
// tabLabel and menuLabel may be nil for default labels (GTK accepts
// NULL for both; previously nil arguments caused a nil-interface panic,
// inconsistent with PrependPage). Returns the index of the new page,
// or -1 on failure.
func (v *Notebook) PrependPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget) int {
	var cTabLabel, cMenuLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	if menuLabel != nil {
		cMenuLabel = menuLabel.toWidget()
	}
	c := C.gtk_notebook_prepend_page_menu(v.native(), child.toWidget(),
		cTabLabel, cMenuLabel)
	return int(c)
}
// InsertPage() is a wrapper around gtk_notebook_insert_page().
// tabLabel may be nil for a default tab label (GTK accepts NULL;
// previously a nil tabLabel caused a nil-interface panic, inconsistent
// with AppendPage/PrependPage). Returns the index of the new page, or
// -1 on failure.
func (v *Notebook) InsertPage(child IWidget, tabLabel IWidget, position int) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_insert_page(v.native(), child.toWidget(),
		cTabLabel, C.gint(position))
	return int(c)
}
// InsertPageMenu() is a wrapper around gtk_notebook_insert_page_menu().
// tabLabel and menuLabel may be nil for default labels (GTK accepts
// NULL for both; previously nil arguments caused a nil-interface
// panic). Returns the index of the new page, or -1 on failure.
func (v *Notebook) InsertPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget, position int) int {
	var cTabLabel, cMenuLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	if menuLabel != nil {
		cMenuLabel = menuLabel.toWidget()
	}
	c := C.gtk_notebook_insert_page_menu(v.native(), child.toWidget(),
		cTabLabel, cMenuLabel, C.gint(position))
	return int(c)
}
// RemovePage() is a wrapper around gtk_notebook_remove_page().
func (v *Notebook) RemovePage(pageNum int) {
	C.gtk_notebook_remove_page(v.native(), C.gint(pageNum))
}

// PageNum() is a wrapper around gtk_notebook_page_num(). It returns
// the index of the page containing child, or -1 if not found.
func (v *Notebook) PageNum(child IWidget) int {
	c := C.gtk_notebook_page_num(v.native(), child.toWidget())
	return int(c)
}

// NextPage() is a wrapper around gtk_notebook_next_page().
func (v *Notebook) NextPage() {
	C.gtk_notebook_next_page(v.native())
}

// PrevPage() is a wrapper around gtk_notebook_prev_page().
func (v *Notebook) PrevPage() {
	C.gtk_notebook_prev_page(v.native())
}

// ReorderChild() is a wrapper around gtk_notebook_reorder_child().
func (v *Notebook) ReorderChild(child IWidget, position int) {
	C.gtk_notebook_reorder_child(v.native(), child.toWidget(),
		C.gint(position))
}

// SetTabPos() is a wrapper around gtk_notebook_set_tab_pos().
func (v *Notebook) SetTabPos(pos PositionType) {
	C.gtk_notebook_set_tab_pos(v.native(), C.GtkPositionType(pos))
}

// SetShowTabs() is a wrapper around gtk_notebook_set_show_tabs().
func (v *Notebook) SetShowTabs(showTabs bool) {
	C.gtk_notebook_set_show_tabs(v.native(), gbool(showTabs))
}

// SetShowBorder() is a wrapper around gtk_notebook_set_show_border().
func (v *Notebook) SetShowBorder(showBorder bool) {
	C.gtk_notebook_set_show_border(v.native(), gbool(showBorder))
}

// SetScrollable() is a wrapper around gtk_notebook_set_scrollable().
func (v *Notebook) SetScrollable(scrollable bool) {
	C.gtk_notebook_set_scrollable(v.native(), gbool(scrollable))
}

// PopupEnable() is a wrapper around gtk_notebook_popup_enable().
func (v *Notebook) PopupEnable() {
	C.gtk_notebook_popup_enable(v.native())
}

// PopupDisable() is a wrapper around gtk_notebook_popup_disable().
func (v *Notebook) PopupDisable() {
	C.gtk_notebook_popup_disable(v.native())
}

// GetCurrentPage() is a wrapper around gtk_notebook_get_current_page().
func (v *Notebook) GetCurrentPage() int {
	c := C.gtk_notebook_get_current_page(v.native())
	return int(c)
}

// GetMenuLabel() is a wrapper around gtk_notebook_get_menu_label().
func (v *Notebook) GetMenuLabel(child IWidget) (*Widget, error) {
	c := C.gtk_notebook_get_menu_label(v.native(), child.toWidget())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// GetNthPage() is a wrapper around gtk_notebook_get_nth_page().
func (v *Notebook) GetNthPage(pageNum int) (*Widget, error) {
	c := C.gtk_notebook_get_nth_page(v.native(), C.gint(pageNum))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// GetNPages() is a wrapper around gtk_notebook_get_n_pages().
func (v *Notebook) GetNPages() int {
	c := C.gtk_notebook_get_n_pages(v.native())
	return int(c)
}
// GetTabLabel() is a wrapper around gtk_notebook_get_tab_label().
func (v *Notebook) GetTabLabel(child IWidget) (*Widget, error) {
	c := C.gtk_notebook_get_tab_label(v.native(), child.toWidget())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// SetMenuLabel() is a wrapper around gtk_notebook_set_menu_label().
func (v *Notebook) SetMenuLabel(child, menuLabel IWidget) {
	C.gtk_notebook_set_menu_label(v.native(), child.toWidget(),
		menuLabel.toWidget())
}

// SetMenuLabelText() is a wrapper around gtk_notebook_set_menu_label_text().
func (v *Notebook) SetMenuLabelText(child IWidget, menuText string) {
	cstr := C.CString(menuText)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_menu_label_text(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}

// SetTabLabel() is a wrapper around gtk_notebook_set_tab_label().
func (v *Notebook) SetTabLabel(child, tabLabel IWidget) {
	C.gtk_notebook_set_tab_label(v.native(), child.toWidget(),
		tabLabel.toWidget())
}

// SetTabLabelText() is a wrapper around gtk_notebook_set_tab_label_text().
func (v *Notebook) SetTabLabelText(child IWidget, tabText string) {
	cstr := C.CString(tabText)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_tab_label_text(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}

// SetTabReorderable() is a wrapper around gtk_notebook_set_tab_reorderable().
func (v *Notebook) SetTabReorderable(child IWidget, reorderable bool) {
	C.gtk_notebook_set_tab_reorderable(v.native(), child.toWidget(),
		gbool(reorderable))
}

// SetTabDetachable() is a wrapper around gtk_notebook_set_tab_detachable().
func (v *Notebook) SetTabDetachable(child IWidget, detachable bool) {
	C.gtk_notebook_set_tab_detachable(v.native(), child.toWidget(),
		gbool(detachable))
}
// GetMenuLabelText() is a wrapper around gtk_notebook_get_menu_label_text().
func (v *Notebook) GetMenuLabelText(child IWidget) (string, error) {
	c := C.gtk_notebook_get_menu_label_text(v.native(), child.toWidget())
	if c == nil {
		return "", errors.New("No menu label for widget")
	}
	return C.GoString((*C.char)(c)), nil
}

// GetScrollable() is a wrapper around gtk_notebook_get_scrollable().
func (v *Notebook) GetScrollable() bool {
	c := C.gtk_notebook_get_scrollable(v.native())
	return gobool(c)
}

// GetShowBorder() is a wrapper around gtk_notebook_get_show_border().
func (v *Notebook) GetShowBorder() bool {
	c := C.gtk_notebook_get_show_border(v.native())
	return gobool(c)
}

// GetShowTabs() is a wrapper around gtk_notebook_get_show_tabs().
func (v *Notebook) GetShowTabs() bool {
	c := C.gtk_notebook_get_show_tabs(v.native())
	return gobool(c)
}

// GetTabLabelText() is a wrapper around gtk_notebook_get_tab_label_text().
func (v *Notebook) GetTabLabelText(child IWidget) (string, error) {
	c := C.gtk_notebook_get_tab_label_text(v.native(), child.toWidget())
	if c == nil {
		return "", errors.New("No tab label for widget")
	}
	return C.GoString((*C.char)(c)), nil
}

// GetTabPos() is a wrapper around gtk_notebook_get_tab_pos().
func (v *Notebook) GetTabPos() PositionType {
	c := C.gtk_notebook_get_tab_pos(v.native())
	return PositionType(c)
}

// GetTabReorderable() is a wrapper around gtk_notebook_get_tab_reorderable().
func (v *Notebook) GetTabReorderable(child IWidget) bool {
	c := C.gtk_notebook_get_tab_reorderable(v.native(), child.toWidget())
	return gobool(c)
}

// GetTabDetachable() is a wrapper around gtk_notebook_get_tab_detachable().
func (v *Notebook) GetTabDetachable(child IWidget) bool {
	c := C.gtk_notebook_get_tab_detachable(v.native(), child.toWidget())
	return gobool(c)
}

// SetCurrentPage() is a wrapper around gtk_notebook_set_current_page().
func (v *Notebook) SetCurrentPage(pageNum int) {
	C.gtk_notebook_set_current_page(v.native(), C.gint(pageNum))
}

// SetGroupName() is a wrapper around gtk_notebook_set_group_name().
func (v *Notebook) SetGroupName(groupName string) {
	cstr := C.CString(groupName)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_group_name(v.native(), (*C.gchar)(cstr))
}

// GetGroupName() is a wrapper around gtk_notebook_get_group_name().
func (v *Notebook) GetGroupName() (string, error) {
	c := C.gtk_notebook_get_group_name(v.native())
	if c == nil {
		return "", errors.New("No group name")
	}
	return C.GoString((*C.char)(c)), nil
}

// SetActionWidget() is a wrapper around gtk_notebook_set_action_widget().
func (v *Notebook) SetActionWidget(widget IWidget, packType PackType) {
	C.gtk_notebook_set_action_widget(v.native(), widget.toWidget(),
		C.GtkPackType(packType))
}

// GetActionWidget() is a wrapper around gtk_notebook_get_action_widget().
func (v *Notebook) GetActionWidget(packType PackType) (*Widget, error) {
	c := C.gtk_notebook_get_action_widget(v.native(),
		C.GtkPackType(packType))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
/*
 * GtkOffscreenWindow
 */
// OffscreenWindow is a representation of GTK's GtkOffscreenWindow.
type OffscreenWindow struct {
Window
}
// native returns a pointer to the underlying GtkOffscreenWindow.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *OffscreenWindow) native() *C.GtkOffscreenWindow {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkOffscreenWindow(p)
}
func marshalOffscreenWindow(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapOffscreenWindow(obj), nil
}
func wrapOffscreenWindow(obj *glib.Object) *OffscreenWindow {
return &OffscreenWindow{Window{Bin{Container{Widget{
glib.InitiallyUnowned{obj}}}}}}
}
// OffscreenWindowNew is a wrapper around gtk_offscreen_window_new().
func OffscreenWindowNew() (*OffscreenWindow, error) {
c := C.gtk_offscreen_window_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
o := wrapOffscreenWindow(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return o, nil
}
// GetSurface is a wrapper around gtk_offscreen_window_get_surface().
// The returned surface is safe to use over window resizes.
func (v *OffscreenWindow) GetSurface() (*cairo.Surface, error) {
c := C.gtk_offscreen_window_get_surface(v.native())
if c == nil {
return nil, nilPtrErr
}
cairoPtr := (uintptr)(unsafe.Pointer(c))
// NOTE(review): second argument presumably makes the cairo wrapper take
// its own reference on the surface — confirm against cairo.NewSurface.
s := cairo.NewSurface(cairoPtr, true)
return s, nil
}
// GetPixbuf is a wrapper around gtk_offscreen_window_get_pixbuf().
func (v *OffscreenWindow) GetPixbuf() (*gdk.Pixbuf, error) {
c := C.gtk_offscreen_window_get_pixbuf(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
pb := &gdk.Pixbuf{obj}
// Pixbuf is returned with ref count of 1, so don't increment.
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return pb, nil
}
/*
 * GtkOrientable
 */
// Orientable is a representation of GTK's GtkOrientable GInterface.
// It wraps a plain *glib.Object rather than a Widget, since GtkOrientable
// is an interface implemented by several widget types.
type Orientable struct {
*glib.Object
}
// IOrientable is an interface type implemented by all structs
// embedding an Orientable. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkOrientable.
type IOrientable interface {
toOrientable() *C.GtkOrientable
}
// native returns a pointer to the underlying GObject as a GtkOrientable.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Orientable) native() *C.GtkOrientable {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkOrientable(p)
}
func marshalOrientable(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapOrientable(obj), nil
}
func wrapOrientable(obj *glib.Object) *Orientable {
return &Orientable{obj}
}
// GetOrientation() is a wrapper around gtk_orientable_get_orientation().
func (v *Orientable) GetOrientation() Orientation {
c := C.gtk_orientable_get_orientation(v.native())
return Orientation(c)
}
// SetOrientation() is a wrapper around gtk_orientable_set_orientation().
func (v *Orientable) SetOrientation(orientation Orientation) {
C.gtk_orientable_set_orientation(v.native(),
C.GtkOrientation(orientation))
}
/*
 * GtkProgressBar
 */
// ProgressBar is a representation of GTK's GtkProgressBar.
type ProgressBar struct {
Widget
}
// native returns a pointer to the underlying GtkProgressBar.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *ProgressBar) native() *C.GtkProgressBar {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkProgressBar(p)
}
func marshalProgressBar(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapProgressBar(obj), nil
}
func wrapProgressBar(obj *glib.Object) *ProgressBar {
return &ProgressBar{Widget{glib.InitiallyUnowned{obj}}}
}
// ProgressBarNew() is a wrapper around gtk_progress_bar_new().
func ProgressBarNew() (*ProgressBar, error) {
c := C.gtk_progress_bar_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
p := wrapProgressBar(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return p, nil
}
// SetFraction() is a wrapper around gtk_progress_bar_set_fraction().
func (v *ProgressBar) SetFraction(fraction float64) {
C.gtk_progress_bar_set_fraction(v.native(), C.gdouble(fraction))
}
// GetFraction() is a wrapper around gtk_progress_bar_get_fraction().
func (v *ProgressBar) GetFraction() float64 {
c := C.gtk_progress_bar_get_fraction(v.native())
return float64(c)
}
// SetText() is a wrapper around gtk_progress_bar_set_text().
// The C copy of text is freed after the call returns.
func (v *ProgressBar) SetText(text string) {
cstr := C.CString(text)
defer C.free(unsafe.Pointer(cstr))
C.gtk_progress_bar_set_text(v.native(), (*C.gchar)(cstr))
}
/*
 * GtkRadioButton
 */
// RadioButton is a representation of GTK's GtkRadioButton.
type RadioButton struct {
CheckButton
}
// native returns a pointer to the underlying GtkRadioButton.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *RadioButton) native() *C.GtkRadioButton {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkRadioButton(p)
}
func marshalRadioButton(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapRadioButton(obj), nil
}
func wrapRadioButton(obj *glib.Object) *RadioButton {
return &RadioButton{CheckButton{ToggleButton{Button{Bin{Container{
Widget{glib.InitiallyUnowned{obj}}}}}}}}
}
// RadioButtonNew is a wrapper around gtk_radio_button_new().
// group may be nil to start a new radio group.
func RadioButtonNew(group *glib.SList) (*RadioButton, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
c := C.gtk_radio_button_new(gslist)
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioButtonNewFromWidget is a wrapper around
// gtk_radio_button_new_from_widget().
func RadioButtonNewFromWidget(radioGroupMember *RadioButton) (*RadioButton, error) {
c := C.gtk_radio_button_new_from_widget(radioGroupMember.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioButtonNewWithLabel is a wrapper around
// gtk_radio_button_new_with_label().
func RadioButtonNewWithLabel(group *glib.SList, label string) (*RadioButton, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_button_new_with_label(gslist, (*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioButtonNewWithLabelFromWidget is a wrapper around
// gtk_radio_button_new_with_label_from_widget().
func RadioButtonNewWithLabelFromWidget(radioGroupMember *RadioButton, label string) (*RadioButton, error) {
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_button_new_with_label_from_widget(radioGroupMember.native(),
(*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioButtonNewWithMnemonic is a wrapper around
// gtk_radio_button_new_with_mnemonic()
func RadioButtonNewWithMnemonic(group *glib.SList, label string) (*RadioButton, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_button_new_with_mnemonic(gslist, (*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioButtonNewWithMnemonicFromWidget is a wrapper around
// gtk_radio_button_new_with_mnemonic_from_widget().
func RadioButtonNewWithMnemonicFromWidget(radioGroupMember *RadioButton, label string) (*RadioButton, error) {
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_button_new_with_mnemonic_from_widget(radioGroupMember.native(),
(*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// SetGroup is a wrapper around gtk_radio_button_set_group().
func (v *RadioButton) SetGroup(group *glib.SList) {
gslist := (*C.GSList)(unsafe.Pointer(group))
C.gtk_radio_button_set_group(v.native(), gslist)
}
// GetGroup is a wrapper around gtk_radio_button_get_group().
// The returned list is owned by GTK; it is cast to *glib.SList without
// copying or taking ownership.
func (v *RadioButton) GetGroup() (*glib.SList, error) {
c := C.gtk_radio_button_get_group(v.native())
if c == nil {
return nil, nilPtrErr
}
return (*glib.SList)(unsafe.Pointer(c)), nil
}
// JoinGroup is a wrapper around gtk_radio_button_join_group().
func (v *RadioButton) JoinGroup(groupSource *RadioButton) {
C.gtk_radio_button_join_group(v.native(), groupSource.native())
}
/*
 * GtkRadioMenuItem
 */
// RadioMenuItem is a representation of GTK's GtkRadioMenuItem.
type RadioMenuItem struct {
CheckMenuItem
}
// native returns a pointer to the underlying GtkRadioMenuItem.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *RadioMenuItem) native() *C.GtkRadioMenuItem {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkRadioMenuItem(p)
}
func marshalRadioMenuItem(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapRadioMenuItem(obj), nil
}
func wrapRadioMenuItem(obj *glib.Object) *RadioMenuItem {
return &RadioMenuItem{CheckMenuItem{MenuItem{Bin{Container{
Widget{glib.InitiallyUnowned{obj}}}}}}}
}
// RadioMenuItemNew is a wrapper around gtk_radio_menu_item_new().
// group may be nil to start a new radio group.
func RadioMenuItemNew(group *glib.SList) (*RadioMenuItem, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
c := C.gtk_radio_menu_item_new(gslist)
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioMenuItemNewWithLabel is a wrapper around
// gtk_radio_menu_item_new_with_label().
func RadioMenuItemNewWithLabel(group *glib.SList, label string) (*RadioMenuItem, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_menu_item_new_with_label(gslist, (*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioMenuItemNewWithMnemonic is a wrapper around
// gtk_radio_menu_item_new_with_mnemonic().
func RadioMenuItemNewWithMnemonic(group *glib.SList, label string) (*RadioMenuItem, error) {
gslist := (*C.GSList)(unsafe.Pointer(group))
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_menu_item_new_with_mnemonic(gslist, (*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioMenuItemNewFromWidget is a wrapper around
// gtk_radio_menu_item_new_from_widget().
func RadioMenuItemNewFromWidget(group *RadioMenuItem) (*RadioMenuItem, error) {
c := C.gtk_radio_menu_item_new_from_widget(group.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioMenuItemNewWithLabelFromWidget is a wrapper around
// gtk_radio_menu_item_new_with_label_from_widget().
func RadioMenuItemNewWithLabelFromWidget(group *RadioMenuItem, label string) (*RadioMenuItem, error) {
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_menu_item_new_with_label_from_widget(group.native(),
(*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// RadioMenuItemNewWithMnemonicFromWidget is a wrapper around
// gtk_radio_menu_item_new_with_mnemonic_from_widget().
func RadioMenuItemNewWithMnemonicFromWidget(group *RadioMenuItem, label string) (*RadioMenuItem, error) {
cstr := C.CString(label)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_radio_menu_item_new_with_mnemonic_from_widget(group.native(),
(*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
r := wrapRadioMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return r, nil
}
// SetGroup is a wrapper around gtk_radio_menu_item_set_group().
func (v *RadioMenuItem) SetGroup(group *glib.SList) {
gslist := (*C.GSList)(unsafe.Pointer(group))
C.gtk_radio_menu_item_set_group(v.native(), gslist)
}
// GetGroup is a wrapper around gtk_radio_menu_item_get_group().
// The returned list is owned by GTK; it is cast to *glib.SList without
// copying or taking ownership.
func (v *RadioMenuItem) GetGroup() (*glib.SList, error) {
c := C.gtk_radio_menu_item_get_group(v.native())
if c == nil {
return nil, nilPtrErr
}
return (*glib.SList)(unsafe.Pointer(c)), nil
}
/*
 * GtkRange
 */
// Range is a representation of GTK's GtkRange.
// GtkRange is an abstract base class in GTK, so no constructor is
// provided here; concrete subclasses (e.g. Scrollbar) embed Range.
type Range struct {
Widget
}
// native returns a pointer to the underlying GtkRange.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Range) native() *C.GtkRange {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkRange(p)
}
func marshalRange(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapRange(obj), nil
}
func wrapRange(obj *glib.Object) *Range {
return &Range{Widget{glib.InitiallyUnowned{obj}}}
}
/*
 * GtkScrollbar
 */
// Scrollbar is a representation of GTK's GtkScrollbar.
type Scrollbar struct {
Range
}
// native returns a pointer to the underlying GtkScrollbar.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Scrollbar) native() *C.GtkScrollbar {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkScrollbar(p)
}
func marshalScrollbar(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapScrollbar(obj), nil
}
func wrapScrollbar(obj *glib.Object) *Scrollbar {
return &Scrollbar{Range{Widget{glib.InitiallyUnowned{obj}}}}
}
// ScrollbarNew is a wrapper around gtk_scrollbar_new().
// NOTE(review): passing a nil *Adjustment relies on Adjustment.native()
// returning a nil C pointer for a nil receiver — confirm before use.
func ScrollbarNew(orientation Orientation, adjustment *Adjustment) (*Scrollbar, error) {
c := C.gtk_scrollbar_new(C.GtkOrientation(orientation), adjustment.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapScrollbar(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
/*
 * GtkScrolledWindow
 */
// ScrolledWindow is a representation of GTK's GtkScrolledWindow.
type ScrolledWindow struct {
Bin
}
// native returns a pointer to the underlying GtkScrolledWindow.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *ScrolledWindow) native() *C.GtkScrolledWindow {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkScrolledWindow(p)
}
func marshalScrolledWindow(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapScrolledWindow(obj), nil
}
func wrapScrolledWindow(obj *glib.Object) *ScrolledWindow {
return &ScrolledWindow{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// ScrolledWindowNew() is a wrapper around gtk_scrolled_window_new().
// NOTE(review): passing nil adjustments relies on Adjustment.native()
// returning a nil C pointer for a nil receiver — confirm before use.
func ScrolledWindowNew(hadjustment, vadjustment *Adjustment) (*ScrolledWindow, error) {
c := C.gtk_scrolled_window_new(hadjustment.native(),
vadjustment.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapScrolledWindow(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// SetPolicy() is a wrapper around gtk_scrolled_window_set_policy().
func (v *ScrolledWindow) SetPolicy(hScrollbarPolicy, vScrollbarPolicy PolicyType) {
C.gtk_scrolled_window_set_policy(v.native(),
C.GtkPolicyType(hScrollbarPolicy),
C.GtkPolicyType(vScrollbarPolicy))
}
/*
 * GtkSearchEntry
 */
// SearchEntry is a representation of GTK's GtkSearchEntry.
type SearchEntry struct {
Entry
}
// native returns a pointer to the underlying GtkSearchEntry.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *SearchEntry) native() *C.GtkSearchEntry {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSearchEntry(p)
}
func marshalSearchEntry(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSearchEntry(obj), nil
}
// wrapSearchEntry builds the Go wrapper, including the Editable
// interface wrapper embedded in Entry.
func wrapSearchEntry(obj *glib.Object) *SearchEntry {
e := wrapEditable(obj)
return &SearchEntry{Entry{Widget{glib.InitiallyUnowned{obj}}, *e}}
}
// SearchEntryNew is a wrapper around gtk_search_entry_new().
func SearchEntryNew() (*SearchEntry, error) {
c := C.gtk_search_entry_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSearchEntry(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
/*
 * GtkSeparator
 */
// Separator is a representation of GTK's GtkSeparator.
type Separator struct {
Widget
}
// native returns a pointer to the underlying GtkSeparator.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Separator) native() *C.GtkSeparator {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSeparator(p)
}
func marshalSeparator(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSeparator(obj), nil
}
func wrapSeparator(obj *glib.Object) *Separator {
return &Separator{Widget{glib.InitiallyUnowned{obj}}}
}
// SeparatorNew is a wrapper around gtk_separator_new().
func SeparatorNew(orientation Orientation) (*Separator, error) {
c := C.gtk_separator_new(C.GtkOrientation(orientation))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSeparator(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
/*
 * GtkSeparatorMenuItem
 */
// SeparatorMenuItem is a representation of GTK's GtkSeparatorMenuItem.
type SeparatorMenuItem struct {
MenuItem
}
// native returns a pointer to the underlying GtkSeparatorMenuItem.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *SeparatorMenuItem) native() *C.GtkSeparatorMenuItem {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSeparatorMenuItem(p)
}
func marshalSeparatorMenuItem(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSeparatorMenuItem(obj), nil
}
func wrapSeparatorMenuItem(obj *glib.Object) *SeparatorMenuItem {
return &SeparatorMenuItem{MenuItem{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}
}
// SeparatorMenuItemNew is a wrapper around gtk_separator_menu_item_new().
func SeparatorMenuItemNew() (*SeparatorMenuItem, error) {
c := C.gtk_separator_menu_item_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSeparatorMenuItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
/*
 * GtkSeparatorToolItem
 */
// SeparatorToolItem is a representation of GTK's GtkSeparatorToolItem.
type SeparatorToolItem struct {
ToolItem
}
// native returns a pointer to the underlying GtkSeparatorToolItem.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *SeparatorToolItem) native() *C.GtkSeparatorToolItem {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSeparatorToolItem(p)
}
func marshalSeparatorToolItem(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSeparatorToolItem(obj), nil
}
func wrapSeparatorToolItem(obj *glib.Object) *SeparatorToolItem {
return &SeparatorToolItem{ToolItem{Bin{Container{Widget{
glib.InitiallyUnowned{obj}}}}}}
}
// SeparatorToolItemNew is a wrapper around gtk_separator_tool_item_new().
func SeparatorToolItemNew() (*SeparatorToolItem, error) {
c := C.gtk_separator_tool_item_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSeparatorToolItem(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// SetDraw is a wrapper around gtk_separator_tool_item_set_draw().
func (v *SeparatorToolItem) SetDraw(draw bool) {
C.gtk_separator_tool_item_set_draw(v.native(), gbool(draw))
}
// GetDraw is a wrapper around gtk_separator_tool_item_get_draw().
func (v *SeparatorToolItem) GetDraw() bool {
c := C.gtk_separator_tool_item_get_draw(v.native())
return gobool(c)
}
/*
 * GtkSpinButton
 */
// SpinButton is a representation of GTK's GtkSpinButton.
type SpinButton struct {
Entry
}
// native returns a pointer to the underlying GtkSpinButton.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *SpinButton) native() *C.GtkSpinButton {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSpinButton(p)
}
func marshalSpinButton(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSpinButton(obj), nil
}
// wrapSpinButton builds the Go wrapper, including the Editable
// interface wrapper embedded in Entry.
func wrapSpinButton(obj *glib.Object) *SpinButton {
e := wrapEditable(obj)
return &SpinButton{Entry{Widget{glib.InitiallyUnowned{obj}}, *e}}
}
// Configure() is a wrapper around gtk_spin_button_configure().
func (v *SpinButton) Configure(adjustment *Adjustment, climbRate float64, digits uint) {
C.gtk_spin_button_configure(v.native(), adjustment.native(),
C.gdouble(climbRate), C.guint(digits))
}
// SpinButtonNew() is a wrapper around gtk_spin_button_new().
func SpinButtonNew(adjustment *Adjustment, climbRate float64, digits uint) (*SpinButton, error) {
c := C.gtk_spin_button_new(adjustment.native(),
C.gdouble(climbRate), C.guint(digits))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSpinButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// SpinButtonNewWithRange() is a wrapper around
// gtk_spin_button_new_with_range().
func SpinButtonNewWithRange(min, max, step float64) (*SpinButton, error) {
c := C.gtk_spin_button_new_with_range(C.gdouble(min), C.gdouble(max),
C.gdouble(step))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSpinButton(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// GetValueAsInt() is a wrapper around gtk_spin_button_get_value_as_int().
func (v *SpinButton) GetValueAsInt() int {
c := C.gtk_spin_button_get_value_as_int(v.native())
return int(c)
}
// SetValue() is a wrapper around gtk_spin_button_set_value().
func (v *SpinButton) SetValue(value float64) {
C.gtk_spin_button_set_value(v.native(), C.gdouble(value))
}
// GetValue() is a wrapper around gtk_spin_button_get_value().
func (v *SpinButton) GetValue() float64 {
c := C.gtk_spin_button_get_value(v.native())
return float64(c)
}
/*
 * GtkSpinner
 */
// Spinner is a representation of GTK's GtkSpinner.
type Spinner struct {
Widget
}
// native returns a pointer to the underlying GtkSpinner.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Spinner) native() *C.GtkSpinner {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSpinner(p)
}
func marshalSpinner(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSpinner(obj), nil
}
func wrapSpinner(obj *glib.Object) *Spinner {
return &Spinner{Widget{glib.InitiallyUnowned{obj}}}
}
// SpinnerNew is a wrapper around gtk_spinner_new().
func SpinnerNew() (*Spinner, error) {
c := C.gtk_spinner_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSpinner(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// Start is a wrapper around gtk_spinner_start().
func (v *Spinner) Start() {
C.gtk_spinner_start(v.native())
}
// Stop is a wrapper around gtk_spinner_stop().
func (v *Spinner) Stop() {
C.gtk_spinner_stop(v.native())
}
/*
 * GtkStatusbar
 */
// Statusbar is a representation of GTK's GtkStatusbar
type Statusbar struct {
Box
}
// native returns a pointer to the underlying GtkStatusbar
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Statusbar) native() *C.GtkStatusbar {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkStatusbar(p)
}
func marshalStatusbar(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapStatusbar(obj), nil
}
func wrapStatusbar(obj *glib.Object) *Statusbar {
return &Statusbar{Box{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// StatusbarNew() is a wrapper around gtk_statusbar_new().
func StatusbarNew() (*Statusbar, error) {
c := C.gtk_statusbar_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapStatusbar(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// GetContextId() is a wrapper around gtk_statusbar_get_context_id().
func (v *Statusbar) GetContextId(contextDescription string) uint {
cstr := C.CString(contextDescription)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_statusbar_get_context_id(v.native(), (*C.gchar)(cstr))
return uint(c)
}
// Push() is a wrapper around gtk_statusbar_push().
// It returns the message id of the pushed message.
func (v *Statusbar) Push(contextID uint, text string) uint {
cstr := C.CString(text)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_statusbar_push(v.native(), C.guint(contextID),
(*C.gchar)(cstr))
return uint(c)
}
// Pop() is a wrapper around gtk_statusbar_pop().
func (v *Statusbar) Pop(contextID uint) {
C.gtk_statusbar_pop(v.native(), C.guint(contextID))
}
// GetMessageArea() is a wrapper around gtk_statusbar_get_message_area().
// The returned box is borrowed from the statusbar; an additional
// reference is taken (and released by the finalizer) so the Go wrapper
// keeps the widget alive independently, matching the pattern used by
// Notebook.GetActionWidget.
func (v *Statusbar) GetMessageArea() (*Box, error) {
c := C.gtk_statusbar_get_message_area(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
b := &Box{Container{Widget{glib.InitiallyUnowned{obj}}}}
// RefSink on an already-owned (non-floating) object is equivalent to
// taking a plain reference.
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return b, nil
}
/*
 * GtkSwitch
 */
// Switch is a representation of GTK's GtkSwitch.
type Switch struct {
Widget
}
// native returns a pointer to the underlying GtkSwitch.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *Switch) native() *C.GtkSwitch {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkSwitch(p)
}
func marshalSwitch(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapSwitch(obj), nil
}
func wrapSwitch(obj *glib.Object) *Switch {
return &Switch{Widget{glib.InitiallyUnowned{obj}}}
}
// SwitchNew is a wrapper around gtk_switch_new().
func SwitchNew() (*Switch, error) {
c := C.gtk_switch_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
s := wrapSwitch(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return s, nil
}
// GetActive is a wrapper around gtk_switch_get_active().
func (v *Switch) GetActive() bool {
c := C.gtk_switch_get_active(v.native())
return gobool(c)
}
// SetActive is a wrapper around gtk_switch_set_active().
func (v *Switch) SetActive(isActive bool) {
C.gtk_switch_set_active(v.native(), gbool(isActive))
}
/*
 * GtkTextView
 */
// TextView is a representation of GTK's GtkTextView
type TextView struct {
Container
}
// native returns a pointer to the underlying GtkTextView.
// A nil receiver or nil embedded GObject yields a nil C pointer.
func (v *TextView) native() *C.GtkTextView {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkTextView(p)
}
func marshalTextView(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapTextView(obj), nil
}
func wrapTextView(obj *glib.Object) *TextView {
return &TextView{Container{Widget{glib.InitiallyUnowned{obj}}}}
}
// TextViewNew is a wrapper around gtk_text_view_new().
func TextViewNew() (*TextView, error) {
c := C.gtk_text_view_new()
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
t := wrapTextView(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return t, nil
}
// TextViewNewWithBuffer is a wrapper around gtk_text_view_new_with_buffer().
// It returns nilPtrErr when the underlying C constructor returns NULL,
// matching every other constructor in this file (the nil check was
// previously missing, which would have dereferenced a nil pointer in
// glib.ToGObject).
func TextViewNewWithBuffer(buf *TextBuffer) (*TextView, error) {
cbuf := buf.native()
c := C.gtk_text_view_new_with_buffer(cbuf)
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
t := wrapTextView(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return t, nil
}
// GetBuffer is a wrapper around gtk_text_view_get_buffer().
func (v *TextView) GetBuffer() (*TextBuffer, error) {
c := C.gtk_text_view_get_buffer(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
t := wrapTextBuffer(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return t, nil
}
// SetBuffer is a wrapper around gtk_text_view_set_buffer().
func (v *TextView) SetBuffer(buffer *TextBuffer) {
C.gtk_text_view_set_buffer(v.native(), buffer.native())
}
// SetEditable is a wrapper around gtk_text_view_set_editable().
func (v *TextView) SetEditable(editable bool) {
C.gtk_text_view_set_editable(v.native(), gbool(editable))
}
// GetEditable is a wrapper around gtk_text_view_get_editable().
func (v *TextView) GetEditable() bool {
c := C.gtk_text_view_get_editable(v.native())
return gobool(c)
}
// SetWrapMode is a wrapper around gtk_text_view_set_wrap_mode().
func (v *TextView) SetWrapMode(wrapMode WrapMode) {
C.gtk_text_view_set_wrap_mode(v.native(), C.GtkWrapMode(wrapMode))
}
// GetWrapMode is a wrapper around gtk_text_view_get_wrap_mode().
func (v *TextView) GetWrapMode() WrapMode {
return WrapMode(C.gtk_text_view_get_wrap_mode(v.native()))
}
// SetCursorVisible is a wrapper around gtk_text_view_set_cursor_visible().
func (v *TextView) SetCursorVisible(visible bool) {
C.gtk_text_view_set_cursor_visible(v.native(), gbool(visible))
}
// GetCursorVisible is a wrapper around gtk_text_view_get_cursor_visible().
func (v *TextView) GetCursorVisible() bool {
c := C.gtk_text_view_get_cursor_visible(v.native())
return gobool(c)
}
// SetOverwrite is a wrapper around gtk_text_view_set_overwrite().
func (v *TextView) SetOverwrite(overwrite bool) {
C.gtk_text_view_set_overwrite(v.native(), gbool(overwrite))
}
// GetOverwrite is a wrapper around gtk_text_view_get_overwrite().
func (v *TextView) GetOverwrite() bool {
c := C.gtk_text_view_get_overwrite(v.native())
return gobool(c)
}
// SetJustification is a wrapper around gtk_text_view_set_justification().
func (v *TextView) SetJustification(justify Justification) {
C.gtk_text_view_set_justification(v.native(), C.GtkJustification(justify))
}
// GetJustification is a wrapper around gtk_text_view_get_justification().
func (v *TextView) GetJustification() Justification {
	return Justification(C.gtk_text_view_get_justification(v.native()))
}
// SetAcceptsTab is a wrapper around gtk_text_view_set_accepts_tab().
func (v *TextView) SetAcceptsTab(acceptsTab bool) {
	flag := gbool(acceptsTab)
	C.gtk_text_view_set_accepts_tab(v.native(), flag)
}
// GetAcceptsTab is a wrapper around gtk_text_view_get_accepts_tab().
func (v *TextView) GetAcceptsTab() bool {
	return gobool(C.gtk_text_view_get_accepts_tab(v.native()))
}
// SetPixelsAboveLines is a wrapper around gtk_text_view_set_pixels_above_lines().
func (v *TextView) SetPixelsAboveLines(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_above_lines(v.native(), n)
}
// GetPixelsAboveLines is a wrapper around gtk_text_view_get_pixels_above_lines().
func (v *TextView) GetPixelsAboveLines() int {
	return int(C.gtk_text_view_get_pixels_above_lines(v.native()))
}
// SetPixelsBelowLines is a wrapper around gtk_text_view_set_pixels_below_lines().
func (v *TextView) SetPixelsBelowLines(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_below_lines(v.native(), n)
}
// GetPixelsBelowLines is a wrapper around gtk_text_view_get_pixels_below_lines().
func (v *TextView) GetPixelsBelowLines() int {
	return int(C.gtk_text_view_get_pixels_below_lines(v.native()))
}
// SetPixelsInsideWrap is a wrapper around gtk_text_view_set_pixels_inside_wrap().
func (v *TextView) SetPixelsInsideWrap(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_inside_wrap(v.native(), n)
}
// GetPixelsInsideWrap is a wrapper around gtk_text_view_get_pixels_inside_wrap().
func (v *TextView) GetPixelsInsideWrap() int {
	return int(C.gtk_text_view_get_pixels_inside_wrap(v.native()))
}
// SetLeftMargin is a wrapper around gtk_text_view_set_left_margin().
func (v *TextView) SetLeftMargin(margin int) {
	m := C.gint(margin)
	C.gtk_text_view_set_left_margin(v.native(), m)
}
// GetLeftMargin is a wrapper around gtk_text_view_get_left_margin().
func (v *TextView) GetLeftMargin() int {
	return int(C.gtk_text_view_get_left_margin(v.native()))
}
// SetRightMargin is a wrapper around gtk_text_view_set_right_margin().
func (v *TextView) SetRightMargin(margin int) {
	m := C.gint(margin)
	C.gtk_text_view_set_right_margin(v.native(), m)
}
// GetRightMargin is a wrapper around gtk_text_view_get_right_margin().
func (v *TextView) GetRightMargin() int {
	return int(C.gtk_text_view_get_right_margin(v.native()))
}
// SetIndent is a wrapper around gtk_text_view_set_indent().
func (v *TextView) SetIndent(indent int) {
	n := C.gint(indent)
	C.gtk_text_view_set_indent(v.native(), n)
}
// GetIndent is a wrapper around gtk_text_view_get_indent().
func (v *TextView) GetIndent() int {
	return int(C.gtk_text_view_get_indent(v.native()))
}
// SetInputHints is a wrapper around gtk_text_view_set_input_hints().
func (v *TextView) SetInputHints(hints InputHints) {
	h := C.GtkInputHints(hints)
	C.gtk_text_view_set_input_hints(v.native(), h)
}
// GetInputHints is a wrapper around gtk_text_view_get_input_hints().
func (v *TextView) GetInputHints() InputHints {
	return InputHints(C.gtk_text_view_get_input_hints(v.native()))
}
// SetInputPurpose is a wrapper around gtk_text_view_set_input_purpose().
func (v *TextView) SetInputPurpose(purpose InputPurpose) {
	p := C.GtkInputPurpose(purpose)
	C.gtk_text_view_set_input_purpose(v.native(), p)
}
// GetInputPurpose is a wrapper around gtk_text_view_get_input_purpose().
func (v *TextView) GetInputPurpose() InputPurpose {
	return InputPurpose(C.gtk_text_view_get_input_purpose(v.native()))
}
/*
 * GtkTextTagTable
 */
// TextTagTable is a representation of GTK's GtkTextTagTable; it embeds
// *glib.Object, so all GObject methods are promoted.
type TextTagTable struct {
	*glib.Object
}
// native returns a pointer to the underlying GObject as a GtkTextTagTable.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TextTagTable) native() *C.GtkTextTagTable {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTextTagTable(unsafe.Pointer(v.GObject))
}
// marshalTextTagTable extracts a *TextTagTable from a GValue pointer.
func marshalTextTagTable(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTextTagTable(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTextTagTable wraps a glib.Object in a TextTagTable value.
func wrapTextTagTable(obj *glib.Object) *TextTagTable {
	table := &TextTagTable{obj}
	return table
}
// TextTagTableNew is a wrapper around gtk_text_tag_table_new().
// Ownership is taken by sinking the floating reference and installing an
// Unref finalizer on the wrapping glib.Object.
func TextTagTableNew() (*TextTagTable, error) {
	w := C.gtk_text_tag_table_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTextTagTable(obj), nil
}
/*
 * GtkTextBuffer
 */
// TextBuffer is a representation of GTK's GtkTextBuffer.
// It embeds *glib.Object, so all GObject methods are promoted.
type TextBuffer struct {
	*glib.Object
}
// native returns a pointer to the underlying GtkTextBuffer.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TextBuffer) native() *C.GtkTextBuffer {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTextBuffer(unsafe.Pointer(v.GObject))
}
// marshalTextBuffer extracts a *TextBuffer from a GValue pointer.
func marshalTextBuffer(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTextBuffer(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTextBuffer wraps a glib.Object in a TextBuffer value.
func wrapTextBuffer(obj *glib.Object) *TextBuffer {
	buf := &TextBuffer{obj}
	return buf
}
// TextBufferNew() is a wrapper around gtk_text_buffer_new().
// table may be nil (GTK then creates a default tag table).
//
// NOTE(review): unlike the widget constructors in this file, which call
// obj.RefSink(), this calls obj.Ref().  gtk_text_buffer_new returns an
// already-owned (non-floating) reference, so adding another reference
// here and only dropping one in the finalizer looks like it leaks one
// reference — confirm against glib.ToGObject's ownership semantics
// before changing.
func TextBufferNew(table *TextTagTable) (*TextBuffer, error) {
	c := C.gtk_text_buffer_new(table.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := wrapTextBuffer(obj)
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}
// GetBounds is a wrapper around gtk_text_buffer_get_bounds().
// It returns freshly allocated iterators positioned at the first and
// last positions in the buffer.
func (v *TextBuffer) GetBounds() (start, end *TextIter) {
	first := new(TextIter)
	last := new(TextIter)
	C.gtk_text_buffer_get_bounds(v.native(), (*C.GtkTextIter)(first), (*C.GtkTextIter)(last))
	return first, last
}
// GetText is a wrapper around gtk_text_buffer_get_text().
// It returns the text between start and end; hidden (invisible-tagged)
// characters are included only when includeHiddenChars is true.
//
// Fix: gtk_text_buffer_get_text returns a newly allocated gchar* that
// the caller must free; the original wrapper never freed it, leaking
// memory on every call.
func (v *TextBuffer) GetText(start, end *TextIter, includeHiddenChars bool) (string, error) {
	c := C.gtk_text_buffer_get_text(
		v.native(), (*C.GtkTextIter)(start), (*C.GtkTextIter)(end), gbool(includeHiddenChars),
	)
	if c == nil {
		return "", nilPtrErr
	}
	defer C.free(unsafe.Pointer(c)) // release the C-allocated copy
	return C.GoString((*C.char)(c)), nil
}
// SetText is a wrapper around gtk_text_buffer_set_text().
// The whole buffer content is replaced by text (len(text) is the byte
// length, which matches GTK's expectation for UTF-8 data).
func (v *TextBuffer) SetText(text string) {
	cs := C.CString(text)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_text_buffer_set_text(v.native(), (*C.gchar)(cs), C.gint(len(text)))
}
/*
 * GtkTextIter
 */
// TextIter is a representation of GTK's GtkTextIter.
// It is a plain value type aliasing the C struct, so it can be passed
// directly to cgo calls via a (*C.GtkTextIter) conversion.
type TextIter C.GtkTextIter
// marshalTextIter extracts a *TextIter from a boxed GValue pointer.
func marshalTextIter(p uintptr) (interface{}, error) {
	boxed := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return (*TextIter)(unsafe.Pointer(boxed)), nil
}
/*
 * GtkToggleButton
 */
// ToggleButton is a representation of GTK's GtkToggleButton.
// It embeds Button, mirroring the GTK class hierarchy.
type ToggleButton struct {
	Button
}
// native returns a pointer to the underlying GtkToggleButton.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *ToggleButton) native() *C.GtkToggleButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkToggleButton(unsafe.Pointer(v.GObject))
}
// marshalToggleButton extracts a *ToggleButton from a GValue pointer.
func marshalToggleButton(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapToggleButton(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapToggleButton builds the full embedding chain
// (Widget -> Container -> Bin -> Button -> ToggleButton) around obj.
func wrapToggleButton(obj *glib.Object) *ToggleButton {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &ToggleButton{Button{Bin{Container{widget}}}}
}
// ToggleButtonNew is a wrapper around gtk_toggle_button_new().
func ToggleButtonNew() (*ToggleButton, error) {
	w := C.gtk_toggle_button_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToggleButton(obj), nil
}
// ToggleButtonNewWithLabel is a wrapper around
// gtk_toggle_button_new_with_label().
func ToggleButtonNewWithLabel(label string) (*ToggleButton, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_toggle_button_new_with_label((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToggleButton(obj), nil
}
// ToggleButtonNewWithMnemonic is a wrapper around
// gtk_toggle_button_new_with_mnemonic().
func ToggleButtonNewWithMnemonic(label string) (*ToggleButton, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_toggle_button_new_with_mnemonic((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToggleButton(obj), nil
}
// GetActive is a wrapper around gtk_toggle_button_get_active().
func (v *ToggleButton) GetActive() bool {
	return gobool(C.gtk_toggle_button_get_active(v.native()))
}
// SetActive is a wrapper around gtk_toggle_button_set_active().
func (v *ToggleButton) SetActive(isActive bool) {
	flag := gbool(isActive)
	C.gtk_toggle_button_set_active(v.native(), flag)
}
/*
 * GtkToolbar
 */
// Toolbar is a representation of GTK's GtkToolbar.
// It embeds Container, mirroring the GTK class hierarchy.
type Toolbar struct {
	Container
}
// native returns a pointer to the underlying GtkToolbar.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *Toolbar) native() *C.GtkToolbar {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkToolbar(unsafe.Pointer(v.GObject))
}
// marshalToolbar extracts a *Toolbar from a GValue pointer.
func marshalToolbar(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapToolbar(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapToolbar builds the Widget -> Container -> Toolbar chain around obj.
func wrapToolbar(obj *glib.Object) *Toolbar {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &Toolbar{Container{widget}}
}
// ToolbarNew is a wrapper around gtk_toolbar_new().
func ToolbarNew() (*Toolbar, error) {
	w := C.gtk_toolbar_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToolbar(obj), nil
}
// Insert is a wrapper around gtk_toolbar_insert().
// pos == -1 (GTK convention) appends the item.
func (v *Toolbar) Insert(item IToolItem, pos int) {
	ti := item.toToolItem()
	C.gtk_toolbar_insert(v.native(), ti, C.gint(pos))
}
// GetItemIndex is a wrapper around gtk_toolbar_get_item_index().
func (v *Toolbar) GetItemIndex(item IToolItem) int {
	return int(C.gtk_toolbar_get_item_index(v.native(), item.toToolItem()))
}
// GetNItems is a wrapper around gtk_toolbar_get_n_items().
func (v *Toolbar) GetNItems() int {
	return int(C.gtk_toolbar_get_n_items(v.native()))
}
// GetNthItem is a wrapper around gtk_toolbar_get_nth_item().
// It returns nil when n is out of range.
func (v *Toolbar) GetNthItem(n int) *ToolItem {
	w := C.gtk_toolbar_get_nth_item(v.native(), C.gint(n))
	if w == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToolItem(obj)
}
// GetDropIndex is a wrapper around gtk_toolbar_get_drop_index().
// x and y are toolbar-relative coordinates.
func (v *Toolbar) GetDropIndex(x, y int) int {
	return int(C.gtk_toolbar_get_drop_index(v.native(), C.gint(x), C.gint(y)))
}
// SetDropHighlightItem is a wrapper around
// gtk_toolbar_set_drop_highlight_item().
func (v *Toolbar) SetDropHighlightItem(toolItem IToolItem, index int) {
	ti := toolItem.toToolItem()
	C.gtk_toolbar_set_drop_highlight_item(v.native(), ti, C.gint(index))
}
// SetShowArrow is a wrapper around gtk_toolbar_set_show_arrow().
func (v *Toolbar) SetShowArrow(showArrow bool) {
	flag := gbool(showArrow)
	C.gtk_toolbar_set_show_arrow(v.native(), flag)
}
// UnsetIconSize is a wrapper around gtk_toolbar_unset_icon_size().
// It reverts the toolbar to the icon size set by its style properties.
func (v *Toolbar) UnsetIconSize() {
	tb := v.native()
	C.gtk_toolbar_unset_icon_size(tb)
}
// GetShowArrow is a wrapper around gtk_toolbar_get_show_arrow().
func (v *Toolbar) GetShowArrow() bool {
	return gobool(C.gtk_toolbar_get_show_arrow(v.native()))
}
// GetStyle is a wrapper around gtk_toolbar_get_style().
func (v *Toolbar) GetStyle() ToolbarStyle {
	return ToolbarStyle(C.gtk_toolbar_get_style(v.native()))
}
// GetIconSize is a wrapper around gtk_toolbar_get_icon_size().
func (v *Toolbar) GetIconSize() IconSize {
	return IconSize(C.gtk_toolbar_get_icon_size(v.native()))
}
// GetReliefStyle is a wrapper around gtk_toolbar_get_relief_style().
func (v *Toolbar) GetReliefStyle() ReliefStyle {
	return ReliefStyle(C.gtk_toolbar_get_relief_style(v.native()))
}
// SetStyle is a wrapper around gtk_toolbar_set_style().
func (v *Toolbar) SetStyle(style ToolbarStyle) {
	s := C.GtkToolbarStyle(style)
	C.gtk_toolbar_set_style(v.native(), s)
}
// SetIconSize is a wrapper around gtk_toolbar_set_icon_size().
func (v *Toolbar) SetIconSize(iconSize IconSize) {
	sz := C.GtkIconSize(iconSize)
	C.gtk_toolbar_set_icon_size(v.native(), sz)
}
// UnsetStyle is a wrapper around gtk_toolbar_unset_style().
// It reverts the toolbar to the style set by user preferences.
func (v *Toolbar) UnsetStyle() {
	tb := v.native()
	C.gtk_toolbar_unset_style(tb)
}
/*
 * GtkToolButton
 */
// ToolButton is a representation of GTK's GtkToolButton.
// It embeds ToolItem, mirroring the GTK class hierarchy.
type ToolButton struct {
	ToolItem
}
// native returns a pointer to the underlying GtkToolButton.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *ToolButton) native() *C.GtkToolButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkToolButton(unsafe.Pointer(v.GObject))
}
// marshalToolButton extracts a *ToolButton from a GValue pointer.
func marshalToolButton(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapToolButton(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapToolButton builds the full embedding chain
// (Widget -> Container -> Bin -> ToolItem -> ToolButton) around obj.
func wrapToolButton(obj *glib.Object) *ToolButton {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &ToolButton{ToolItem{Bin{Container{widget}}}}
}
// ToolButtonNew is a wrapper around gtk_tool_button_new().
// iconWidget may be nil, in which case no icon widget is set (GTK
// accepts NULL here).
//
// Fix: the original dereferenced iconWidget unconditionally, panicking
// on a nil interface even though the underlying C API allows NULL.
func ToolButtonNew(iconWidget IWidget, label string) (*ToolButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	var w *C.GtkWidget
	if iconWidget != nil {
		w = iconWidget.toWidget()
	}
	c := C.gtk_tool_button_new(w, (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToolButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}
// SetLabel is a wrapper around gtk_tool_button_set_label().
func (v *ToolButton) SetLabel(label string) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_tool_button_set_label(v.native(), (*C.gchar)(cs))
}
// GetLabel is a wrapper around gtk_tool_button_get_label().
// The C string is owned by GTK and is only copied here, not freed.
func (v *ToolButton) GetLabel() string {
	return C.GoString((*C.char)(C.gtk_tool_button_get_label(v.native())))
}
// SetUseUnderline is a wrapper around gtk_tool_button_set_use_underline().
func (v *ToolButton) SetUseUnderline(useUnderline bool) {
	C.gtk_tool_button_set_use_underline(v.native(), gbool(useUnderline))
}

// SetGetUnderline is a misnamed legacy alias for SetUseUnderline,
// kept so existing callers keep compiling.
//
// Deprecated: use SetUseUnderline.
func (v *ToolButton) SetGetUnderline(useUnderline bool) {
	v.SetUseUnderline(useUnderline)
}
// GetUseUnderline is a wrapper around gtk_tool_button_get_use_underline().
func (v *ToolButton) GetUseUnderline() bool {
	return gobool(C.gtk_tool_button_get_use_underline(v.native()))
}

// GetuseUnderline is a mis-cased legacy alias for GetUseUnderline,
// kept so existing callers keep compiling.
//
// Deprecated: use GetUseUnderline.
func (v *ToolButton) GetuseUnderline() bool {
	return v.GetUseUnderline()
}
// SetIconName is a wrapper around gtk_tool_button_set_icon_name().
func (v *ToolButton) SetIconName(iconName string) {
	cs := C.CString(iconName)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_tool_button_set_icon_name(v.native(), (*C.gchar)(cs))
}
// GetIconName is a wrapper around gtk_tool_button_get_icon_name().
// The C string is owned by GTK and is only copied here, not freed.
func (v *ToolButton) GetIconName() string {
	return C.GoString((*C.char)(C.gtk_tool_button_get_icon_name(v.native())))
}
// SetIconWidget is a wrapper around gtk_tool_button_set_icon_widget().
func (v *ToolButton) SetIconWidget(iconWidget IWidget) {
	w := iconWidget.toWidget()
	C.gtk_tool_button_set_icon_widget(v.native(), w)
}
// GetIconWidget is a wrapper around gtk_tool_button_get_icon_widget().
// It returns nil when no icon widget is set.
func (v *ToolButton) GetIconWidget() *Widget {
	cw := C.gtk_tool_button_get_icon_widget(v.native())
	if cw == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(cw))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapWidget(obj)
}
// SetLabelWidget is a wrapper around gtk_tool_button_set_label_widget().
func (v *ToolButton) SetLabelWidget(labelWidget IWidget) {
	w := labelWidget.toWidget()
	C.gtk_tool_button_set_label_widget(v.native(), w)
}
// GetLabelWidget is a wrapper around gtk_tool_button_get_label_widget().
// It returns nil when no label widget is set.
func (v *ToolButton) GetLabelWidget() *Widget {
	cw := C.gtk_tool_button_get_label_widget(v.native())
	if cw == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(cw))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapWidget(obj)
}
/*
 * GtkToolItem
 */
// ToolItem is a representation of GTK's GtkToolItem.
// It embeds Bin, mirroring the GTK class hierarchy.
type ToolItem struct {
	Bin
}
// IToolItem is an interface type implemented by all structs embedding
// a ToolItem. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a GtkToolItem.
type IToolItem interface {
	toToolItem() *C.GtkToolItem
}
// native returns a pointer to the underlying GtkToolItem.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *ToolItem) native() *C.GtkToolItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkToolItem(unsafe.Pointer(v.GObject))
}
// toToolItem satisfies the IToolItem interface.
func (v *ToolItem) toToolItem() *C.GtkToolItem {
	ti := v.native()
	return ti
}
// marshalToolItem extracts a *ToolItem from a GValue pointer.
func marshalToolItem(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapToolItem(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapToolItem builds the Widget -> Container -> Bin -> ToolItem chain.
func wrapToolItem(obj *glib.Object) *ToolItem {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &ToolItem{Bin{Container{widget}}}
}
// ToolItemNew is a wrapper around gtk_tool_item_new().
func ToolItemNew() (*ToolItem, error) {
	w := C.gtk_tool_item_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapToolItem(obj), nil
}
// SetHomogeneous is a wrapper around gtk_tool_item_set_homogeneous().
func (v *ToolItem) SetHomogeneous(homogeneous bool) {
	flag := gbool(homogeneous)
	C.gtk_tool_item_set_homogeneous(v.native(), flag)
}
// GetHomogeneous is a wrapper around gtk_tool_item_get_homogeneous().
func (v *ToolItem) GetHomogeneous() bool {
	return gobool(C.gtk_tool_item_get_homogeneous(v.native()))
}
// SetExpand is a wrapper around gtk_tool_item_set_expand().
func (v *ToolItem) SetExpand(expand bool) {
	flag := gbool(expand)
	C.gtk_tool_item_set_expand(v.native(), flag)
}
// GetExpand is a wrapper around gtk_tool_item_get_expand().
func (v *ToolItem) GetExpand() bool {
	return gobool(C.gtk_tool_item_get_expand(v.native()))
}
// SetTooltipText is a wrapper around gtk_tool_item_set_tooltip_text().
func (v *ToolItem) SetTooltipText(text string) {
	cs := C.CString(text)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_tool_item_set_tooltip_text(v.native(), (*C.gchar)(cs))
}
// SetTooltipMarkup is a wrapper around gtk_tool_item_set_tooltip_markup().
func (v *ToolItem) SetTooltipMarkup(text string) {
	cs := C.CString(text)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_tool_item_set_tooltip_markup(v.native(), (*C.gchar)(cs))
}
// SetUseDragWindow is a wrapper around gtk_tool_item_set_use_drag_window().
func (v *ToolItem) SetUseDragWindow(useDragWindow bool) {
	flag := gbool(useDragWindow)
	C.gtk_tool_item_set_use_drag_window(v.native(), flag)
}
// GetUseDragWindow is a wrapper around gtk_tool_item_get_use_drag_window().
func (v *ToolItem) GetUseDragWindow() bool {
	return gobool(C.gtk_tool_item_get_use_drag_window(v.native()))
}
// SetVisibleHorizontal is a wrapper around
// gtk_tool_item_set_visible_horizontal().
func (v *ToolItem) SetVisibleHorizontal(visibleHorizontal bool) {
	flag := gbool(visibleHorizontal)
	C.gtk_tool_item_set_visible_horizontal(v.native(), flag)
}
// GetVisibleHorizontal is a wrapper around
// gtk_tool_item_get_visible_horizontal().
func (v *ToolItem) GetVisibleHorizontal() bool {
	return gobool(C.gtk_tool_item_get_visible_horizontal(v.native()))
}
// SetVisibleVertical is a wrapper around gtk_tool_item_set_visible_vertical().
func (v *ToolItem) SetVisibleVertical(visibleVertical bool) {
	flag := gbool(visibleVertical)
	C.gtk_tool_item_set_visible_vertical(v.native(), flag)
}
// GetVisibleVertical is a wrapper around gtk_tool_item_get_visible_vertical().
func (v *ToolItem) GetVisibleVertical() bool {
	return gobool(C.gtk_tool_item_get_visible_vertical(v.native()))
}
// SetIsImportant is a wrapper around gtk_tool_item_set_is_important().
func (v *ToolItem) SetIsImportant(isImportant bool) {
	flag := gbool(isImportant)
	C.gtk_tool_item_set_is_important(v.native(), flag)
}
// GetIsImportant is a wrapper around gtk_tool_item_get_is_important().
func (v *ToolItem) GetIsImportant() bool {
	return gobool(C.gtk_tool_item_get_is_important(v.native()))
}
// TODO: gtk_tool_item_get_ellipsize_mode
// GetIconSize is a wrapper around gtk_tool_item_get_icon_size().
func (v *ToolItem) GetIconSize() IconSize {
	return IconSize(C.gtk_tool_item_get_icon_size(v.native()))
}
// GetOrientation is a wrapper around gtk_tool_item_get_orientation().
func (v *ToolItem) GetOrientation() Orientation {
	return Orientation(C.gtk_tool_item_get_orientation(v.native()))
}
// GetToolbarStyle is a wrapper around gtk_tool_item_get_toolbar_style().
func (v *ToolItem) GetToolbarStyle() ToolbarStyle {
	return ToolbarStyle(C.gtk_tool_item_get_toolbar_style(v.native()))
}

// gtk_tool_item_get_toolbar_style is the original, accidentally
// C-named method; kept as an alias so any in-package callers keep
// compiling.
//
// Deprecated: use GetToolbarStyle.
func (v *ToolItem) gtk_tool_item_get_toolbar_style() ToolbarStyle {
	return v.GetToolbarStyle()
}
// GetReliefStyle is a wrapper around gtk_tool_item_get_relief_style().
func (v *ToolItem) GetReliefStyle() ReliefStyle {
	return ReliefStyle(C.gtk_tool_item_get_relief_style(v.native()))
}
// GetTextAlignment is a wrapper around gtk_tool_item_get_text_alignment().
func (v *ToolItem) GetTextAlignment() float32 {
	return float32(C.gtk_tool_item_get_text_alignment(v.native()))
}
// GetTextOrientation is a wrapper around gtk_tool_item_get_text_orientation().
func (v *ToolItem) GetTextOrientation() Orientation {
	return Orientation(C.gtk_tool_item_get_text_orientation(v.native()))
}
// RetrieveProxyMenuItem is a wrapper around
// gtk_tool_item_retrieve_proxy_menu_item().
// It returns nil when no proxy menu item exists.
func (v *ToolItem) RetrieveProxyMenuItem() *MenuItem {
	cw := C.gtk_tool_item_retrieve_proxy_menu_item(v.native())
	if cw == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(cw))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapMenuItem(obj)
}
// SetProxyMenuItem is a wrapper around gtk_tool_item_set_proxy_menu_item().
func (v *ToolItem) SetProxyMenuItem(menuItemId string, menuItem IMenuItem) {
	cs := C.CString(menuItemId)
	defer C.free(unsafe.Pointer(cs))
	w := C.toGtkWidget(unsafe.Pointer(menuItem.toMenuItem()))
	C.gtk_tool_item_set_proxy_menu_item(v.native(), (*C.gchar)(cs), w)
}
// RebuildMenu is a wrapper around gtk_tool_item_rebuild_menu().
func (v *ToolItem) RebuildMenu() {
	ti := v.native()
	C.gtk_tool_item_rebuild_menu(ti)
}
// ToolbarReconfigured is a wrapper around gtk_tool_item_toolbar_reconfigured().
func (v *ToolItem) ToolbarReconfigured() {
	ti := v.native()
	C.gtk_tool_item_toolbar_reconfigured(ti)
}
// TODO: gtk_tool_item_get_text_size_group
/*
 * GtkTreeIter
 */
// TreeIter is a representation of GTK's GtkTreeIter.
// The C struct is embedded by value, so a TreeIter is Go-managed
// memory; only iterators allocated by GTK itself may be passed to
// gtk_tree_iter_free.
type TreeIter struct {
	GtkTreeIter C.GtkTreeIter
}
// native returns a pointer to the underlying GtkTreeIter.
// A nil receiver yields a nil C pointer.
func (v *TreeIter) native() *C.GtkTreeIter {
	if v == nil {
		return nil
	}
	iter := &v.GtkTreeIter
	return iter
}
// marshalTreeIter extracts a *TreeIter from a boxed GValue pointer.
func marshalTreeIter(p uintptr) (interface{}, error) {
	boxed := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return (*TreeIter)(unsafe.Pointer(boxed)), nil
}
// free releases the iterator with gtk_tree_iter_free.
// NOTE(review): v.native() returns the address of the Go-embedded
// struct, so this is only valid for iterators whose memory was
// allocated by GTK (e.g. the result of gtk_tree_iter_copy).  Calling
// it on an ordinary Go-allocated TreeIter would hand non-GTK memory
// to g_slice_free — confirm every caller before reusing.
func (v *TreeIter) free() {
	C.gtk_tree_iter_free(v.native())
}
// Copy() is a wrapper around gtk_tree_iter_copy().
// The returned *TreeIter is an ordinary Go value managed by the
// garbage collector; the temporary C copy made by GTK is released
// before returning.
//
// Fix: the original kept the C copy's contents but set a finalizer
// that called gtk_tree_iter_free on the address of the Go-embedded
// struct — freeing memory GTK never allocated — while also leaking
// the actual C-allocated copy.
func (v *TreeIter) Copy() (*TreeIter, error) {
	c := C.gtk_tree_iter_copy(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	t := &TreeIter{*c}          // value copy into Go-managed memory
	C.gtk_tree_iter_free(c)     // release the C-allocated copy
	return t, nil
}
/*
 * GtkTreeModel
 */
// TreeModel is a representation of GTK's GtkTreeModel GInterface.
// It embeds *glib.Object, so all GObject methods are promoted.
type TreeModel struct {
	*glib.Object
}
// ITreeModel is an interface type implemented by all structs
// embedding a TreeModel. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkTreeModel.
type ITreeModel interface {
	toTreeModel() *C.GtkTreeModel
}
// native returns a pointer to the underlying GObject as a GtkTreeModel.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TreeModel) native() *C.GtkTreeModel {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTreeModel(unsafe.Pointer(v.GObject))
}
// toTreeModel satisfies the ITreeModel interface; a nil receiver maps
// to a nil C pointer.
func (v *TreeModel) toTreeModel() *C.GtkTreeModel {
	if v == nil {
		return nil
	}
	return v.native()
}
// marshalTreeModel extracts a *TreeModel from a GValue pointer.
func marshalTreeModel(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTreeModel(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTreeModel wraps a glib.Object in a TreeModel value.
func wrapTreeModel(obj *glib.Object) *TreeModel {
	model := &TreeModel{obj}
	return model
}
// GetFlags() is a wrapper around gtk_tree_model_get_flags().
func (v *TreeModel) GetFlags() TreeModelFlags {
	return TreeModelFlags(C.gtk_tree_model_get_flags(v.native()))
}
// GetNColumns() is a wrapper around gtk_tree_model_get_n_columns().
func (v *TreeModel) GetNColumns() int {
	return int(C.gtk_tree_model_get_n_columns(v.native()))
}
// GetColumnType() is a wrapper around gtk_tree_model_get_column_type().
func (v *TreeModel) GetColumnType(index int) glib.Type {
	return glib.Type(C.gtk_tree_model_get_column_type(v.native(), C.gint(index)))
}
// GetIter() is a wrapper around gtk_tree_model_get_iter().
// It returns an error when path does not refer to a valid row.
func (v *TreeModel) GetIter(path *TreePath) (*TreeIter, error) {
	var iter C.GtkTreeIter
	ok := C.gtk_tree_model_get_iter(v.native(), &iter, path.native())
	if !gobool(ok) {
		return nil, errors.New("Unable to set iterator")
	}
	return &TreeIter{iter}, nil
}
// GetIterFromString() is a wrapper around
// gtk_tree_model_get_iter_from_string().
// It returns an error when path does not parse to a valid row.
func (v *TreeModel) GetIterFromString(path string) (*TreeIter, error) {
	var iter C.GtkTreeIter
	cs := C.CString(path)
	defer C.free(unsafe.Pointer(cs))
	ok := C.gtk_tree_model_get_iter_from_string(v.native(), &iter, (*C.gchar)(cs))
	if !gobool(ok) {
		return nil, errors.New("Unable to set iterator")
	}
	return &TreeIter{iter}, nil
}
// GetIterFirst() is a wrapper around gtk_tree_model_get_iter_first().
// The bool result reports whether the model has a first row.
func (v *TreeModel) GetIterFirst() (*TreeIter, bool) {
	var iter C.GtkTreeIter
	ok := C.gtk_tree_model_get_iter_first(v.native(), &iter)
	if !gobool(ok) {
		return nil, false
	}
	return &TreeIter{iter}, true
}
// GetPath() is a wrapper around gtk_tree_model_get_path().
// The returned TreePath owns a C allocation and is freed by finalizer.
func (v *TreeModel) GetPath(iter *TreeIter) (*TreePath, error) {
	cp := C.gtk_tree_model_get_path(v.native(), iter.native())
	if cp == nil {
		return nil, nilPtrErr
	}
	path := &TreePath{cp}
	runtime.SetFinalizer(path, (*TreePath).free)
	return path, nil
}
// GetValue() is a wrapper around gtk_tree_model_get_value().
// The caller owns the returned glib.Value.
func (v *TreeModel) GetValue(iter *TreeIter, column int) (*glib.Value, error) {
	val, err := glib.ValueAlloc()
	if err != nil {
		return nil, err
	}
	gv := (*C.GValue)(unsafe.Pointer(val.Native()))
	C.gtk_tree_model_get_value(v.native(), iter.native(), C.gint(column), gv)
	return val, nil
}
// IterNext() is a wrapper around gtk_tree_model_iter_next().
// It advances iter in place and reports whether a next row exists.
func (v *TreeModel) IterNext(iter *TreeIter) bool {
	return gobool(C.gtk_tree_model_iter_next(v.native(), iter.native()))
}
/*
 * GtkTreePath
 */
// TreePath is a representation of GTK's GtkTreePath.
// It holds a pointer to a C-allocated GtkTreePath; ownership is
// indicated by whether a free finalizer was installed by the producer.
type TreePath struct {
	GtkTreePath *C.GtkTreePath
}
// native returns a pointer to the underlying GtkTreePath.
// A nil receiver yields a nil C pointer.
func (v *TreePath) native() *C.GtkTreePath {
	if v == nil {
		return nil
	}
	path := v.GtkTreePath
	return path
}
// marshalTreePath extracts a *TreePath from a boxed GValue pointer.
func marshalTreePath(p uintptr) (interface{}, error) {
	boxed := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return &TreePath{(*C.GtkTreePath)(unsafe.Pointer(boxed))}, nil
}
// free releases the C-allocated GtkTreePath with gtk_tree_path_free.
// NOTE(review): only call on paths this package owns (e.g. from
// gtk_tree_model_get_path); paths borrowed from GTK must not be freed.
func (v *TreePath) free() {
	C.gtk_tree_path_free(v.native())
}
// String is a wrapper around gtk_tree_path_to_string().
//
// Fix: gtk_tree_path_to_string returns a newly allocated gchar* that
// the caller must free; the original wrapper never freed it, leaking
// memory on every call.
func (v *TreePath) String() string {
	c := C.gtk_tree_path_to_string(v.native())
	defer C.free(unsafe.Pointer(c)) // release the C-allocated copy
	return C.GoString((*C.char)(c))
}
/*
 * GtkTreeSelection
 */
// TreeSelection is a representation of GTK's GtkTreeSelection.
// It embeds *glib.Object, so all GObject methods are promoted.
type TreeSelection struct {
	*glib.Object
}
// native returns a pointer to the underlying GtkTreeSelection.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TreeSelection) native() *C.GtkTreeSelection {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTreeSelection(unsafe.Pointer(v.GObject))
}
// marshalTreeSelection extracts a *TreeSelection from a GValue pointer.
func marshalTreeSelection(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTreeSelection(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTreeSelection wraps a glib.Object in a TreeSelection value.
func wrapTreeSelection(obj *glib.Object) *TreeSelection {
	sel := &TreeSelection{obj}
	return sel
}
// GetSelected() is a wrapper around gtk_tree_selection_get_selected().
// If model is non-nil, a GtkTreeModel** is passed so GTK can report the
// model the selection belongs to; iter is filled in with the selected
// row.  The return value reports whether a row is currently selected.
//
// Fix: the original tested the just-declared (and therefore always
// nil) local pcmodel instead of the model parameter, so the model
// pointer was never passed to GTK regardless of the argument.
func (v *TreeSelection) GetSelected(model *ITreeModel, iter *TreeIter) bool {
	var pcmodel **C.GtkTreeModel
	if model != nil && *model != nil {
		cmodel := (*model).toTreeModel()
		pcmodel = &cmodel
	}
	c := C.gtk_tree_selection_get_selected(v.native(),
		pcmodel, iter.native())
	return gobool(c)
}
/*
 * GtkTreeView
 */
// TreeView is a representation of GTK's GtkTreeView.
// It embeds Container, mirroring the GTK class hierarchy.
type TreeView struct {
	Container
}
// native returns a pointer to the underlying GtkTreeView.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TreeView) native() *C.GtkTreeView {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTreeView(unsafe.Pointer(v.GObject))
}
// marshalTreeView extracts a *TreeView from a GValue pointer.
func marshalTreeView(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTreeView(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTreeView builds the Widget -> Container -> TreeView chain around obj.
func wrapTreeView(obj *glib.Object) *TreeView {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &TreeView{Container{widget}}
}
// TreeViewNew() is a wrapper around gtk_tree_view_new().
func TreeViewNew() (*TreeView, error) {
	w := C.gtk_tree_view_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeView(obj), nil
}
// TreeViewNewWithModel() is a wrapper around gtk_tree_view_new_with_model().
func TreeViewNewWithModel(model ITreeModel) (*TreeView, error) {
	w := C.gtk_tree_view_new_with_model(model.toTreeModel())
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeView(obj), nil
}
// GetModel() is a wrapper around gtk_tree_view_get_model().
// It returns an error when no model is set.
func (v *TreeView) GetModel() (*TreeModel, error) {
	cm := C.gtk_tree_view_get_model(v.native())
	if cm == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(cm))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeModel(obj), nil
}
// SetModel() is a wrapper around gtk_tree_view_set_model().
func (v *TreeView) SetModel(model ITreeModel) {
	m := model.toTreeModel()
	C.gtk_tree_view_set_model(v.native(), m)
}
// GetSelection() is a wrapper around gtk_tree_view_get_selection().
// GTK returns a borrowed reference, so a plain Ref (not RefSink) pairs
// with the Unref finalizer.
func (v *TreeView) GetSelection() (*TreeSelection, error) {
	cs := C.gtk_tree_view_get_selection(v.native())
	if cs == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(cs))}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeSelection(obj), nil
}
// AppendColumn() is a wrapper around gtk_tree_view_append_column().
// It returns the number of columns after the append.
func (v *TreeView) AppendColumn(column *TreeViewColumn) int {
	return int(C.gtk_tree_view_append_column(v.native(), column.native()))
}
/*
 * GtkTreeViewColumn
 */
// TreeViewColumn is a representation of GTK's GtkTreeViewColumn.
// It embeds glib.InitiallyUnowned (GtkTreeViewColumn is not a widget).
type TreeViewColumn struct {
	glib.InitiallyUnowned
}
// native returns a pointer to the underlying GtkTreeViewColumn.
// A nil receiver or nil GObject yields a nil C pointer.
func (v *TreeViewColumn) native() *C.GtkTreeViewColumn {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkTreeViewColumn(unsafe.Pointer(v.GObject))
}
// marshalTreeViewColumn extracts a *TreeViewColumn from a GValue pointer.
func marshalTreeViewColumn(p uintptr) (interface{}, error) {
	obj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapTreeViewColumn(&glib.Object{glib.ToGObject(unsafe.Pointer(obj))}), nil
}
// wrapTreeViewColumn wraps a glib.Object in a TreeViewColumn value.
func wrapTreeViewColumn(obj *glib.Object) *TreeViewColumn {
	col := &TreeViewColumn{glib.InitiallyUnowned{obj}}
	return col
}
// TreeViewColumnNew() is a wrapper around gtk_tree_view_column_new().
func TreeViewColumnNew() (*TreeViewColumn, error) {
	w := C.gtk_tree_view_column_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeViewColumn(obj), nil
}
// TreeViewColumnNewWithAttribute() is a wrapper around
// gtk_tree_view_column_new_with_attributes() that only sets one
// attribute for one column.
func TreeViewColumnNewWithAttribute(title string, renderer ICellRenderer, attribute string, column int) (*TreeViewColumn, error) {
	titleC := C.CString(title)
	defer C.free(unsafe.Pointer(titleC))
	attrC := C.CString(attribute)
	defer C.free(unsafe.Pointer(attrC))
	w := C._gtk_tree_view_column_new_with_attributes_one((*C.gchar)(titleC),
		renderer.toCellRenderer(), (*C.gchar)(attrC), C.gint(column))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	obj.RefSink() // take ownership of the floating reference
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapTreeViewColumn(obj), nil
}
// AddAttribute() is a wrapper around gtk_tree_view_column_add_attribute().
// It maps the given model column to the named renderer attribute.
func (v *TreeViewColumn) AddAttribute(renderer ICellRenderer, attribute string, column int) {
	cstr := C.CString(attribute)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tree_view_column_add_attribute(v.native(),
		renderer.toCellRenderer(), (*C.gchar)(cstr), C.gint(column))
}
// SetExpand() is a wrapper around gtk_tree_view_column_set_expand().
func (v *TreeViewColumn) SetExpand(expand bool) {
	C.gtk_tree_view_column_set_expand(v.native(), gbool(expand))
}
// GetExpand() is a wrapper around gtk_tree_view_column_get_expand().
func (v *TreeViewColumn) GetExpand() bool {
	c := C.gtk_tree_view_column_get_expand(v.native())
	return gobool(c)
}
// SetMinWidth() is a wrapper around gtk_tree_view_column_set_min_width().
func (v *TreeViewColumn) SetMinWidth(minWidth int) {
	C.gtk_tree_view_column_set_min_width(v.native(), C.gint(minWidth))
}
// GetMinWidth() is a wrapper around gtk_tree_view_column_get_min_width().
func (v *TreeViewColumn) GetMinWidth() int {
	c := C.gtk_tree_view_column_get_min_width(v.native())
	return int(c)
}
/*
* GtkWidget
*/
// Widget is a representation of GTK's GtkWidget.
type Widget struct {
	glib.InitiallyUnowned
}
// IWidget is an interface type implemented by all structs
// embedding a Widget. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkWidget.
type IWidget interface {
	toWidget() *C.GtkWidget
}
// native returns a pointer to the underlying GtkWidget.
// Returns nil when the wrapper or its GObject is nil so callers can
// safely pass the result (as NULL) to C.
func (v *Widget) native() *C.GtkWidget {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkWidget(p)
}
// toWidget satisfies IWidget; a nil receiver yields a NULL GtkWidget.
func (v *Widget) toWidget() *C.GtkWidget {
	if v == nil {
		return nil
	}
	return v.native()
}
// marshalWidget converts a GValue (passed as a uintptr) holding a
// GtkWidget into its Go wrapper; used by the glib marshal machinery.
func marshalWidget(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapWidget(obj), nil
}
// wrapWidget wraps a glib.Object in a Widget.
func wrapWidget(obj *glib.Object) *Widget {
	return &Widget{glib.InitiallyUnowned{obj}}
}
// Destroy is a wrapper around gtk_widget_destroy().
func (v *Widget) Destroy() {
	C.gtk_widget_destroy(v.native())
}
// InDestruction is a wrapper around gtk_widget_in_destruction().
func (v *Widget) InDestruction() bool {
	return gobool(C.gtk_widget_in_destruction(v.native()))
}
// TODO(jrick) this may require some rethinking
/*
func (v *Widget) Destroyed(widgetPointer **Widget) {
}
*/
// Unparent is a wrapper around gtk_widget_unparent().
func (v *Widget) Unparent() {
	C.gtk_widget_unparent(v.native())
}
// Show is a wrapper around gtk_widget_show().
func (v *Widget) Show() {
	C.gtk_widget_show(v.native())
}
// Hide is a wrapper around gtk_widget_hide().
func (v *Widget) Hide() {
	C.gtk_widget_hide(v.native())
}
// GetCanFocus is a wrapper around gtk_widget_get_can_focus().
func (v *Widget) GetCanFocus() bool {
	c := C.gtk_widget_get_can_focus(v.native())
	return gobool(c)
}
// SetCanFocus is a wrapper around gtk_widget_set_can_focus().
func (v *Widget) SetCanFocus(canFocus bool) {
	C.gtk_widget_set_can_focus(v.native(), gbool(canFocus))
}
// GetMapped is a wrapper around gtk_widget_get_mapped().
func (v *Widget) GetMapped() bool {
	c := C.gtk_widget_get_mapped(v.native())
	return gobool(c)
}
// SetMapped is a wrapper around gtk_widget_set_mapped().
func (v *Widget) SetMapped(mapped bool) {
	// BUG FIX: this previously called gtk_widget_set_can_focus(),
	// silently toggling focusability instead of the mapped state.
	C.gtk_widget_set_mapped(v.native(), gbool(mapped))
}
// GetRealized is a wrapper around gtk_widget_get_realized().
func (v *Widget) GetRealized() bool {
	c := C.gtk_widget_get_realized(v.native())
	return gobool(c)
}
// SetRealized is a wrapper around gtk_widget_set_realized().
func (v *Widget) SetRealized(realized bool) {
	C.gtk_widget_set_realized(v.native(), gbool(realized))
}
// GetDoubleBuffered is a wrapper around gtk_widget_get_double_buffered().
func (v *Widget) GetDoubleBuffered() bool {
	c := C.gtk_widget_get_double_buffered(v.native())
	return gobool(c)
}
// GetHasWindow is a wrapper around gtk_widget_get_has_window().
func (v *Widget) GetHasWindow() bool {
	c := C.gtk_widget_get_has_window(v.native())
	return gobool(c)
}
// SetHasWindow is a wrapper around gtk_widget_set_has_window().
func (v *Widget) SetHasWindow(hasWindow bool) {
	C.gtk_widget_set_has_window(v.native(), gbool(hasWindow))
}
// ShowNow is a wrapper around gtk_widget_show_now().
func (v *Widget) ShowNow() {
	C.gtk_widget_show_now(v.native())
}
// ShowAll is a wrapper around gtk_widget_show_all().
func (v *Widget) ShowAll() {
	C.gtk_widget_show_all(v.native())
}
// SetNoShowAll is a wrapper around gtk_widget_set_no_show_all().
func (v *Widget) SetNoShowAll(noShowAll bool) {
	C.gtk_widget_set_no_show_all(v.native(), gbool(noShowAll))
}
// GetNoShowAll is a wrapper around gtk_widget_get_no_show_all().
func (v *Widget) GetNoShowAll() bool {
	c := C.gtk_widget_get_no_show_all(v.native())
	return gobool(c)
}
// Map is a wrapper around gtk_widget_map().
func (v *Widget) Map() {
	C.gtk_widget_map(v.native())
}
// Unmap is a wrapper around gtk_widget_unmap().
func (v *Widget) Unmap() {
	C.gtk_widget_unmap(v.native())
}
//void gtk_widget_realize(GtkWidget *widget);
//void gtk_widget_unrealize(GtkWidget *widget);
//void gtk_widget_draw(GtkWidget *widget, cairo_t *cr);
//void gtk_widget_queue_resize(GtkWidget *widget);
//void gtk_widget_queue_resize_no_redraw(GtkWidget *widget);
//GdkFrameClock *gtk_widget_get_frame_clock(GtkWidget *widget);
//guint gtk_widget_add_tick_callback (GtkWidget *widget,
// GtkTickCallback callback,
// gpointer user_data,
// GDestroyNotify notify);
//void gtk_widget_remove_tick_callback(GtkWidget *widget, guint id);
// TODO(jrick) GtkAllocation
/*
func (v *Widget) SizeAllocate() {
}
*/
// TODO(jrick) GtkAccelGroup GdkModifierType GtkAccelFlags
/*
func (v *Widget) AddAccelerator() {
}
*/
// TODO(jrick) GtkAccelGroup GdkModifierType
/*
func (v *Widget) RemoveAccelerator() {
}
*/
// TODO(jrick) GtkAccelGroup
/*
func (v *Widget) SetAccelPath() {
}
*/
// TODO(jrick) GList
/*
func (v *Widget) ListAccelClosures() {
}
*/
// GetAllocatedWidth() is a wrapper around gtk_widget_get_allocated_width().
func (v *Widget) GetAllocatedWidth() int {
	return int(C.gtk_widget_get_allocated_width(v.native()))
}
// GetAllocatedHeight() is a wrapper around gtk_widget_get_allocated_height().
func (v *Widget) GetAllocatedHeight() int {
	return int(C.gtk_widget_get_allocated_height(v.native()))
}
//gboolean gtk_widget_can_activate_accel(GtkWidget *widget, guint signal_id);
// Event() is a wrapper around gtk_widget_event().
func (v *Widget) Event(event *gdk.Event) bool {
	c := C.gtk_widget_event(v.native(),
		(*C.GdkEvent)(unsafe.Pointer(event.Native())))
	return gobool(c)
}
// Activate() is a wrapper around gtk_widget_activate().
func (v *Widget) Activate() bool {
	return gobool(C.gtk_widget_activate(v.native()))
}
// Reparent() is a wrapper around gtk_widget_reparent().
func (v *Widget) Reparent(newParent IWidget) {
	C.gtk_widget_reparent(v.native(), newParent.toWidget())
}
// TODO(jrick) GdkRectangle
/*
func (v *Widget) Intersect() {
}
*/
// IsFocus() is a wrapper around gtk_widget_is_focus().
func (v *Widget) IsFocus() bool {
	return gobool(C.gtk_widget_is_focus(v.native()))
}
// GrabFocus() is a wrapper around gtk_widget_grab_focus().
func (v *Widget) GrabFocus() {
	C.gtk_widget_grab_focus(v.native())
}
// GrabDefault() is a wrapper around gtk_widget_grab_default().
func (v *Widget) GrabDefault() {
	C.gtk_widget_grab_default(v.native())
}
// SetName() is a wrapper around gtk_widget_set_name().
func (v *Widget) SetName(name string) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_widget_set_name(v.native(), (*C.gchar)(cstr))
}
// GetName() is a wrapper around gtk_widget_get_name(). A non-nil
// error is returned in the case that gtk_widget_get_name returns NULL to
// differentiate between NULL and an empty string.
func (v *Widget) GetName() (string, error) {
	c := C.gtk_widget_get_name(v.native())
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}
// GetSensitive is a wrapper around gtk_widget_get_sensitive().
func (v *Widget) GetSensitive() bool {
	c := C.gtk_widget_get_sensitive(v.native())
	return gobool(c)
}
// IsSensitive is a wrapper around gtk_widget_is_sensitive().
func (v *Widget) IsSensitive() bool {
	c := C.gtk_widget_is_sensitive(v.native())
	return gobool(c)
}
// SetSensitive is a wrapper around gtk_widget_set_sensitive().
func (v *Widget) SetSensitive(sensitive bool) {
	C.gtk_widget_set_sensitive(v.native(), gbool(sensitive))
}
// GetVisible is a wrapper around gtk_widget_get_visible().
func (v *Widget) GetVisible() bool {
	c := C.gtk_widget_get_visible(v.native())
	return gobool(c)
}
// SetVisible is a wrapper around gtk_widget_set_visible().
func (v *Widget) SetVisible(visible bool) {
	C.gtk_widget_set_visible(v.native(), gbool(visible))
}
// SetParent is a wrapper around gtk_widget_set_parent().
func (v *Widget) SetParent(parent IWidget) {
	C.gtk_widget_set_parent(v.native(), parent.toWidget())
}
// GetParent is a wrapper around gtk_widget_get_parent().
// A non-nil error is returned if the widget has no parent (C returns NULL).
func (v *Widget) GetParent() (*Widget, error) {
	c := C.gtk_widget_get_parent(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	// Take a reference and drop it when the wrapper is finalized.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// SetSizeRequest is a wrapper around gtk_widget_set_size_request().
func (v *Widget) SetSizeRequest(width, height int) {
	C.gtk_widget_set_size_request(v.native(), C.gint(width), C.gint(height))
}
// GetSizeRequest is a wrapper around gtk_widget_get_size_request().
// Go multiple return values replace the C out-parameters.
func (v *Widget) GetSizeRequest() (width, height int) {
	var w, h C.gint
	C.gtk_widget_get_size_request(v.native(), &w, &h)
	return int(w), int(h)
}
// SetParentWindow is a wrapper around gtk_widget_set_parent_window().
func (v *Widget) SetParentWindow(parentWindow *gdk.Window) {
	C.gtk_widget_set_parent_window(v.native(),
		(*C.GdkWindow)(unsafe.Pointer(parentWindow.Native())))
}
// GetParentWindow is a wrapper around gtk_widget_get_parent_window().
// A non-nil error is returned if the underlying C call returns NULL.
func (v *Widget) GetParentWindow() (*gdk.Window, error) {
	c := C.gtk_widget_get_parent_window(v.native())
	// BUG FIX: this previously tested `v == nil` instead of the C
	// result, so a NULL parent window was wrapped and later
	// dereferenced instead of returning nilPtrErr.
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := &gdk.Window{obj}
	// Take a reference and drop it when the wrapper is finalized.
	w.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// SetEvents is a wrapper around gtk_widget_set_events().
func (v *Widget) SetEvents(events int) {
	C.gtk_widget_set_events(v.native(), C.gint(events))
}
// GetEvents is a wrapper around gtk_widget_get_events().
func (v *Widget) GetEvents() int {
	return int(C.gtk_widget_get_events(v.native()))
}
// AddEvents is a wrapper around gtk_widget_add_events().
func (v *Widget) AddEvents(events int) {
	C.gtk_widget_add_events(v.native(), C.gint(events))
}
// HasDefault is a wrapper around gtk_widget_has_default().
func (v *Widget) HasDefault() bool {
	c := C.gtk_widget_has_default(v.native())
	return gobool(c)
}
// HasFocus is a wrapper around gtk_widget_has_focus().
func (v *Widget) HasFocus() bool {
	c := C.gtk_widget_has_focus(v.native())
	return gobool(c)
}
// HasVisibleFocus is a wrapper around gtk_widget_has_visible_focus().
func (v *Widget) HasVisibleFocus() bool {
	c := C.gtk_widget_has_visible_focus(v.native())
	return gobool(c)
}
// HasGrab is a wrapper around gtk_widget_has_grab().
func (v *Widget) HasGrab() bool {
	c := C.gtk_widget_has_grab(v.native())
	return gobool(c)
}
// IsDrawable is a wrapper around gtk_widget_is_drawable().
func (v *Widget) IsDrawable() bool {
	c := C.gtk_widget_is_drawable(v.native())
	return gobool(c)
}
// IsToplevel is a wrapper around gtk_widget_is_toplevel().
func (v *Widget) IsToplevel() bool {
	c := C.gtk_widget_is_toplevel(v.native())
	return gobool(c)
}
// TODO(jrick) GdkEventMask
/*
func (v *Widget) SetDeviceEvents() {
}
*/
// TODO(jrick) GdkEventMask
/*
func (v *Widget) GetDeviceEvents() {
}
*/
// TODO(jrick) GdkEventMask
/*
func (v *Widget) AddDeviceEvents() {
}
*/
// SetDeviceEnabled is a wrapper around gtk_widget_set_device_enabled().
func (v *Widget) SetDeviceEnabled(device *gdk.Device, enabled bool) {
	C.gtk_widget_set_device_enabled(v.native(),
		(*C.GdkDevice)(unsafe.Pointer(device.Native())), gbool(enabled))
}
// GetDeviceEnabled is a wrapper around gtk_widget_get_device_enabled().
func (v *Widget) GetDeviceEnabled(device *gdk.Device) bool {
	c := C.gtk_widget_get_device_enabled(v.native(),
		(*C.GdkDevice)(unsafe.Pointer(device.Native())))
	return gobool(c)
}
// GetToplevel is a wrapper around gtk_widget_get_toplevel().
// A non-nil error is returned if the underlying C call returns NULL.
func (v *Widget) GetToplevel() (*Widget, error) {
	c := C.gtk_widget_get_toplevel(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	// Take a reference and drop it when the wrapper is finalized.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// GetTooltipText is a wrapper around gtk_widget_get_tooltip_text().
// A non-nil error is returned in the case that
// gtk_widget_get_tooltip_text returns NULL to differentiate between NULL
// and an empty string.
func (v *Widget) GetTooltipText() (string, error) {
	c := C.gtk_widget_get_tooltip_text(v.native())
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}
// SetTooltipText is a wrapper around gtk_widget_set_tooltip_text().
func (v *Widget) SetTooltipText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_widget_set_tooltip_text(v.native(), (*C.gchar)(cstr))
}
// OverrideFont is a wrapper around gtk_widget_override_font().
// description is a Pango font description string, e.g. "Sans Bold 12".
func (v *Widget) OverrideFont(description string) {
	cstr := C.CString(description)
	defer C.free(unsafe.Pointer(cstr))
	c := C.pango_font_description_from_string(cstr)
	C.gtk_widget_override_font(v.native(), c)
}
// GetHAlign is a wrapper around gtk_widget_get_halign().
func (v *Widget) GetHAlign() Align {
	c := C.gtk_widget_get_halign(v.native())
	return Align(c)
}
// SetHAlign is a wrapper around gtk_widget_set_halign().
func (v *Widget) SetHAlign(align Align) {
	C.gtk_widget_set_halign(v.native(), C.GtkAlign(align))
}
// GetVAlign is a wrapper around gtk_widget_get_valign().
func (v *Widget) GetVAlign() Align {
	c := C.gtk_widget_get_valign(v.native())
	return Align(c)
}
// SetVAlign is a wrapper around gtk_widget_set_valign().
func (v *Widget) SetVAlign(align Align) {
	C.gtk_widget_set_valign(v.native(), C.GtkAlign(align))
}
// GetMarginTop is a wrapper around gtk_widget_get_margin_top().
func (v *Widget) GetMarginTop() int {
	c := C.gtk_widget_get_margin_top(v.native())
	return int(c)
}
// SetMarginTop is a wrapper around gtk_widget_set_margin_top().
func (v *Widget) SetMarginTop(margin int) {
	C.gtk_widget_set_margin_top(v.native(), C.gint(margin))
}
// GetMarginBottom is a wrapper around gtk_widget_get_margin_bottom().
func (v *Widget) GetMarginBottom() int {
	c := C.gtk_widget_get_margin_bottom(v.native())
	return int(c)
}
// SetMarginBottom is a wrapper around gtk_widget_set_margin_bottom().
func (v *Widget) SetMarginBottom(margin int) {
	C.gtk_widget_set_margin_bottom(v.native(), C.gint(margin))
}
// GetHExpand is a wrapper around gtk_widget_get_hexpand().
func (v *Widget) GetHExpand() bool {
	c := C.gtk_widget_get_hexpand(v.native())
	return gobool(c)
}
// SetHExpand is a wrapper around gtk_widget_set_hexpand().
func (v *Widget) SetHExpand(expand bool) {
	C.gtk_widget_set_hexpand(v.native(), gbool(expand))
}
// GetVExpand is a wrapper around gtk_widget_get_vexpand().
func (v *Widget) GetVExpand() bool {
	c := C.gtk_widget_get_vexpand(v.native())
	return gobool(c)
}
// SetVExpand is a wrapper around gtk_widget_set_vexpand().
func (v *Widget) SetVExpand(expand bool) {
	C.gtk_widget_set_vexpand(v.native(), gbool(expand))
}
/*
* GtkWindow
*/
// Window is a representation of GTK's GtkWindow.
type Window struct {
	Bin
}
// IWindow is an interface type implemented by all structs embedding a
// Window. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a GtkWindow.
type IWindow interface {
	toWindow() *C.GtkWindow
}
// native returns a pointer to the underlying GtkWindow.
// Returns nil when the wrapper or its GObject is nil so callers can
// safely pass the result (as NULL) to C.
func (v *Window) native() *C.GtkWindow {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkWindow(p)
}
// toWindow satisfies IWindow; a nil receiver yields a NULL GtkWindow.
func (v *Window) toWindow() *C.GtkWindow {
	if v == nil {
		return nil
	}
	return v.native()
}
// marshalWindow converts a GValue (passed as a uintptr) holding a
// GtkWindow into its Go wrapper; used by the glib marshal machinery.
func marshalWindow(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapWindow(obj), nil
}
// wrapWindow wraps a glib.Object in a Window, building the full
// embedding chain Window > Bin > Container > Widget.
func wrapWindow(obj *glib.Object) *Window {
	return &Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// WindowNew is a wrapper around gtk_window_new().
func WindowNew(t WindowType) (*Window, error) {
	ptr := C.gtk_window_new(C.GtkWindowType(t))
	if ptr == nil {
		return nil, nilPtrErr
	}
	// Wrap the new window, sink its floating reference, and release
	// the reference when the Go wrapper is finalized.
	o := &glib.Object{glib.ToGObject(unsafe.Pointer(ptr))}
	win := wrapWindow(o)
	o.RefSink()
	runtime.SetFinalizer(o, (*glib.Object).Unref)
	return win, nil
}
// SetTitle is a wrapper around gtk_window_set_title().
func (v *Window) SetTitle(title string) {
	cstr := C.CString(title)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_window_set_title(v.native(), (*C.gchar)(cstr))
}
// SetResizable is a wrapper around gtk_window_set_resizable().
func (v *Window) SetResizable(resizable bool) {
	C.gtk_window_set_resizable(v.native(), gbool(resizable))
}
// GetResizable is a wrapper around gtk_window_get_resizable().
func (v *Window) GetResizable() bool {
	c := C.gtk_window_get_resizable(v.native())
	return gobool(c)
}
// TODO gtk_window_add_accel_group().
// ActivateFocus is a wrapper around gtk_window_activate_focus().
func (v *Window) ActivateFocus() bool {
	c := C.gtk_window_activate_focus(v.native())
	return gobool(c)
}
// ActivateDefault is a wrapper around gtk_window_activate_default().
func (v *Window) ActivateDefault() bool {
	c := C.gtk_window_activate_default(v.native())
	return gobool(c)
}
// SetModal is a wrapper around gtk_window_set_modal().
func (v *Window) SetModal(modal bool) {
	C.gtk_window_set_modal(v.native(), gbool(modal))
}
// SetDefaultSize is a wrapper around gtk_window_set_default_size().
func (v *Window) SetDefaultSize(width, height int) {
	C.gtk_window_set_default_size(v.native(), C.gint(width), C.gint(height))
}
// SetDefaultGeometry is a wrapper around gtk_window_set_default_geometry().
func (v *Window) SetDefaultGeometry(width, height int) {
	C.gtk_window_set_default_geometry(v.native(), C.gint(width),
		C.gint(height))
}
// TODO(jrick) GdkGeometry GdkWindowHints.
/*
func (v *Window) SetGeometryHints() {
}
*/
// TODO(jrick) GdkGravity.
/*
func (v *Window) SetGravity() {
}
*/
// TODO(jrick) GdkGravity.
/*
func (v *Window) GetGravity() {
}
*/
// SetPosition is a wrapper around gtk_window_set_position().
func (v *Window) SetPosition(position WindowPosition) {
	C.gtk_window_set_position(v.native(), C.GtkWindowPosition(position))
}
// SetTransientFor is a wrapper around gtk_window_set_transient_for().
// A nil parent is allowed and unsets any previous transient parent.
func (v *Window) SetTransientFor(parent IWindow) {
	var pw *C.GtkWindow = nil
	if parent != nil {
		pw = parent.toWindow()
	}
	C.gtk_window_set_transient_for(v.native(), pw)
}
// TODO gtk_window_set_attached_to().
// SetDestroyWithParent is a wrapper around
// gtk_window_set_destroy_with_parent().
func (v *Window) SetDestroyWithParent(setting bool) {
	C.gtk_window_set_destroy_with_parent(v.native(), gbool(setting))
}
// SetHideTitlebarWhenMaximized is a wrapper around
// gtk_window_set_hide_titlebar_when_maximized().
func (v *Window) SetHideTitlebarWhenMaximized(setting bool) {
	C.gtk_window_set_hide_titlebar_when_maximized(v.native(),
		gbool(setting))
}
// TODO gtk_window_set_screen().
// IsActive is a wrapper around gtk_window_is_active().
func (v *Window) IsActive() bool {
	c := C.gtk_window_is_active(v.native())
	return gobool(c)
}
// HasToplevelFocus is a wrapper around gtk_window_has_toplevel_focus().
func (v *Window) HasToplevelFocus() bool {
	c := C.gtk_window_has_toplevel_focus(v.native())
	return gobool(c)
}
// TODO gtk_window_list_toplevels().
// TODO gtk_window_add_mnemonic().
// TODO gtk_window_remove_mnemonic().
// TODO gtk_window_mnemonic_activate().
// TODO gtk_window_activate_key().
// TODO gtk_window_propagate_key_event().
// TODO gtk_window_get_focus().
// TODO gtk_window_set_focus().
// TODO gtk_window_get_default_widget().
// TODO gtk_window_set_default().
// Present is a wrapper around gtk_window_present().
func (v *Window) Present() {
	C.gtk_window_present(v.native())
}
// PresentWithTime is a wrapper around gtk_window_present_with_time().
// ts is the timestamp of the user interaction that triggered the call.
func (v *Window) PresentWithTime(ts uint32) {
	C.gtk_window_present_with_time(v.native(), C.guint32(ts))
}
// Iconify is a wrapper around gtk_window_iconify().
func (v *Window) Iconify() {
	C.gtk_window_iconify(v.native())
}
// Deiconify is a wrapper around gtk_window_deiconify().
func (v *Window) Deiconify() {
	C.gtk_window_deiconify(v.native())
}
// Stick is a wrapper around gtk_window_stick().
func (v *Window) Stick() {
	C.gtk_window_stick(v.native())
}
// Unstick is a wrapper around gtk_window_unstick().
func (v *Window) Unstick() {
	C.gtk_window_unstick(v.native())
}
// Maximize is a wrapper around gtk_window_maximize().
func (v *Window) Maximize() {
	C.gtk_window_maximize(v.native())
}
// Unmaximize is a wrapper around gtk_window_unmaximize().
func (v *Window) Unmaximize() {
	C.gtk_window_unmaximize(v.native())
}
// Fullscreen is a wrapper around gtk_window_fullscreen().
func (v *Window) Fullscreen() {
	C.gtk_window_fullscreen(v.native())
}
// Unfullscreen is a wrapper around gtk_window_unfullscreen().
func (v *Window) Unfullscreen() {
	C.gtk_window_unfullscreen(v.native())
}
// SetKeepAbove is a wrapper around gtk_window_set_keep_above().
func (v *Window) SetKeepAbove(setting bool) {
	C.gtk_window_set_keep_above(v.native(), gbool(setting))
}
// SetKeepBelow is a wrapper around gtk_window_set_keep_below().
func (v *Window) SetKeepBelow(setting bool) {
	C.gtk_window_set_keep_below(v.native(), gbool(setting))
}
// TODO gtk_window_begin_resize_drag().
// TODO gtk_window_begin_move_drag().
// SetDecorated is a wrapper around gtk_window_set_decorated().
func (v *Window) SetDecorated(setting bool) {
	C.gtk_window_set_decorated(v.native(), gbool(setting))
}
// SetDeletable is a wrapper around gtk_window_set_deletable().
func (v *Window) SetDeletable(setting bool) {
	C.gtk_window_set_deletable(v.native(), gbool(setting))
}
// TODO gtk_window_set_mnemonic_modifier().
// TODO gtk_window_set_type_hint().
// SetSkipTaskbarHint is a wrapper around gtk_window_set_skip_taskbar_hint().
func (v *Window) SetSkipTaskbarHint(setting bool) {
	C.gtk_window_set_skip_taskbar_hint(v.native(), gbool(setting))
}
// SetSkipPagerHint is a wrapper around gtk_window_set_skip_pager_hint().
func (v *Window) SetSkipPagerHint(setting bool) {
	C.gtk_window_set_skip_pager_hint(v.native(), gbool(setting))
}
// SetUrgencyHint is a wrapper around gtk_window_set_urgency_hint().
func (v *Window) SetUrgencyHint(setting bool) {
	C.gtk_window_set_urgency_hint(v.native(), gbool(setting))
}
// SetAcceptFocus is a wrapper around gtk_window_set_accept_focus().
func (v *Window) SetAcceptFocus(setting bool) {
	C.gtk_window_set_accept_focus(v.native(), gbool(setting))
}
// SetFocusOnMap is a wrapper around gtk_window_set_focus_on_map().
func (v *Window) SetFocusOnMap(setting bool) {
	C.gtk_window_set_focus_on_map(v.native(), gbool(setting))
}
// TODO gtk_window_set_startup_id().
// TODO gtk_window_set_role().
// GetDecorated is a wrapper around gtk_window_get_decorated().
func (v *Window) GetDecorated() bool {
	c := C.gtk_window_get_decorated(v.native())
	return gobool(c)
}
// GetDeletable is a wrapper around gtk_window_get_deletable().
func (v *Window) GetDeletable() bool {
	c := C.gtk_window_get_deletable(v.native())
	return gobool(c)
}
// TODO get_default_icon_list().
// TODO get_default_icon_name().
// GetDefaultSize is a wrapper around gtk_window_get_default_size().
// Go multiple return values replace the C out-parameters.
func (v *Window) GetDefaultSize() (width, height int) {
	var w, h C.gint
	C.gtk_window_get_default_size(v.native(), &w, &h)
	return int(w), int(h)
}
// GetDestroyWithParent is a wrapper around
// gtk_window_get_destroy_with_parent().
func (v *Window) GetDestroyWithParent() bool {
	c := C.gtk_window_get_destroy_with_parent(v.native())
	return gobool(c)
}
// GetHideTitlebarWhenMaximized is a wrapper around
// gtk_window_get_hide_titlebar_when_maximized().
func (v *Window) GetHideTitlebarWhenMaximized() bool {
	c := C.gtk_window_get_hide_titlebar_when_maximized(v.native())
	return gobool(c)
}
// TODO gtk_window_get_icon().
// TODO gtk_window_get_icon_list().
// TODO gtk_window_get_icon_name().
// TODO gtk_window_get_mnemonic_modifier().
// GetModal is a wrapper around gtk_window_get_modal().
func (v *Window) GetModal() bool {
	c := C.gtk_window_get_modal(v.native())
	return gobool(c)
}
// GetPosition is a wrapper around gtk_window_get_position().
// Go multiple return values replace the C out-parameters.
func (v *Window) GetPosition() (root_x, root_y int) {
	var x, y C.gint
	C.gtk_window_get_position(v.native(), &x, &y)
	return int(x), int(y)
}
// TODO gtk_window_get_role().
// GetSize is a wrapper around gtk_window_get_size().
// Go multiple return values replace the C out-parameters.
func (v *Window) GetSize() (width, height int) {
	var w, h C.gint
	C.gtk_window_get_size(v.native(), &w, &h)
	return int(w), int(h)
}
// TODO gtk_window_get_title().
// TODO gtk_window_get_transient_for().
// TODO gtk_window_get_attached_to().
// TODO gtk_window_get_type_hint().
// GetSkipTaskbarHint is a wrapper around gtk_window_get_skip_taskbar_hint().
func (v *Window) GetSkipTaskbarHint() bool {
	c := C.gtk_window_get_skip_taskbar_hint(v.native())
	return gobool(c)
}
// GetSkipPagerHint is a wrapper around gtk_window_get_skip_pager_hint().
func (v *Window) GetSkipPagerHint() bool {
	// BUG FIX: this previously called gtk_window_get_skip_taskbar_hint(),
	// returning the taskbar hint instead of the pager hint.
	c := C.gtk_window_get_skip_pager_hint(v.native())
	return gobool(c)
}
// GetUrgencyHint is a wrapper around gtk_window_get_urgency_hint().
func (v *Window) GetUrgencyHint() bool {
	c := C.gtk_window_get_urgency_hint(v.native())
	return gobool(c)
}
// GetAcceptFocus is a wrapper around gtk_window_get_accept_focus().
func (v *Window) GetAcceptFocus() bool {
	c := C.gtk_window_get_accept_focus(v.native())
	return gobool(c)
}
// GetFocusOnMap is a wrapper around gtk_window_get_focus_on_map().
func (v *Window) GetFocusOnMap() bool {
	c := C.gtk_window_get_focus_on_map(v.native())
	return gobool(c)
}
// TODO gtk_window_get_group().
// HasGroup is a wrapper around gtk_window_has_group().
func (v *Window) HasGroup() bool {
	c := C.gtk_window_has_group(v.native())
	return gobool(c)
}
// TODO gtk_window_get_window_type().
// Move is a wrapper around gtk_window_move().
func (v *Window) Move(x, y int) {
	C.gtk_window_move(v.native(), C.gint(x), C.gint(y))
}
// TODO gtk_window_parse_geometry().
// Resize is a wrapper around gtk_window_resize().
func (v *Window) Resize(width, height int) {
	C.gtk_window_resize(v.native(), C.gint(width), C.gint(height))
}
// ResizeToGeometry is a wrapper around gtk_window_resize_to_geometry().
func (v *Window) ResizeToGeometry(width, height int) {
	C.gtk_window_resize_to_geometry(v.native(), C.gint(width), C.gint(height))
}
// TODO gtk_window_set_default_icon_list().
// TODO gtk_window_set_default_icon().
// TODO gtk_window_set_default_icon_from_file().
// TODO gtk_window_set_default_icon_name().
// TODO gtk_window_set_icon().
// TODO gtk_window_set_icon_list().
// SetIconFromFile is a wrapper around gtk_window_set_icon_from_file().
// On failure the GError message is converted to a Go error and the
// GError is freed before returning.
func (v *Window) SetIconFromFile(file string) error {
	cstr := C.CString(file)
	defer C.free(unsafe.Pointer(cstr))
	var err *C.GError = nil
	res := C.gtk_window_set_icon_from_file(v.native(), (*C.gchar)(cstr), &err)
	if res == 0 {
		// Free the GError only after its message has been copied into
		// the Go error string.
		defer C.g_error_free(err)
		return errors.New(C.GoString((*C.char)(C.error_get_message(err))))
	}
	return nil
}
// TODO gtk_window_set_icon_name().
// SetAutoStartupNotification is a wrapper around
// gtk_window_set_auto_startup_notification().
// This doesn't seem right. Might need to rethink?
/*
func (v *Window) SetAutoStartupNotification(setting bool) {
C.gtk_window_set_auto_startup_notification(gbool(setting))
}
*/
// GetMnemonicsVisible is a wrapper around
// gtk_window_get_mnemonics_visible().
func (v *Window) GetMnemonicsVisible() bool {
	c := C.gtk_window_get_mnemonics_visible(v.native())
	return gobool(c)
}
// SetMnemonicsVisible is a wrapper around
// gtk_window_set_mnemonics_visible().
func (v *Window) SetMnemonicsVisible(setting bool) {
	C.gtk_window_set_mnemonics_visible(v.native(), gbool(setting))
}
// GetFocusVisible is a wrapper around gtk_window_get_focus_visible().
func (v *Window) GetFocusVisible() bool {
	c := C.gtk_window_get_focus_visible(v.native())
	return gobool(c)
}
// SetFocusVisible is a wrapper around gtk_window_set_focus_visible().
func (v *Window) SetFocusVisible(setting bool) {
	C.gtk_window_set_focus_visible(v.native(), gbool(setting))
}
// SetHasResizeGrip is a wrapper around gtk_window_set_has_resize_grip().
func (v *Window) SetHasResizeGrip(setting bool) {
	C.gtk_window_set_has_resize_grip(v.native(), gbool(setting))
}
// GetHasResizeGrip is a wrapper around gtk_window_get_has_resize_grip().
func (v *Window) GetHasResizeGrip() bool {
	c := C.gtk_window_get_has_resize_grip(v.native())
	return gobool(c)
}
// ResizeGripIsVisible is a wrapper around
// gtk_window_resize_grip_is_visible().
func (v *Window) ResizeGripIsVisible() bool {
	c := C.gtk_window_resize_grip_is_visible(v.native())
	return gobool(c)
}
// TODO gtk_window_get_resize_grip_area().
// TODO gtk_window_set_application().
// TODO gtk_window_get_application().
// cast_3_10_func, when non-nil, is a fallback caster consulted by cast()
// for class names this file does not know about. Presumably registered
// by a build-tagged file for newer GTK versions — TODO confirm where it
// is assigned.
var cast_3_10_func func(string, *glib.Object) glib.IObject
// cast takes a native GObject and casts it to the appropriate Go struct.
// The GObject's class name selects the wrapper; unknown names are passed
// to cast_3_10_func (if set), and an error is returned when no wrapper
// matches. A finalizer is attached so the reference is dropped when the
// returned wrapper is garbage collected.
func cast(c *C.GObject) (glib.IObject, error) {
	var (
		className = C.GoString((*C.char)(C.object_get_class_name(c)))
		obj       = &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
		g         glib.IObject
	)
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	switch className {
	case "GtkAboutDialog":
		g = wrapAboutDialog(obj)
	case "GtkAdjustment":
		g = wrapAdjustment(obj)
	case "GtkAlignment":
		g = wrapAlignment(obj)
	case "GtkArrow":
		g = wrapArrow(obj)
	case "GtkBin":
		g = wrapBin(obj)
	case "GtkBox":
		g = wrapBox(obj)
	case "GtkButton":
		g = wrapButton(obj)
	case "GtkCalendar":
		g = wrapCalendar(obj)
	case "GtkCellLayout":
		g = wrapCellLayout(obj)
	case "GtkCellRenderer":
		g = wrapCellRenderer(obj)
	case "GtkCellRendererText":
		g = wrapCellRendererText(obj)
	case "GtkCellRendererToggle":
		g = wrapCellRendererToggle(obj)
	case "GtkCheckButton":
		g = wrapCheckButton(obj)
	case "GtkCheckMenuItem":
		g = wrapCheckMenuItem(obj)
	case "GtkClipboard":
		g = wrapClipboard(obj)
	case "GtkComboBox":
		g = wrapComboBox(obj)
	case "GtkContainer":
		g = wrapContainer(obj)
	case "GtkDialog":
		g = wrapDialog(obj)
	case "GtkDrawingArea":
		g = wrapDrawingArea(obj)
	case "GtkEditable":
		g = wrapEditable(obj)
	case "GtkEntry":
		g = wrapEntry(obj)
	case "GtkEntryBuffer":
		g = wrapEntryBuffer(obj)
	case "GtkEntryCompletion":
		g = wrapEntryCompletion(obj)
	case "GtkEventBox":
		g = wrapEventBox(obj)
	case "GtkFrame":
		g = wrapFrame(obj)
	case "GtkFileChooser":
		g = wrapFileChooser(obj)
	case "GtkFileChooserButton":
		g = wrapFileChooserButton(obj)
	case "GtkFileChooserWidget":
		g = wrapFileChooserWidget(obj)
	case "GtkGrid":
		g = wrapGrid(obj)
	case "GtkImage":
		g = wrapImage(obj)
	case "GtkLabel":
		g = wrapLabel(obj)
	case "GtkListStore":
		g = wrapListStore(obj)
	case "GtkMenu":
		g = wrapMenu(obj)
	case "GtkMenuBar":
		g = wrapMenuBar(obj)
	case "GtkMenuButton":
		g = wrapMenuButton(obj)
	case "GtkMenuItem":
		g = wrapMenuItem(obj)
	case "GtkMenuShell":
		g = wrapMenuShell(obj)
	case "GtkMessageDialog":
		g = wrapMessageDialog(obj)
	case "GtkMisc":
		g = wrapMisc(obj)
	case "GtkNotebook":
		g = wrapNotebook(obj)
	case "GtkOffscreenWindow":
		g = wrapOffscreenWindow(obj)
	case "GtkOrientable":
		g = wrapOrientable(obj)
	case "GtkProgressBar":
		g = wrapProgressBar(obj)
	case "GtkRadioButton":
		g = wrapRadioButton(obj)
	case "GtkRadioMenuItem":
		g = wrapRadioMenuItem(obj)
	case "GtkRange":
		g = wrapRange(obj)
	case "GtkScrollbar":
		g = wrapScrollbar(obj)
	case "GtkScrolledWindow":
		g = wrapScrolledWindow(obj)
	case "GtkSearchEntry":
		g = wrapSearchEntry(obj)
	case "GtkSeparator":
		g = wrapSeparator(obj)
	case "GtkSeparatorMenuItem":
		g = wrapSeparatorMenuItem(obj)
	case "GtkSeparatorToolItem":
		g = wrapSeparatorToolItem(obj)
	case "GtkSpinButton":
		g = wrapSpinButton(obj)
	case "GtkSpinner":
		g = wrapSpinner(obj)
	case "GtkStatusbar":
		g = wrapStatusbar(obj)
	case "GtkSwitch":
		g = wrapSwitch(obj)
	case "GtkTextView":
		g = wrapTextView(obj)
	case "GtkTextBuffer":
		g = wrapTextBuffer(obj)
	case "GtkTextTagTable":
		g = wrapTextTagTable(obj)
	case "GtkToggleButton":
		g = wrapToggleButton(obj)
	case "GtkToolbar":
		g = wrapToolbar(obj)
	case "GtkToolButton":
		g = wrapToolButton(obj)
	case "GtkToolItem":
		g = wrapToolItem(obj)
	case "GtkTreeModel":
		g = wrapTreeModel(obj)
	case "GtkTreeSelection":
		g = wrapTreeSelection(obj)
	case "GtkTreeView":
		g = wrapTreeView(obj)
	case "GtkTreeViewColumn":
		g = wrapTreeViewColumn(obj)
	case "GtkWidget":
		g = wrapWidget(obj)
	case "GtkWindow":
		g = wrapWindow(obj)
	default:
		// Unknown class: give the version-specific fallback a chance
		// before reporting an error.
		switch {
		case cast_3_10_func != nil:
			g = cast_3_10_func(className, obj)
			if g != nil {
				return g, nil
			}
		}
		return nil, errors.New("unrecognized class name '" + className + "'")
	}
	return g, nil
}
// struct__GList -> GList
// Copyright (c) 2013-2014 Conformal Systems <info@conformal.com>
//
// This file originated from: http://opensource.conformal.com/
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// Go bindings for GTK+ 3. Supports version 3.6 and later.
//
// Functions use the same names as the native C function calls, but use
// CamelCase. In cases where native GTK uses pointers to values to
// simulate multiple return values, Go's native multiple return values
// are used instead. Whenever a native GTK call could return an
// unexpected NULL pointer, an additional error is returned in the Go
// binding.
//
// GTK's C API documentation can be very useful for understanding how the
// functions in this package work and what each type is for. This
// documentation can be found at https://developer.gnome.org/gtk3/.
//
// In addition to Go versions of the C GTK functions, every struct type
// includes a method named Native (either by direct implementation, or
// by means of struct embedding). These methods return a uintptr of the
// native C object the binding type represents. These pointers may be
// type switched to a native C pointer using unsafe and used with cgo
// function calls outside this package.
//
// Memory management is handled in proper Go fashion, using runtime
// finalizers to properly free memory when it is no longer needed. Each
// time a Go type is created with a pointer to a GObject, a reference is
// added for Go, sinking the floating reference when necessary. After
// going out of scope and the next time Go's garbage collector is run, a
// finalizer is run to remove Go's reference to the GObject. When this
// reference count hits zero (when neither Go nor GTK holds ownership)
// the object will be freed internally by GTK.
package gtk
// #cgo pkg-config: gtk+-3.0
// #include <gtk/gtk.h>
// #include "gtk.go.h"
import "C"
import (
"errors"
"fmt"
"runtime"
"unsafe"
"github.com/conformal/gotk3/cairo"
"github.com/conformal/gotk3/gdk"
"github.com/conformal/gotk3/glib"
"github.com/conformal/gotk3/pango"
)
// init registers this package's GValue marshalers with glib so that
// GObject signal arguments and property values can be converted back
// into the Go types defined in this package. It runs automatically at
// package load, before any signal callback can fire.
func init() {
	tm := []glib.TypeMarshaler{
		// Enums
		{glib.Type(C.gtk_align_get_type()), marshalAlign},
		{glib.Type(C.gtk_accel_flags_get_type()), marshalAccelFlags},
		{glib.Type(C.gtk_arrow_placement_get_type()), marshalArrowPlacement},
		{glib.Type(C.gtk_arrow_type_get_type()), marshalArrowType},
		{glib.Type(C.gtk_assistant_page_type_get_type()), marshalAssistantPageType},
		{glib.Type(C.gtk_buttons_type_get_type()), marshalButtonsType},
		{glib.Type(C.gtk_calendar_display_options_get_type()), marshalCalendarDisplayOptions},
		{glib.Type(C.gtk_dialog_flags_get_type()), marshalDialogFlags},
		{glib.Type(C.gtk_entry_icon_position_get_type()), marshalEntryIconPosition},
		{glib.Type(C.gtk_file_chooser_action_get_type()), marshalFileChooserAction},
		{glib.Type(C.gtk_icon_size_get_type()), marshalIconSize},
		{glib.Type(C.gtk_image_type_get_type()), marshalImageType},
		{glib.Type(C.gtk_input_hints_get_type()), marshalInputHints},
		{glib.Type(C.gtk_input_purpose_get_type()), marshalInputPurpose},
		{glib.Type(C.gtk_justification_get_type()), marshalJustification},
		{glib.Type(C.gtk_license_get_type()), marshalLicense},
		{glib.Type(C.gtk_message_type_get_type()), marshalMessageType},
		{glib.Type(C.gtk_orientation_get_type()), marshalOrientation},
		{glib.Type(C.gtk_pack_type_get_type()), marshalPackType},
		{glib.Type(C.gtk_path_type_get_type()), marshalPathType},
		{glib.Type(C.gtk_policy_type_get_type()), marshalPolicyType},
		{glib.Type(C.gtk_position_type_get_type()), marshalPositionType},
		{glib.Type(C.gtk_relief_style_get_type()), marshalReliefStyle},
		{glib.Type(C.gtk_response_type_get_type()), marshalResponseType},
		{glib.Type(C.gtk_selection_mode_get_type()), marshalSelectionMode},
		{glib.Type(C.gtk_shadow_type_get_type()), marshalShadowType},
		{glib.Type(C.gtk_state_flags_get_type()), marshalStateFlags},
		{glib.Type(C.gtk_toolbar_style_get_type()), marshalToolbarStyle},
		{glib.Type(C.gtk_tree_model_flags_get_type()), marshalTreeModelFlags},
		{glib.Type(C.gtk_window_position_get_type()), marshalWindowPosition},
		{glib.Type(C.gtk_window_type_get_type()), marshalWindowType},
		{glib.Type(C.gtk_wrap_mode_get_type()), marshalWrapMode},

		// Objects/Interfaces
		{glib.Type(C.gtk_about_dialog_get_type()), marshalAboutDialog},
		{glib.Type(C.gtk_adjustment_get_type()), marshalAdjustment},
		{glib.Type(C.gtk_alignment_get_type()), marshalAlignment},
		{glib.Type(C.gtk_arrow_get_type()), marshalArrow},
		{glib.Type(C.gtk_assistant_get_type()), marshalAssistant},
		{glib.Type(C.gtk_bin_get_type()), marshalBin},
		{glib.Type(C.gtk_builder_get_type()), marshalBuilder},
		{glib.Type(C.gtk_button_get_type()), marshalButton},
		{glib.Type(C.gtk_box_get_type()), marshalBox},
		{glib.Type(C.gtk_calendar_get_type()), marshalCalendar},
		{glib.Type(C.gtk_cell_layout_get_type()), marshalCellLayout},
		{glib.Type(C.gtk_cell_renderer_get_type()), marshalCellRenderer},
		{glib.Type(C.gtk_cell_renderer_text_get_type()), marshalCellRendererText},
		{glib.Type(C.gtk_cell_renderer_toggle_get_type()), marshalCellRendererToggle},
		{glib.Type(C.gtk_check_button_get_type()), marshalCheckButton},
		{glib.Type(C.gtk_check_menu_item_get_type()), marshalCheckMenuItem},
		{glib.Type(C.gtk_clipboard_get_type()), marshalClipboard},
		{glib.Type(C.gtk_combo_box_get_type()), marshalComboBox},
		{glib.Type(C.gtk_container_get_type()), marshalContainer},
		{glib.Type(C.gtk_dialog_get_type()), marshalDialog},
		{glib.Type(C.gtk_drawing_area_get_type()), marshalDrawingArea},
		{glib.Type(C.gtk_editable_get_type()), marshalEditable},
		{glib.Type(C.gtk_entry_get_type()), marshalEntry},
		{glib.Type(C.gtk_entry_buffer_get_type()), marshalEntryBuffer},
		{glib.Type(C.gtk_entry_completion_get_type()), marshalEntryCompletion},
		{glib.Type(C.gtk_event_box_get_type()), marshalEventBox},
		{glib.Type(C.gtk_file_chooser_get_type()), marshalFileChooser},
		{glib.Type(C.gtk_file_chooser_button_get_type()), marshalFileChooserButton},
		{glib.Type(C.gtk_file_chooser_widget_get_type()), marshalFileChooserWidget},
		{glib.Type(C.gtk_frame_get_type()), marshalFrame},
		{glib.Type(C.gtk_grid_get_type()), marshalGrid},
		{glib.Type(C.gtk_image_get_type()), marshalImage},
		{glib.Type(C.gtk_label_get_type()), marshalLabel},
		{glib.Type(C.gtk_list_store_get_type()), marshalListStore},
		{glib.Type(C.gtk_menu_get_type()), marshalMenu},
		{glib.Type(C.gtk_menu_bar_get_type()), marshalMenuBar},
		{glib.Type(C.gtk_menu_button_get_type()), marshalMenuButton},
		{glib.Type(C.gtk_menu_item_get_type()), marshalMenuItem},
		{glib.Type(C.gtk_menu_shell_get_type()), marshalMenuShell},
		{glib.Type(C.gtk_message_dialog_get_type()), marshalMessageDialog},
		{glib.Type(C.gtk_misc_get_type()), marshalMisc},
		{glib.Type(C.gtk_notebook_get_type()), marshalNotebook},
		{glib.Type(C.gtk_offscreen_window_get_type()), marshalOffscreenWindow},
		{glib.Type(C.gtk_orientable_get_type()), marshalOrientable},
		{glib.Type(C.gtk_progress_bar_get_type()), marshalProgressBar},
		{glib.Type(C.gtk_radio_button_get_type()), marshalRadioButton},
		{glib.Type(C.gtk_radio_menu_item_get_type()), marshalRadioMenuItem},
		{glib.Type(C.gtk_range_get_type()), marshalRange},
		{glib.Type(C.gtk_scrollbar_get_type()), marshalScrollbar},
		{glib.Type(C.gtk_scrolled_window_get_type()), marshalScrolledWindow},
		{glib.Type(C.gtk_search_entry_get_type()), marshalSearchEntry},
		{glib.Type(C.gtk_separator_get_type()), marshalSeparator},
		{glib.Type(C.gtk_separator_menu_item_get_type()), marshalSeparatorMenuItem},
		{glib.Type(C.gtk_separator_tool_item_get_type()), marshalSeparatorToolItem},
		{glib.Type(C.gtk_spin_button_get_type()), marshalSpinButton},
		{glib.Type(C.gtk_spinner_get_type()), marshalSpinner},
		{glib.Type(C.gtk_statusbar_get_type()), marshalStatusbar},
		{glib.Type(C.gtk_switch_get_type()), marshalSwitch},
		{glib.Type(C.gtk_text_view_get_type()), marshalTextView},
		{glib.Type(C.gtk_text_tag_table_get_type()), marshalTextTagTable},
		{glib.Type(C.gtk_text_buffer_get_type()), marshalTextBuffer},
		{glib.Type(C.gtk_toggle_button_get_type()), marshalToggleButton},
		{glib.Type(C.gtk_toolbar_get_type()), marshalToolbar},
		{glib.Type(C.gtk_tool_button_get_type()), marshalToolButton},
		{glib.Type(C.gtk_tool_item_get_type()), marshalToolItem},
		{glib.Type(C.gtk_tree_model_get_type()), marshalTreeModel},
		{glib.Type(C.gtk_tree_selection_get_type()), marshalTreeSelection},
		{glib.Type(C.gtk_tree_view_get_type()), marshalTreeView},
		{glib.Type(C.gtk_tree_view_column_get_type()), marshalTreeViewColumn},
		{glib.Type(C.gtk_widget_get_type()), marshalWidget},
		{glib.Type(C.gtk_window_get_type()), marshalWindow},

		// Boxed
		{glib.Type(C.gtk_text_iter_get_type()), marshalTextIter},
		{glib.Type(C.gtk_tree_iter_get_type()), marshalTreeIter},
		{glib.Type(C.gtk_tree_path_get_type()), marshalTreePath},
	}
	glib.RegisterGValueMarshalers(tm)
}
/*
* Type conversions
*/
// gbool converts a Go bool into a C gboolean (0 for false, 1 for true).
func gbool(b bool) C.gboolean {
	if !b {
		return C.gboolean(0)
	}
	return C.gboolean(1)
}
// gobool converts a C gboolean into a Go bool. Any nonzero value is
// treated as true, matching GLib's convention.
func gobool(b C.gboolean) bool {
	// The if/else around a boolean expression was redundant; return
	// the comparison directly.
	return b != 0
}
// Wrapper function for TestBoolConvs since cgo can't be used with
// testing package
func testBoolConvs() error {
	// Round-trip Go -> C -> Go.
	if !gobool(gbool(true)) {
		return errors.New("Unexpected bool conversion result")
	}
	// Round-trip C -> Go -> C.
	if gbool(gobool(C.gboolean(0))) != C.gboolean(0) {
		return errors.New("Unexpected bool conversion result")
	}
	return nil
}
/*
 * Unexported vars
 */

// nilPtrErr is the error returned by bindings whose underlying C call
// yielded an unexpected NULL pointer.
var nilPtrErr = errors.New("cgo returned unexpected nil pointer")
/*
 * Constants
 */

// Align is a representation of GTK's GtkAlign.
type Align int

const (
	ALIGN_FILL   Align = C.GTK_ALIGN_FILL
	ALIGN_START  Align = C.GTK_ALIGN_START
	ALIGN_END    Align = C.GTK_ALIGN_END
	ALIGN_CENTER Align = C.GTK_ALIGN_CENTER
)

// marshalAlign extracts a GtkAlign enum value from a GValue
// (glib marshaler hook registered in init).
func marshalAlign(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return Align(c), nil
}
// AccelFlags is a representation of GTK's GtkAccelFlags
type AccelFlags int

const (
	ACCEL_VISIBLE AccelFlags = C.GTK_ACCEL_VISIBLE
	ACCEL_LOCKED  AccelFlags = C.GTK_ACCEL_LOCKED
	ACCEL_MASK    AccelFlags = C.GTK_ACCEL_MASK
)

// marshalAccelFlags extracts a GtkAccelFlags value from a GValue.
// GtkAccelFlags is registered with GLib as a flags type, not an enum,
// so it must be read with g_value_get_flags; g_value_get_enum would
// fail GLib's type check at runtime and return 0.
func marshalAccelFlags(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return AccelFlags(c), nil
}
// ArrowPlacement is a representation of GTK's GtkArrowPlacement.
type ArrowPlacement int

const (
	ARROWS_BOTH  ArrowPlacement = C.GTK_ARROWS_BOTH
	ARROWS_START ArrowPlacement = C.GTK_ARROWS_START
	ARROWS_END   ArrowPlacement = C.GTK_ARROWS_END
)

// marshalArrowPlacement extracts a GtkArrowPlacement enum value from a
// GValue (glib marshaler hook).
func marshalArrowPlacement(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ArrowPlacement(c), nil
}

// ArrowType is a representation of GTK's GtkArrowType.
type ArrowType int

const (
	ARROW_UP    ArrowType = C.GTK_ARROW_UP
	ARROW_DOWN  ArrowType = C.GTK_ARROW_DOWN
	ARROW_LEFT  ArrowType = C.GTK_ARROW_LEFT
	ARROW_RIGHT ArrowType = C.GTK_ARROW_RIGHT
	ARROW_NONE  ArrowType = C.GTK_ARROW_NONE
)

// marshalArrowType extracts a GtkArrowType enum value from a GValue.
func marshalArrowType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ArrowType(c), nil
}

// AssistantPageType is a representation of GTK's GtkAssistantPageType.
type AssistantPageType int

const (
	ASSISTANT_PAGE_CONTENT  AssistantPageType = C.GTK_ASSISTANT_PAGE_CONTENT
	ASSISTANT_PAGE_INTRO    AssistantPageType = C.GTK_ASSISTANT_PAGE_INTRO
	ASSISTANT_PAGE_CONFIRM  AssistantPageType = C.GTK_ASSISTANT_PAGE_CONFIRM
	ASSISTANT_PAGE_SUMMARY  AssistantPageType = C.GTK_ASSISTANT_PAGE_SUMMARY
	ASSISTANT_PAGE_PROGRESS AssistantPageType = C.GTK_ASSISTANT_PAGE_PROGRESS
	ASSISTANT_PAGE_CUSTOM   AssistantPageType = C.GTK_ASSISTANT_PAGE_CUSTOM
)

// marshalAssistantPageType extracts a GtkAssistantPageType enum value
// from a GValue.
func marshalAssistantPageType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return AssistantPageType(c), nil
}

// ButtonsType is a representation of GTK's GtkButtonsType.
type ButtonsType int

const (
	BUTTONS_NONE      ButtonsType = C.GTK_BUTTONS_NONE
	BUTTONS_OK        ButtonsType = C.GTK_BUTTONS_OK
	BUTTONS_CLOSE     ButtonsType = C.GTK_BUTTONS_CLOSE
	BUTTONS_CANCEL    ButtonsType = C.GTK_BUTTONS_CANCEL
	BUTTONS_YES_NO    ButtonsType = C.GTK_BUTTONS_YES_NO
	BUTTONS_OK_CANCEL ButtonsType = C.GTK_BUTTONS_OK_CANCEL
)

// marshalButtonsType extracts a GtkButtonsType enum value from a GValue.
func marshalButtonsType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ButtonsType(c), nil
}
// CalendarDisplayOptions is a representation of GTK's GtkCalendarDisplayOptions
type CalendarDisplayOptions int

const (
	CALENDAR_SHOW_HEADING      CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_HEADING
	CALENDAR_SHOW_DAY_NAMES    CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_DAY_NAMES
	CALENDAR_NO_MONTH_CHANGE   CalendarDisplayOptions = C.GTK_CALENDAR_NO_MONTH_CHANGE
	CALENDAR_SHOW_WEEK_NUMBERS CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_WEEK_NUMBERS
	CALENDAR_SHOW_DETAILS      CalendarDisplayOptions = C.GTK_CALENDAR_SHOW_DETAILS
)

// marshalCalendarDisplayOptions extracts a GtkCalendarDisplayOptions
// value from a GValue. GtkCalendarDisplayOptions is a flags type, so
// g_value_get_flags must be used; g_value_get_enum would fail GLib's
// type check and return 0.
func marshalCalendarDisplayOptions(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return CalendarDisplayOptions(c), nil
}
// DialogFlags is a representation of GTK's GtkDialogFlags.
type DialogFlags int

const (
	DIALOG_MODAL               DialogFlags = C.GTK_DIALOG_MODAL
	DIALOG_DESTROY_WITH_PARENT DialogFlags = C.GTK_DIALOG_DESTROY_WITH_PARENT
)

// marshalDialogFlags extracts a GtkDialogFlags value from a GValue.
// GtkDialogFlags is a flags type, so g_value_get_flags must be used;
// g_value_get_enum would fail GLib's type check and return 0.
func marshalDialogFlags(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return DialogFlags(c), nil
}
// EntryIconPosition is a representation of GTK's GtkEntryIconPosition.
type EntryIconPosition int

const (
	ENTRY_ICON_PRIMARY   EntryIconPosition = C.GTK_ENTRY_ICON_PRIMARY
	ENTRY_ICON_SECONDARY EntryIconPosition = C.GTK_ENTRY_ICON_SECONDARY
)

// marshalEntryIconPosition extracts a GtkEntryIconPosition enum value
// from a GValue (glib marshaler hook).
func marshalEntryIconPosition(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return EntryIconPosition(c), nil
}

// FileChooserAction is a representation of GTK's GtkFileChooserAction.
type FileChooserAction int

const (
	FILE_CHOOSER_ACTION_OPEN          FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_OPEN
	FILE_CHOOSER_ACTION_SAVE          FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_SAVE
	FILE_CHOOSER_ACTION_SELECT_FOLDER FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_SELECT_FOLDER
	FILE_CHOOSER_ACTION_CREATE_FOLDER FileChooserAction = C.GTK_FILE_CHOOSER_ACTION_CREATE_FOLDER
)

// marshalFileChooserAction extracts a GtkFileChooserAction enum value
// from a GValue.
func marshalFileChooserAction(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return FileChooserAction(c), nil
}

// IconSize is a representation of GTK's GtkIconSize.
type IconSize int

const (
	ICON_SIZE_INVALID       IconSize = C.GTK_ICON_SIZE_INVALID
	ICON_SIZE_MENU          IconSize = C.GTK_ICON_SIZE_MENU
	ICON_SIZE_SMALL_TOOLBAR IconSize = C.GTK_ICON_SIZE_SMALL_TOOLBAR
	ICON_SIZE_LARGE_TOOLBAR IconSize = C.GTK_ICON_SIZE_LARGE_TOOLBAR
	ICON_SIZE_BUTTON        IconSize = C.GTK_ICON_SIZE_BUTTON
	ICON_SIZE_DND           IconSize = C.GTK_ICON_SIZE_DND
	ICON_SIZE_DIALOG        IconSize = C.GTK_ICON_SIZE_DIALOG
)

// marshalIconSize extracts a GtkIconSize enum value from a GValue.
func marshalIconSize(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return IconSize(c), nil
}

// ImageType is a representation of GTK's GtkImageType.
type ImageType int

const (
	IMAGE_EMPTY     ImageType = C.GTK_IMAGE_EMPTY
	IMAGE_PIXBUF    ImageType = C.GTK_IMAGE_PIXBUF
	IMAGE_STOCK     ImageType = C.GTK_IMAGE_STOCK
	IMAGE_ICON_SET  ImageType = C.GTK_IMAGE_ICON_SET
	IMAGE_ANIMATION ImageType = C.GTK_IMAGE_ANIMATION
	IMAGE_ICON_NAME ImageType = C.GTK_IMAGE_ICON_NAME
	IMAGE_GICON     ImageType = C.GTK_IMAGE_GICON
)

// marshalImageType extracts a GtkImageType enum value from a GValue.
func marshalImageType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ImageType(c), nil
}
// InputHints is a representation of GTK's GtkInputHints.
type InputHints int

const (
	INPUT_HINT_NONE                InputHints = C.GTK_INPUT_HINT_NONE
	INPUT_HINT_SPELLCHECK          InputHints = C.GTK_INPUT_HINT_SPELLCHECK
	INPUT_HINT_NO_SPELLCHECK       InputHints = C.GTK_INPUT_HINT_NO_SPELLCHECK
	INPUT_HINT_WORD_COMPLETION     InputHints = C.GTK_INPUT_HINT_WORD_COMPLETION
	INPUT_HINT_LOWERCASE           InputHints = C.GTK_INPUT_HINT_LOWERCASE
	INPUT_HINT_UPPERCASE_CHARS     InputHints = C.GTK_INPUT_HINT_UPPERCASE_CHARS
	INPUT_HINT_UPPERCASE_WORDS     InputHints = C.GTK_INPUT_HINT_UPPERCASE_WORDS
	INPUT_HINT_UPPERCASE_SENTENCES InputHints = C.GTK_INPUT_HINT_UPPERCASE_SENTENCES
	INPUT_HINT_INHIBIT_OSK         InputHints = C.GTK_INPUT_HINT_INHIBIT_OSK
)

// marshalInputHints extracts a GtkInputHints value from a GValue.
// GtkInputHints is a flags type, so g_value_get_flags must be used;
// g_value_get_enum would fail GLib's type check and return 0.
func marshalInputHints(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return InputHints(c), nil
}
// InputPurpose is a representation of GTK's GtkInputPurpose.
type InputPurpose int

const (
	INPUT_PURPOSE_FREE_FORM InputPurpose = C.GTK_INPUT_PURPOSE_FREE_FORM
	INPUT_PURPOSE_ALPHA     InputPurpose = C.GTK_INPUT_PURPOSE_ALPHA
	INPUT_PURPOSE_DIGITS    InputPurpose = C.GTK_INPUT_PURPOSE_DIGITS
	INPUT_PURPOSE_NUMBER    InputPurpose = C.GTK_INPUT_PURPOSE_NUMBER
	INPUT_PURPOSE_PHONE     InputPurpose = C.GTK_INPUT_PURPOSE_PHONE
	INPUT_PURPOSE_URL       InputPurpose = C.GTK_INPUT_PURPOSE_URL
	INPUT_PURPOSE_EMAIL     InputPurpose = C.GTK_INPUT_PURPOSE_EMAIL
	INPUT_PURPOSE_NAME      InputPurpose = C.GTK_INPUT_PURPOSE_NAME
	INPUT_PURPOSE_PASSWORD  InputPurpose = C.GTK_INPUT_PURPOSE_PASSWORD
	INPUT_PURPOSE_PIN       InputPurpose = C.GTK_INPUT_PURPOSE_PIN
)

// marshalInputPurpose extracts a GtkInputPurpose enum value from a
// GValue (glib marshaler hook).
func marshalInputPurpose(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return InputPurpose(c), nil
}

// Justification is a representation of GTK's GtkJustification.
type Justification int

const (
	JUSTIFY_LEFT   Justification = C.GTK_JUSTIFY_LEFT
	JUSTIFY_RIGHT  Justification = C.GTK_JUSTIFY_RIGHT
	JUSTIFY_CENTER Justification = C.GTK_JUSTIFY_CENTER
	JUSTIFY_FILL   Justification = C.GTK_JUSTIFY_FILL
)

// marshalJustification extracts a GtkJustification enum value from a
// GValue.
func marshalJustification(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return Justification(c), nil
}
// License is a representation of GTK's GtkLicense.
type License int

const (
	LICENSE_UNKNOWN      License = C.GTK_LICENSE_UNKNOWN
	LICENSE_CUSTOM       License = C.GTK_LICENSE_CUSTOM
	LICENSE_GPL_2_0      License = C.GTK_LICENSE_GPL_2_0
	LICENSE_GPL_3_0      License = C.GTK_LICENSE_GPL_3_0
	LICENSE_LGPL_2_1     License = C.GTK_LICENSE_LGPL_2_1
	LICENSE_LGPL_3_0     License = C.GTK_LICENSE_LGPL_3_0
	LICENSE_BSD          License = C.GTK_LICENSE_BSD
	LICENSE_MIT_X11      License = C.GTK_LICENSE_MIT_X11
	LICENSE_GTK_ARTISTIC License = C.GTK_LICENSE_ARTISTIC
)

// marshalLicense extracts a GtkLicense enum value from a GValue
// (glib marshaler hook registered in init).
func marshalLicense(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return License(c), nil
}

// MessageType is a representation of GTK's GtkMessageType.
type MessageType int

const (
	MESSAGE_INFO     MessageType = C.GTK_MESSAGE_INFO
	MESSAGE_WARNING  MessageType = C.GTK_MESSAGE_WARNING
	MESSAGE_QUESTION MessageType = C.GTK_MESSAGE_QUESTION
	MESSAGE_ERROR    MessageType = C.GTK_MESSAGE_ERROR
	MESSAGE_OTHER    MessageType = C.GTK_MESSAGE_OTHER
)

// marshalMessageType extracts a GtkMessageType enum value from a GValue.
func marshalMessageType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return MessageType(c), nil
}

// Orientation is a representation of GTK's GtkOrientation.
type Orientation int

const (
	ORIENTATION_HORIZONTAL Orientation = C.GTK_ORIENTATION_HORIZONTAL
	ORIENTATION_VERTICAL   Orientation = C.GTK_ORIENTATION_VERTICAL
)

// marshalOrientation extracts a GtkOrientation enum value from a GValue.
func marshalOrientation(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return Orientation(c), nil
}

// PackType is a representation of GTK's GtkPackType.
type PackType int

const (
	PACK_START PackType = C.GTK_PACK_START
	PACK_END   PackType = C.GTK_PACK_END
)

// marshalPackType extracts a GtkPackType enum value from a GValue.
func marshalPackType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return PackType(c), nil
}

// PathType is a representation of GTK's GtkPathType.
type PathType int

const (
	PATH_WIDGET       PathType = C.GTK_PATH_WIDGET
	PATH_WIDGET_CLASS PathType = C.GTK_PATH_WIDGET_CLASS
	PATH_CLASS        PathType = C.GTK_PATH_CLASS
)

// marshalPathType extracts a GtkPathType enum value from a GValue.
func marshalPathType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return PathType(c), nil
}

// PolicyType is a representation of GTK's GtkPolicyType.
type PolicyType int

const (
	POLICY_ALWAYS    PolicyType = C.GTK_POLICY_ALWAYS
	POLICY_AUTOMATIC PolicyType = C.GTK_POLICY_AUTOMATIC
	POLICY_NEVER     PolicyType = C.GTK_POLICY_NEVER
)

// marshalPolicyType extracts a GtkPolicyType enum value from a GValue.
func marshalPolicyType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return PolicyType(c), nil
}

// PositionType is a representation of GTK's GtkPositionType.
type PositionType int

const (
	POS_LEFT   PositionType = C.GTK_POS_LEFT
	POS_RIGHT  PositionType = C.GTK_POS_RIGHT
	POS_TOP    PositionType = C.GTK_POS_TOP
	POS_BOTTOM PositionType = C.GTK_POS_BOTTOM
)

// marshalPositionType extracts a GtkPositionType enum value from a GValue.
func marshalPositionType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return PositionType(c), nil
}

// ReliefStyle is a representation of GTK's GtkReliefStyle.
type ReliefStyle int

const (
	RELIEF_NORMAL ReliefStyle = C.GTK_RELIEF_NORMAL
	RELIEF_HALF   ReliefStyle = C.GTK_RELIEF_HALF
	RELIEF_NONE   ReliefStyle = C.GTK_RELIEF_NONE
)

// marshalReliefStyle extracts a GtkReliefStyle enum value from a GValue.
func marshalReliefStyle(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ReliefStyle(c), nil
}

// ResponseType is a representation of GTK's GtkResponseType.
type ResponseType int

const (
	RESPONSE_NONE         ResponseType = C.GTK_RESPONSE_NONE
	RESPONSE_REJECT       ResponseType = C.GTK_RESPONSE_REJECT
	RESPONSE_ACCEPT       ResponseType = C.GTK_RESPONSE_ACCEPT
	RESPONSE_DELETE_EVENT ResponseType = C.GTK_RESPONSE_DELETE_EVENT
	RESPONSE_OK           ResponseType = C.GTK_RESPONSE_OK
	RESPONSE_CANCEL       ResponseType = C.GTK_RESPONSE_CANCEL
	RESPONSE_CLOSE        ResponseType = C.GTK_RESPONSE_CLOSE
	RESPONSE_YES          ResponseType = C.GTK_RESPONSE_YES
	RESPONSE_NO           ResponseType = C.GTK_RESPONSE_NO
	RESPONSE_APPLY        ResponseType = C.GTK_RESPONSE_APPLY
	RESPONSE_HELP         ResponseType = C.GTK_RESPONSE_HELP
)

// marshalResponseType extracts a GtkResponseType enum value from a GValue.
func marshalResponseType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ResponseType(c), nil
}

// SelectionMode is a representation of GTK's GtkSelectionMode.
type SelectionMode int

const (
	SELECTION_NONE     SelectionMode = C.GTK_SELECTION_NONE
	SELECTION_SINGLE   SelectionMode = C.GTK_SELECTION_SINGLE
	SELECTION_BROWSE   SelectionMode = C.GTK_SELECTION_BROWSE
	SELECTION_MULTIPLE SelectionMode = C.GTK_SELECTION_MULTIPLE
)

// marshalSelectionMode extracts a GtkSelectionMode enum value from a GValue.
func marshalSelectionMode(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return SelectionMode(c), nil
}

// ShadowType is a representation of GTK's GtkShadowType.
type ShadowType int

const (
	SHADOW_NONE       ShadowType = C.GTK_SHADOW_NONE
	SHADOW_IN         ShadowType = C.GTK_SHADOW_IN
	SHADOW_OUT        ShadowType = C.GTK_SHADOW_OUT
	SHADOW_ETCHED_IN  ShadowType = C.GTK_SHADOW_ETCHED_IN
	SHADOW_ETCHED_OUT ShadowType = C.GTK_SHADOW_ETCHED_OUT
)

// marshalShadowType extracts a GtkShadowType enum value from a GValue.
func marshalShadowType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ShadowType(c), nil
}
// StateFlags is a representation of GTK's GtkStateFlags.
type StateFlags int

const (
	STATE_FLAG_NORMAL       StateFlags = C.GTK_STATE_FLAG_NORMAL
	STATE_FLAG_ACTIVE       StateFlags = C.GTK_STATE_FLAG_ACTIVE
	STATE_FLAG_PRELIGHT     StateFlags = C.GTK_STATE_FLAG_PRELIGHT
	STATE_FLAG_SELECTED     StateFlags = C.GTK_STATE_FLAG_SELECTED
	STATE_FLAG_INSENSITIVE  StateFlags = C.GTK_STATE_FLAG_INSENSITIVE
	STATE_FLAG_INCONSISTENT StateFlags = C.GTK_STATE_FLAG_INCONSISTENT
	STATE_FLAG_FOCUSED      StateFlags = C.GTK_STATE_FLAG_FOCUSED
	STATE_FLAG_BACKDROP     StateFlags = C.GTK_STATE_FLAG_BACKDROP
)

// marshalStateFlags extracts a GtkStateFlags value from a GValue.
// GtkStateFlags is a flags type, so g_value_get_flags must be used;
// g_value_get_enum would fail GLib's type check and return 0.
func marshalStateFlags(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return StateFlags(c), nil
}
// ToolbarStyle is a representation of GTK's GtkToolbarStyle.
type ToolbarStyle int

const (
	TOOLBAR_ICONS      ToolbarStyle = C.GTK_TOOLBAR_ICONS
	TOOLBAR_TEXT       ToolbarStyle = C.GTK_TOOLBAR_TEXT
	TOOLBAR_BOTH       ToolbarStyle = C.GTK_TOOLBAR_BOTH
	TOOLBAR_BOTH_HORIZ ToolbarStyle = C.GTK_TOOLBAR_BOTH_HORIZ
)

// marshalToolbarStyle extracts a GtkToolbarStyle enum value from a
// GValue (glib marshaler hook).
func marshalToolbarStyle(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return ToolbarStyle(c), nil
}
// TreeModelFlags is a representation of GTK's GtkTreeModelFlags.
type TreeModelFlags int

const (
	TREE_MODEL_ITERS_PERSIST TreeModelFlags = C.GTK_TREE_MODEL_ITERS_PERSIST
	TREE_MODEL_LIST_ONLY     TreeModelFlags = C.GTK_TREE_MODEL_LIST_ONLY
)

// marshalTreeModelFlags extracts a GtkTreeModelFlags value from a
// GValue. GtkTreeModelFlags is a flags type, so g_value_get_flags must
// be used; g_value_get_enum would fail GLib's type check and return 0.
func marshalTreeModelFlags(p uintptr) (interface{}, error) {
	c := C.g_value_get_flags((*C.GValue)(unsafe.Pointer(p)))
	return TreeModelFlags(c), nil
}
// WindowPosition is a representation of GTK's GtkWindowPosition.
type WindowPosition int

const (
	WIN_POS_NONE             WindowPosition = C.GTK_WIN_POS_NONE
	WIN_POS_CENTER           WindowPosition = C.GTK_WIN_POS_CENTER
	WIN_POS_MOUSE            WindowPosition = C.GTK_WIN_POS_MOUSE
	WIN_POS_CENTER_ALWAYS    WindowPosition = C.GTK_WIN_POS_CENTER_ALWAYS
	WIN_POS_CENTER_ON_PARENT WindowPosition = C.GTK_WIN_POS_CENTER_ON_PARENT
)

// marshalWindowPosition extracts a GtkWindowPosition enum value from a
// GValue (glib marshaler hook).
func marshalWindowPosition(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return WindowPosition(c), nil
}

// WindowType is a representation of GTK's GtkWindowType.
type WindowType int

const (
	WINDOW_TOPLEVEL WindowType = C.GTK_WINDOW_TOPLEVEL
	WINDOW_POPUP    WindowType = C.GTK_WINDOW_POPUP
)

// marshalWindowType extracts a GtkWindowType enum value from a GValue.
func marshalWindowType(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return WindowType(c), nil
}

// WrapMode is a representation of GTK's GtkWrapMode.
type WrapMode int

const (
	WRAP_NONE      WrapMode = C.GTK_WRAP_NONE
	WRAP_CHAR      WrapMode = C.GTK_WRAP_CHAR
	WRAP_WORD      WrapMode = C.GTK_WRAP_WORD
	WRAP_WORD_CHAR WrapMode = C.GTK_WRAP_WORD_CHAR
)

// marshalWrapMode extracts a GtkWrapMode enum value from a GValue.
func marshalWrapMode(p uintptr) (interface{}, error) {
	c := C.g_value_get_enum((*C.GValue)(unsafe.Pointer(p)))
	return WrapMode(c), nil
}
/*
 * Init and main event loop
 */

/*
Init() is a wrapper around gtk_init() and must be called before any
other GTK calls and is used to initialize everything necessary.

In addition to setting up GTK for usage, a pointer to a slice of
strings may be passed in to parse standard GTK command line arguments.
args will be modified to remove any flags that were handled.
Alternatively, nil may be passed in to not perform any command line
parsing.
*/
func Init(args *[]string) {
	if args != nil {
		argc := C.int(len(*args))
		argv := make([]*C.char, argc)
		for i, arg := range *args {
			argv[i] = C.CString(arg)
		}
		// gtk_init may rewrite both argc and the argv vector in place
		// to strip the options it consumed. Passing &argv reinterpreted
		// as ***C.char relies on the Go slice header beginning with its
		// data pointer — NOTE(review): this depends on the runtime's
		// slice-header layout; confirm it still holds for the targeted
		// Go versions.
		C.gtk_init((*C.int)(unsafe.Pointer(&argc)),
			(***C.char)(unsafe.Pointer(&argv)))
		// Copy the arguments gtk_init did not consume back into *args,
		// freeing each C string allocated above.
		unhandled := make([]string, argc)
		for i := 0; i < int(argc); i++ {
			unhandled[i] = C.GoString(argv[i])
			C.free(unsafe.Pointer(argv[i]))
		}
		*args = unhandled
	} else {
		// No argument parsing requested.
		C.gtk_init(nil, nil)
	}
}
// Main() is a wrapper around gtk_main() and runs the GTK main loop,
// blocking until MainQuit() is called. It does not return until the
// main loop terminates.
func Main() {
	C.gtk_main()
}

// MainQuit() is a wrapper around gtk_main_quit() is used to terminate
// the GTK main loop (started by Main()).
func MainQuit() {
	C.gtk_main_quit()
}
/*
 * GtkAboutDialog
 */

// AboutDialog is a representation of GTK's GtkAboutDialog.
type AboutDialog struct {
	Dialog
}

// native returns a pointer to the underlying GtkAboutDialog.
// It returns nil when the receiver or its GObject is nil, so cgo
// calls receive a NULL instance instead of dereferencing a nil
// Go pointer.
func (v *AboutDialog) native() *C.GtkAboutDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkAboutDialog(p)
}

// marshalAboutDialog converts a GValue holding a GtkAboutDialog into
// an *AboutDialog (glib marshaler hook registered in init).
func marshalAboutDialog(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapAboutDialog(obj), nil
}

// wrapAboutDialog wraps a glib.Object in the full AboutDialog
// embedding chain (Dialog > Window > Bin > Container > Widget).
func wrapAboutDialog(obj *glib.Object) *AboutDialog {
	return &AboutDialog{Dialog{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}}
}
// AboutDialogNew is a wrapper around gtk_about_dialog_new().
// A nilPtrErr is returned if GTK fails to create the dialog.
func AboutDialogNew() (*AboutDialog, error) {
	c := C.gtk_about_dialog_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAboutDialog(obj)
	// Take ownership of the floating reference, then arrange for the
	// Go GC to drop it when the wrapper becomes unreachable.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}
// GetComments is a wrapper around gtk_about_dialog_get_comments().
// Note: C.GoString on a NULL *char yields "", so an unset property
// is returned as the empty string.
func (v *AboutDialog) GetComments() string {
	c := C.gtk_about_dialog_get_comments(v.native())
	return C.GoString((*C.char)(c))
}

// SetComments is a wrapper around gtk_about_dialog_set_comments().
// The C string is freed after the call; GTK copies the value.
func (v *AboutDialog) SetComments(comments string) {
	cstr := C.CString(comments)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_comments(v.native(), (*C.gchar)(cstr))
}

// GetCopyright is a wrapper around gtk_about_dialog_get_copyright().
func (v *AboutDialog) GetCopyright() string {
	c := C.gtk_about_dialog_get_copyright(v.native())
	return C.GoString((*C.char)(c))
}

// SetCopyright is a wrapper around gtk_about_dialog_set_copyright().
func (v *AboutDialog) SetCopyright(copyright string) {
	cstr := C.CString(copyright)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_copyright(v.native(), (*C.gchar)(cstr))
}

// GetLicense is a wrapper around gtk_about_dialog_get_license().
func (v *AboutDialog) GetLicense() string {
	c := C.gtk_about_dialog_get_license(v.native())
	return C.GoString((*C.char)(c))
}

// SetLicense is a wrapper around gtk_about_dialog_set_license().
func (v *AboutDialog) SetLicense(license string) {
	cstr := C.CString(license)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_license(v.native(), (*C.gchar)(cstr))
}

// GetLicenseType is a wrapper around gtk_about_dialog_get_license_type().
func (v *AboutDialog) GetLicenseType() License {
	c := C.gtk_about_dialog_get_license_type(v.native())
	return License(c)
}

// SetLicenseType is a wrapper around gtk_about_dialog_set_license_type().
func (v *AboutDialog) SetLicenseType(license License) {
	C.gtk_about_dialog_set_license_type(v.native(), C.GtkLicense(license))
}

// GetLogoIconName is a wrapper around gtk_about_dialog_get_logo_icon_name().
func (v *AboutDialog) GetLogoIconName() string {
	c := C.gtk_about_dialog_get_logo_icon_name(v.native())
	return C.GoString((*C.char)(c))
}

// SetLogoIconName is a wrapper around gtk_about_dialog_set_logo_icon_name().
func (v *AboutDialog) SetLogoIconName(name string) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_logo_icon_name(v.native(), (*C.gchar)(cstr))
}

// GetProgramName is a wrapper around gtk_about_dialog_get_program_name().
func (v *AboutDialog) GetProgramName() string {
	c := C.gtk_about_dialog_get_program_name(v.native())
	return C.GoString((*C.char)(c))
}

// SetProgramName is a wrapper around gtk_about_dialog_set_program_name().
func (v *AboutDialog) SetProgramName(name string) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_program_name(v.native(), (*C.gchar)(cstr))
}

// GetTranslatorCredits is a wrapper around gtk_about_dialog_get_translator_credits().
func (v *AboutDialog) GetTranslatorCredits() string {
	c := C.gtk_about_dialog_get_translator_credits(v.native())
	return C.GoString((*C.char)(c))
}

// SetTranslatorCredits is a wrapper around gtk_about_dialog_set_translator_credits().
func (v *AboutDialog) SetTranslatorCredits(translatorCredits string) {
	cstr := C.CString(translatorCredits)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_translator_credits(v.native(), (*C.gchar)(cstr))
}

// GetVersion is a wrapper around gtk_about_dialog_get_version().
func (v *AboutDialog) GetVersion() string {
	c := C.gtk_about_dialog_get_version(v.native())
	return C.GoString((*C.char)(c))
}

// SetVersion is a wrapper around gtk_about_dialog_set_version().
func (v *AboutDialog) SetVersion(version string) {
	cstr := C.CString(version)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_version(v.native(), (*C.gchar)(cstr))
}

// GetWebsite is a wrapper around gtk_about_dialog_get_website().
func (v *AboutDialog) GetWebsite() string {
	c := C.gtk_about_dialog_get_website(v.native())
	return C.GoString((*C.char)(c))
}

// SetWebsite is a wrapper around gtk_about_dialog_set_website().
func (v *AboutDialog) SetWebsite(website string) {
	cstr := C.CString(website)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_website(v.native(), (*C.gchar)(cstr))
}

// GetWebsiteLabel is a wrapper around gtk_about_dialog_get_website_label().
func (v *AboutDialog) GetWebsiteLabel() string {
	c := C.gtk_about_dialog_get_website_label(v.native())
	return C.GoString((*C.char)(c))
}

// SetWebsiteLabel is a wrapper around gtk_about_dialog_set_website_label().
func (v *AboutDialog) SetWebsiteLabel(websiteLabel string) {
	cstr := C.CString(websiteLabel)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_about_dialog_set_website_label(v.native(), (*C.gchar)(cstr))
}

// GetWrapLicense is a wrapper around gtk_about_dialog_get_wrap_license().
func (v *AboutDialog) GetWrapLicense() bool {
	return gobool(C.gtk_about_dialog_get_wrap_license(v.native()))
}

// SetWrapLicense is a wrapper around gtk_about_dialog_set_wrap_license().
func (v *AboutDialog) SetWrapLicense(wrapLicense bool) {
	C.gtk_about_dialog_set_wrap_license(v.native(), gbool(wrapLicense))
}
/*
 * GtkAdjustment
 */

// Adjustment is a representation of GTK's GtkAdjustment.
type Adjustment struct {
	glib.InitiallyUnowned
}

// native returns a pointer to the underlying GtkAdjustment.
func (v *Adjustment) native() *C.GtkAdjustment {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkAdjustment(unsafe.Pointer(v.GObject))
}

func marshalAdjustment(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapAdjustment(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapAdjustment(o *glib.Object) *Adjustment {
	return &Adjustment{glib.InitiallyUnowned{o}}
}
/*
 * GtkAlignment
 */

// Alignment is a representation of GTK's GtkAlignment.
type Alignment struct {
	Bin
}

// native returns a pointer to the underlying GtkAlignment.
func (v *Alignment) native() *C.GtkAlignment {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkAlignment(unsafe.Pointer(v.GObject))
}

func marshalAlignment(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapAlignment(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapAlignment(o *glib.Object) *Alignment {
	return &Alignment{Bin{Container{Widget{glib.InitiallyUnowned{o}}}}}
}

// AlignmentNew is a wrapper around gtk_alignment_new().
func AlignmentNew(xalign, yalign, xscale, yscale float32) (*Alignment, error) {
	w := C.gtk_alignment_new(C.gfloat(xalign), C.gfloat(yalign),
		C.gfloat(xscale), C.gfloat(yscale))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	alignment := wrapAlignment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return alignment, nil
}

// Set is a wrapper around gtk_alignment_set().
func (v *Alignment) Set(xalign, yalign, xscale, yscale float32) {
	C.gtk_alignment_set(v.native(), C.gfloat(xalign), C.gfloat(yalign),
		C.gfloat(xscale), C.gfloat(yscale))
}

// GetPadding is a wrapper around gtk_alignment_get_padding().
func (v *Alignment) GetPadding() (top, bottom, left, right uint) {
	var t, b, l, r C.guint
	C.gtk_alignment_get_padding(v.native(), &t, &b, &l, &r)
	return uint(t), uint(b), uint(l), uint(r)
}

// SetPadding is a wrapper around gtk_alignment_set_padding().
func (v *Alignment) SetPadding(top, bottom, left, right uint) {
	C.gtk_alignment_set_padding(v.native(), C.guint(top), C.guint(bottom),
		C.guint(left), C.guint(right))
}
/*
 * GtkArrow
 */

// Arrow is a representation of GTK's GtkArrow.
type Arrow struct {
	Misc
}

// native returns a pointer to the underlying GtkArrow.
func (v *Arrow) native() *C.GtkArrow {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkArrow(unsafe.Pointer(v.GObject))
}

func marshalArrow(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapArrow(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapArrow(o *glib.Object) *Arrow {
	return &Arrow{Misc{Widget{glib.InitiallyUnowned{o}}}}
}

// ArrowNew is a wrapper around gtk_arrow_new().
func ArrowNew(arrowType ArrowType, shadowType ShadowType) (*Arrow, error) {
	w := C.gtk_arrow_new(C.GtkArrowType(arrowType), C.GtkShadowType(shadowType))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	arrow := wrapArrow(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return arrow, nil
}

// Set is a wrapper around gtk_arrow_set().
func (v *Arrow) Set(arrowType ArrowType, shadowType ShadowType) {
	C.gtk_arrow_set(v.native(), C.GtkArrowType(arrowType), C.GtkShadowType(shadowType))
}
/*
 * GtkAssistant
 */

// Assistant is a representation of GTK's GtkAssistant.
type Assistant struct {
	Window
}

// native returns a pointer to the underlying GtkAssistant.
func (v *Assistant) native() *C.GtkAssistant {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkAssistant(unsafe.Pointer(v.GObject))
}

func marshalAssistant(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapAssistant(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapAssistant(o *glib.Object) *Assistant {
	return &Assistant{Window{Bin{Container{Widget{glib.InitiallyUnowned{o}}}}}}
}

// AssistantNew is a wrapper around gtk_assistant_new().
func AssistantNew() (*Assistant, error) {
	w := C.gtk_assistant_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	assistant := wrapAssistant(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return assistant, nil
}

// GetCurrentPage is a wrapper around gtk_assistant_get_current_page().
func (v *Assistant) GetCurrentPage() int {
	return int(C.gtk_assistant_get_current_page(v.native()))
}

// SetCurrentPage is a wrapper around gtk_assistant_set_current_page().
func (v *Assistant) SetCurrentPage(pageNum int) {
	C.gtk_assistant_set_current_page(v.native(), C.gint(pageNum))
}

// GetNPages is a wrapper around gtk_assistant_get_n_pages().
func (v *Assistant) GetNPages() int {
	return int(C.gtk_assistant_get_n_pages(v.native()))
}

// GetNthPage is a wrapper around gtk_assistant_get_nth_page().
// It returns nil when pageNum is out of range.
func (v *Assistant) GetNthPage(pageNum int) *Widget {
	w := C.gtk_assistant_get_nth_page(v.native(), C.gint(pageNum))
	if w == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	widget := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return widget
}

// PrependPage is a wrapper around gtk_assistant_prepend_page().
func (v *Assistant) PrependPage(page IWidget) int {
	return int(C.gtk_assistant_prepend_page(v.native(), page.toWidget()))
}

// AppendPage is a wrapper around gtk_assistant_append_page().
func (v *Assistant) AppendPage(page IWidget) int {
	return int(C.gtk_assistant_append_page(v.native(), page.toWidget()))
}

// InsertPage is a wrapper around gtk_assistant_insert_page().
func (v *Assistant) InsertPage(page IWidget, position int) int {
	return int(C.gtk_assistant_insert_page(v.native(), page.toWidget(),
		C.gint(position)))
}

// RemovePage is a wrapper around gtk_assistant_remove_page().
func (v *Assistant) RemovePage(pageNum int) {
	C.gtk_assistant_remove_page(v.native(), C.gint(pageNum))
}

// TODO: gtk_assistant_set_forward_page_func

// SetPageType is a wrapper around gtk_assistant_set_page_type().
func (v *Assistant) SetPageType(page IWidget, ptype AssistantPageType) {
	C.gtk_assistant_set_page_type(v.native(), page.toWidget(),
		C.GtkAssistantPageType(ptype))
}

// GetPageType is a wrapper around gtk_assistant_get_page_type().
func (v *Assistant) GetPageType(page IWidget) AssistantPageType {
	return AssistantPageType(C.gtk_assistant_get_page_type(v.native(), page.toWidget()))
}

// SetPageTitle is a wrapper around gtk_assistant_set_page_title().
func (v *Assistant) SetPageTitle(page IWidget, title string) {
	cs := C.CString(title)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_assistant_set_page_title(v.native(), page.toWidget(), (*C.gchar)(cs))
}

// GetPageTitle is a wrapper around gtk_assistant_get_page_title().
func (v *Assistant) GetPageTitle(page IWidget) string {
	return C.GoString((*C.char)(C.gtk_assistant_get_page_title(v.native(), page.toWidget())))
}

// SetPageComplete is a wrapper around gtk_assistant_set_page_complete().
func (v *Assistant) SetPageComplete(page IWidget, complete bool) {
	C.gtk_assistant_set_page_complete(v.native(), page.toWidget(),
		gbool(complete))
}

// GetPageComplete is a wrapper around gtk_assistant_get_page_complete().
func (v *Assistant) GetPageComplete(page IWidget) bool {
	return gobool(C.gtk_assistant_get_page_complete(v.native(), page.toWidget()))
}

// AddActionWidget is a wrapper around gtk_assistant_add_action_widget().
func (v *Assistant) AddActionWidget(child IWidget) {
	C.gtk_assistant_add_action_widget(v.native(), child.toWidget())
}

// RemoveActionWidget is a wrapper around gtk_assistant_remove_action_widget().
func (v *Assistant) RemoveActionWidget(child IWidget) {
	C.gtk_assistant_remove_action_widget(v.native(), child.toWidget())
}

// UpdateButtonsState is a wrapper around gtk_assistant_update_buttons_state().
func (v *Assistant) UpdateButtonsState() {
	C.gtk_assistant_update_buttons_state(v.native())
}

// Commit is a wrapper around gtk_assistant_commit().
func (v *Assistant) Commit() {
	C.gtk_assistant_commit(v.native())
}

// NextPage is a wrapper around gtk_assistant_next_page().
func (v *Assistant) NextPage() {
	C.gtk_assistant_next_page(v.native())
}

// PreviousPage is a wrapper around gtk_assistant_previous_page().
func (v *Assistant) PreviousPage() {
	C.gtk_assistant_previous_page(v.native())
}
/*
 * GtkBin
 */

// Bin is a representation of GTK's GtkBin.
type Bin struct {
	Container
}

// native returns a pointer to the underlying GtkBin.
func (v *Bin) native() *C.GtkBin {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkBin(unsafe.Pointer(v.GObject))
}

func marshalBin(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapBin(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapBin(o *glib.Object) *Bin {
	return &Bin{Container{Widget{glib.InitiallyUnowned{o}}}}
}

// GetChild is a wrapper around gtk_bin_get_child().
func (v *Bin) GetChild() (*Widget, error) {
	w := C.gtk_bin_get_child(v.native())
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	widget := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return widget, nil
}
/*
 * GtkBuilder
 */

// Builder is a representation of GTK's GtkBuilder.
type Builder struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkBuilder.
func (b *Builder) native() *C.GtkBuilder {
	if b == nil || b.GObject == nil {
		return nil
	}
	return C.toGtkBuilder(unsafe.Pointer(b.GObject))
}

func marshalBuilder(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return &Builder{&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}}, nil
}

// BuilderNew is a wrapper around gtk_builder_new().
func BuilderNew() (*Builder, error) {
	w := C.gtk_builder_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	builder := &Builder{obj}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return builder, nil
}

// AddFromFile is a wrapper around gtk_builder_add_from_file().
func (b *Builder) AddFromFile(filename string) error {
	cs := C.CString(filename)
	defer C.free(unsafe.Pointer(cs))
	var gerr *C.GError
	if C.gtk_builder_add_from_file(b.native(), (*C.gchar)(cs), &gerr) == 0 {
		defer C.g_error_free(gerr)
		return errors.New(C.GoString((*C.char)(C.error_get_message(gerr))))
	}
	return nil
}

// AddFromResource is a wrapper around gtk_builder_add_from_resource().
func (b *Builder) AddFromResource(path string) error {
	cs := C.CString(path)
	defer C.free(unsafe.Pointer(cs))
	var gerr *C.GError
	if C.gtk_builder_add_from_resource(b.native(), (*C.gchar)(cs), &gerr) == 0 {
		defer C.g_error_free(gerr)
		return errors.New(C.GoString((*C.char)(C.error_get_message(gerr))))
	}
	return nil
}

// AddFromString is a wrapper around gtk_builder_add_from_string().
func (b *Builder) AddFromString(str string) error {
	cs := C.CString(str)
	defer C.free(unsafe.Pointer(cs))
	var gerr *C.GError
	if C.gtk_builder_add_from_string(b.native(), (*C.gchar)(cs), (C.gsize)(len(str)), &gerr) == 0 {
		defer C.g_error_free(gerr)
		return errors.New(C.GoString((*C.char)(C.error_get_message(gerr))))
	}
	return nil
}

// GetObject is a wrapper around gtk_builder_get_object(). The returned result
// is an IObject, so it will need to be type-asserted to the appropriate type before
// being used. For example, to get an object and type assert it as a window:
//
//   obj, err := builder.GetObject("window")
//   if err != nil {
//       // object not found
//       return
//   }
//   if w, ok := obj.(*gtk.Window); ok {
//       // do stuff with w here
//   } else {
//       // not a *gtk.Window
//   }
//
func (b *Builder) GetObject(name string) (glib.IObject, error) {
	cs := C.CString(name)
	defer C.free(unsafe.Pointer(cs))
	gobj := C.gtk_builder_get_object(b.native(), (*C.gchar)(cs))
	if gobj == nil {
		return nil, errors.New("object '" + name + "' not found")
	}
	return cast(gobj)
}
/*
 * GtkButton
 */

// Button is a representation of GTK's GtkButton.
type Button struct {
	Bin
}

// native returns a pointer to the underlying GtkButton.
func (v *Button) native() *C.GtkButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkButton(unsafe.Pointer(v.GObject))
}

func marshalButton(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapButton(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapButton(o *glib.Object) *Button {
	return &Button{Bin{Container{Widget{glib.InitiallyUnowned{o}}}}}
}

// ButtonNew is a wrapper around gtk_button_new().
func ButtonNew() (*Button, error) {
	w := C.gtk_button_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}

// ButtonNewWithLabel is a wrapper around gtk_button_new_with_label().
func ButtonNewWithLabel(label string) (*Button, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_button_new_with_label((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}

// ButtonNewWithMnemonic is a wrapper around gtk_button_new_with_mnemonic().
func ButtonNewWithMnemonic(label string) (*Button, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_button_new_with_mnemonic((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}

// Clicked is a wrapper around gtk_button_clicked().
func (v *Button) Clicked() {
	C.gtk_button_clicked(v.native())
}

// SetRelief is a wrapper around gtk_button_set_relief().
func (v *Button) SetRelief(newStyle ReliefStyle) {
	C.gtk_button_set_relief(v.native(), C.GtkReliefStyle(newStyle))
}

// GetRelief is a wrapper around gtk_button_get_relief().
func (v *Button) GetRelief() ReliefStyle {
	return ReliefStyle(C.gtk_button_get_relief(v.native()))
}

// SetLabel is a wrapper around gtk_button_set_label().
func (v *Button) SetLabel(label string) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_button_set_label(v.native(), (*C.gchar)(cs))
}

// GetLabel is a wrapper around gtk_button_get_label().
func (v *Button) GetLabel() (string, error) {
	label := C.gtk_button_get_label(v.native())
	if label == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(label)), nil
}

// SetUseUnderline is a wrapper around gtk_button_set_use_underline().
func (v *Button) SetUseUnderline(useUnderline bool) {
	C.gtk_button_set_use_underline(v.native(), gbool(useUnderline))
}

// GetUseUnderline is a wrapper around gtk_button_get_use_underline().
func (v *Button) GetUseUnderline() bool {
	return gobool(C.gtk_button_get_use_underline(v.native()))
}

// SetFocusOnClick is a wrapper around gtk_button_set_focus_on_click().
func (v *Button) SetFocusOnClick(focusOnClick bool) {
	C.gtk_button_set_focus_on_click(v.native(), gbool(focusOnClick))
}

// GetFocusOnClick is a wrapper around gtk_button_get_focus_on_click().
func (v *Button) GetFocusOnClick() bool {
	return gobool(C.gtk_button_get_focus_on_click(v.native()))
}

// SetAlignment is a wrapper around gtk_button_set_alignment().
func (v *Button) SetAlignment(xalign, yalign float32) {
	C.gtk_button_set_alignment(v.native(), (C.gfloat)(xalign), (C.gfloat)(yalign))
}

// GetAlignment is a wrapper around gtk_button_get_alignment().
func (v *Button) GetAlignment() (xalign, yalign float32) {
	var cx, cy C.gfloat
	C.gtk_button_get_alignment(v.native(), &cx, &cy)
	return float32(cx), float32(cy)
}

// SetImage is a wrapper around gtk_button_set_image().
func (v *Button) SetImage(image IWidget) {
	C.gtk_button_set_image(v.native(), image.toWidget())
}

// GetImage is a wrapper around gtk_button_get_image().
func (v *Button) GetImage() (*Widget, error) {
	w := C.gtk_button_get_image(v.native())
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	widget := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return widget, nil
}

// SetImagePosition is a wrapper around gtk_button_set_image_position().
func (v *Button) SetImagePosition(position PositionType) {
	C.gtk_button_set_image_position(v.native(), C.GtkPositionType(position))
}

// GetImagePosition is a wrapper around gtk_button_get_image_position().
func (v *Button) GetImagePosition() PositionType {
	return PositionType(C.gtk_button_get_image_position(v.native()))
}

// SetAlwaysShowImage is a wrapper around gtk_button_set_always_show_image().
func (v *Button) SetAlwaysShowImage(alwaysShow bool) {
	C.gtk_button_set_always_show_image(v.native(), gbool(alwaysShow))
}

// GetAlwaysShowImage is a wrapper around gtk_button_get_always_show_image().
func (v *Button) GetAlwaysShowImage() bool {
	return gobool(C.gtk_button_get_always_show_image(v.native()))
}

// GetEventWindow is a wrapper around gtk_button_get_event_window().
func (v *Button) GetEventWindow() (*gdk.Window, error) {
	w := C.gtk_button_get_event_window(v.native())
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	win := &gdk.Window{obj}
	win.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return win, nil
}
/*
 * GtkBox
 */

// Box is a representation of GTK's GtkBox.
type Box struct {
	Container
}

// native returns a pointer to the underlying GtkBox.
func (v *Box) native() *C.GtkBox {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkBox(unsafe.Pointer(v.GObject))
}

func marshalBox(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapBox(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapBox(o *glib.Object) *Box {
	return &Box{Container{Widget{glib.InitiallyUnowned{o}}}}
}

// BoxNew is a wrapper around gtk_box_new().
func BoxNew(orientation Orientation, spacing int) (*Box, error) {
	w := C.gtk_box_new(C.GtkOrientation(orientation), C.gint(spacing))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	box := wrapBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return box, nil
}

// PackStart is a wrapper around gtk_box_pack_start().
func (v *Box) PackStart(child IWidget, expand, fill bool, padding uint) {
	C.gtk_box_pack_start(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding))
}

// PackEnd is a wrapper around gtk_box_pack_end().
func (v *Box) PackEnd(child IWidget, expand, fill bool, padding uint) {
	C.gtk_box_pack_end(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding))
}

// GetHomogeneous is a wrapper around gtk_box_get_homogeneous().
func (v *Box) GetHomogeneous() bool {
	return gobool(C.gtk_box_get_homogeneous(v.native()))
}

// SetHomogeneous is a wrapper around gtk_box_set_homogeneous().
func (v *Box) SetHomogeneous(homogeneous bool) {
	C.gtk_box_set_homogeneous(v.native(), gbool(homogeneous))
}

// GetSpacing is a wrapper around gtk_box_get_spacing().
func (v *Box) GetSpacing() int {
	return int(C.gtk_box_get_spacing(v.native()))
}

// SetSpacing is a wrapper around gtk_box_set_spacing().
func (v *Box) SetSpacing(spacing int) {
	C.gtk_box_set_spacing(v.native(), C.gint(spacing))
}

// ReorderChild is a wrapper around gtk_box_reorder_child().
func (v *Box) ReorderChild(child IWidget, position int) {
	C.gtk_box_reorder_child(v.native(), child.toWidget(), C.gint(position))
}

// QueryChildPacking is a wrapper around gtk_box_query_child_packing().
func (v *Box) QueryChildPacking(child IWidget) (expand, fill bool, padding uint, packType PackType) {
	var ce, cf C.gboolean
	var cp C.guint
	var cpt C.GtkPackType
	C.gtk_box_query_child_packing(v.native(), child.toWidget(), &ce, &cf, &cp, &cpt)
	return gobool(ce), gobool(cf), uint(cp), PackType(cpt)
}

// SetChildPacking is a wrapper around gtk_box_set_child_packing().
func (v *Box) SetChildPacking(child IWidget, expand, fill bool, padding uint, packType PackType) {
	C.gtk_box_set_child_packing(v.native(), child.toWidget(), gbool(expand),
		gbool(fill), C.guint(padding), C.GtkPackType(packType))
}
/*
 * GtkCalendar
 */

// Calendar is a representation of GTK's GtkCalendar.
type Calendar struct {
	Widget
}

// native returns a pointer to the underlying GtkCalendar.
func (v *Calendar) native() *C.GtkCalendar {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCalendar(unsafe.Pointer(v.GObject))
}

func marshalCalendar(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCalendar(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCalendar(o *glib.Object) *Calendar {
	return &Calendar{Widget{glib.InitiallyUnowned{o}}}
}

// CalendarNew is a wrapper around gtk_calendar_new().
func CalendarNew() (*Calendar, error) {
	w := C.gtk_calendar_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	calendar := wrapCalendar(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return calendar, nil
}

// SelectMonth is a wrapper around gtk_calendar_select_month().
func (v *Calendar) SelectMonth(month, year uint) {
	C.gtk_calendar_select_month(v.native(), C.guint(month), C.guint(year))
}

// SelectDay is a wrapper around gtk_calendar_select_day().
func (v *Calendar) SelectDay(day uint) {
	C.gtk_calendar_select_day(v.native(), C.guint(day))
}

// MarkDay is a wrapper around gtk_calendar_mark_day().
func (v *Calendar) MarkDay(day uint) {
	C.gtk_calendar_mark_day(v.native(), C.guint(day))
}

// UnmarkDay is a wrapper around gtk_calendar_unmark_day().
func (v *Calendar) UnmarkDay(day uint) {
	C.gtk_calendar_unmark_day(v.native(), C.guint(day))
}

// GetDayIsMarked is a wrapper around gtk_calendar_get_day_is_marked().
func (v *Calendar) GetDayIsMarked(day uint) bool {
	return gobool(C.gtk_calendar_get_day_is_marked(v.native(), C.guint(day)))
}

// ClearMarks is a wrapper around gtk_calendar_clear_marks().
func (v *Calendar) ClearMarks() {
	C.gtk_calendar_clear_marks(v.native())
}

// GetDisplayOptions is a wrapper around gtk_calendar_get_display_options().
func (v *Calendar) GetDisplayOptions() CalendarDisplayOptions {
	return CalendarDisplayOptions(C.gtk_calendar_get_display_options(v.native()))
}

// SetDisplayOptions is a wrapper around gtk_calendar_set_display_options().
func (v *Calendar) SetDisplayOptions(flags CalendarDisplayOptions) {
	C.gtk_calendar_set_display_options(v.native(),
		C.GtkCalendarDisplayOptions(flags))
}

// GetDate is a wrapper around gtk_calendar_get_date().
func (v *Calendar) GetDate() (year, month, day uint) {
	var cy, cm, cd C.guint
	C.gtk_calendar_get_date(v.native(), &cy, &cm, &cd)
	return uint(cy), uint(cm), uint(cd)
}

// TODO gtk_calendar_set_detail_func

// GetDetailWidthChars is a wrapper around gtk_calendar_get_detail_width_chars().
func (v *Calendar) GetDetailWidthChars() int {
	return int(C.gtk_calendar_get_detail_width_chars(v.native()))
}

// SetDetailWidthChars is a wrapper around gtk_calendar_set_detail_width_chars().
func (v *Calendar) SetDetailWidthChars(chars int) {
	C.gtk_calendar_set_detail_width_chars(v.native(), C.gint(chars))
}

// GetDetailHeightRows is a wrapper around gtk_calendar_get_detail_height_rows().
func (v *Calendar) GetDetailHeightRows() int {
	return int(C.gtk_calendar_get_detail_height_rows(v.native()))
}

// SetDetailHeightRows is a wrapper around gtk_calendar_set_detail_height_rows().
func (v *Calendar) SetDetailHeightRows(rows int) {
	C.gtk_calendar_set_detail_height_rows(v.native(), C.gint(rows))
}
/*
 * GtkCellLayout
 */

// CellLayout is a representation of GTK's GtkCellLayout GInterface.
type CellLayout struct {
	*glib.Object
}

// ICellLayout is an interface type implemented by all structs
// embedding a CellLayout. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkCellLayout.
type ICellLayout interface {
	toCellLayout() *C.GtkCellLayout
}

// native returns a pointer to the underlying GObject as a GtkCellLayout.
func (v *CellLayout) native() *C.GtkCellLayout {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCellLayout(unsafe.Pointer(v.GObject))
}

func marshalCellLayout(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCellLayout(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCellLayout(o *glib.Object) *CellLayout {
	return &CellLayout{o}
}

func (v *CellLayout) toCellLayout() *C.GtkCellLayout {
	if v == nil {
		return nil
	}
	return v.native()
}

// PackStart is a wrapper around gtk_cell_layout_pack_start().
func (v *CellLayout) PackStart(cell ICellRenderer, expand bool) {
	C.gtk_cell_layout_pack_start(v.native(), cell.toCellRenderer(), gbool(expand))
}

// AddAttribute is a wrapper around gtk_cell_layout_add_attribute().
func (v *CellLayout) AddAttribute(cell ICellRenderer, attribute string, column int) {
	cs := C.CString(attribute)
	defer C.free(unsafe.Pointer(cs))
	C.gtk_cell_layout_add_attribute(v.native(), cell.toCellRenderer(),
		(*C.gchar)(cs), C.gint(column))
}
/*
 * GtkCellRenderer
 */

// CellRenderer is a representation of GTK's GtkCellRenderer.
type CellRenderer struct {
	glib.InitiallyUnowned
}

// ICellRenderer is an interface type implemented by all structs
// embedding a CellRenderer. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkCellRenderer.
type ICellRenderer interface {
	toCellRenderer() *C.GtkCellRenderer
}

// native returns a pointer to the underlying GtkCellRenderer.
func (v *CellRenderer) native() *C.GtkCellRenderer {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCellRenderer(unsafe.Pointer(v.GObject))
}

func (v *CellRenderer) toCellRenderer() *C.GtkCellRenderer {
	if v == nil {
		return nil
	}
	return v.native()
}

func marshalCellRenderer(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCellRenderer(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCellRenderer(o *glib.Object) *CellRenderer {
	return &CellRenderer{glib.InitiallyUnowned{o}}
}
/*
 * GtkCellRendererText
 */

// CellRendererText is a representation of GTK's GtkCellRendererText.
type CellRendererText struct {
	CellRenderer
}

// native returns a pointer to the underlying GtkCellRendererText.
func (v *CellRendererText) native() *C.GtkCellRendererText {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCellRendererText(unsafe.Pointer(v.GObject))
}

func marshalCellRendererText(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCellRendererText(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCellRendererText(o *glib.Object) *CellRendererText {
	return &CellRendererText{CellRenderer{glib.InitiallyUnowned{o}}}
}

// CellRendererTextNew is a wrapper around gtk_cell_renderer_text_new().
func CellRendererTextNew() (*CellRendererText, error) {
	w := C.gtk_cell_renderer_text_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	renderer := wrapCellRendererText(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return renderer, nil
}
/*
 * GtkCellRendererToggle
 */

// CellRendererToggle is a representation of GTK's GtkCellRendererToggle.
type CellRendererToggle struct {
	CellRenderer
}

// native returns a pointer to the underlying GtkCellRendererToggle.
func (v *CellRendererToggle) native() *C.GtkCellRendererToggle {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCellRendererToggle(unsafe.Pointer(v.GObject))
}

func (v *CellRendererToggle) toCellRenderer() *C.GtkCellRenderer {
	if v == nil {
		return nil
	}
	return v.CellRenderer.native()
}

func marshalCellRendererToggle(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCellRendererToggle(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCellRendererToggle(o *glib.Object) *CellRendererToggle {
	return &CellRendererToggle{CellRenderer{glib.InitiallyUnowned{o}}}
}

// CellRendererToggleNew is a wrapper around gtk_cell_renderer_toggle_new().
func CellRendererToggleNew() (*CellRendererToggle, error) {
	w := C.gtk_cell_renderer_toggle_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	renderer := wrapCellRendererToggle(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return renderer, nil
}

// SetRadio is a wrapper around gtk_cell_renderer_toggle_set_radio().
func (v *CellRendererToggle) SetRadio(set bool) {
	C.gtk_cell_renderer_toggle_set_radio(v.native(), gbool(set))
}

// GetRadio is a wrapper around gtk_cell_renderer_toggle_get_radio().
func (v *CellRendererToggle) GetRadio() bool {
	return gobool(C.gtk_cell_renderer_toggle_get_radio(v.native()))
}

// SetActive is a wrapper around gtk_cell_renderer_toggle_set_active().
func (v *CellRendererToggle) SetActive(active bool) {
	C.gtk_cell_renderer_toggle_set_active(v.native(), gbool(active))
}

// GetActive is a wrapper around gtk_cell_renderer_toggle_get_active().
func (v *CellRendererToggle) GetActive() bool {
	return gobool(C.gtk_cell_renderer_toggle_get_active(v.native()))
}

// SetActivatable is a wrapper around gtk_cell_renderer_toggle_set_activatable().
func (v *CellRendererToggle) SetActivatable(activatable bool) {
	C.gtk_cell_renderer_toggle_set_activatable(v.native(), gbool(activatable))
}

// GetActivatable is a wrapper around gtk_cell_renderer_toggle_get_activatable().
func (v *CellRendererToggle) GetActivatable() bool {
	return gobool(C.gtk_cell_renderer_toggle_get_activatable(v.native()))
}
/*
 * GtkCheckButton
 */

// CheckButton is a wrapper around GTK's GtkCheckButton.
type CheckButton struct {
	ToggleButton
}

// native returns a pointer to the underlying GtkCheckButton.
func (v *CheckButton) native() *C.GtkCheckButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCheckButton(unsafe.Pointer(v.GObject))
}

func marshalCheckButton(p uintptr) (interface{}, error) {
	gobj := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	return wrapCheckButton(&glib.Object{glib.ToGObject(unsafe.Pointer(gobj))}), nil
}

func wrapCheckButton(o *glib.Object) *CheckButton {
	return &CheckButton{ToggleButton{Button{Bin{Container{Widget{
		glib.InitiallyUnowned{o}}}}}}}
}

// CheckButtonNew is a wrapper around gtk_check_button_new().
func CheckButtonNew() (*CheckButton, error) {
	w := C.gtk_check_button_new()
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}

// CheckButtonNewWithLabel is a wrapper around
// gtk_check_button_new_with_label().
func CheckButtonNewWithLabel(label string) (*CheckButton, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_check_button_new_with_label((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}

// CheckButtonNewWithMnemonic is a wrapper around
// gtk_check_button_new_with_mnemonic().
func CheckButtonNewWithMnemonic(label string) (*CheckButton, error) {
	cs := C.CString(label)
	defer C.free(unsafe.Pointer(cs))
	w := C.gtk_check_button_new_with_mnemonic((*C.gchar)(cs))
	if w == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(w))}
	button := wrapCheckButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return button, nil
}
/*
 * GtkCheckMenuItem
 */

// CheckMenuItem is a wrapper around GTK's GtkCheckMenuItem.
type CheckMenuItem struct {
	MenuItem
}

// native returns a pointer to the underlying GtkCheckMenuItem, or nil
// for a nil receiver or a wrapper with no underlying GObject.
func (v *CheckMenuItem) native() *C.GtkCheckMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkCheckMenuItem(p)
}

// marshalCheckMenuItem is the GValue marshaler for GtkCheckMenuItem;
// p points at a GValue holding the object.
func marshalCheckMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapCheckMenuItem(obj), nil
}

// wrapCheckMenuItem builds the Go wrapper hierarchy around an existing
// GObject known to be a GtkCheckMenuItem.
func wrapCheckMenuItem(obj *glib.Object) *CheckMenuItem {
	return &CheckMenuItem{MenuItem{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}

// CheckMenuItemNew is a wrapper around gtk_check_menu_item_new().
func CheckMenuItemNew() (*CheckMenuItem, error) {
	c := C.gtk_check_menu_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	// Sink the floating reference and release it when the Go wrapper
	// is garbage collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// CheckMenuItemNewWithLabel is a wrapper around
// gtk_check_menu_item_new_with_label().
func CheckMenuItemNewWithLabel(label string) (*CheckMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_menu_item_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// CheckMenuItemNewWithMnemonic is a wrapper around
// gtk_check_menu_item_new_with_mnemonic().
func CheckMenuItemNewWithMnemonic(label string) (*CheckMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_check_menu_item_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cm := wrapCheckMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cm, nil
}

// GetActive is a wrapper around gtk_check_menu_item_get_active().
func (v *CheckMenuItem) GetActive() bool {
	c := C.gtk_check_menu_item_get_active(v.native())
	return gobool(c)
}

// SetActive is a wrapper around gtk_check_menu_item_set_active().
func (v *CheckMenuItem) SetActive(isActive bool) {
	C.gtk_check_menu_item_set_active(v.native(), gbool(isActive))
}

// Toggled is a wrapper around gtk_check_menu_item_toggled().
func (v *CheckMenuItem) Toggled() {
	C.gtk_check_menu_item_toggled(v.native())
}

// GetInconsistent is a wrapper around gtk_check_menu_item_get_inconsistent().
func (v *CheckMenuItem) GetInconsistent() bool {
	c := C.gtk_check_menu_item_get_inconsistent(v.native())
	return gobool(c)
}

// SetInconsistent is a wrapper around gtk_check_menu_item_set_inconsistent().
func (v *CheckMenuItem) SetInconsistent(setting bool) {
	C.gtk_check_menu_item_set_inconsistent(v.native(), gbool(setting))
}

// SetDrawAsRadio is a wrapper around gtk_check_menu_item_set_draw_as_radio().
func (v *CheckMenuItem) SetDrawAsRadio(drawAsRadio bool) {
	C.gtk_check_menu_item_set_draw_as_radio(v.native(), gbool(drawAsRadio))
}

// GetDrawAsRadio is a wrapper around gtk_check_menu_item_get_draw_as_radio().
func (v *CheckMenuItem) GetDrawAsRadio() bool {
	c := C.gtk_check_menu_item_get_draw_as_radio(v.native())
	return gobool(c)
}
/*
 * GtkClipboard
 */

// Clipboard is a wrapper around GTK's GtkClipboard.
type Clipboard struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkClipboard, or nil for
// a nil receiver or a wrapper with no underlying GObject.
func (v *Clipboard) native() *C.GtkClipboard {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkClipboard(p)
}

// marshalClipboard is the GValue marshaler for GtkClipboard; p points
// at a GValue holding the object.
func marshalClipboard(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapClipboard(obj), nil
}

// wrapClipboard builds a Clipboard wrapper around an existing GObject.
func wrapClipboard(obj *glib.Object) *Clipboard {
	return &Clipboard{obj}
}

// ClipboardGet() is a wrapper around gtk_clipboard_get().
func ClipboardGet(atom gdk.Atom) (*Clipboard, error) {
	c := C.gtk_clipboard_get(C.GdkAtom(unsafe.Pointer(atom)))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := &Clipboard{obj}
	// Ref (not RefSink): a new reference is added for the Go wrapper
	// and released by the finalizer; GTK keeps its own reference.
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ClipboardGetForDisplay() is a wrapper around gtk_clipboard_get_for_display().
func ClipboardGetForDisplay(display *gdk.Display, atom gdk.Atom) (*Clipboard, error) {
	displayPtr := (*C.GdkDisplay)(unsafe.Pointer(display.Native()))
	c := C.gtk_clipboard_get_for_display(displayPtr,
		C.GdkAtom(unsafe.Pointer(atom)))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := &Clipboard{obj}
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// SetText() is a wrapper around gtk_clipboard_set_text().
func (v *Clipboard) SetText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_clipboard_set_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(text)))
}
/*
 * GtkComboBox
 */

// ComboBox is a representation of GTK's GtkComboBox.
type ComboBox struct {
	Bin

	// Interfaces implemented by GtkComboBox.
	CellLayout
}

// native returns a pointer to the underlying GtkComboBox, or nil for
// a nil receiver or a wrapper with no underlying GObject.
func (v *ComboBox) native() *C.GtkComboBox {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkComboBox(p)
}
// toCellLayout returns the underlying GObject reinterpreted as a
// GtkCellLayout, for use by CellLayout helpers. Returns nil for a nil
// receiver or a wrapper with no underlying GObject, matching the nil
// handling of native() (the original checked only v == nil and could
// pass a nil GObject through the cast).
func (v *ComboBox) toCellLayout() *C.GtkCellLayout {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkCellLayout(unsafe.Pointer(v.GObject))
}
// marshalComboBox is the GValue marshaler for GtkComboBox; p points
// at a GValue holding the object.
func marshalComboBox(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapComboBox(obj), nil
}

// wrapComboBox builds the Go wrapper hierarchy (including the embedded
// CellLayout interface wrapper) around an existing GObject.
func wrapComboBox(obj *glib.Object) *ComboBox {
	cl := wrapCellLayout(obj)
	return &ComboBox{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}, *cl}
}

// ComboBoxNew() is a wrapper around gtk_combo_box_new().
func ComboBoxNew() (*ComboBox, error) {
	c := C.gtk_combo_box_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	// Sink the floating reference and release it when the Go wrapper
	// is garbage collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ComboBoxNewWithEntry() is a wrapper around gtk_combo_box_new_with_entry().
func ComboBoxNewWithEntry() (*ComboBox, error) {
	c := C.gtk_combo_box_new_with_entry()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// ComboBoxNewWithModel() is a wrapper around gtk_combo_box_new_with_model().
func ComboBoxNewWithModel(model ITreeModel) (*ComboBox, error) {
	c := C.gtk_combo_box_new_with_model(model.toTreeModel())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	cb := wrapComboBox(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return cb, nil
}

// GetActive() is a wrapper around gtk_combo_box_get_active(). The
// returned value is the index of the active item (C convention: -1
// means no active item).
func (v *ComboBox) GetActive() int {
	c := C.gtk_combo_box_get_active(v.native())
	return int(c)
}

// SetActive() is a wrapper around gtk_combo_box_set_active().
func (v *ComboBox) SetActive(index int) {
	C.gtk_combo_box_set_active(v.native(), C.gint(index))
}
/*
 * GtkContainer
 */

// Container is a representation of GTK's GtkContainer.
type Container struct {
	Widget
}

// native returns a pointer to the underlying GtkContainer, or nil for
// a nil receiver or a wrapper with no underlying GObject.
func (v *Container) native() *C.GtkContainer {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkContainer(p)
}

// marshalContainer is the GValue marshaler for GtkContainer; p points
// at a GValue holding the object.
func marshalContainer(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapContainer(obj), nil
}

// wrapContainer builds a Container wrapper around an existing GObject.
func wrapContainer(obj *glib.Object) *Container {
	return &Container{Widget{glib.InitiallyUnowned{obj}}}
}

// Add is a wrapper around gtk_container_add().
func (v *Container) Add(w IWidget) {
	C.gtk_container_add(v.native(), w.toWidget())
}

// Remove is a wrapper around gtk_container_remove().
func (v *Container) Remove(w IWidget) {
	C.gtk_container_remove(v.native(), w.toWidget())
}

// TODO: gtk_container_add_with_properties

// CheckResize is a wrapper around gtk_container_check_resize().
func (v *Container) CheckResize() {
	C.gtk_container_check_resize(v.native())
}

// TODO: gtk_container_foreach
// TODO: gtk_container_get_children
// TODO: gtk_container_get_path_for_child

// SetReallocateRedraws is a wrapper around
// gtk_container_set_reallocate_redraws().
func (v *Container) SetReallocateRedraws(needsRedraws bool) {
	C.gtk_container_set_reallocate_redraws(v.native(), gbool(needsRedraws))
}

// GetFocusChild is a wrapper around gtk_container_get_focus_child().
// Returns nil if the container has no focus child.
func (v *Container) GetFocusChild() *Widget {
	c := C.gtk_container_get_focus_child(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}

// SetFocusChild is a wrapper around gtk_container_set_focus_child().
func (v *Container) SetFocusChild(child IWidget) {
	C.gtk_container_set_focus_child(v.native(), child.toWidget())
}

// GetFocusVAdjustment is a wrapper around
// gtk_container_get_focus_vadjustment(). Returns nil if no vertical
// focus adjustment has been set.
func (v *Container) GetFocusVAdjustment() *Adjustment {
	c := C.gtk_container_get_focus_vadjustment(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAdjustment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a
}

// SetFocusVAdjustment is a wrapper around
// gtk_container_set_focus_vadjustment().
func (v *Container) SetFocusVAdjustment(adjustment *Adjustment) {
	C.gtk_container_set_focus_vadjustment(v.native(), adjustment.native())
}

// GetFocusHAdjustment is a wrapper around
// gtk_container_get_focus_hadjustment(). Returns nil if no horizontal
// focus adjustment has been set.
func (v *Container) GetFocusHAdjustment() *Adjustment {
	c := C.gtk_container_get_focus_hadjustment(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := wrapAdjustment(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a
}

// SetFocusHAdjustment is a wrapper around
// gtk_container_set_focus_hadjustment().
func (v *Container) SetFocusHAdjustment(adjustment *Adjustment) {
	C.gtk_container_set_focus_hadjustment(v.native(), adjustment.native())
}

// ChildType is a wrapper around gtk_container_child_type().
func (v *Container) ChildType() glib.Type {
	c := C.gtk_container_child_type(v.native())
	return glib.Type(c)
}

// TODO: gtk_container_child_get_valist
// TODO: gtk_container_child_set_valist

// ChildNotify is a wrapper around gtk_container_child_notify().
func (v *Container) ChildNotify(child IWidget, childProperty string) {
	cstr := C.CString(childProperty)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_container_child_notify(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}

// TODO: gtk_container_forall

// GetBorderWidth is a wrapper around gtk_container_get_border_width().
func (v *Container) GetBorderWidth() uint {
	c := C.gtk_container_get_border_width(v.native())
	return uint(c)
}

// SetBorderWidth is a wrapper around gtk_container_set_border_width().
func (v *Container) SetBorderWidth(borderWidth uint) {
	C.gtk_container_set_border_width(v.native(), C.guint(borderWidth))
}

// PropagateDraw is a wrapper around gtk_container_propagate_draw().
func (v *Container) PropagateDraw(child IWidget, cr *cairo.Context) {
	context := (*C.cairo_t)(unsafe.Pointer(cr.Native()))
	C.gtk_container_propagate_draw(v.native(), child.toWidget(), context)
}
// GetFocusChain is a wrapper around gtk_container_get_focus_chain().
// It returns the widgets in the container's focus chain and whether a
// focus chain was explicitly set.
func (v *Container) GetFocusChain() ([]*Widget, bool) {
	var cwlist *C.GList
	c := C.gtk_container_get_focus_chain(v.native(), &cwlist)

	var widgets []*Widget
	// Walk until the NULL terminator. The previous loop tested
	// wlist.Data with wlist itself possibly nil, which panicked both
	// when no focus chain was set (cwlist == NULL) and after the last
	// node of a non-empty chain.
	for wlist := (*glib.List)(unsafe.Pointer(cwlist)); wlist != nil; wlist = wlist.Next {
		obj := &glib.Object{glib.ToGObject(unsafe.Pointer(wlist.Data))}
		w := wrapWidget(obj)
		obj.RefSink()
		runtime.SetFinalizer(obj, (*glib.Object).Unref)
		widgets = append(widgets, w)
	}
	return widgets, gobool(c)
}
// SetFocusChain is a wrapper around gtk_container_set_focus_chain().
// A temporary GList is built from focusableWidgets and handed to GTK.
// NOTE(review): the temporary list is not explicitly freed here —
// confirm whether glib.List allocations are reclaimed elsewhere.
func (v *Container) SetFocusChain(focusableWidgets []IWidget) {
	var list *glib.List
	for _, w := range focusableWidgets {
		data := uintptr(unsafe.Pointer(w.toWidget()))
		list = list.Append(data)
	}
	glist := (*C.GList)(unsafe.Pointer(list))
	C.gtk_container_set_focus_chain(v.native(), glist)
}
/*
 * GtkDialog
 */

// Dialog is a representation of GTK's GtkDialog.
type Dialog struct {
	Window
}

// native returns a pointer to the underlying GtkDialog, or nil for a
// nil receiver or a wrapper with no underlying GObject.
func (v *Dialog) native() *C.GtkDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkDialog(p)
}

// marshalDialog is the GValue marshaler for GtkDialog; p points at a
// GValue holding the object.
func marshalDialog(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapDialog(obj), nil
}

// wrapDialog builds the Go wrapper hierarchy around an existing
// GObject known to be a GtkDialog.
func wrapDialog(obj *glib.Object) *Dialog {
	return &Dialog{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}
}

// DialogNew() is a wrapper around gtk_dialog_new().
func DialogNew() (*Dialog, error) {
	c := C.gtk_dialog_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	d := wrapDialog(obj)
	// Sink the floating reference and release it when the Go wrapper
	// is garbage collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return d, nil
}

// Run() is a wrapper around gtk_dialog_run(). It blocks until the
// dialog emits a response and returns the response id.
func (v *Dialog) Run() int {
	c := C.gtk_dialog_run(v.native())
	return int(c)
}

// Response() is a wrapper around gtk_dialog_response().
func (v *Dialog) Response(response ResponseType) {
	C.gtk_dialog_response(v.native(), C.gint(response))
}

// AddButton() is a wrapper around gtk_dialog_add_button(). text may
// be either the literal button text, or if using GTK 3.8 or earlier, a
// Stock type converted to a string.
func (v *Dialog) AddButton(text string, id ResponseType) (*Button, error) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_dialog_add_button(v.native(), (*C.gchar)(cstr), C.gint(id))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := &Button{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// AddActionWidget() is a wrapper around gtk_dialog_add_action_widget().
func (v *Dialog) AddActionWidget(child IWidget, id ResponseType) {
	C.gtk_dialog_add_action_widget(v.native(), child.toWidget(), C.gint(id))
}

// SetDefaultResponse() is a wrapper around gtk_dialog_set_default_response().
func (v *Dialog) SetDefaultResponse(id ResponseType) {
	C.gtk_dialog_set_default_response(v.native(), C.gint(id))
}

// SetResponseSensitive() is a wrapper around
// gtk_dialog_set_response_sensitive().
func (v *Dialog) SetResponseSensitive(id ResponseType, setting bool) {
	C.gtk_dialog_set_response_sensitive(v.native(), C.gint(id),
		gbool(setting))
}

// GetResponseForWidget() is a wrapper around
// gtk_dialog_get_response_for_widget().
func (v *Dialog) GetResponseForWidget(widget IWidget) ResponseType {
	c := C.gtk_dialog_get_response_for_widget(v.native(), widget.toWidget())
	return ResponseType(c)
}

// GetWidgetForResponse() is a wrapper around
// gtk_dialog_get_widget_for_response().
func (v *Dialog) GetWidgetForResponse(id ResponseType) (*Widget, error) {
	c := C.gtk_dialog_get_widget_for_response(v.native(), C.gint(id))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}

// GetContentArea() is a wrapper around gtk_dialog_get_content_area().
func (v *Dialog) GetContentArea() (*Box, error) {
	c := C.gtk_dialog_get_content_area(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	b := &Box{Container{Widget{glib.InitiallyUnowned{obj}}}}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return b, nil
}

// TODO(jrick)
/*
func (v *gdk.Screen) AlternativeDialogButtonOrder() bool {
	c := C.gtk_alternative_dialog_button_order(v.native())
	return gobool(c)
}
*/

// TODO(jrick)
/*
func SetAlternativeButtonOrder(ids ...ResponseType) {
}
*/
/*
 * GtkDrawingArea
 */

// DrawingArea is a representation of GTK's GtkDrawingArea.
type DrawingArea struct {
	Widget
}

// native returns a pointer to the underlying GtkDrawingArea, or nil
// when the receiver or its GObject is nil.
func (v *DrawingArea) native() *C.GtkDrawingArea {
	if v == nil || v.GObject == nil {
		return nil
	}
	return C.toGtkDrawingArea(unsafe.Pointer(v.GObject))
}

// marshalDrawingArea is the GValue marshaler for GtkDrawingArea; p
// points at a GValue holding the object.
func marshalDrawingArea(p uintptr) (interface{}, error) {
	gval := (*C.GValue)(unsafe.Pointer(p))
	raw := C.g_value_get_object(gval)
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(raw))}
	return wrapDrawingArea(obj), nil
}

// wrapDrawingArea builds a DrawingArea wrapper around an existing
// GObject.
func wrapDrawingArea(obj *glib.Object) *DrawingArea {
	widget := Widget{glib.InitiallyUnowned{obj}}
	return &DrawingArea{widget}
}

// DrawingAreaNew is a wrapper around gtk_drawing_area_new().
func DrawingAreaNew() (*DrawingArea, error) {
	raw := C.gtk_drawing_area_new()
	if raw == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(raw))}
	// Sink the floating reference and release it when the Go wrapper
	// is garbage collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapDrawingArea(obj), nil
}
/*
 * GtkEditable
 */

// Editable is a representation of GTK's GtkEditable GInterface.
type Editable struct {
	*glib.Object
}

// IEditable is an interface type implemented by all structs
// embedding an Editable. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkEditable.
type IEditable interface {
	toEditable() *C.GtkEditable
}

// native() returns a pointer to the underlying GObject as a GtkEditable,
// or nil for a nil receiver or a wrapper with no underlying GObject.
func (v *Editable) native() *C.GtkEditable {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEditable(p)
}

// marshalEditable is the GValue marshaler for GtkEditable; p points
// at a GValue holding the object.
func marshalEditable(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEditable(obj), nil
}

// wrapEditable builds an Editable wrapper around an existing GObject.
func wrapEditable(obj *glib.Object) *Editable {
	return &Editable{obj}
}

// toEditable satisfies IEditable.
func (v *Editable) toEditable() *C.GtkEditable {
	if v == nil {
		return nil
	}
	return v.native()
}

// SelectRegion is a wrapper around gtk_editable_select_region().
func (v *Editable) SelectRegion(startPos, endPos int) {
	C.gtk_editable_select_region(v.native(), C.gint(startPos),
		C.gint(endPos))
}

// GetSelectionBounds is a wrapper around gtk_editable_get_selection_bounds().
func (v *Editable) GetSelectionBounds() (start, end int, nonEmpty bool) {
	var cstart, cend C.gint
	c := C.gtk_editable_get_selection_bounds(v.native(), &cstart, &cend)
	return int(cstart), int(cend), gobool(c)
}

// InsertText is a wrapper around gtk_editable_insert_text(). The returned
// int is the position after the inserted text.
func (v *Editable) InsertText(newText string, position int) int {
	cstr := C.CString(newText)
	defer C.free(unsafe.Pointer(cstr))
	pos := new(C.gint)
	*pos = C.gint(position)
	C.gtk_editable_insert_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(newText)), pos)
	return int(*pos)
}

// DeleteText is a wrapper around gtk_editable_delete_text().
func (v *Editable) DeleteText(startPos, endPos int) {
	C.gtk_editable_delete_text(v.native(), C.gint(startPos), C.gint(endPos))
}

// GetChars is a wrapper around gtk_editable_get_chars(). The C string
// is copied into Go memory and freed before returning.
func (v *Editable) GetChars(startPos, endPos int) string {
	c := C.gtk_editable_get_chars(v.native(), C.gint(startPos),
		C.gint(endPos))
	defer C.free(unsafe.Pointer(c))
	return C.GoString((*C.char)(c))
}

// CutClipboard is a wrapper around gtk_editable_cut_clipboard().
func (v *Editable) CutClipboard() {
	C.gtk_editable_cut_clipboard(v.native())
}

// CopyClipboard is a wrapper around gtk_editable_copy_clipboard().
func (v *Editable) CopyClipboard() {
	C.gtk_editable_copy_clipboard(v.native())
}

// PasteClipboard is a wrapper around gtk_editable_paste_clipboard().
func (v *Editable) PasteClipboard() {
	C.gtk_editable_paste_clipboard(v.native())
}

// DeleteSelection is a wrapper around gtk_editable_delete_selection().
func (v *Editable) DeleteSelection() {
	C.gtk_editable_delete_selection(v.native())
}

// SetPosition is a wrapper around gtk_editable_set_position().
func (v *Editable) SetPosition(position int) {
	C.gtk_editable_set_position(v.native(), C.gint(position))
}

// GetPosition is a wrapper around gtk_editable_get_position().
func (v *Editable) GetPosition() int {
	c := C.gtk_editable_get_position(v.native())
	return int(c)
}

// SetEditable is a wrapper around gtk_editable_set_editable().
func (v *Editable) SetEditable(isEditable bool) {
	C.gtk_editable_set_editable(v.native(), gbool(isEditable))
}

// GetEditable is a wrapper around gtk_editable_get_editable().
func (v *Editable) GetEditable() bool {
	c := C.gtk_editable_get_editable(v.native())
	return gobool(c)
}
/*
 * GtkEntry
 */

// Entry is a representation of GTK's GtkEntry.
type Entry struct {
	Widget

	// Interfaces implemented by GtkEntry.
	Editable
}

// IEntry is implemented by types embedding an Entry; used as an
// argument type for wrappers taking a GtkEntry.
type IEntry interface {
	toEntry() *C.GtkEntry
}

// toEntry satisfies IEntry.
func (v *Entry) toEntry() *C.GtkEntry {
	return v.native()
}

// native returns a pointer to the underlying GtkEntry, or nil for a
// nil receiver or a wrapper with no underlying GObject.
func (v *Entry) native() *C.GtkEntry {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkEntry(p)
}

// marshalEntry is the GValue marshaler for GtkEntry; p points at a
// GValue holding the object.
func marshalEntry(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapEntry(obj), nil
}

// wrapEntry builds the Go wrapper hierarchy (including the embedded
// Editable interface wrapper) around an existing GObject.
func wrapEntry(obj *glib.Object) *Entry {
	e := wrapEditable(obj)
	return &Entry{Widget{glib.InitiallyUnowned{obj}}, *e}
}

// EntryNew() is a wrapper around gtk_entry_new().
func EntryNew() (*Entry, error) {
	c := C.gtk_entry_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := wrapEntry(obj)
	// Sink the floating reference and release it when the Go wrapper
	// is garbage collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}

// EntryNewWithBuffer() is a wrapper around gtk_entry_new_with_buffer().
func EntryNewWithBuffer(buffer *EntryBuffer) (*Entry, error) {
	c := C.gtk_entry_new_with_buffer(buffer.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := wrapEntry(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}

// GetBuffer() is a wrapper around gtk_entry_get_buffer().
func (v *Entry) GetBuffer() (*EntryBuffer, error) {
	c := C.gtk_entry_get_buffer(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := &EntryBuffer{obj}
	// Ref (not RefSink): add a reference for the Go wrapper, released
	// by the finalizer.
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}

// SetBuffer() is a wrapper around gtk_entry_set_buffer().
func (v *Entry) SetBuffer(buffer *EntryBuffer) {
	C.gtk_entry_set_buffer(v.native(), buffer.native())
}

// SetText() is a wrapper around gtk_entry_set_text().
func (v *Entry) SetText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_text(v.native(), (*C.gchar)(cstr))
}

// GetText() is a wrapper around gtk_entry_get_text().
func (v *Entry) GetText() (string, error) {
	c := C.gtk_entry_get_text(v.native())
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}

// GetTextLength() is a wrapper around gtk_entry_get_text_length().
func (v *Entry) GetTextLength() uint16 {
	c := C.gtk_entry_get_text_length(v.native())
	return uint16(c)
}
// TODO(jrick) GdkRectangle
/*
func (v *Entry) GetTextArea() {
}
*/

// SetVisibility() is a wrapper around gtk_entry_set_visibility().
func (v *Entry) SetVisibility(visible bool) {
	C.gtk_entry_set_visibility(v.native(), gbool(visible))
}

// SetInvisibleChar() is a wrapper around gtk_entry_set_invisible_char().
func (v *Entry) SetInvisibleChar(ch rune) {
	C.gtk_entry_set_invisible_char(v.native(), C.gunichar(ch))
}

// UnsetInvisibleChar() is a wrapper around gtk_entry_unset_invisible_char().
func (v *Entry) UnsetInvisibleChar() {
	C.gtk_entry_unset_invisible_char(v.native())
}

// SetMaxLength() is a wrapper around gtk_entry_set_max_length().
func (v *Entry) SetMaxLength(len int) {
	C.gtk_entry_set_max_length(v.native(), C.gint(len))
}

// GetActivatesDefault() is a wrapper around gtk_entry_get_activates_default().
func (v *Entry) GetActivatesDefault() bool {
	c := C.gtk_entry_get_activates_default(v.native())
	return gobool(c)
}

// GetHasFrame() is a wrapper around gtk_entry_get_has_frame().
func (v *Entry) GetHasFrame() bool {
	c := C.gtk_entry_get_has_frame(v.native())
	return gobool(c)
}

// GetWidthChars() is a wrapper around gtk_entry_get_width_chars().
func (v *Entry) GetWidthChars() int {
	c := C.gtk_entry_get_width_chars(v.native())
	return int(c)
}

// SetActivatesDefault() is a wrapper around gtk_entry_set_activates_default().
func (v *Entry) SetActivatesDefault(setting bool) {
	C.gtk_entry_set_activates_default(v.native(), gbool(setting))
}

// SetHasFrame() is a wrapper around gtk_entry_set_has_frame().
func (v *Entry) SetHasFrame(setting bool) {
	C.gtk_entry_set_has_frame(v.native(), gbool(setting))
}

// SetWidthChars() is a wrapper around gtk_entry_set_width_chars().
func (v *Entry) SetWidthChars(nChars int) {
	C.gtk_entry_set_width_chars(v.native(), C.gint(nChars))
}

// GetInvisibleChar() is a wrapper around gtk_entry_get_invisible_char().
func (v *Entry) GetInvisibleChar() rune {
	c := C.gtk_entry_get_invisible_char(v.native())
	return rune(c)
}

// SetAlignment() is a wrapper around gtk_entry_set_alignment().
func (v *Entry) SetAlignment(xalign float32) {
	C.gtk_entry_set_alignment(v.native(), C.gfloat(xalign))
}

// GetAlignment() is a wrapper around gtk_entry_get_alignment().
func (v *Entry) GetAlignment() float32 {
	c := C.gtk_entry_get_alignment(v.native())
	return float32(c)
}

// SetPlaceholderText() is a wrapper around gtk_entry_set_placeholder_text().
func (v *Entry) SetPlaceholderText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_placeholder_text(v.native(), (*C.gchar)(cstr))
}

// GetPlaceholderText() is a wrapper around gtk_entry_get_placeholder_text().
func (v *Entry) GetPlaceholderText() (string, error) {
	c := C.gtk_entry_get_placeholder_text(v.native())
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}

// SetOverwriteMode() is a wrapper around gtk_entry_set_overwrite_mode().
func (v *Entry) SetOverwriteMode(overwrite bool) {
	C.gtk_entry_set_overwrite_mode(v.native(), gbool(overwrite))
}

// GetOverwriteMode() is a wrapper around gtk_entry_get_overwrite_mode().
func (v *Entry) GetOverwriteMode() bool {
	c := C.gtk_entry_get_overwrite_mode(v.native())
	return gobool(c)
}

// TODO(jrick) PangoLayout
/*
func (v *Entry) GetLayout() {
}
*/

// GetLayoutOffsets() is a wrapper around gtk_entry_get_layout_offsets().
func (v *Entry) GetLayoutOffsets() (x, y int) {
	var gx, gy C.gint
	C.gtk_entry_get_layout_offsets(v.native(), &gx, &gy)
	return int(gx), int(gy)
}

// LayoutIndexToTextIndex() is a wrapper around
// gtk_entry_layout_index_to_text_index().
func (v *Entry) LayoutIndexToTextIndex(layoutIndex int) int {
	c := C.gtk_entry_layout_index_to_text_index(v.native(),
		C.gint(layoutIndex))
	return int(c)
}

// TextIndexToLayoutIndex() is a wrapper around
// gtk_entry_text_index_to_layout_index().
func (v *Entry) TextIndexToLayoutIndex(textIndex int) int {
	c := C.gtk_entry_text_index_to_layout_index(v.native(),
		C.gint(textIndex))
	return int(c)
}

// TODO(jrick) PangoAttrList
/*
func (v *Entry) SetAttributes() {
}
*/

// TODO(jrick) PangoAttrList
/*
func (v *Entry) GetAttributes() {
}
*/

// GetMaxLength() is a wrapper around gtk_entry_get_max_length().
func (v *Entry) GetMaxLength() int {
	c := C.gtk_entry_get_max_length(v.native())
	return int(c)
}

// GetVisibility() is a wrapper around gtk_entry_get_visibility().
func (v *Entry) GetVisibility() bool {
	c := C.gtk_entry_get_visibility(v.native())
	return gobool(c)
}
// SetCompletion() is a wrapper around gtk_entry_set_completion().
func (v *Entry) SetCompletion(completion *EntryCompletion) {
	C.gtk_entry_set_completion(v.native(), completion.native())
}

// GetCompletion() is a wrapper around gtk_entry_get_completion().
func (v *Entry) GetCompletion() (*EntryCompletion, error) {
	c := C.gtk_entry_get_completion(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := &EntryCompletion{obj}
	// Ref (not RefSink): add a reference for the Go wrapper, released
	// by the finalizer.
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}

// SetCursorHAdjustment() is a wrapper around
// gtk_entry_set_cursor_hadjustment().
func (v *Entry) SetCursorHAdjustment(adjustment *Adjustment) {
	C.gtk_entry_set_cursor_hadjustment(v.native(), adjustment.native())
}

// GetCursorHAdjustment() is a wrapper around
// gtk_entry_get_cursor_hadjustment().
func (v *Entry) GetCursorHAdjustment() (*Adjustment, error) {
	c := C.gtk_entry_get_cursor_hadjustment(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	a := &Adjustment{glib.InitiallyUnowned{obj}}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return a, nil
}

// SetProgressFraction() is a wrapper around gtk_entry_set_progress_fraction().
func (v *Entry) SetProgressFraction(fraction float64) {
	C.gtk_entry_set_progress_fraction(v.native(), C.gdouble(fraction))
}

// GetProgressFraction() is a wrapper around gtk_entry_get_progress_fraction().
func (v *Entry) GetProgressFraction() float64 {
	c := C.gtk_entry_get_progress_fraction(v.native())
	return float64(c)
}

// SetProgressPulseStep() is a wrapper around
// gtk_entry_set_progress_pulse_step().
func (v *Entry) SetProgressPulseStep(fraction float64) {
	C.gtk_entry_set_progress_pulse_step(v.native(), C.gdouble(fraction))
}

// GetProgressPulseStep() is a wrapper around
// gtk_entry_get_progress_pulse_step().
func (v *Entry) GetProgressPulseStep() float64 {
	c := C.gtk_entry_get_progress_pulse_step(v.native())
	return float64(c)
}

// ProgressPulse() is a wrapper around gtk_entry_progress_pulse().
func (v *Entry) ProgressPulse() {
	C.gtk_entry_progress_pulse(v.native())
}

// TODO(jrick) GdkEventKey
/*
func (v *Entry) IMContextFilterKeypress() {
}
*/

// ResetIMContext() is a wrapper around gtk_entry_reset_im_context().
func (v *Entry) ResetIMContext() {
	C.gtk_entry_reset_im_context(v.native())
}

// TODO(jrick) GdkPixbuf
/*
func (v *Entry) SetIconFromPixbuf() {
}
*/

// SetIconFromIconName() is a wrapper around
// gtk_entry_set_icon_from_icon_name().
func (v *Entry) SetIconFromIconName(iconPos EntryIconPosition, name string) {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_entry_set_icon_from_icon_name(v.native(),
		C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}

// TODO(jrick) GIcon
/*
func (v *Entry) SetIconFromGIcon() {
}
*/

// GetIconStorageType() is a wrapper around gtk_entry_get_icon_storage_type().
func (v *Entry) GetIconStorageType(iconPos EntryIconPosition) ImageType {
	c := C.gtk_entry_get_icon_storage_type(v.native(),
		C.GtkEntryIconPosition(iconPos))
	return ImageType(c)
}

// TODO(jrick) GdkPixbuf
/*
func (v *Entry) GetIconPixbuf() {
}
*/

// GetIconName() is a wrapper around gtk_entry_get_icon_name().
func (v *Entry) GetIconName(iconPos EntryIconPosition) (string, error) {
	c := C.gtk_entry_get_icon_name(v.native(),
		C.GtkEntryIconPosition(iconPos))
	if c == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(c)), nil
}

// TODO(jrick) GIcon
/*
func (v *Entry) GetIconGIcon() {
}
*/

// SetIconActivatable() is a wrapper around gtk_entry_set_icon_activatable().
func (v *Entry) SetIconActivatable(iconPos EntryIconPosition, activatable bool) {
	C.gtk_entry_set_icon_activatable(v.native(),
		C.GtkEntryIconPosition(iconPos), gbool(activatable))
}

// GetIconActivatable() is a wrapper around gtk_entry_get_icon_activatable().
func (v *Entry) GetIconActivatable(iconPos EntryIconPosition) bool {
	c := C.gtk_entry_get_icon_activatable(v.native(),
		C.GtkEntryIconPosition(iconPos))
	return gobool(c)
}

// SetIconSensitive() is a wrapper around gtk_entry_set_icon_sensitive().
func (v *Entry) SetIconSensitive(iconPos EntryIconPosition, sensitive bool) {
	C.gtk_entry_set_icon_sensitive(v.native(),
		C.GtkEntryIconPosition(iconPos), gbool(sensitive))
}

// GetIconSensitive() is a wrapper around gtk_entry_get_icon_sensitive().
func (v *Entry) GetIconSensitive(iconPos EntryIconPosition) bool {
	c := C.gtk_entry_get_icon_sensitive(v.native(),
		C.GtkEntryIconPosition(iconPos))
	return gobool(c)
}
// GetIconAtPos() is a wrapper around gtk_entry_get_icon_at_pos().
func (v *Entry) GetIconAtPos(x, y int) int {
c := C.gtk_entry_get_icon_at_pos(v.native(), C.gint(x), C.gint(y))
return int(c)
}
// SetIconTooltipText() is a wrapper around gtk_entry_set_icon_tooltip_text().
func (v *Entry) SetIconTooltipText(iconPos EntryIconPosition, tooltip string) {
cstr := C.CString(tooltip)
defer C.free(unsafe.Pointer(cstr))
C.gtk_entry_set_icon_tooltip_text(v.native(),
C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}
// GetIconTooltipText() is a wrapper around gtk_entry_get_icon_tooltip_text().
func (v *Entry) GetIconTooltipText(iconPos EntryIconPosition) (string, error) {
c := C.gtk_entry_get_icon_tooltip_text(v.native(),
C.GtkEntryIconPosition(iconPos))
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// SetIconTooltipMarkup() is a wrapper around
// gtk_entry_set_icon_tooltip_markup().
func (v *Entry) SetIconTooltipMarkup(iconPos EntryIconPosition, tooltip string) {
cstr := C.CString(tooltip)
defer C.free(unsafe.Pointer(cstr))
C.gtk_entry_set_icon_tooltip_markup(v.native(),
C.GtkEntryIconPosition(iconPos), (*C.gchar)(cstr))
}
// GetIconTooltipMarkup() is a wrapper around
// gtk_entry_get_icon_tooltip_markup().
func (v *Entry) GetIconTooltipMarkup(iconPos EntryIconPosition) (string, error) {
c := C.gtk_entry_get_icon_tooltip_markup(v.native(),
C.GtkEntryIconPosition(iconPos))
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// TODO(jrick) GdkDragAction
/*
func (v *Entry) SetIconDragSource() {
}
*/
// GetCurrentIconDragSource() is a wrapper around
// gtk_entry_get_current_icon_drag_source().
func (v *Entry) GetCurrentIconDragSource() int {
c := C.gtk_entry_get_current_icon_drag_source(v.native())
return int(c)
}
// TODO(jrick) GdkRectangle
/*
func (v *Entry) GetIconArea() {
}
*/
// SetInputPurpose() is a wrapper around gtk_entry_set_input_purpose().
func (v *Entry) SetInputPurpose(purpose InputPurpose) {
C.gtk_entry_set_input_purpose(v.native(), C.GtkInputPurpose(purpose))
}
// GetInputPurpose() is a wrapper around gtk_entry_get_input_purpose().
func (v *Entry) GetInputPurpose() InputPurpose {
c := C.gtk_entry_get_input_purpose(v.native())
return InputPurpose(c)
}
// SetInputHints() is a wrapper around gtk_entry_set_input_hints().
func (v *Entry) SetInputHints(hints InputHints) {
C.gtk_entry_set_input_hints(v.native(), C.GtkInputHints(hints))
}
// GetInputHints() is a wrapper around gtk_entry_get_input_hints().
func (v *Entry) GetInputHints() InputHints {
c := C.gtk_entry_get_input_hints(v.native())
return InputHints(c)
}
/*
 * GtkEntryBuffer
 */

// EntryBuffer is a representation of GTK's GtkEntryBuffer.
type EntryBuffer struct {
    *glib.Object
}

// native returns a pointer to the underlying GtkEntryBuffer.
func (v *EntryBuffer) native() *C.GtkEntryBuffer {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkEntryBuffer(unsafe.Pointer(v.GObject))
}

func marshalEntryBuffer(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapEntryBuffer(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapEntryBuffer(obj *glib.Object) *EntryBuffer {
    return &EntryBuffer{obj}
}

// EntryBufferNew() is a wrapper around gtk_entry_buffer_new().
func EntryBufferNew(initialChars string, nInitialChars int) (*EntryBuffer, error) {
    cs := C.CString(initialChars)
    defer C.free(unsafe.Pointer(cs))

    c := C.gtk_entry_buffer_new((*C.gchar)(cs), C.gint(nInitialChars))
    if c == nil {
        return nil, nilPtrErr
    }

    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    buf := wrapEntryBuffer(obj)
    obj.Ref()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return buf, nil
}

// GetText() is a wrapper around gtk_entry_buffer_get_text().  A non-nil
// error is returned when gtk_entry_buffer_get_text returns NULL, to
// differentiate NULL from an empty string.
func (v *EntryBuffer) GetText() (string, error) {
    c := C.gtk_entry_buffer_get_text(v.native())
    if c == nil {
        return "", nilPtrErr
    }
    return C.GoString((*C.char)(c)), nil
}

// SetText() is a wrapper around gtk_entry_buffer_set_text().
func (v *EntryBuffer) SetText(text string) {
    cs := C.CString(text)
    defer C.free(unsafe.Pointer(cs))
    C.gtk_entry_buffer_set_text(v.native(), (*C.gchar)(cs), C.gint(len(text)))
}

// GetBytes() is a wrapper around gtk_entry_buffer_get_bytes().
func (v *EntryBuffer) GetBytes() uint {
    return uint(C.gtk_entry_buffer_get_bytes(v.native()))
}

// GetLength() is a wrapper around gtk_entry_buffer_get_length().
func (v *EntryBuffer) GetLength() uint {
    return uint(C.gtk_entry_buffer_get_length(v.native()))
}

// GetMaxLength() is a wrapper around gtk_entry_buffer_get_max_length().
func (v *EntryBuffer) GetMaxLength() int {
    return int(C.gtk_entry_buffer_get_max_length(v.native()))
}

// SetMaxLength() is a wrapper around gtk_entry_buffer_set_max_length().
func (v *EntryBuffer) SetMaxLength(maxLength int) {
    C.gtk_entry_buffer_set_max_length(v.native(), C.gint(maxLength))
}

// InsertText() is a wrapper around gtk_entry_buffer_insert_text().
func (v *EntryBuffer) InsertText(position uint, text string) uint {
    cs := C.CString(text)
    defer C.free(unsafe.Pointer(cs))
    inserted := C.gtk_entry_buffer_insert_text(v.native(), C.guint(position),
        (*C.gchar)(cs), C.gint(len(text)))
    return uint(inserted)
}

// DeleteText() is a wrapper around gtk_entry_buffer_delete_text().
func (v *EntryBuffer) DeleteText(position uint, nChars int) uint {
    return uint(C.gtk_entry_buffer_delete_text(v.native(),
        C.guint(position), C.gint(nChars)))
}

// EmitDeletedText() is a wrapper around gtk_entry_buffer_emit_deleted_text().
func (v *EntryBuffer) EmitDeletedText(pos, nChars uint) {
    C.gtk_entry_buffer_emit_deleted_text(v.native(), C.guint(pos),
        C.guint(nChars))
}

// EmitInsertedText() is a wrapper around gtk_entry_buffer_emit_inserted_text().
func (v *EntryBuffer) EmitInsertedText(pos uint, text string) {
    cs := C.CString(text)
    defer C.free(unsafe.Pointer(cs))
    C.gtk_entry_buffer_emit_inserted_text(v.native(), C.guint(pos),
        (*C.gchar)(cs), C.guint(len(text)))
}
/*
 * GtkEntryCompletion
 */

// EntryCompletion is a representation of GTK's GtkEntryCompletion.
type EntryCompletion struct {
    *glib.Object
}

// native returns a pointer to the underlying GtkEntryCompletion.
func (v *EntryCompletion) native() *C.GtkEntryCompletion {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkEntryCompletion(unsafe.Pointer(v.GObject))
}

func marshalEntryCompletion(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapEntryCompletion(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapEntryCompletion(obj *glib.Object) *EntryCompletion {
    return &EntryCompletion{obj}
}
/*
 * GtkEventBox
 */

// EventBox is a representation of GTK's GtkEventBox.
type EventBox struct {
    Bin
}

// native returns a pointer to the underlying GtkEventBox.
func (v *EventBox) native() *C.GtkEventBox {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkEventBox(unsafe.Pointer(v.GObject))
}

func marshalEventBox(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapEventBox(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapEventBox(obj *glib.Object) *EventBox {
    return &EventBox{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// EventBoxNew is a wrapper around gtk_event_box_new().
func EventBoxNew() (*EventBox, error) {
    c := C.gtk_event_box_new()
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    eb := wrapEventBox(obj)
    // Sink the floating reference and tie its release to the Go wrapper.
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return eb, nil
}

// SetAboveChild is a wrapper around gtk_event_box_set_above_child().
func (v *EventBox) SetAboveChild(aboveChild bool) {
    C.gtk_event_box_set_above_child(v.native(), gbool(aboveChild))
}

// GetAboveChild is a wrapper around gtk_event_box_get_above_child().
func (v *EventBox) GetAboveChild() bool {
    return gobool(C.gtk_event_box_get_above_child(v.native()))
}

// SetVisibleWindow is a wrapper around gtk_event_box_set_visible_window().
func (v *EventBox) SetVisibleWindow(visibleWindow bool) {
    C.gtk_event_box_set_visible_window(v.native(), gbool(visibleWindow))
}

// GetVisibleWindow is a wrapper around gtk_event_box_get_visible_window().
func (v *EventBox) GetVisibleWindow() bool {
    return gobool(C.gtk_event_box_get_visible_window(v.native()))
}
/*
 * GtkFileChooser
 */

// FileChooser is a representation of GTK's GtkFileChooser GInterface.
type FileChooser struct {
    *glib.Object
}

// native returns a pointer to the underlying GObject as a GtkFileChooser.
func (v *FileChooser) native() *C.GtkFileChooser {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkFileChooser(unsafe.Pointer(v.GObject))
}

func marshalFileChooser(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapFileChooser(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapFileChooser(obj *glib.Object) *FileChooser {
    return &FileChooser{obj}
}

// GetFilename is a wrapper around gtk_file_chooser_get_filename().
func (v *FileChooser) GetFilename() string {
    c := C.gtk_file_chooser_get_filename(v.native())
    // The returned string is owned by the caller; free it after copying
    // into a Go string.
    defer C.g_free((C.gpointer)(c))
    return C.GoString((*C.char)(c))
}
/*
 * GtkFileChooserButton
 */

// FileChooserButton is a representation of GTK's GtkFileChooserButton.
type FileChooserButton struct {
    Box

    // Interfaces
    FileChooser
}

// native returns a pointer to the underlying GtkFileChooserButton.
func (v *FileChooserButton) native() *C.GtkFileChooserButton {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkFileChooserButton(unsafe.Pointer(v.GObject))
}

func marshalFileChooserButton(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapFileChooserButton(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapFileChooserButton(obj *glib.Object) *FileChooserButton {
    return &FileChooserButton{
        Box{Container{Widget{glib.InitiallyUnowned{obj}}}},
        *wrapFileChooser(obj),
    }
}

// FileChooserButtonNew is a wrapper around gtk_file_chooser_button_new().
func FileChooserButtonNew(title string, action FileChooserAction) (*FileChooserButton, error) {
    cs := C.CString(title)
    defer C.free(unsafe.Pointer(cs))

    c := C.gtk_file_chooser_button_new((*C.gchar)(cs),
        (C.GtkFileChooserAction)(action))
    if c == nil {
        return nil, nilPtrErr
    }

    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    fcb := wrapFileChooserButton(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return fcb, nil
}
/*
 * GtkFileChooserWidget
 */

// FileChooserWidget is a representation of GTK's GtkFileChooserWidget.
type FileChooserWidget struct {
    Box

    // Interfaces
    FileChooser
}

// native returns a pointer to the underlying GtkFileChooserWidget.
func (v *FileChooserWidget) native() *C.GtkFileChooserWidget {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkFileChooserWidget(unsafe.Pointer(v.GObject))
}

func marshalFileChooserWidget(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapFileChooserWidget(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapFileChooserWidget(obj *glib.Object) *FileChooserWidget {
    return &FileChooserWidget{
        Box{Container{Widget{glib.InitiallyUnowned{obj}}}},
        *wrapFileChooser(obj),
    }
}

// FileChooserWidgetNew is a wrapper around gtk_file_chooser_widget_new().
func FileChooserWidgetNew(action FileChooserAction) (*FileChooserWidget, error) {
    c := C.gtk_file_chooser_widget_new((C.GtkFileChooserAction)(action))
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    fcw := wrapFileChooserWidget(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return fcw, nil
}
/*
 * GtkFrame
 */

// Frame is a representation of GTK's GtkFrame.
type Frame struct {
    Bin
}

// native returns a pointer to the underlying GtkFrame.
func (v *Frame) native() *C.GtkFrame {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkFrame(unsafe.Pointer(v.GObject))
}

func marshalFrame(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapFrame(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapFrame(obj *glib.Object) *Frame {
    return &Frame{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// FrameNew is a wrapper around gtk_frame_new().
func FrameNew(label string) (*Frame, error) {
    cs := C.CString(label)
    defer C.free(unsafe.Pointer(cs))

    c := C.gtk_frame_new((*C.gchar)(cs))
    if c == nil {
        return nil, nilPtrErr
    }

    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    fr := wrapFrame(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return fr, nil
}

// SetLabel is a wrapper around gtk_frame_set_label().
func (v *Frame) SetLabel(label string) {
    cs := C.CString(label)
    defer C.free(unsafe.Pointer(cs))
    C.gtk_frame_set_label(v.native(), (*C.gchar)(cs))
}

// SetLabelWidget is a wrapper around gtk_frame_set_label_widget().
func (v *Frame) SetLabelWidget(labelWidget IWidget) {
    C.gtk_frame_set_label_widget(v.native(), labelWidget.toWidget())
}

// SetLabelAlign is a wrapper around gtk_frame_set_label_align().
func (v *Frame) SetLabelAlign(xAlign, yAlign float32) {
    C.gtk_frame_set_label_align(v.native(), C.gfloat(xAlign), C.gfloat(yAlign))
}

// SetShadowType is a wrapper around gtk_frame_set_shadow_type().
func (v *Frame) SetShadowType(t ShadowType) {
    C.gtk_frame_set_shadow_type(v.native(), C.GtkShadowType(t))
}

// GetLabel is a wrapper around gtk_frame_get_label().
func (v *Frame) GetLabel() string {
    return C.GoString((*C.char)(C.gtk_frame_get_label(v.native())))
}

// GetLabelAlign is a wrapper around gtk_frame_get_label_align().
func (v *Frame) GetLabelAlign() (xAlign, yAlign float32) {
    var cx, cy C.gfloat
    C.gtk_frame_get_label_align(v.native(), &cx, &cy)
    return float32(cx), float32(cy)
}

// GetLabelWidget is a wrapper around gtk_frame_get_label_widget().
func (v *Frame) GetLabelWidget() (*Widget, error) {
    c := C.gtk_frame_get_label_widget(v.native())
    if c == nil {
        return nil, nilPtrErr
    }
    o := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    widget := wrapWidget(o)
    o.RefSink()
    runtime.SetFinalizer(o, (*glib.Object).Unref)
    return widget, nil
}

// GetShadowType is a wrapper around gtk_frame_get_shadow_type().
func (v *Frame) GetShadowType() ShadowType {
    return ShadowType(C.gtk_frame_get_shadow_type(v.native()))
}
/*
 * GtkGrid
 */

// Grid is a representation of GTK's GtkGrid.
type Grid struct {
    Container

    // Interfaces
    Orientable
}

// native returns a pointer to the underlying GtkGrid.
func (v *Grid) native() *C.GtkGrid {
    if v == nil || v.GObject == nil {
        return nil
    }
    return C.toGtkGrid(unsafe.Pointer(v.GObject))
}

func (v *Grid) toOrientable() *C.GtkOrientable {
    if v == nil {
        return nil
    }
    return C.toGtkOrientable(unsafe.Pointer(v.GObject))
}

func marshalGrid(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    return wrapGrid(&glib.Object{glib.ToGObject(unsafe.Pointer(c))}), nil
}

func wrapGrid(obj *glib.Object) *Grid {
    return &Grid{
        Container{Widget{glib.InitiallyUnowned{obj}}},
        *wrapOrientable(obj),
    }
}

// GridNew() is a wrapper around gtk_grid_new().
func GridNew() (*Grid, error) {
    c := C.gtk_grid_new()
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    grid := wrapGrid(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return grid, nil
}

// Attach() is a wrapper around gtk_grid_attach().
func (v *Grid) Attach(child IWidget, left, top, width, height int) {
    C.gtk_grid_attach(v.native(), child.toWidget(), C.gint(left),
        C.gint(top), C.gint(width), C.gint(height))
}

// AttachNextTo() is a wrapper around gtk_grid_attach_next_to().
func (v *Grid) AttachNextTo(child, sibling IWidget, side PositionType, width, height int) {
    C.gtk_grid_attach_next_to(v.native(), child.toWidget(),
        sibling.toWidget(), C.GtkPositionType(side), C.gint(width),
        C.gint(height))
}

// GetChildAt() is a wrapper around gtk_grid_get_child_at().
func (v *Grid) GetChildAt(left, top int) (*Widget, error) {
    c := C.gtk_grid_get_child_at(v.native(), C.gint(left), C.gint(top))
    if c == nil {
        return nil, nilPtrErr
    }
    o := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    child := wrapWidget(o)
    o.RefSink()
    runtime.SetFinalizer(o, (*glib.Object).Unref)
    return child, nil
}

// InsertRow() is a wrapper around gtk_grid_insert_row().
func (v *Grid) InsertRow(position int) {
    C.gtk_grid_insert_row(v.native(), C.gint(position))
}

// InsertColumn() is a wrapper around gtk_grid_insert_column().
func (v *Grid) InsertColumn(position int) {
    C.gtk_grid_insert_column(v.native(), C.gint(position))
}

// InsertNextTo() is a wrapper around gtk_grid_insert_next_to()
func (v *Grid) InsertNextTo(sibling IWidget, side PositionType) {
    C.gtk_grid_insert_next_to(v.native(), sibling.toWidget(),
        C.GtkPositionType(side))
}

// SetRowHomogeneous() is a wrapper around gtk_grid_set_row_homogeneous().
func (v *Grid) SetRowHomogeneous(homogeneous bool) {
    C.gtk_grid_set_row_homogeneous(v.native(), gbool(homogeneous))
}

// GetRowHomogeneous() is a wrapper around gtk_grid_get_row_homogeneous().
func (v *Grid) GetRowHomogeneous() bool {
    return gobool(C.gtk_grid_get_row_homogeneous(v.native()))
}

// SetRowSpacing() is a wrapper around gtk_grid_set_row_spacing().
func (v *Grid) SetRowSpacing(spacing uint) {
    C.gtk_grid_set_row_spacing(v.native(), C.guint(spacing))
}

// GetRowSpacing() is a wrapper around gtk_grid_get_row_spacing().
func (v *Grid) GetRowSpacing() uint {
    return uint(C.gtk_grid_get_row_spacing(v.native()))
}

// SetColumnHomogeneous() is a wrapper around gtk_grid_set_column_homogeneous().
func (v *Grid) SetColumnHomogeneous(homogeneous bool) {
    C.gtk_grid_set_column_homogeneous(v.native(), gbool(homogeneous))
}

// GetColumnHomogeneous() is a wrapper around gtk_grid_get_column_homogeneous().
func (v *Grid) GetColumnHomogeneous() bool {
    return gobool(C.gtk_grid_get_column_homogeneous(v.native()))
}

// SetColumnSpacing() is a wrapper around gtk_grid_set_column_spacing().
func (v *Grid) SetColumnSpacing(spacing uint) {
    C.gtk_grid_set_column_spacing(v.native(), C.guint(spacing))
}

// GetColumnSpacing() is a wrapper around gtk_grid_get_column_spacing().
func (v *Grid) GetColumnSpacing() uint {
    return uint(C.gtk_grid_get_column_spacing(v.native()))
}
/*
 * GtkImage
 */

// Image is a representation of GTK's GtkImage.
type Image struct {
    Misc
}

// native returns a pointer to the underlying GtkImage.
func (v *Image) native() *C.GtkImage {
    if v == nil || v.GObject == nil {
        return nil
    }
    p := unsafe.Pointer(v.GObject)
    return C.toGtkImage(p)
}

func marshalImage(p uintptr) (interface{}, error) {
    c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    return wrapImage(obj), nil
}

func wrapImage(obj *glib.Object) *Image {
    return &Image{Misc{Widget{glib.InitiallyUnowned{obj}}}}
}

// ImageNew() is a wrapper around gtk_image_new().
func ImageNew() (*Image, error) {
    c := C.gtk_image_new()
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    i := wrapImage(obj)
    // Sink the widget's floating reference; release on GC of the wrapper.
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return i, nil
}

// ImageNewFromFile() is a wrapper around gtk_image_new_from_file().
func ImageNewFromFile(filename string) (*Image, error) {
    cstr := C.CString(filename)
    defer C.free(unsafe.Pointer(cstr))
    c := C.gtk_image_new_from_file((*C.gchar)(cstr))
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    i := wrapImage(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return i, nil
}

// ImageNewFromResource() is a wrapper around gtk_image_new_from_resource().
func ImageNewFromResource(resourcePath string) (*Image, error) {
    cstr := C.CString(resourcePath)
    defer C.free(unsafe.Pointer(cstr))
    c := C.gtk_image_new_from_resource((*C.gchar)(cstr))
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    i := wrapImage(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return i, nil
}

// ImageNewFromPixbuf is a wrapper around gtk_image_new_from_pixbuf().
func ImageNewFromPixbuf(pixbuf *gdk.Pixbuf) (*Image, error) {
    ptr := (*C.GdkPixbuf)(unsafe.Pointer(pixbuf.Native()))
    c := C.gtk_image_new_from_pixbuf(ptr)
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    i := wrapImage(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return i, nil
}

// TODO(jrick) GtkIconSet
/*
func ImageNewFromIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func ImageNewFromAnimation() {
}
*/

// ImageNewFromIconName() is a wrapper around gtk_image_new_from_icon_name().
func ImageNewFromIconName(iconName string, size IconSize) (*Image, error) {
    cstr := C.CString(iconName)
    defer C.free(unsafe.Pointer(cstr))
    c := C.gtk_image_new_from_icon_name((*C.gchar)(cstr),
        C.GtkIconSize(size))
    if c == nil {
        return nil, nilPtrErr
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    i := wrapImage(obj)
    obj.RefSink()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return i, nil
}

// TODO(jrick) GIcon
/*
func ImageNewFromGIcon() {
}
*/

// Clear() is a wrapper around gtk_image_clear().
func (v *Image) Clear() {
    C.gtk_image_clear(v.native())
}

// SetFromFile() is a wrapper around gtk_image_set_from_file().
func (v *Image) SetFromFile(filename string) {
    cstr := C.CString(filename)
    defer C.free(unsafe.Pointer(cstr))
    C.gtk_image_set_from_file(v.native(), (*C.gchar)(cstr))
}

// SetFromResource() is a wrapper around gtk_image_set_from_resource().
func (v *Image) SetFromResource(resourcePath string) {
    cstr := C.CString(resourcePath)
    defer C.free(unsafe.Pointer(cstr))
    C.gtk_image_set_from_resource(v.native(), (*C.gchar)(cstr))
}

// SetFromPixbuf is a wrapper around gtk_image_set_from_pixbuf().
func (v *Image) SetFromPixbuf(pixbuf *gdk.Pixbuf) {
    pbptr := (*C.GdkPixbuf)(unsafe.Pointer(pixbuf.Native()))
    C.gtk_image_set_from_pixbuf(v.native(), pbptr)
}

// TODO(jrick) GtkIconSet
/*
func (v *Image) SetFromIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func (v *Image) SetFromAnimation() {
}
*/

// SetFromIconName() is a wrapper around gtk_image_set_from_icon_name().
func (v *Image) SetFromIconName(iconName string, size IconSize) {
    cstr := C.CString(iconName)
    defer C.free(unsafe.Pointer(cstr))
    C.gtk_image_set_from_icon_name(v.native(), (*C.gchar)(cstr),
        C.GtkIconSize(size))
}

// TODO(jrick) GIcon
/*
func (v *Image) SetFromGIcon() {
}
*/

// SetPixelSize() is a wrapper around gtk_image_set_pixel_size().
func (v *Image) SetPixelSize(pixelSize int) {
    C.gtk_image_set_pixel_size(v.native(), C.gint(pixelSize))
}

// GetStorageType() is a wrapper around gtk_image_get_storage_type().
func (v *Image) GetStorageType() ImageType {
    c := C.gtk_image_get_storage_type(v.native())
    return ImageType(c)
}

// GetPixbuf() is a wrapper around gtk_image_get_pixbuf().  A nil
// *gdk.Pixbuf is returned when the image has no pixbuf.
func (v *Image) GetPixbuf() *gdk.Pixbuf {
    c := C.gtk_image_get_pixbuf(v.native())
    if c == nil {
        return nil
    }
    obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
    pb := &gdk.Pixbuf{obj}
    // NOTE(review): Ref() assumes gtk_image_get_pixbuf returns a borrowed
    // (transfer-none) pixbuf, so the wrapper takes its own reference to
    // balance the finalizer's Unref — confirm against the GTK docs.
    obj.Ref()
    runtime.SetFinalizer(obj, (*glib.Object).Unref)
    return pb
}

// TODO(jrick) GtkIconSet
/*
func (v *Image) GetIconSet() {
}
*/

// TODO(jrick) GdkPixbufAnimation
/*
func (v *Image) GetAnimation() {
}
*/

// GetIconName() is a wrapper around gtk_image_get_icon_name().
func (v *Image) GetIconName() (string, IconSize) {
    var iconName *C.gchar
    var size C.GtkIconSize
    C.gtk_image_get_icon_name(v.native(), &iconName, &size)
    return C.GoString((*C.char)(iconName)), IconSize(size)
}

// TODO(jrick) GIcon
/*
func (v *Image) GetGIcon() {
}
*/

// GetPixelSize() is a wrapper around gtk_image_get_pixel_size().
func (v *Image) GetPixelSize() int {
    c := C.gtk_image_get_pixel_size(v.native())
    return int(c)
}
/*
* GtkLabel
*/
// Label is a representation of GTK's GtkLabel.
type Label struct {
Misc
}
// native returns a pointer to the underlying GtkLabel.
func (v *Label) native() *C.GtkLabel {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkLabel(p)
}
func marshalLabel(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapLabel(obj), nil
}
func wrapLabel(obj *glib.Object) *Label {
return &Label{Misc{Widget{glib.InitiallyUnowned{obj}}}}
}
// LabelNew is a wrapper around gtk_label_new().
func LabelNew(str string) (*Label, error) {
cstr := C.CString(str)
defer C.free(unsafe.Pointer(cstr))
c := C.gtk_label_new((*C.gchar)(cstr))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
l := wrapLabel(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return l, nil
}
// SetText is a wrapper around gtk_label_set_text().
func (v *Label) SetText(str string) {
cstr := C.CString(str)
defer C.free(unsafe.Pointer(cstr))
C.gtk_label_set_text(v.native(), (*C.gchar)(cstr))
}
// SetMarkup is a wrapper around gtk_label_set_markup().
func (v *Label) SetMarkup(str string) {
cstr := C.CString(str)
defer C.free(unsafe.Pointer(cstr))
C.gtk_label_set_markup(v.native(), (*C.gchar)(cstr))
}
// SetMarkupWithMnemonic is a wrapper around
// gtk_label_set_markup_with_mnemonic().
func (v *Label) SetMarkupWithMnemonic(str string) {
cstr := C.CString(str)
defer C.free(unsafe.Pointer(cstr))
C.gtk_label_set_markup_with_mnemonic(v.native(), (*C.gchar)(cstr))
}
// SetPattern is a wrapper around gtk_label_set_pattern().
func (v *Label) SetPattern(patern string) {
cstr := C.CString(patern)
defer C.free(unsafe.Pointer(cstr))
C.gtk_label_set_pattern(v.native(), (*C.gchar)(cstr))
}
// SetJustify is a wrapper around gtk_label_set_justify().
func (v *Label) SetJustify(jtype Justification) {
C.gtk_label_set_justify(v.native(), C.GtkJustification(jtype))
}
// SetEllipsize is a wrapper around gtk_label_set_ellipsize().
func (v *Label) SetEllipsize(mode pango.EllipsizeMode) {
C.gtk_label_set_ellipsize(v.native(), C.PangoEllipsizeMode(mode))
}
// GetWidthChars is a wrapper around gtk_label_get_width_chars().
func (v *Label) GetWidthChars() int {
c := C.gtk_label_get_width_chars(v.native())
return int(c)
}
// SetWidthChars is a wrapper around gtk_label_set_width_chars().
func (v *Label) SetWidthChars(nChars int) {
C.gtk_label_set_width_chars(v.native(), C.gint(nChars))
}
// GetMaxWidthChars is a wrapper around gtk_label_get_max_width_chars().
func (v *Label) GetMaxWidthChars() int {
c := C.gtk_label_get_max_width_chars(v.native())
return int(c)
}
// SetMaxWidthChars is a wrapper around gtk_label_set_max_width_chars().
func (v *Label) SetMaxWidthChars(nChars int) {
C.gtk_label_set_max_width_chars(v.native(), C.gint(nChars))
}
// GetLineWrap is a wrapper around gtk_label_get_line_wrap().
func (v *Label) GetLineWrap() bool {
c := C.gtk_label_get_line_wrap(v.native())
return gobool(c)
}
// SetLineWrap is a wrapper around gtk_label_set_line_wrap().
func (v *Label) SetLineWrap(wrap bool) {
C.gtk_label_set_line_wrap(v.native(), gbool(wrap))
}
// SetLineWrapMode is a wrapper around gtk_label_set_line_wrap_mode().
func (v *Label) SetLineWrapMode(wrapMode pango.WrapMode) {
C.gtk_label_set_line_wrap_mode(v.native(), C.PangoWrapMode(wrapMode))
}
// GetSelectable is a wrapper around gtk_label_get_selectable().
func (v *Label) GetSelectable() bool {
c := C.gtk_label_get_selectable(v.native())
return gobool(c)
}
// GetText is a wrapper around gtk_label_get_text().
func (v *Label) GetText() (string, error) {
c := C.gtk_label_get_text(v.native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// GetJustify is a wrapper around gtk_label_get_justify().
func (v *Label) GetJustify() Justification {
c := C.gtk_label_get_justify(v.native())
return Justification(c)
}
// GetEllipsize is a wrapper around gtk_label_get_ellipsize().
func (v *Label) GetEllipsize() pango.EllipsizeMode {
c := C.gtk_label_get_ellipsize(v.native())
return pango.EllipsizeMode(c)
}
// GetCurrentUri is a wrapper around gtk_label_get_current_uri().
func (v *Label) GetCurrentUri() string {
c := C.gtk_label_get_current_uri(v.native())
return C.GoString((*C.char)(c))
}
// GetTrackVisitedLinks is a wrapper around gtk_label_get_track_visited_links().
func (v *Label) GetTrackVisitedLinks() bool {
c := C.gtk_label_get_track_visited_links(v.native())
return gobool(c)
}
// SetTrackVisitedLinks is a wrapper around gtk_label_set_track_visited_links().
func (v *Label) SetTrackVisitedLinks(trackLinks bool) {
C.gtk_label_set_track_visited_links(v.native(), gbool(trackLinks))
}
// GetAngle is a wrapper around gtk_label_get_angle().
func (v *Label) GetAngle() float64 {
	c := C.gtk_label_get_angle(v.native())
	return float64(c)
}
// SetAngle is a wrapper around gtk_label_set_angle().
// The angle is given in degrees.
func (v *Label) SetAngle(angle float64) {
	C.gtk_label_set_angle(v.native(), C.gdouble(angle))
}
// GetSelectionBounds is a wrapper around gtk_label_get_selection_bounds().
// start and end are character offsets; nonEmpty mirrors the C function's
// boolean result (the named return documents its meaning).
func (v *Label) GetSelectionBounds() (start, end int, nonEmpty bool) {
	var cstart, cend C.gint
	c := C.gtk_label_get_selection_bounds(v.native(), &cstart, &cend)
	return int(cstart), int(cend), gobool(c)
}
// GetSingleLineMode is a wrapper around gtk_label_get_single_line_mode().
func (v *Label) GetSingleLineMode() bool {
	c := C.gtk_label_get_single_line_mode(v.native())
	return gobool(c)
}
// SetSingleLineMode is a wrapper around gtk_label_set_single_line_mode().
func (v *Label) SetSingleLineMode(mode bool) {
	C.gtk_label_set_single_line_mode(v.native(), gbool(mode))
}
// GetUseMarkup is a wrapper around gtk_label_get_use_markup().
func (v *Label) GetUseMarkup() bool {
	c := C.gtk_label_get_use_markup(v.native())
	return gobool(c)
}
// SetUseMarkup is a wrapper around gtk_label_set_use_markup().
func (v *Label) SetUseMarkup(use bool) {
	C.gtk_label_set_use_markup(v.native(), gbool(use))
}
// GetUseUnderline is a wrapper around gtk_label_get_use_underline().
func (v *Label) GetUseUnderline() bool {
	c := C.gtk_label_get_use_underline(v.native())
	return gobool(c)
}
// SetUseUnderline is a wrapper around gtk_label_set_use_underline().
func (v *Label) SetUseUnderline(use bool) {
	C.gtk_label_set_use_underline(v.native(), gbool(use))
}
// LabelNewWithMnemonic is a wrapper around gtk_label_new_with_mnemonic().
// The Go string is copied to a C string for the call and freed afterwards.
func LabelNewWithMnemonic(str string) (*Label, error) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	widget := C.gtk_label_new_with_mnemonic((*C.gchar)(cstr))
	if widget == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(widget))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapLabel(obj), nil
}
// SelectRegion is a wrapper around gtk_label_select_region().
// Offsets are character offsets within the label text.
func (v *Label) SelectRegion(startOffset, endOffset int) {
	C.gtk_label_select_region(v.native(), C.gint(startOffset),
		C.gint(endOffset))
}
// SetSelectable is a wrapper around gtk_label_set_selectable().
func (v *Label) SetSelectable(setting bool) {
	C.gtk_label_set_selectable(v.native(), gbool(setting))
}
// SetLabel is a wrapper around gtk_label_set_label().
// The Go string is copied to a C string for the call and freed afterwards.
func (v *Label) SetLabel(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_label_set_label(v.native(), (*C.gchar)(cstr))
}
/*
* GtkListStore
*/
// ListStore is a representation of GTK's GtkListStore.
type ListStore struct {
	*glib.Object
	// Interfaces
	TreeModel
}
// native returns a pointer to the underlying GtkListStore.
// Returns nil if the receiver or its GObject is nil, so it is safe
// to call on a zero-valued wrapper.
func (v *ListStore) native() *C.GtkListStore {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkListStore(p)
}
// marshalListStore converts a GValue (given by pointer) holding a
// GtkListStore into a Go *ListStore wrapper.
func marshalListStore(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapListStore(obj), nil
}
// wrapListStore builds a *ListStore, embedding a TreeModel wrapper
// over the same underlying object.
func wrapListStore(obj *glib.Object) *ListStore {
	tm := wrapTreeModel(obj)
	return &ListStore{obj, *tm}
}
// toTreeModel casts the underlying object to a GtkTreeModel.
func (v *ListStore) toTreeModel() *C.GtkTreeModel {
	if v == nil {
		return nil
	}
	return C.toGtkTreeModel(unsafe.Pointer(v.GObject))
}
// ListStoreNew is a wrapper around gtk_list_store_newv().
// A temporary C array of GTypes is allocated for the call and released
// with g_free before returning.
// NOTE(review): unlike the RefSink pattern used for widget constructors
// in this file, this takes an extra Ref before attaching the Unref
// finalizer — confirm the reference-count balance is intended.
func ListStoreNew(types ...glib.Type) (*ListStore, error) {
	gtypes := C.alloc_types(C.int(len(types)))
	for n, val := range types {
		C.set_type(gtypes, C.int(n), C.GType(val))
	}
	defer C.g_free(C.gpointer(gtypes))
	c := C.gtk_list_store_newv(C.gint(len(types)), gtypes)
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	ls := wrapListStore(obj)
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return ls, nil
}
// Remove is a wrapper around gtk_list_store_remove().
// Reports whether iter is still valid after the removal.
func (v *ListStore) Remove(iter *TreeIter) bool {
	return gobool(C.gtk_list_store_remove(v.native(), iter.native()))
}
// TODO(jrick)
/*
func (v *ListStore) SetColumnTypes(types ...glib.Type) {
}
*/
// Set() is a wrapper around gtk_list_store_set_value() but provides
// a function similar to gtk_list_store_set() in that multiple columns
// may be set by one call. The length of columns and values slices must
// match, or Set() will return a non-nil error.
//
// As an example, a call to:
//  store.Set(iter, []int{0, 1}, []interface{}{"Foo", "Bar"})
// is functionally equivalent to calling the native C GTK function:
//  gtk_list_store_set(store, iter, 0, "Foo", 1, "Bar", -1);
func (v *ListStore) Set(iter *TreeIter, columns []int, values []interface{}) error {
	if len(columns) != len(values) {
		return errors.New("columns and values lengths do not match")
	}
	for i, val := range values {
		gv, err := glib.GValue(val)
		if err != nil {
			return err
		}
		// Set one column per iteration, converting via a GValue.
		C.gtk_list_store_set_value(v.native(), iter.native(),
			C.gint(columns[i]),
			(*C.GValue)(unsafe.Pointer(gv.Native())))
	}
	return nil
}
// TODO(jrick)
/*
func (v *ListStore) InsertWithValues(iter *TreeIter, position int, columns []int, values []glib.Value) {
var ccolumns *C.gint
var cvalues *C.GValue
C.gtk_list_store_insert_with_values(v.native(), iter.native(),
C.gint(position), columns, values, C.gint(len(values)))
}
*/
// InsertBefore() is a wrapper around gtk_list_store_insert_before().
// The new row is created before sibling; the returned iter points at it.
func (v *ListStore) InsertBefore(sibling *TreeIter) *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_insert_before(v.native(), &ti, sibling.native())
	return &TreeIter{ti}
}
// InsertAfter() is a wrapper around gtk_list_store_insert_after().
// The new row is created after sibling; the returned iter points at it.
func (v *ListStore) InsertAfter(sibling *TreeIter) *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_insert_after(v.native(), &ti, sibling.native())
	return &TreeIter{ti}
}
// Prepend() is a wrapper around gtk_list_store_prepend().
// Returns an iter pointing at the newly prepended row.
func (v *ListStore) Prepend() *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_prepend(v.native(), &ti)
	return &TreeIter{ti}
}
// Append() is a wrapper around gtk_list_store_append().
// Returns an iter pointing at the newly appended row.
func (v *ListStore) Append() *TreeIter {
	var ti C.GtkTreeIter
	C.gtk_list_store_append(v.native(), &ti)
	return &TreeIter{ti}
}
// Clear() is a wrapper around gtk_list_store_clear().
// Removes all rows from the store.
func (v *ListStore) Clear() {
	C.gtk_list_store_clear(v.native())
}
// IterIsValid() is a wrapper around gtk_list_store_iter_is_valid().
func (v *ListStore) IterIsValid(iter *TreeIter) bool {
	c := C.gtk_list_store_iter_is_valid(v.native(), iter.native())
	return gobool(c)
}
// TODO(jrick)
/*
func (v *ListStore) Reorder(newOrder []int) {
}
*/
// Swap() is a wrapper around gtk_list_store_swap().
// Exchanges the positions of rows a and b.
func (v *ListStore) Swap(a, b *TreeIter) {
	C.gtk_list_store_swap(v.native(), a.native(), b.native())
}
// MoveBefore() is a wrapper around gtk_list_store_move_before().
func (v *ListStore) MoveBefore(iter, position *TreeIter) {
	C.gtk_list_store_move_before(v.native(), iter.native(),
		position.native())
}
// MoveAfter() is a wrapper around gtk_list_store_move_after().
func (v *ListStore) MoveAfter(iter, position *TreeIter) {
	C.gtk_list_store_move_after(v.native(), iter.native(),
		position.native())
}
/*
* GtkMenu
*/
// Menu is a representation of GTK's GtkMenu.
type Menu struct {
	MenuShell
}
// IMenu is an interface type implemented by all structs embedding
// a Menu. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a
// GtkMenu.
type IMenu interface {
	toMenu() *C.GtkMenu
	toWidget() *C.GtkWidget
}
// native() returns a pointer to the underlying GtkMenu.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *Menu) native() *C.GtkMenu {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenu(p)
}
// toMenu implements IMenu.
func (v *Menu) toMenu() *C.GtkMenu {
	if v == nil {
		return nil
	}
	return v.native()
}
// marshalMenu converts a GValue (given by pointer) holding a GtkMenu
// into a Go *Menu wrapper.
func marshalMenu(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenu(obj), nil
}
// wrapMenu builds the full embedding chain down to InitiallyUnowned.
func wrapMenu(obj *glib.Object) *Menu {
	return &Menu{MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// MenuNew() is a wrapper around gtk_menu_new().
// The floating reference is sunk and an Unref finalizer attached.
func MenuNew() (*Menu, error) {
	widget := C.gtk_menu_new()
	if widget == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(widget))}
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return wrapMenu(obj), nil
}
/*
* GtkMenuBar
*/
// MenuBar is a representation of GTK's GtkMenuBar.
type MenuBar struct {
	MenuShell
}
// native() returns a pointer to the underlying GtkMenuBar.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *MenuBar) native() *C.GtkMenuBar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuBar(p)
}
// marshalMenuBar converts a GValue (given by pointer) holding a
// GtkMenuBar into a Go *MenuBar wrapper.
func marshalMenuBar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuBar(obj), nil
}
// wrapMenuBar builds the full embedding chain down to InitiallyUnowned.
func wrapMenuBar(obj *glib.Object) *MenuBar {
	return &MenuBar{MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// MenuBarNew() is a wrapper around gtk_menu_bar_new().
// The floating reference is sunk and an Unref finalizer attached.
func MenuBarNew() (*MenuBar, error) {
	c := C.gtk_menu_bar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuBar(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
/*
* GtkMenuButton
*/
// MenuButton is a representation of GTK's GtkMenuButton.
type MenuButton struct {
	ToggleButton
}
// native returns a pointer to the underlying GtkMenuButton.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *MenuButton) native() *C.GtkMenuButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuButton(p)
}
// marshalMenuButton converts a GValue (given by pointer) holding a
// GtkMenuButton into a Go *MenuButton wrapper.
func marshalMenuButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuButton(obj), nil
}
// wrapMenuButton builds the full embedding chain down to InitiallyUnowned.
func wrapMenuButton(obj *glib.Object) *MenuButton {
	return &MenuButton{ToggleButton{Button{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}}
}
// MenuButtonNew is a wrapper around gtk_menu_button_new().
// The floating reference is sunk and an Unref finalizer attached.
func MenuButtonNew() (*MenuButton, error) {
	c := C.gtk_menu_button_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// SetPopup is a wrapper around gtk_menu_button_set_popup().
// A nil menu dissociates any previously set popup (GTK accepts NULL
// here); previously a nil argument caused a nil-interface panic.
func (v *MenuButton) SetPopup(menu IMenu) {
	var widget *C.GtkWidget
	if menu != nil {
		widget = menu.toWidget()
	}
	C.gtk_menu_button_set_popup(v.native(), widget)
}
// GetPopup is a wrapper around gtk_menu_button_get_popup().
// Returns nil if no popup is set. The wrapper takes its own reference
// (RefSink) and attaches an Unref finalizer.
func (v *MenuButton) GetPopup() *Menu {
	c := C.gtk_menu_button_get_popup(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenu(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}
// TODO: gtk_menu_button_set_menu_model
// TODO: gtk_menu_button_get_menu_model
// SetDirection is a wrapper around gtk_menu_button_set_direction().
func (v *MenuButton) SetDirection(direction ArrowType) {
	C.gtk_menu_button_set_direction(v.native(), C.GtkArrowType(direction))
}
// GetDirection is a wrapper around gtk_menu_button_get_direction().
func (v *MenuButton) GetDirection() ArrowType {
	c := C.gtk_menu_button_get_direction(v.native())
	return ArrowType(c)
}
// SetAlignWidget is a wrapper around gtk_menu_button_set_align_widget().
// A nil alignWidget clears the alignment widget (GTK accepts NULL);
// previously a nil argument caused a nil-interface panic.
func (v *MenuButton) SetAlignWidget(alignWidget IWidget) {
	var widget *C.GtkWidget
	if alignWidget != nil {
		widget = alignWidget.toWidget()
	}
	C.gtk_menu_button_set_align_widget(v.native(), widget)
}
// GetAlignWidget is a wrapper around gtk_menu_button_get_align_widget().
// Returns nil if no align widget is set. The wrapper takes its own
// reference (RefSink) and attaches an Unref finalizer.
func (v *MenuButton) GetAlignWidget() *Widget {
	c := C.gtk_menu_button_get_align_widget(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}
/*
* GtkMenuItem
*/
// MenuItem is a representation of GTK's GtkMenuItem.
type MenuItem struct {
	Bin
}
// IMenuItem is an interface type implemented by all structs
// embedding a MenuItem. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkMenuItem.
type IMenuItem interface {
	toMenuItem() *C.GtkMenuItem
	toWidget() *C.GtkWidget
}
// native returns a pointer to the underlying GtkMenuItem.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *MenuItem) native() *C.GtkMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuItem(p)
}
// toMenuItem implements IMenuItem.
func (v *MenuItem) toMenuItem() *C.GtkMenuItem {
	if v == nil {
		return nil
	}
	return v.native()
}
// marshalMenuItem converts a GValue (given by pointer) holding a
// GtkMenuItem into a Go *MenuItem wrapper.
func marshalMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuItem(obj), nil
}
// wrapMenuItem builds the full embedding chain down to InitiallyUnowned.
func wrapMenuItem(obj *glib.Object) *MenuItem {
	return &MenuItem{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// MenuItemNew() is a wrapper around gtk_menu_item_new().
// The floating reference is sunk and an Unref finalizer attached.
func MenuItemNew() (*MenuItem, error) {
	c := C.gtk_menu_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// MenuItemNewWithLabel() is a wrapper around gtk_menu_item_new_with_label().
// The Go string is copied to a C string for the call and freed afterwards.
func MenuItemNewWithLabel(label string) (*MenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_menu_item_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// MenuItemNewWithMnemonic() is a wrapper around
// gtk_menu_item_new_with_mnemonic().
// The Go string is copied to a C string for the call and freed afterwards.
func MenuItemNewWithMnemonic(label string) (*MenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_menu_item_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m, nil
}
// SetSubmenu() is a wrapper around gtk_menu_item_set_submenu().
// A nil submenu removes any existing submenu (GTK accepts NULL);
// previously a nil argument caused a nil-interface panic.
func (v *MenuItem) SetSubmenu(submenu IWidget) {
	var widget *C.GtkWidget
	if submenu != nil {
		widget = submenu.toWidget()
	}
	C.gtk_menu_item_set_submenu(v.native(), widget)
}
/*
* GtkMenuShell
*/
// MenuShell is a representation of GTK's GtkMenuShell.
type MenuShell struct {
	Container
}
// native returns a pointer to the underlying GtkMenuShell.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *MenuShell) native() *C.GtkMenuShell {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMenuShell(p)
}
// marshalMenuShell converts a GValue (given by pointer) holding a
// GtkMenuShell into a Go *MenuShell wrapper.
func marshalMenuShell(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMenuShell(obj), nil
}
// wrapMenuShell builds the full embedding chain down to InitiallyUnowned.
func wrapMenuShell(obj *glib.Object) *MenuShell {
	return &MenuShell{Container{Widget{glib.InitiallyUnowned{obj}}}}
}
// Append is a wrapper around gtk_menu_shell_append().
func (v *MenuShell) Append(child IMenuItem) {
	C.gtk_menu_shell_append(v.native(), child.toWidget())
}
/*
* GtkMessageDialog
*/
// MessageDialog is a representation of GTK's GtkMessageDialog.
type MessageDialog struct {
	Dialog
}
// native returns a pointer to the underlying GtkMessageDialog.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *MessageDialog) native() *C.GtkMessageDialog {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMessageDialog(p)
}
// marshalMessageDialog converts a GValue (given by pointer) holding a
// GtkMessageDialog into a Go *MessageDialog wrapper.
func marshalMessageDialog(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMessageDialog(obj), nil
}
// wrapMessageDialog builds the full embedding chain down to InitiallyUnowned.
func wrapMessageDialog(obj *glib.Object) *MessageDialog {
	return &MessageDialog{Dialog{Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}}
}
// MessageDialogNew() is a wrapper around gtk_message_dialog_new().
// The text is created and formatted by the format specifier and any
// additional arguments. Formatting happens in Go (fmt.Sprintf); the
// pre-formatted string is passed to a C helper.
// NOTE(review): the C result is not nil-checked before wrapping —
// a failed construction would wrap a nil pointer.
func MessageDialogNew(parent IWindow, flags DialogFlags, mType MessageType, buttons ButtonsType, format string, a ...interface{}) *MessageDialog {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	var w *C.GtkWindow = nil
	if parent != nil {
		w = parent.toWindow()
	}
	c := C._gtk_message_dialog_new(w,
		C.GtkDialogFlags(flags), C.GtkMessageType(mType),
		C.GtkButtonsType(buttons), cstr)
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMessageDialog(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}
// MessageDialogNewWithMarkup is a wrapper around
// gtk_message_dialog_new_with_markup().
// Formatting happens in Go (fmt.Sprintf) before the C call.
// NOTE(review): as above, the C result is not nil-checked.
func MessageDialogNewWithMarkup(parent IWindow, flags DialogFlags, mType MessageType, buttons ButtonsType, format string, a ...interface{}) *MessageDialog {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	var w *C.GtkWindow = nil
	if parent != nil {
		w = parent.toWindow()
	}
	c := C._gtk_message_dialog_new_with_markup(w,
		C.GtkDialogFlags(flags), C.GtkMessageType(mType),
		C.GtkButtonsType(buttons), cstr)
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMessageDialog(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}
// SetMarkup is a wrapper around gtk_message_dialog_set_markup().
func (v *MessageDialog) SetMarkup(str string) {
	cstr := C.CString(str)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_message_dialog_set_markup(v.native(), (*C.gchar)(cstr))
}
// FormatSecondaryText is a wrapper around
// gtk_message_dialog_format_secondary_text().
// Formatting happens in Go (fmt.Sprintf); the pre-formatted string is
// passed to the C helper.
func (v *MessageDialog) FormatSecondaryText(format string, a ...interface{}) {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	C._gtk_message_dialog_format_secondary_text(v.native(),
		(*C.gchar)(cstr))
}
// FormatSecondaryMarkup is a wrapper around
// gtk_message_dialog_format_secondary_markup().
// (The comment previously named _secondary_text; the code calls the
// markup variant.) Formatting happens in Go before the C call.
func (v *MessageDialog) FormatSecondaryMarkup(format string, a ...interface{}) {
	s := fmt.Sprintf(format, a...)
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))
	C._gtk_message_dialog_format_secondary_markup(v.native(),
		(*C.gchar)(cstr))
}
// GetMessageArea is intentionally unimplemented. It returns a GtkVBox, which
// is deprecated since GTK 3.2 and for which gotk3 has no bindings.
/*
* GtkMisc
*/
// Misc is a representation of GTK's GtkMisc.
type Misc struct {
	Widget
}
// native returns a pointer to the underlying GtkMisc.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *Misc) native() *C.GtkMisc {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkMisc(p)
}
// marshalMisc converts a GValue (given by pointer) holding a GtkMisc
// into a Go *Misc wrapper.
func marshalMisc(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapMisc(obj), nil
}
// wrapMisc builds the embedding chain down to InitiallyUnowned.
func wrapMisc(obj *glib.Object) *Misc {
	return &Misc{Widget{glib.InitiallyUnowned{obj}}}
}
// GetAlignment is a wrapper around gtk_misc_get_alignment().
func (v *Misc) GetAlignment() (xAlign, yAlign float32) {
	var x, y C.gfloat
	C.gtk_misc_get_alignment(v.native(), &x, &y)
	return float32(x), float32(y)
}
// SetAlignment is a wrapper around gtk_misc_set_alignment().
func (v *Misc) SetAlignment(xAlign, yAlign float32) {
	C.gtk_misc_set_alignment(v.native(), C.gfloat(xAlign), C.gfloat(yAlign))
}
// GetPadding is a wrapper around gtk_misc_get_padding().
// Returns the x and y padding in pixels (gint out-parameters).
func (v *Misc) GetPadding() (xpad, ypad int) {
	var x, y C.gint
	C.gtk_misc_get_padding(v.native(), &x, &y)
	return int(x), int(y)
}
// SetPadding is a wrapper around gtk_misc_set_padding().
func (v *Misc) SetPadding(xPad, yPad int) {
	C.gtk_misc_set_padding(v.native(), C.gint(xPad), C.gint(yPad))
}
/*
* GtkNotebook
*/
// Notebook is a representation of GTK's GtkNotebook.
type Notebook struct {
	Container
}
// native returns a pointer to the underlying GtkNotebook.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *Notebook) native() *C.GtkNotebook {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkNotebook(p)
}
// marshalNotebook converts a GValue (given by pointer) holding a
// GtkNotebook into a Go *Notebook wrapper.
func marshalNotebook(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapNotebook(obj), nil
}
// wrapNotebook builds the embedding chain down to InitiallyUnowned.
func wrapNotebook(obj *glib.Object) *Notebook {
	return &Notebook{Container{Widget{glib.InitiallyUnowned{obj}}}}
}
// NotebookNew() is a wrapper around gtk_notebook_new().
// The floating reference is sunk and an Unref finalizer attached.
func NotebookNew() (*Notebook, error) {
	c := C.gtk_notebook_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	n := wrapNotebook(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return n, nil
}
// AppendPage() is a wrapper around gtk_notebook_append_page().
// tabLabel may be nil, in which case NULL is passed to GTK.
// Returns the index of the new page.
func (v *Notebook) AppendPage(child IWidget, tabLabel IWidget) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_append_page(v.native(), child.toWidget(), cTabLabel)
	return int(c)
}
// AppendPageMenu() is a wrapper around gtk_notebook_append_page_menu().
// NOTE(review): tabLabel and menuLabel are dereferenced unconditionally;
// nil arguments will panic, unlike AppendPage.
func (v *Notebook) AppendPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget) int {
	c := C.gtk_notebook_append_page_menu(v.native(), child.toWidget(),
		tabLabel.toWidget(), menuLabel.toWidget())
	return int(c)
}
// PrependPage() is a wrapper around gtk_notebook_prepend_page().
// tabLabel may be nil, in which case NULL is passed to GTK.
func (v *Notebook) PrependPage(child IWidget, tabLabel IWidget) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_prepend_page(v.native(), child.toWidget(), cTabLabel)
	return int(c)
}
// PrependPageMenu() is a wrapper around gtk_notebook_prepend_page_menu().
// NOTE(review): nil tabLabel/menuLabel will panic here as well.
func (v *Notebook) PrependPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget) int {
	c := C.gtk_notebook_prepend_page_menu(v.native(), child.toWidget(),
		tabLabel.toWidget(), menuLabel.toWidget())
	return int(c)
}
// InsertPage() is a wrapper around gtk_notebook_insert_page().
// A nil tabLabel passes NULL to GTK (default tab label), matching the
// nil handling of AppendPage() and PrependPage(); previously a nil
// tabLabel caused a nil-interface panic.
// Returns the index of the new page.
func (v *Notebook) InsertPage(child IWidget, tabLabel IWidget, position int) int {
	var cTabLabel *C.GtkWidget
	if tabLabel != nil {
		cTabLabel = tabLabel.toWidget()
	}
	c := C.gtk_notebook_insert_page(v.native(), child.toWidget(),
		cTabLabel, C.gint(position))
	return int(c)
}
// InsertPageMenu() is a wrapper around gtk_notebook_insert_page_menu().
// NOTE(review): tabLabel and menuLabel are dereferenced unconditionally;
// nil arguments will panic.
func (v *Notebook) InsertPageMenu(child IWidget, tabLabel IWidget, menuLabel IWidget, position int) int {
	c := C.gtk_notebook_insert_page_menu(v.native(), child.toWidget(),
		tabLabel.toWidget(), menuLabel.toWidget(), C.gint(position))
	return int(c)
}
// RemovePage() is a wrapper around gtk_notebook_remove_page().
func (v *Notebook) RemovePage(pageNum int) {
	C.gtk_notebook_remove_page(v.native(), C.gint(pageNum))
}
// PageNum() is a wrapper around gtk_notebook_page_num().
// Returns the index of the page containing child.
func (v *Notebook) PageNum(child IWidget) int {
	c := C.gtk_notebook_page_num(v.native(), child.toWidget())
	return int(c)
}
// NextPage() is a wrapper around gtk_notebook_next_page().
func (v *Notebook) NextPage() {
	C.gtk_notebook_next_page(v.native())
}
// PrevPage() is a wrapper around gtk_notebook_prev_page().
func (v *Notebook) PrevPage() {
	C.gtk_notebook_prev_page(v.native())
}
// ReorderChild() is a wrapper around gtk_notebook_reorder_child().
func (v *Notebook) ReorderChild(child IWidget, position int) {
	C.gtk_notebook_reorder_child(v.native(), child.toWidget(),
		C.gint(position))
}
// SetTabPos() is a wrapper around gtk_notebook_set_tab_pos().
func (v *Notebook) SetTabPos(pos PositionType) {
	C.gtk_notebook_set_tab_pos(v.native(), C.GtkPositionType(pos))
}
// SetShowTabs() is a wrapper around gtk_notebook_set_show_tabs().
func (v *Notebook) SetShowTabs(showTabs bool) {
	C.gtk_notebook_set_show_tabs(v.native(), gbool(showTabs))
}
// SetShowBorder() is a wrapper around gtk_notebook_set_show_border().
func (v *Notebook) SetShowBorder(showBorder bool) {
	C.gtk_notebook_set_show_border(v.native(), gbool(showBorder))
}
// SetScrollable() is a wrapper around gtk_notebook_set_scrollable().
func (v *Notebook) SetScrollable(scrollable bool) {
	C.gtk_notebook_set_scrollable(v.native(), gbool(scrollable))
}
// PopupEnable() is a wrapper around gtk_notebook_popup_enable().
func (v *Notebook) PopupEnable() {
	C.gtk_notebook_popup_enable(v.native())
}
// PopupDisable() is a wrapper around gtk_notebook_popup_disable().
func (v *Notebook) PopupDisable() {
	C.gtk_notebook_popup_disable(v.native())
}
// GetCurrentPage() is a wrapper around gtk_notebook_get_current_page().
func (v *Notebook) GetCurrentPage() int {
	c := C.gtk_notebook_get_current_page(v.native())
	return int(c)
}
// GetMenuLabel() is a wrapper around gtk_notebook_get_menu_label().
// Returns nilPtrErr if the page has no menu label. The wrapper takes
// its own reference (RefSink) and attaches an Unref finalizer.
func (v *Notebook) GetMenuLabel(child IWidget) (*Widget, error) {
	c := C.gtk_notebook_get_menu_label(v.native(), child.toWidget())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// GetNthPage() is a wrapper around gtk_notebook_get_nth_page().
// Returns nilPtrErr if pageNum is out of range.
func (v *Notebook) GetNthPage(pageNum int) (*Widget, error) {
	c := C.gtk_notebook_get_nth_page(v.native(), C.gint(pageNum))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// GetNPages() is a wrapper around gtk_notebook_get_n_pages().
func (v *Notebook) GetNPages() int {
	c := C.gtk_notebook_get_n_pages(v.native())
	return int(c)
}
// GetTabLabel() is a wrapper around gtk_notebook_get_tab_label().
// Returns nilPtrErr if the page has no tab label widget.
func (v *Notebook) GetTabLabel(child IWidget) (*Widget, error) {
	c := C.gtk_notebook_get_tab_label(v.native(), child.toWidget())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// SetMenuLabel() is a wrapper around gtk_notebook_set_menu_label().
func (v *Notebook) SetMenuLabel(child, menuLabel IWidget) {
	C.gtk_notebook_set_menu_label(v.native(), child.toWidget(),
		menuLabel.toWidget())
}
// SetMenuLabelText() is a wrapper around gtk_notebook_set_menu_label_text().
// The Go string is copied to a C string for the call and freed afterwards.
func (v *Notebook) SetMenuLabelText(child IWidget, menuText string) {
	cstr := C.CString(menuText)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_menu_label_text(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}
// SetTabLabel() is a wrapper around gtk_notebook_set_tab_label().
func (v *Notebook) SetTabLabel(child, tabLabel IWidget) {
	C.gtk_notebook_set_tab_label(v.native(), child.toWidget(),
		tabLabel.toWidget())
}
// SetTabLabelText() is a wrapper around gtk_notebook_set_tab_label_text().
// The Go string is copied to a C string for the call and freed afterwards.
func (v *Notebook) SetTabLabelText(child IWidget, tabText string) {
	cstr := C.CString(tabText)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_tab_label_text(v.native(), child.toWidget(),
		(*C.gchar)(cstr))
}
// SetTabReorderable() is a wrapper around gtk_notebook_set_tab_reorderable().
func (v *Notebook) SetTabReorderable(child IWidget, reorderable bool) {
	C.gtk_notebook_set_tab_reorderable(v.native(), child.toWidget(),
		gbool(reorderable))
}
// SetTabDetachable() is a wrapper around gtk_notebook_set_tab_detachable().
func (v *Notebook) SetTabDetachable(child IWidget, detachable bool) {
	C.gtk_notebook_set_tab_detachable(v.native(), child.toWidget(),
		gbool(detachable))
}
// GetMenuLabelText() is a wrapper around gtk_notebook_get_menu_label_text().
// Returns an error when GTK reports no menu label for the widget.
func (v *Notebook) GetMenuLabelText(child IWidget) (string, error) {
	c := C.gtk_notebook_get_menu_label_text(v.native(), child.toWidget())
	if c == nil {
		return "", errors.New("No menu label for widget")
	}
	return C.GoString((*C.char)(c)), nil
}
// GetScrollable() is a wrapper around gtk_notebook_get_scrollable().
func (v *Notebook) GetScrollable() bool {
	c := C.gtk_notebook_get_scrollable(v.native())
	return gobool(c)
}
// GetShowBorder() is a wrapper around gtk_notebook_get_show_border().
func (v *Notebook) GetShowBorder() bool {
	c := C.gtk_notebook_get_show_border(v.native())
	return gobool(c)
}
// GetShowTabs() is a wrapper around gtk_notebook_get_show_tabs().
func (v *Notebook) GetShowTabs() bool {
	c := C.gtk_notebook_get_show_tabs(v.native())
	return gobool(c)
}
// GetTabLabelText() is a wrapper around gtk_notebook_get_tab_label_text().
// Returns an error when GTK reports no tab label for the widget.
func (v *Notebook) GetTabLabelText(child IWidget) (string, error) {
	c := C.gtk_notebook_get_tab_label_text(v.native(), child.toWidget())
	if c == nil {
		return "", errors.New("No tab label for widget")
	}
	return C.GoString((*C.char)(c)), nil
}
// GetTabPos() is a wrapper around gtk_notebook_get_tab_pos().
func (v *Notebook) GetTabPos() PositionType {
	c := C.gtk_notebook_get_tab_pos(v.native())
	return PositionType(c)
}
// GetTabReorderable() is a wrapper around gtk_notebook_get_tab_reorderable().
func (v *Notebook) GetTabReorderable(child IWidget) bool {
	c := C.gtk_notebook_get_tab_reorderable(v.native(), child.toWidget())
	return gobool(c)
}
// GetTabDetachable() is a wrapper around gtk_notebook_get_tab_detachable().
func (v *Notebook) GetTabDetachable(child IWidget) bool {
	c := C.gtk_notebook_get_tab_detachable(v.native(), child.toWidget())
	return gobool(c)
}
// SetCurrentPage() is a wrapper around gtk_notebook_set_current_page().
func (v *Notebook) SetCurrentPage(pageNum int) {
	C.gtk_notebook_set_current_page(v.native(), C.gint(pageNum))
}
// SetGroupName() is a wrapper around gtk_notebook_set_group_name().
// The Go string is copied to a C string for the call and freed afterwards.
func (v *Notebook) SetGroupName(groupName string) {
	cstr := C.CString(groupName)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_notebook_set_group_name(v.native(), (*C.gchar)(cstr))
}
// GetGroupName() is a wrapper around gtk_notebook_get_group_name().
// Returns an error when GTK reports no group name.
func (v *Notebook) GetGroupName() (string, error) {
	c := C.gtk_notebook_get_group_name(v.native())
	if c == nil {
		return "", errors.New("No group name")
	}
	return C.GoString((*C.char)(c)), nil
}
// SetActionWidget() is a wrapper around gtk_notebook_set_action_widget().
func (v *Notebook) SetActionWidget(widget IWidget, packType PackType) {
	C.gtk_notebook_set_action_widget(v.native(), widget.toWidget(),
		C.GtkPackType(packType))
}
// GetActionWidget() is a wrapper around gtk_notebook_get_action_widget().
// Returns nilPtrErr if no action widget is set for packType. The wrapper
// takes its own reference (RefSink) and attaches an Unref finalizer.
func (v *Notebook) GetActionWidget(packType PackType) (*Widget, error) {
	c := C.gtk_notebook_get_action_widget(v.native(),
		C.GtkPackType(packType))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
/*
* GtkOffscreenWindow
*/
// OffscreenWindow is a representation of GTK's GtkOffscreenWindow.
type OffscreenWindow struct {
	Window
}
// native returns a pointer to the underlying GtkOffscreenWindow.
// Nil-safe: returns nil for a nil receiver or nil GObject.
func (v *OffscreenWindow) native() *C.GtkOffscreenWindow {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkOffscreenWindow(p)
}
// marshalOffscreenWindow converts a GValue (given by pointer) holding
// a GtkOffscreenWindow into a Go *OffscreenWindow wrapper.
func marshalOffscreenWindow(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapOffscreenWindow(obj), nil
}
// wrapOffscreenWindow builds the embedding chain down to InitiallyUnowned.
func wrapOffscreenWindow(obj *glib.Object) *OffscreenWindow {
	return &OffscreenWindow{Window{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}
// OffscreenWindowNew is a wrapper around gtk_offscreen_window_new().
// The floating reference is sunk and an Unref finalizer attached.
func OffscreenWindowNew() (*OffscreenWindow, error) {
	c := C.gtk_offscreen_window_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	o := wrapOffscreenWindow(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return o, nil
}
// GetSurface is a wrapper around gtk_offscreen_window_get_surface().
// The returned surface is safe to use over window resizes.
// The raw cairo pointer is handed to cairo.NewSurface (second argument
// true — see the cairo package for its ownership semantics).
func (v *OffscreenWindow) GetSurface() (*cairo.Surface, error) {
	c := C.gtk_offscreen_window_get_surface(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	cairoPtr := (uintptr)(unsafe.Pointer(c))
	s := cairo.NewSurface(cairoPtr, true)
	return s, nil
}
// GetPixbuf is a wrapper around gtk_offscreen_window_get_pixbuf().
func (v *OffscreenWindow) GetPixbuf() (*gdk.Pixbuf, error) {
	c := C.gtk_offscreen_window_get_pixbuf(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	pb := &gdk.Pixbuf{obj}
	// Pixbuf is returned with ref count of 1, so don't increment.
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return pb, nil
}
/*
 * GtkOrientable
 */
// Orientable is a representation of GTK's GtkOrientable GInterface.
type Orientable struct {
	*glib.Object
}
// IOrientable is an interface type implemented by all structs
// embedding an Orientable. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkOrientable.
type IOrientable interface {
	toOrientable() *C.GtkOrientable
}
// native returns a pointer to the underlying GObject as a GtkOrientable.
// Returns nil if the wrapper or its GObject is nil, so cgo calls receive
// NULL instead of a dangling pointer.
func (v *Orientable) native() *C.GtkOrientable {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkOrientable(p)
}
// marshalOrientable converts a raw GValue pointer into an *Orientable;
// used by the glib marshalling machinery.
func marshalOrientable(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapOrientable(obj), nil
}
// wrapOrientable builds the Go wrapper around an existing glib.Object.
func wrapOrientable(obj *glib.Object) *Orientable {
	return &Orientable{obj}
}
// GetOrientation() is a wrapper around gtk_orientable_get_orientation().
func (v *Orientable) GetOrientation() Orientation {
	return Orientation(C.gtk_orientable_get_orientation(v.native()))
}
// SetOrientation() is a wrapper around gtk_orientable_set_orientation().
func (v *Orientable) SetOrientation(orientation Orientation) {
	o := C.GtkOrientation(orientation)
	C.gtk_orientable_set_orientation(v.native(), o)
}
/*
 * GtkProgressBar
 */
// ProgressBar is a representation of GTK's GtkProgressBar.
type ProgressBar struct {
	Widget
}
// native returns a pointer to the underlying GtkProgressBar.
// Returns nil if the wrapper or its GObject is nil.
func (v *ProgressBar) native() *C.GtkProgressBar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkProgressBar(p)
}
// marshalProgressBar converts a raw GValue pointer into a *ProgressBar;
// used by the glib marshalling machinery.
func marshalProgressBar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapProgressBar(obj), nil
}
// wrapProgressBar builds the Go wrapper, embedding the widget
// hierarchy down to glib.InitiallyUnowned.
func wrapProgressBar(obj *glib.Object) *ProgressBar {
	return &ProgressBar{Widget{glib.InitiallyUnowned{obj}}}
}
// ProgressBarNew() is a wrapper around gtk_progress_bar_new().
func ProgressBarNew() (*ProgressBar, error) {
	c := C.gtk_progress_bar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	p := wrapProgressBar(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return p, nil
}
// SetFraction() is a wrapper around gtk_progress_bar_set_fraction().
func (v *ProgressBar) SetFraction(fraction float64) {
	f := C.gdouble(fraction)
	C.gtk_progress_bar_set_fraction(v.native(), f)
}
// GetFraction() is a wrapper around gtk_progress_bar_get_fraction().
func (v *ProgressBar) GetFraction() float64 {
	return float64(C.gtk_progress_bar_get_fraction(v.native()))
}
// SetText() is a wrapper around gtk_progress_bar_set_text().
func (v *ProgressBar) SetText(text string) {
	cstr := (*C.gchar)(C.CString(text))
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_progress_bar_set_text(v.native(), cstr)
}
/*
 * GtkRadioButton
 */
// RadioButton is a representation of GTK's GtkRadioButton.
type RadioButton struct {
	CheckButton
}
// native returns a pointer to the underlying GtkRadioButton.
// Returns nil if the wrapper or its GObject is nil.
func (v *RadioButton) native() *C.GtkRadioButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkRadioButton(p)
}
// marshalRadioButton converts a raw GValue pointer into a *RadioButton;
// used by the glib marshalling machinery.
func marshalRadioButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapRadioButton(obj), nil
}
// wrapRadioButton builds the Go wrapper, embedding the full widget
// hierarchy down to glib.InitiallyUnowned.
func wrapRadioButton(obj *glib.Object) *RadioButton {
	return &RadioButton{CheckButton{ToggleButton{Button{Bin{Container{
		Widget{glib.InitiallyUnowned{obj}}}}}}}}
}
// RadioButtonNew is a wrapper around gtk_radio_button_new().
// A nil group maps to NULL on the C side.
func RadioButtonNew(group *glib.SList) (*RadioButton, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	c := C.gtk_radio_button_new(gslist)
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioButtonNewFromWidget is a wrapper around
// gtk_radio_button_new_from_widget().
func RadioButtonNewFromWidget(radioGroupMember *RadioButton) (*RadioButton, error) {
	c := C.gtk_radio_button_new_from_widget(radioGroupMember.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioButtonNewWithLabel is a wrapper around
// gtk_radio_button_new_with_label().
func RadioButtonNewWithLabel(group *glib.SList, label string) (*RadioButton, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_button_new_with_label(gslist, (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioButtonNewWithLabelFromWidget is a wrapper around
// gtk_radio_button_new_with_label_from_widget().
func RadioButtonNewWithLabelFromWidget(radioGroupMember *RadioButton, label string) (*RadioButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_button_new_with_label_from_widget(radioGroupMember.native(),
		(*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioButtonNewWithMnemonic is a wrapper around
// gtk_radio_button_new_with_mnemonic()
func RadioButtonNewWithMnemonic(group *glib.SList, label string) (*RadioButton, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_button_new_with_mnemonic(gslist, (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioButtonNewWithMnemonicFromWidget is a wrapper around
// gtk_radio_button_new_with_mnemonic_from_widget().
func RadioButtonNewWithMnemonicFromWidget(radioGroupMember *RadioButton, label string) (*RadioButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_button_new_with_mnemonic_from_widget(radioGroupMember.native(),
		(*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// SetGroup is a wrapper around gtk_radio_button_set_group().
func (v *RadioButton) SetGroup(group *glib.SList) {
	C.gtk_radio_button_set_group(v.native(), (*C.GSList)(unsafe.Pointer(group)))
}
// GetGroup is a wrapper around gtk_radio_button_get_group().
// (The previous comment incorrectly referenced gtk_radio_button_set_group.)
func (v *RadioButton) GetGroup() (*glib.SList, error) {
	c := C.gtk_radio_button_get_group(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	return (*glib.SList)(unsafe.Pointer(c)), nil
}
// JoinGroup is a wrapper around gtk_radio_button_join_group().
func (v *RadioButton) JoinGroup(groupSource *RadioButton) {
	src := groupSource.native()
	C.gtk_radio_button_join_group(v.native(), src)
}
/*
 * GtkRadioMenuItem
 */
// RadioMenuItem is a representation of GTK's GtkRadioMenuItem.
type RadioMenuItem struct {
	CheckMenuItem
}
// native returns a pointer to the underlying GtkRadioMenuItem.
// Returns nil if the wrapper or its GObject is nil.
func (v *RadioMenuItem) native() *C.GtkRadioMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkRadioMenuItem(p)
}
// marshalRadioMenuItem converts a raw GValue pointer into a
// *RadioMenuItem; used by the glib marshalling machinery.
func marshalRadioMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapRadioMenuItem(obj), nil
}
// wrapRadioMenuItem builds the Go wrapper, embedding the full widget
// hierarchy down to glib.InitiallyUnowned.
func wrapRadioMenuItem(obj *glib.Object) *RadioMenuItem {
	return &RadioMenuItem{CheckMenuItem{MenuItem{Bin{Container{
		Widget{glib.InitiallyUnowned{obj}}}}}}}
}
// RadioMenuItemNew is a wrapper around gtk_radio_menu_item_new().
// A nil group maps to NULL on the C side.
func RadioMenuItemNew(group *glib.SList) (*RadioMenuItem, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	c := C.gtk_radio_menu_item_new(gslist)
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioMenuItemNewWithLabel is a wrapper around
// gtk_radio_menu_item_new_with_label().
func RadioMenuItemNewWithLabel(group *glib.SList, label string) (*RadioMenuItem, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_menu_item_new_with_label(gslist, (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioMenuItemNewWithMnemonic is a wrapper around
// gtk_radio_menu_item_new_with_mnemonic().
func RadioMenuItemNewWithMnemonic(group *glib.SList, label string) (*RadioMenuItem, error) {
	gslist := (*C.GSList)(unsafe.Pointer(group))
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_menu_item_new_with_mnemonic(gslist, (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioMenuItemNewFromWidget is a wrapper around
// gtk_radio_menu_item_new_from_widget().
func RadioMenuItemNewFromWidget(group *RadioMenuItem) (*RadioMenuItem, error) {
	c := C.gtk_radio_menu_item_new_from_widget(group.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioMenuItemNewWithLabelFromWidget is a wrapper around
// gtk_radio_menu_item_new_with_label_from_widget().
func RadioMenuItemNewWithLabelFromWidget(group *RadioMenuItem, label string) (*RadioMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_menu_item_new_with_label_from_widget(group.native(),
		(*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// RadioMenuItemNewWithMnemonicFromWidget is a wrapper around
// gtk_radio_menu_item_new_with_mnemonic_from_widget().
func RadioMenuItemNewWithMnemonicFromWidget(group *RadioMenuItem, label string) (*RadioMenuItem, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_radio_menu_item_new_with_mnemonic_from_widget(group.native(),
		(*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	r := wrapRadioMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return r, nil
}
// SetGroup is a wrapper around gtk_radio_menu_item_set_group().
func (v *RadioMenuItem) SetGroup(group *glib.SList) {
	C.gtk_radio_menu_item_set_group(v.native(), (*C.GSList)(unsafe.Pointer(group)))
}
// GetGroup is a wrapper around gtk_radio_menu_item_get_group().
func (v *RadioMenuItem) GetGroup() (*glib.SList, error) {
	list := C.gtk_radio_menu_item_get_group(v.native())
	if list == nil {
		return nil, nilPtrErr
	}
	return (*glib.SList)(unsafe.Pointer(list)), nil
}
/*
 * GtkRange
 */
// Range is a representation of GTK's GtkRange.
type Range struct {
	Widget
}
// native returns a pointer to the underlying GtkRange.
// Returns nil if the wrapper or its GObject is nil.
func (v *Range) native() *C.GtkRange {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkRange(p)
}
// marshalRange converts a raw GValue pointer into a *Range; used by the
// glib marshalling machinery.
func marshalRange(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapRange(obj), nil
}
// wrapRange builds the Go wrapper around an existing glib.Object.
func wrapRange(obj *glib.Object) *Range {
	return &Range{Widget{glib.InitiallyUnowned{obj}}}
}
/*
 * GtkScrollbar
 */
// Scrollbar is a representation of GTK's GtkScrollbar.
type Scrollbar struct {
	Range
}
// native returns a pointer to the underlying GtkScrollbar.
// Returns nil if the wrapper or its GObject is nil.
func (v *Scrollbar) native() *C.GtkScrollbar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkScrollbar(p)
}
// marshalScrollbar converts a raw GValue pointer into a *Scrollbar;
// used by the glib marshalling machinery.
func marshalScrollbar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapScrollbar(obj), nil
}
// wrapScrollbar builds the Go wrapper, embedding Range and Widget.
func wrapScrollbar(obj *glib.Object) *Scrollbar {
	return &Scrollbar{Range{Widget{glib.InitiallyUnowned{obj}}}}
}
// ScrollbarNew is a wrapper around gtk_scrollbar_new().
func ScrollbarNew(orientation Orientation, adjustment *Adjustment) (*Scrollbar, error) {
	c := C.gtk_scrollbar_new(C.GtkOrientation(orientation), adjustment.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapScrollbar(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
/*
 * GtkScrolledWindow
 */
// ScrolledWindow is a representation of GTK's GtkScrolledWindow.
type ScrolledWindow struct {
	Bin
}
// native returns a pointer to the underlying GtkScrolledWindow.
// Returns nil if the wrapper or its GObject is nil.
func (v *ScrolledWindow) native() *C.GtkScrolledWindow {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkScrolledWindow(p)
}
// marshalScrolledWindow converts a raw GValue pointer into a
// *ScrolledWindow; used by the glib marshalling machinery.
func marshalScrolledWindow(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapScrolledWindow(obj), nil
}
// wrapScrolledWindow builds the Go wrapper, embedding the full widget
// hierarchy down to glib.InitiallyUnowned.
func wrapScrolledWindow(obj *glib.Object) *ScrolledWindow {
	return &ScrolledWindow{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// ScrolledWindowNew() is a wrapper around gtk_scrolled_window_new().
// Nil adjustments map to NULL (native() handles a nil receiver).
func ScrolledWindowNew(hadjustment, vadjustment *Adjustment) (*ScrolledWindow, error) {
	c := C.gtk_scrolled_window_new(hadjustment.native(),
		vadjustment.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapScrolledWindow(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// SetPolicy() is a wrapper around gtk_scrolled_window_set_policy().
func (v *ScrolledWindow) SetPolicy(hScrollbarPolicy, vScrollbarPolicy PolicyType) {
	hp := C.GtkPolicyType(hScrollbarPolicy)
	vp := C.GtkPolicyType(vScrollbarPolicy)
	C.gtk_scrolled_window_set_policy(v.native(), hp, vp)
}
/*
 * GtkSearchEntry
 */
// SearchEntry is a representation of GTK's GtkSearchEntry.
type SearchEntry struct {
	Entry
}
// native returns a pointer to the underlying GtkSearchEntry.
// Returns nil if the wrapper or its GObject is nil.
func (v *SearchEntry) native() *C.GtkSearchEntry {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSearchEntry(p)
}
// marshalSearchEntry converts a raw GValue pointer into a *SearchEntry;
// used by the glib marshalling machinery.
func marshalSearchEntry(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSearchEntry(obj), nil
}
// wrapSearchEntry builds the Go wrapper; Entry also embeds an Editable
// view of the same object, hence the extra wrapEditable call.
func wrapSearchEntry(obj *glib.Object) *SearchEntry {
	e := wrapEditable(obj)
	return &SearchEntry{Entry{Widget{glib.InitiallyUnowned{obj}}, *e}}
}
// SearchEntryNew is a wrapper around gtk_search_entry_new().
func SearchEntryNew() (*SearchEntry, error) {
	c := C.gtk_search_entry_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSearchEntry(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
/*
 * GtkSeparator
 */
// Separator is a representation of GTK's GtkSeparator.
type Separator struct {
	Widget
}
// native returns a pointer to the underlying GtkSeparator.
// Returns nil if the wrapper or its GObject is nil.
func (v *Separator) native() *C.GtkSeparator {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSeparator(p)
}
// marshalSeparator converts a raw GValue pointer into a *Separator;
// used by the glib marshalling machinery.
func marshalSeparator(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSeparator(obj), nil
}
// wrapSeparator builds the Go wrapper around an existing glib.Object.
func wrapSeparator(obj *glib.Object) *Separator {
	return &Separator{Widget{glib.InitiallyUnowned{obj}}}
}
// SeparatorNew is a wrapper around gtk_separator_new().
func SeparatorNew(orientation Orientation) (*Separator, error) {
	c := C.gtk_separator_new(C.GtkOrientation(orientation))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSeparator(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
/*
 * GtkSeparatorMenuItem
 */
// SeparatorMenuItem is a representation of GTK's GtkSeparatorMenuItem.
type SeparatorMenuItem struct {
	MenuItem
}
// native returns a pointer to the underlying GtkSeparatorMenuItem.
// Returns nil if the wrapper or its GObject is nil.
func (v *SeparatorMenuItem) native() *C.GtkSeparatorMenuItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSeparatorMenuItem(p)
}
// marshalSeparatorMenuItem converts a raw GValue pointer into a
// *SeparatorMenuItem; used by the glib marshalling machinery.
func marshalSeparatorMenuItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSeparatorMenuItem(obj), nil
}
// wrapSeparatorMenuItem builds the Go wrapper, embedding the full
// widget hierarchy down to glib.InitiallyUnowned.
func wrapSeparatorMenuItem(obj *glib.Object) *SeparatorMenuItem {
	return &SeparatorMenuItem{MenuItem{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}}
}
// SeparatorMenuItemNew is a wrapper around gtk_separator_menu_item_new().
func SeparatorMenuItemNew() (*SeparatorMenuItem, error) {
	c := C.gtk_separator_menu_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSeparatorMenuItem(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
/*
 * GtkSeparatorToolItem
 */
// SeparatorToolItem is a representation of GTK's GtkSeparatorToolItem.
type SeparatorToolItem struct {
	ToolItem
}
// native returns a pointer to the underlying GtkSeparatorToolItem.
// Returns nil if the wrapper or its GObject is nil.
func (v *SeparatorToolItem) native() *C.GtkSeparatorToolItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSeparatorToolItem(p)
}
// marshalSeparatorToolItem converts a raw GValue pointer into a
// *SeparatorToolItem; used by the glib marshalling machinery.
func marshalSeparatorToolItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSeparatorToolItem(obj), nil
}
// wrapSeparatorToolItem builds the Go wrapper, embedding the full
// widget hierarchy down to glib.InitiallyUnowned.
func wrapSeparatorToolItem(obj *glib.Object) *SeparatorToolItem {
	return &SeparatorToolItem{ToolItem{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}
// SeparatorToolItemNew is a wrapper around gtk_separator_tool_item_new().
func SeparatorToolItemNew() (*SeparatorToolItem, error) {
	c := C.gtk_separator_tool_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSeparatorToolItem(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// SetDraw is a wrapper around gtk_separator_tool_item_set_draw().
func (v *SeparatorToolItem) SetDraw(draw bool) {
	d := gbool(draw)
	C.gtk_separator_tool_item_set_draw(v.native(), d)
}

// GetDraw is a wrapper around gtk_separator_tool_item_get_draw().
func (v *SeparatorToolItem) GetDraw() bool {
	return gobool(C.gtk_separator_tool_item_get_draw(v.native()))
}
/*
 * GtkSpinButton
 */
// SpinButton is a representation of GTK's GtkSpinButton.
type SpinButton struct {
	Entry
}
// native returns a pointer to the underlying GtkSpinButton.
// Returns nil if the wrapper or its GObject is nil.
func (v *SpinButton) native() *C.GtkSpinButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSpinButton(p)
}
// marshalSpinButton converts a raw GValue pointer into a *SpinButton;
// used by the glib marshalling machinery.
func marshalSpinButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSpinButton(obj), nil
}
// wrapSpinButton builds the Go wrapper; Entry also embeds an Editable
// view of the same object, hence the extra wrapEditable call.
func wrapSpinButton(obj *glib.Object) *SpinButton {
	e := wrapEditable(obj)
	return &SpinButton{Entry{Widget{glib.InitiallyUnowned{obj}}, *e}}
}
// Configure() is a wrapper around gtk_spin_button_configure().
func (v *SpinButton) Configure(adjustment *Adjustment, climbRate float64, digits uint) {
	rate := C.gdouble(climbRate)
	nd := C.guint(digits)
	C.gtk_spin_button_configure(v.native(), adjustment.native(), rate, nd)
}
// SpinButtonNew() is a wrapper around gtk_spin_button_new().
func SpinButtonNew(adjustment *Adjustment, climbRate float64, digits uint) (*SpinButton, error) {
	c := C.gtk_spin_button_new(adjustment.native(),
		C.gdouble(climbRate), C.guint(digits))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSpinButton(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// SpinButtonNewWithRange() is a wrapper around
// gtk_spin_button_new_with_range().
func SpinButtonNewWithRange(min, max, step float64) (*SpinButton, error) {
	c := C.gtk_spin_button_new_with_range(C.gdouble(min), C.gdouble(max),
		C.gdouble(step))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSpinButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// GetValueAsInt() is a wrapper around gtk_spin_button_get_value_as_int().
func (v *SpinButton) GetValueAsInt() int {
	return int(C.gtk_spin_button_get_value_as_int(v.native()))
}

// SetValue() is a wrapper around gtk_spin_button_set_value().
func (v *SpinButton) SetValue(value float64) {
	val := C.gdouble(value)
	C.gtk_spin_button_set_value(v.native(), val)
}

// GetValue() is a wrapper around gtk_spin_button_get_value().
func (v *SpinButton) GetValue() float64 {
	return float64(C.gtk_spin_button_get_value(v.native()))
}
/*
 * GtkSpinner
 */
// Spinner is a representation of GTK's GtkSpinner.
type Spinner struct {
	Widget
}
// native returns a pointer to the underlying GtkSpinner.
// Returns nil if the wrapper or its GObject is nil.
func (v *Spinner) native() *C.GtkSpinner {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSpinner(p)
}
// marshalSpinner converts a raw GValue pointer into a *Spinner; used by
// the glib marshalling machinery.
func marshalSpinner(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSpinner(obj), nil
}
// wrapSpinner builds the Go wrapper around an existing glib.Object.
func wrapSpinner(obj *glib.Object) *Spinner {
	return &Spinner{Widget{glib.InitiallyUnowned{obj}}}
}
// SpinnerNew is a wrapper around gtk_spinner_new().
func SpinnerNew() (*Spinner, error) {
	c := C.gtk_spinner_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSpinner(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// Start is a wrapper around gtk_spinner_start().
func (v *Spinner) Start() {
	spinner := v.native()
	C.gtk_spinner_start(spinner)
}

// Stop is a wrapper around gtk_spinner_stop().
func (v *Spinner) Stop() {
	spinner := v.native()
	C.gtk_spinner_stop(spinner)
}
/*
 * GtkStatusbar
 */
// Statusbar is a representation of GTK's GtkStatusbar
type Statusbar struct {
	Box
}
// native returns a pointer to the underlying GtkStatusbar
// Returns nil if the wrapper or its GObject is nil.
func (v *Statusbar) native() *C.GtkStatusbar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkStatusbar(p)
}
// marshalStatusbar converts a raw GValue pointer into a *Statusbar;
// used by the glib marshalling machinery.
func marshalStatusbar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapStatusbar(obj), nil
}
// wrapStatusbar builds the Go wrapper, embedding Box down to
// glib.InitiallyUnowned.
func wrapStatusbar(obj *glib.Object) *Statusbar {
	return &Statusbar{Box{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// StatusbarNew() is a wrapper around gtk_statusbar_new().
func StatusbarNew() (*Statusbar, error) {
	c := C.gtk_statusbar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapStatusbar(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// GetContextId() is a wrapper around gtk_statusbar_get_context_id().
func (v *Statusbar) GetContextId(contextDescription string) uint {
	desc := C.CString(contextDescription)
	defer C.free(unsafe.Pointer(desc))
	return uint(C.gtk_statusbar_get_context_id(v.native(), (*C.gchar)(desc)))
}

// Push() is a wrapper around gtk_statusbar_push().
func (v *Statusbar) Push(contextID uint, text string) uint {
	msg := C.CString(text)
	defer C.free(unsafe.Pointer(msg))
	id := C.gtk_statusbar_push(v.native(), C.guint(contextID), (*C.gchar)(msg))
	return uint(id)
}

// Pop() is a wrapper around gtk_statusbar_pop().
func (v *Statusbar) Pop(contextID uint) {
	id := C.guint(contextID)
	C.gtk_statusbar_pop(v.native(), id)
}
// GetMessageArea() is a wrapper around gtk_statusbar_get_message_area().
func (v *Statusbar) GetMessageArea() (*Box, error) {
	c := C.gtk_statusbar_get_message_area(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	// NOTE(review): unlike the constructors, no extra ref or finalizer is
	// registered on the returned box — confirm whether callers must keep
	// the statusbar alive while using the message area.
	return &Box{Container{Widget{glib.InitiallyUnowned{obj}}}}, nil
}
/*
 * GtkSwitch
 */
// Switch is a representation of GTK's GtkSwitch.
type Switch struct {
	Widget
}
// native returns a pointer to the underlying GtkSwitch.
// Returns nil if the wrapper or its GObject is nil.
func (v *Switch) native() *C.GtkSwitch {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkSwitch(p)
}
// marshalSwitch converts a raw GValue pointer into a *Switch; used by
// the glib marshalling machinery.
func marshalSwitch(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapSwitch(obj), nil
}
// wrapSwitch builds the Go wrapper around an existing glib.Object.
func wrapSwitch(obj *glib.Object) *Switch {
	return &Switch{Widget{glib.InitiallyUnowned{obj}}}
}
// SwitchNew is a wrapper around gtk_switch_new().
func SwitchNew() (*Switch, error) {
	c := C.gtk_switch_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapSwitch(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}
// GetActive is a wrapper around gtk_switch_get_active().
func (v *Switch) GetActive() bool {
	return gobool(C.gtk_switch_get_active(v.native()))
}

// SetActive is a wrapper around gtk_switch_set_active().
func (v *Switch) SetActive(isActive bool) {
	active := gbool(isActive)
	C.gtk_switch_set_active(v.native(), active)
}
/*
 * GtkTextView
 */
// TextView is a representation of GTK's GtkTextView
type TextView struct {
	Container
}
// native returns a pointer to the underlying GtkTextView.
// Returns nil if the wrapper or its GObject is nil.
func (v *TextView) native() *C.GtkTextView {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTextView(p)
}
// marshalTextView converts a raw GValue pointer into a *TextView; used
// by the glib marshalling machinery.
func marshalTextView(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTextView(obj), nil
}
// wrapTextView builds the Go wrapper, embedding Container and Widget.
func wrapTextView(obj *glib.Object) *TextView {
	return &TextView{Container{Widget{glib.InitiallyUnowned{obj}}}}
}
// TextViewNew is a wrapper around gtk_text_view_new().
func TextViewNew() (*TextView, error) {
	c := C.gtk_text_view_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTextView(obj)
	// Sink the floating reference; drop it when the Go wrapper is collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}
// TextViewNewWithBuffer is a wrapper around gtk_text_view_new_with_buffer().
func TextViewNewWithBuffer(buf *TextBuffer) (*TextView, error) {
	cbuf := buf.native()
	c := C.gtk_text_view_new_with_buffer(cbuf)
	// Guard against a nil return before wrapping, matching every other
	// constructor in this file; previously a nil pointer would have been
	// passed straight into glib.ToGObject.
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTextView(obj)
	// Sink the widget's floating reference; release it when the Go
	// wrapper is garbage-collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}
// GetBuffer is a wrapper around gtk_text_view_get_buffer().
func (v *TextView) GetBuffer() (*TextBuffer, error) {
	c := C.gtk_text_view_get_buffer(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTextBuffer(obj)
	// RefSink adds our own reference here (a text buffer is a plain
	// GObject, not a floating widget ref); the finalizer releases it
	// when the Go wrapper is garbage-collected.
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}
// SetBuffer is a wrapper around gtk_text_view_set_buffer().
func (v *TextView) SetBuffer(buffer *TextBuffer) {
	buf := buffer.native()
	C.gtk_text_view_set_buffer(v.native(), buf)
}
// SetEditable is a wrapper around gtk_text_view_set_editable().
func (v *TextView) SetEditable(editable bool) {
	flag := gbool(editable)
	C.gtk_text_view_set_editable(v.native(), flag)
}

// GetEditable is a wrapper around gtk_text_view_get_editable().
func (v *TextView) GetEditable() bool {
	return gobool(C.gtk_text_view_get_editable(v.native()))
}

// SetWrapMode is a wrapper around gtk_text_view_set_wrap_mode().
func (v *TextView) SetWrapMode(wrapMode WrapMode) {
	mode := C.GtkWrapMode(wrapMode)
	C.gtk_text_view_set_wrap_mode(v.native(), mode)
}

// GetWrapMode is a wrapper around gtk_text_view_get_wrap_mode().
func (v *TextView) GetWrapMode() WrapMode {
	mode := C.gtk_text_view_get_wrap_mode(v.native())
	return WrapMode(mode)
}
// SetCursorVisible is a wrapper around gtk_text_view_set_cursor_visible().
func (v *TextView) SetCursorVisible(visible bool) {
	flag := gbool(visible)
	C.gtk_text_view_set_cursor_visible(v.native(), flag)
}

// GetCursorVisible is a wrapper around gtk_text_view_get_cursor_visible().
func (v *TextView) GetCursorVisible() bool {
	return gobool(C.gtk_text_view_get_cursor_visible(v.native()))
}

// SetOverwrite is a wrapper around gtk_text_view_set_overwrite().
func (v *TextView) SetOverwrite(overwrite bool) {
	flag := gbool(overwrite)
	C.gtk_text_view_set_overwrite(v.native(), flag)
}

// GetOverwrite is a wrapper around gtk_text_view_get_overwrite().
func (v *TextView) GetOverwrite() bool {
	return gobool(C.gtk_text_view_get_overwrite(v.native()))
}
// SetJustification is a wrapper around gtk_text_view_set_justification().
func (v *TextView) SetJustification(justify Justification) {
	j := C.GtkJustification(justify)
	C.gtk_text_view_set_justification(v.native(), j)
}

// GetJustification is a wrapper around gtk_text_view_get_justification().
func (v *TextView) GetJustification() Justification {
	return Justification(C.gtk_text_view_get_justification(v.native()))
}

// SetAcceptsTab is a wrapper around gtk_text_view_set_accepts_tab().
func (v *TextView) SetAcceptsTab(acceptsTab bool) {
	flag := gbool(acceptsTab)
	C.gtk_text_view_set_accepts_tab(v.native(), flag)
}

// GetAcceptsTab is a wrapper around gtk_text_view_get_accepts_tab().
func (v *TextView) GetAcceptsTab() bool {
	return gobool(C.gtk_text_view_get_accepts_tab(v.native()))
}
// SetPixelsAboveLines is a wrapper around gtk_text_view_set_pixels_above_lines().
func (v *TextView) SetPixelsAboveLines(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_above_lines(v.native(), n)
}

// GetPixelsAboveLines is a wrapper around gtk_text_view_get_pixels_above_lines().
func (v *TextView) GetPixelsAboveLines() int {
	return int(C.gtk_text_view_get_pixels_above_lines(v.native()))
}

// SetPixelsBelowLines is a wrapper around gtk_text_view_set_pixels_below_lines().
func (v *TextView) SetPixelsBelowLines(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_below_lines(v.native(), n)
}

// GetPixelsBelowLines is a wrapper around gtk_text_view_get_pixels_below_lines().
func (v *TextView) GetPixelsBelowLines() int {
	return int(C.gtk_text_view_get_pixels_below_lines(v.native()))
}

// SetPixelsInsideWrap is a wrapper around gtk_text_view_set_pixels_inside_wrap().
func (v *TextView) SetPixelsInsideWrap(px int) {
	n := C.gint(px)
	C.gtk_text_view_set_pixels_inside_wrap(v.native(), n)
}

// GetPixelsInsideWrap is a wrapper around gtk_text_view_get_pixels_inside_wrap().
func (v *TextView) GetPixelsInsideWrap() int {
	return int(C.gtk_text_view_get_pixels_inside_wrap(v.native()))
}
// SetLeftMargin is a wrapper around gtk_text_view_set_left_margin().
func (v *TextView) SetLeftMargin(margin int) {
	m := C.gint(margin)
	C.gtk_text_view_set_left_margin(v.native(), m)
}

// GetLeftMargin is a wrapper around gtk_text_view_get_left_margin().
func (v *TextView) GetLeftMargin() int {
	return int(C.gtk_text_view_get_left_margin(v.native()))
}

// SetRightMargin is a wrapper around gtk_text_view_set_right_margin().
func (v *TextView) SetRightMargin(margin int) {
	m := C.gint(margin)
	C.gtk_text_view_set_right_margin(v.native(), m)
}

// GetRightMargin is a wrapper around gtk_text_view_get_right_margin().
func (v *TextView) GetRightMargin() int {
	return int(C.gtk_text_view_get_right_margin(v.native()))
}

// SetIndent is a wrapper around gtk_text_view_set_indent().
func (v *TextView) SetIndent(indent int) {
	n := C.gint(indent)
	C.gtk_text_view_set_indent(v.native(), n)
}

// GetIndent is a wrapper around gtk_text_view_get_indent().
func (v *TextView) GetIndent() int {
	return int(C.gtk_text_view_get_indent(v.native()))
}
// SetInputHints is a wrapper around gtk_text_view_set_input_hints().
func (v *TextView) SetInputHints(hints InputHints) {
	h := C.GtkInputHints(hints)
	C.gtk_text_view_set_input_hints(v.native(), h)
}

// GetInputHints is a wrapper around gtk_text_view_get_input_hints().
func (v *TextView) GetInputHints() InputHints {
	return InputHints(C.gtk_text_view_get_input_hints(v.native()))
}

// SetInputPurpose is a wrapper around gtk_text_view_set_input_purpose().
func (v *TextView) SetInputPurpose(purpose InputPurpose) {
	p := C.GtkInputPurpose(purpose)
	C.gtk_text_view_set_input_purpose(v.native(), p)
}

// GetInputPurpose is a wrapper around gtk_text_view_get_input_purpose().
func (v *TextView) GetInputPurpose() InputPurpose {
	return InputPurpose(C.gtk_text_view_get_input_purpose(v.native()))
}
/*
 * GtkTextTagTable
 */

// TextTagTable is a representation of GTK's GtkTextTagTable.
type TextTagTable struct {
	*glib.Object
}

// native returns a pointer to the underlying GObject as a GtkTextTagTable.
func (v *TextTagTable) native() *C.GtkTextTagTable {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTextTagTable(p)
}

// marshalTextTagTable converts a GValue holding a GtkTextTagTable into a
// *TextTagTable.
func marshalTextTagTable(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTextTagTable(obj), nil
}

// wrapTextTagTable wraps a glib.Object as a TextTagTable.
func wrapTextTagTable(obj *glib.Object) *TextTagTable {
	return &TextTagTable{obj}
}

// TextTagTableNew is a wrapper around gtk_text_tag_table_new().
func TextTagTableNew() (*TextTagTable, error) {
	c := C.gtk_text_tag_table_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTextTagTable(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}
/*
 * GtkTextBuffer
 */

// TextBuffer is a representation of GTK's GtkTextBuffer.
type TextBuffer struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkTextBuffer.
func (v *TextBuffer) native() *C.GtkTextBuffer {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTextBuffer(p)
}

// marshalTextBuffer converts a GValue holding a GtkTextBuffer into a
// *TextBuffer.
func marshalTextBuffer(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTextBuffer(obj), nil
}

// wrapTextBuffer wraps a glib.Object as a TextBuffer.
func wrapTextBuffer(obj *glib.Object) *TextBuffer {
	return &TextBuffer{obj}
}

// TextBufferNew() is a wrapper around gtk_text_buffer_new().
// NOTE(review): gtk_text_buffer_new returns an owned reference; taking an
// additional Ref here and releasing only one in the finalizer looks like it
// leaves the constructor's original reference unreleased — confirm against
// this binding's ownership conventions.
func TextBufferNew(table *TextTagTable) (*TextBuffer, error) {
	c := C.gtk_text_buffer_new(table.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	e := wrapTextBuffer(obj)
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return e, nil
}

// GetBounds is a wrapper around gtk_text_buffer_get_bounds(); it returns
// newly allocated iterators set to the first and last positions in the
// buffer.
func (v *TextBuffer) GetBounds() (start, end *TextIter) {
	start, end = new(TextIter), new(TextIter)
	C.gtk_text_buffer_get_bounds(v.native(), (*C.GtkTextIter)(start), (*C.GtkTextIter)(end))
	return
}
// GetText is a wrapper around gtk_text_buffer_get_text(). The string
// returned by GTK is owned by the caller; it is copied into a Go string
// and then freed, which the original code did not do (memory leak).
func (v *TextBuffer) GetText(start, end *TextIter, includeHiddenChars bool) (string, error) {
	c := C.gtk_text_buffer_get_text(
		v.native(), (*C.GtkTextIter)(start), (*C.GtkTextIter)(end), gbool(includeHiddenChars),
	)
	if c == nil {
		return "", nilPtrErr
	}
	// Copy into Go memory before releasing the C allocation.
	gostr := C.GoString((*C.char)(c))
	C.free(unsafe.Pointer(c))
	return gostr, nil
}
// SetText is a wrapper around gtk_text_buffer_set_text(); it replaces the
// entire buffer contents with text. The byte length is passed explicitly,
// so text may contain embedded NUL-free UTF-8 of any length.
func (v *TextBuffer) SetText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_text_buffer_set_text(v.native(), (*C.gchar)(cstr),
		C.gint(len(text)))
}
/*
 * GtkTextIter
 */

// TextIter is a representation of GTK's GtkTextIter. It is a plain value
// type (no GObject wrapper), matching the C struct layout.
type TextIter C.GtkTextIter

// marshalTextIter converts a boxed GtkTextIter GValue into a *TextIter.
func marshalTextIter(p uintptr) (interface{}, error) {
	c := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return (*TextIter)(unsafe.Pointer(c)), nil
}
/*
 * GtkToggleButton
 */

// ToggleButton is a representation of GTK's GtkToggleButton.
type ToggleButton struct {
	Button
}

// native returns a pointer to the underlying GtkToggleButton.
func (v *ToggleButton) native() *C.GtkToggleButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkToggleButton(p)
}

// marshalToggleButton converts a GValue holding a GtkToggleButton into a
// *ToggleButton.
func marshalToggleButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapToggleButton(obj), nil
}

// wrapToggleButton builds the Go wrapper hierarchy
// (Button > Bin > Container > Widget) around obj.
func wrapToggleButton(obj *glib.Object) *ToggleButton {
	return &ToggleButton{Button{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}

// ToggleButtonNew is a wrapper around gtk_toggle_button_new().
func ToggleButtonNew() (*ToggleButton, error) {
	c := C.gtk_toggle_button_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToggleButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}

// ToggleButtonNewWithLabel is a wrapper around
// gtk_toggle_button_new_with_label().
func ToggleButtonNewWithLabel(label string) (*ToggleButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_toggle_button_new_with_label((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToggleButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}

// ToggleButtonNewWithMnemonic is a wrapper around
// gtk_toggle_button_new_with_mnemonic(). Underscores in label indicate the
// mnemonic character.
func ToggleButtonNewWithMnemonic(label string) (*ToggleButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_toggle_button_new_with_mnemonic((*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToggleButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}

// GetActive is a wrapper around gtk_toggle_button_get_active().
func (v *ToggleButton) GetActive() bool {
	c := C.gtk_toggle_button_get_active(v.native())
	return gobool(c)
}

// SetActive is a wrapper around gtk_toggle_button_set_active().
func (v *ToggleButton) SetActive(isActive bool) {
	C.gtk_toggle_button_set_active(v.native(), gbool(isActive))
}
/*
 * GtkToolbar
 */

// Toolbar is a representation of GTK's GtkToolbar.
type Toolbar struct {
	Container
}

// native returns a pointer to the underlying GtkToolbar.
func (v *Toolbar) native() *C.GtkToolbar {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkToolbar(p)
}

// marshalToolbar converts a GValue holding a GtkToolbar into a *Toolbar.
func marshalToolbar(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapToolbar(obj), nil
}

// wrapToolbar builds the Go wrapper hierarchy (Container > Widget) around obj.
func wrapToolbar(obj *glib.Object) *Toolbar {
	return &Toolbar{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// ToolbarNew is a wrapper around gtk_toolbar_new().
func ToolbarNew() (*Toolbar, error) {
	c := C.gtk_toolbar_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToolbar(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}

// Insert is a wrapper around gtk_toolbar_insert(). A negative pos appends
// the item at the end.
func (v *Toolbar) Insert(item IToolItem, pos int) {
	C.gtk_toolbar_insert(v.native(), item.toToolItem(), C.gint(pos))
}

// GetItemIndex is a wrapper around gtk_toolbar_get_item_index().
func (v *Toolbar) GetItemIndex(item IToolItem) int {
	c := C.gtk_toolbar_get_item_index(v.native(), item.toToolItem())
	return int(c)
}

// GetNItems is a wrapper around gtk_toolbar_get_n_items().
func (v *Toolbar) GetNItems() int {
	c := C.gtk_toolbar_get_n_items(v.native())
	return int(c)
}

// GetNthItem is a wrapper around gtk_toolbar_get_nth_item(). It returns
// nil when n is out of range.
func (v *Toolbar) GetNthItem(n int) *ToolItem {
	c := C.gtk_toolbar_get_nth_item(v.native(), C.gint(n))
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	ti := wrapToolItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return ti
}

// GetDropIndex is a wrapper around gtk_toolbar_get_drop_index(). x and y
// are toolbar-relative coordinates.
func (v *Toolbar) GetDropIndex(x, y int) int {
	c := C.gtk_toolbar_get_drop_index(v.native(), C.gint(x), C.gint(y))
	return int(c)
}

// SetDropHighlightItem is a wrapper around
// gtk_toolbar_set_drop_highlight_item().
func (v *Toolbar) SetDropHighlightItem(toolItem IToolItem, index int) {
	C.gtk_toolbar_set_drop_highlight_item(v.native(),
		toolItem.toToolItem(), C.gint(index))
}

// SetShowArrow is a wrapper around gtk_toolbar_set_show_arrow().
func (v *Toolbar) SetShowArrow(showArrow bool) {
	C.gtk_toolbar_set_show_arrow(v.native(), gbool(showArrow))
}

// UnsetIconSize is a wrapper around gtk_toolbar_unset_icon_size().
func (v *Toolbar) UnsetIconSize() {
	C.gtk_toolbar_unset_icon_size(v.native())
}

// GetShowArrow is a wrapper around gtk_toolbar_get_show_arrow().
func (v *Toolbar) GetShowArrow() bool {
	c := C.gtk_toolbar_get_show_arrow(v.native())
	return gobool(c)
}

// GetStyle is a wrapper around gtk_toolbar_get_style().
func (v *Toolbar) GetStyle() ToolbarStyle {
	c := C.gtk_toolbar_get_style(v.native())
	return ToolbarStyle(c)
}

// GetIconSize is a wrapper around gtk_toolbar_get_icon_size().
func (v *Toolbar) GetIconSize() IconSize {
	c := C.gtk_toolbar_get_icon_size(v.native())
	return IconSize(c)
}

// GetReliefStyle is a wrapper around gtk_toolbar_get_relief_style().
func (v *Toolbar) GetReliefStyle() ReliefStyle {
	c := C.gtk_toolbar_get_relief_style(v.native())
	return ReliefStyle(c)
}

// SetStyle is a wrapper around gtk_toolbar_set_style().
func (v *Toolbar) SetStyle(style ToolbarStyle) {
	C.gtk_toolbar_set_style(v.native(), C.GtkToolbarStyle(style))
}

// SetIconSize is a wrapper around gtk_toolbar_set_icon_size().
func (v *Toolbar) SetIconSize(iconSize IconSize) {
	C.gtk_toolbar_set_icon_size(v.native(), C.GtkIconSize(iconSize))
}

// UnsetStyle is a wrapper around gtk_toolbar_unset_style().
func (v *Toolbar) UnsetStyle() {
	C.gtk_toolbar_unset_style(v.native())
}
/*
 * GtkToolButton
 */

// ToolButton is a representation of GTK's GtkToolButton.
type ToolButton struct {
	ToolItem
}

// native returns a pointer to the underlying GtkToolButton.
func (v *ToolButton) native() *C.GtkToolButton {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkToolButton(p)
}

// marshalToolButton converts a GValue holding a GtkToolButton into a
// *ToolButton.
func marshalToolButton(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapToolButton(obj), nil
}

// wrapToolButton builds the Go wrapper hierarchy
// (ToolItem > Bin > Container > Widget) around obj.
func wrapToolButton(obj *glib.Object) *ToolButton {
	return &ToolButton{ToolItem{Bin{Container{Widget{
		glib.InitiallyUnowned{obj}}}}}}
}

// ToolButtonNew is a wrapper around gtk_tool_button_new().
func ToolButtonNew(iconWidget IWidget, label string) (*ToolButton, error) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_tool_button_new(iconWidget.toWidget(), (*C.gchar)(cstr))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	tb := wrapToolButton(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return tb, nil
}

// SetLabel is a wrapper around gtk_tool_button_set_label().
func (v *ToolButton) SetLabel(label string) {
	cstr := C.CString(label)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tool_button_set_label(v.native(), (*C.gchar)(cstr))
}

// GetLabel is a wrapper around gtk_tool_button_get_label().
func (v *ToolButton) GetLabel() string {
	c := C.gtk_tool_button_get_label(v.native())
	return C.GoString((*C.char)(c))
}
// SetUseUnderline is a wrapper around gtk_tool_button_set_use_underline().
func (v *ToolButton) SetUseUnderline(useUnderline bool) {
	C.gtk_tool_button_set_use_underline(v.native(), gbool(useUnderline))
}

// SetGetUnderline is a misnamed variant of SetUseUnderline, kept so
// existing callers keep compiling.
//
// Deprecated: use SetUseUnderline instead.
func (v *ToolButton) SetGetUnderline(useUnderline bool) {
	v.SetUseUnderline(useUnderline)
}
// GetUseUnderline is a wrapper around gtk_tool_button_get_use_underline().
func (v *ToolButton) GetUseUnderline() bool {
	c := C.gtk_tool_button_get_use_underline(v.native())
	return gobool(c)
}

// GetuseUnderline is a mis-cased variant of GetUseUnderline, kept so
// existing callers keep compiling.
//
// Deprecated: use GetUseUnderline instead.
func (v *ToolButton) GetuseUnderline() bool {
	return v.GetUseUnderline()
}
// SetIconName is a wrapper around gtk_tool_button_set_icon_name().
// iconName is a themed icon name.
func (v *ToolButton) SetIconName(iconName string) {
	cstr := C.CString(iconName)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tool_button_set_icon_name(v.native(), (*C.gchar)(cstr))
}

// GetIconName is a wrapper around gtk_tool_button_get_icon_name().
func (v *ToolButton) GetIconName() string {
	c := C.gtk_tool_button_get_icon_name(v.native())
	return C.GoString((*C.char)(c))
}

// SetIconWidget is a wrapper around gtk_tool_button_set_icon_widget().
func (v *ToolButton) SetIconWidget(iconWidget IWidget) {
	C.gtk_tool_button_set_icon_widget(v.native(), iconWidget.toWidget())
}

// GetIconWidget is a wrapper around gtk_tool_button_get_icon_widget().
// It returns nil when no icon widget is set.
func (v *ToolButton) GetIconWidget() *Widget {
	c := C.gtk_tool_button_get_icon_widget(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}

// SetLabelWidget is a wrapper around gtk_tool_button_set_label_widget().
func (v *ToolButton) SetLabelWidget(labelWidget IWidget) {
	C.gtk_tool_button_set_label_widget(v.native(), labelWidget.toWidget())
}

// GetLabelWidget is a wrapper around gtk_tool_button_get_label_widget().
// It returns nil when no label widget is set.
func (v *ToolButton) GetLabelWidget() *Widget {
	c := C.gtk_tool_button_get_label_widget(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := wrapWidget(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w
}
/*
 * GtkToolItem
 */

// ToolItem is a representation of GTK's GtkToolItem.
type ToolItem struct {
	Bin
}

// IToolItem is an interface type implemented by all structs embedding
// a ToolItem. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a GtkToolItem.
type IToolItem interface {
	toToolItem() *C.GtkToolItem
}

// native returns a pointer to the underlying GtkToolItem.
func (v *ToolItem) native() *C.GtkToolItem {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkToolItem(p)
}

// toToolItem satisfies IToolItem.
func (v *ToolItem) toToolItem() *C.GtkToolItem {
	return v.native()
}

// marshalToolItem converts a GValue holding a GtkToolItem into a *ToolItem.
func marshalToolItem(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapToolItem(obj), nil
}

// wrapToolItem builds the Go wrapper hierarchy
// (Bin > Container > Widget) around obj.
func wrapToolItem(obj *glib.Object) *ToolItem {
	return &ToolItem{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}

// ToolItemNew is a wrapper around gtk_tool_item_new().
func ToolItemNew() (*ToolItem, error) {
	c := C.gtk_tool_item_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	ti := wrapToolItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return ti, nil
}

// SetHomogeneous is a wrapper around gtk_tool_item_set_homogeneous().
func (v *ToolItem) SetHomogeneous(homogeneous bool) {
	C.gtk_tool_item_set_homogeneous(v.native(), gbool(homogeneous))
}

// GetHomogeneous is a wrapper around gtk_tool_item_get_homogeneous().
func (v *ToolItem) GetHomogeneous() bool {
	c := C.gtk_tool_item_get_homogeneous(v.native())
	return gobool(c)
}

// SetExpand is a wrapper around gtk_tool_item_set_expand().
func (v *ToolItem) SetExpand(expand bool) {
	C.gtk_tool_item_set_expand(v.native(), gbool(expand))
}

// GetExpand is a wrapper around gtk_tool_item_get_expand().
func (v *ToolItem) GetExpand() bool {
	c := C.gtk_tool_item_get_expand(v.native())
	return gobool(c)
}

// SetTooltipText is a wrapper around gtk_tool_item_set_tooltip_text().
func (v *ToolItem) SetTooltipText(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tool_item_set_tooltip_text(v.native(), (*C.gchar)(cstr))
}

// SetTooltipMarkup is a wrapper around gtk_tool_item_set_tooltip_markup().
// text may contain Pango markup.
func (v *ToolItem) SetTooltipMarkup(text string) {
	cstr := C.CString(text)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tool_item_set_tooltip_markup(v.native(), (*C.gchar)(cstr))
}

// SetUseDragWindow is a wrapper around gtk_tool_item_set_use_drag_window().
func (v *ToolItem) SetUseDragWindow(useDragWindow bool) {
	C.gtk_tool_item_set_use_drag_window(v.native(), gbool(useDragWindow))
}

// GetUseDragWindow is a wrapper around gtk_tool_item_get_use_drag_window().
func (v *ToolItem) GetUseDragWindow() bool {
	c := C.gtk_tool_item_get_use_drag_window(v.native())
	return gobool(c)
}

// SetVisibleHorizontal is a wrapper around
// gtk_tool_item_set_visible_horizontal().
func (v *ToolItem) SetVisibleHorizontal(visibleHorizontal bool) {
	C.gtk_tool_item_set_visible_horizontal(v.native(),
		gbool(visibleHorizontal))
}

// GetVisibleHorizontal is a wrapper around
// gtk_tool_item_get_visible_horizontal().
func (v *ToolItem) GetVisibleHorizontal() bool {
	c := C.gtk_tool_item_get_visible_horizontal(v.native())
	return gobool(c)
}

// SetVisibleVertical is a wrapper around gtk_tool_item_set_visible_vertical().
func (v *ToolItem) SetVisibleVertical(visibleVertical bool) {
	C.gtk_tool_item_set_visible_vertical(v.native(), gbool(visibleVertical))
}

// GetVisibleVertical is a wrapper around gtk_tool_item_get_visible_vertical().
func (v *ToolItem) GetVisibleVertical() bool {
	c := C.gtk_tool_item_get_visible_vertical(v.native())
	return gobool(c)
}

// SetIsImportant is a wrapper around gtk_tool_item_set_is_important().
func (v *ToolItem) SetIsImportant(isImportant bool) {
	C.gtk_tool_item_set_is_important(v.native(), gbool(isImportant))
}

// GetIsImportant is a wrapper around gtk_tool_item_get_is_important().
func (v *ToolItem) GetIsImportant() bool {
	c := C.gtk_tool_item_get_is_important(v.native())
	return gobool(c)
}

// TODO: gtk_tool_item_get_ellipsize_mode

// GetIconSize is a wrapper around gtk_tool_item_get_icon_size().
func (v *ToolItem) GetIconSize() IconSize {
	c := C.gtk_tool_item_get_icon_size(v.native())
	return IconSize(c)
}

// GetOrientation is a wrapper around gtk_tool_item_get_orientation().
func (v *ToolItem) GetOrientation() Orientation {
	c := C.gtk_tool_item_get_orientation(v.native())
	return Orientation(c)
}
// GetToolbarStyle is a wrapper around gtk_tool_item_get_toolbar_style().
// (The method previously carried the raw C symbol name, which made it
// unexported and contradicted its own documentation.)
func (v *ToolItem) GetToolbarStyle() ToolbarStyle {
	c := C.gtk_tool_item_get_toolbar_style(v.native())
	return ToolbarStyle(c)
}

// gtk_tool_item_get_toolbar_style is the previous, misnamed form, kept so
// any in-package callers keep compiling.
//
// Deprecated: use GetToolbarStyle instead.
func (v *ToolItem) gtk_tool_item_get_toolbar_style() ToolbarStyle {
	return v.GetToolbarStyle()
}
// GetReliefStyle is a wrapper around gtk_tool_item_get_relief_style().
func (v *ToolItem) GetReliefStyle() ReliefStyle {
	c := C.gtk_tool_item_get_relief_style(v.native())
	return ReliefStyle(c)
}

// GetTextAlignment is a wrapper around gtk_tool_item_get_text_alignment().
func (v *ToolItem) GetTextAlignment() float32 {
	c := C.gtk_tool_item_get_text_alignment(v.native())
	return float32(c)
}

// GetTextOrientation is a wrapper around gtk_tool_item_get_text_orientation().
func (v *ToolItem) GetTextOrientation() Orientation {
	c := C.gtk_tool_item_get_text_orientation(v.native())
	return Orientation(c)
}

// RetrieveProxyMenuItem is a wrapper around
// gtk_tool_item_retrieve_proxy_menu_item(). It returns nil when the tool
// item has no proxy menu item.
func (v *ToolItem) RetrieveProxyMenuItem() *MenuItem {
	c := C.gtk_tool_item_retrieve_proxy_menu_item(v.native())
	if c == nil {
		return nil
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	m := wrapMenuItem(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return m
}

// SetProxyMenuItem is a wrapper around gtk_tool_item_set_proxy_menu_item().
// menuItemId identifies the menu item so later calls can recognize it.
func (v *ToolItem) SetProxyMenuItem(menuItemId string, menuItem IMenuItem) {
	cstr := C.CString(menuItemId)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tool_item_set_proxy_menu_item(v.native(), (*C.gchar)(cstr),
		C.toGtkWidget(unsafe.Pointer(menuItem.toMenuItem())))
}

// RebuildMenu is a wrapper around gtk_tool_item_rebuild_menu().
func (v *ToolItem) RebuildMenu() {
	C.gtk_tool_item_rebuild_menu(v.native())
}

// ToolbarReconfigured is a wrapper around gtk_tool_item_toolbar_reconfigured().
func (v *ToolItem) ToolbarReconfigured() {
	C.gtk_tool_item_toolbar_reconfigured(v.native())
}

// TODO: gtk_tool_item_get_text_size_group
/*
 * GtkTreeIter
 */

// TreeIter is a representation of GTK's GtkTreeIter.
type TreeIter struct {
	// GtkTreeIter is the embedded C iterator value.
	GtkTreeIter C.GtkTreeIter
}

// native returns a pointer to the underlying GtkTreeIter.
func (v *TreeIter) native() *C.GtkTreeIter {
	if v == nil {
		return nil
	}
	return &v.GtkTreeIter
}

// marshalTreeIter converts a boxed GtkTreeIter GValue into a *TreeIter.
func marshalTreeIter(p uintptr) (interface{}, error) {
	c := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return (*TreeIter)(unsafe.Pointer(c)), nil
}

// free releases the iterator with gtk_tree_iter_free().
// NOTE(review): native() points at the Go-embedded GtkTreeIter value, not
// at a GTK-allocated iterator; passing it to gtk_tree_iter_free looks
// unsafe for Go-allocated TreeIters — confirm intended ownership.
func (v *TreeIter) free() {
	C.gtk_tree_iter_free(v.native())
}

// Copy() is a wrapper around gtk_tree_iter_copy().
// NOTE(review): the C copy is duplicated into Go memory and the pointer c
// is never freed — presumably leaks one GtkTreeIter per call; verify.
func (v *TreeIter) Copy() (*TreeIter, error) {
	c := C.gtk_tree_iter_copy(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	t := &TreeIter{*c}
	runtime.SetFinalizer(t, (*TreeIter).free)
	return t, nil
}
/*
 * GtkTreeModel
 */

// TreeModel is a representation of GTK's GtkTreeModel GInterface.
type TreeModel struct {
	*glib.Object
}

// ITreeModel is an interface type implemented by all structs
// embedding a TreeModel. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkTreeModel.
type ITreeModel interface {
	toTreeModel() *C.GtkTreeModel
}

// native returns a pointer to the underlying GObject as a GtkTreeModel.
func (v *TreeModel) native() *C.GtkTreeModel {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTreeModel(p)
}

// toTreeModel satisfies ITreeModel; it tolerates a nil receiver.
func (v *TreeModel) toTreeModel() *C.GtkTreeModel {
	if v == nil {
		return nil
	}
	return v.native()
}

// marshalTreeModel converts a GValue holding a GtkTreeModel into a
// *TreeModel.
func marshalTreeModel(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTreeModel(obj), nil
}

// wrapTreeModel wraps a glib.Object as a TreeModel.
func wrapTreeModel(obj *glib.Object) *TreeModel {
	return &TreeModel{obj}
}

// GetFlags() is a wrapper around gtk_tree_model_get_flags().
func (v *TreeModel) GetFlags() TreeModelFlags {
	c := C.gtk_tree_model_get_flags(v.native())
	return TreeModelFlags(c)
}

// GetNColumns() is a wrapper around gtk_tree_model_get_n_columns().
func (v *TreeModel) GetNColumns() int {
	c := C.gtk_tree_model_get_n_columns(v.native())
	return int(c)
}

// GetColumnType() is a wrapper around gtk_tree_model_get_column_type().
func (v *TreeModel) GetColumnType(index int) glib.Type {
	c := C.gtk_tree_model_get_column_type(v.native(), C.gint(index))
	return glib.Type(c)
}

// GetIter() is a wrapper around gtk_tree_model_get_iter(). It returns an
// error when path does not identify a valid row.
func (v *TreeModel) GetIter(path *TreePath) (*TreeIter, error) {
	var iter C.GtkTreeIter
	c := C.gtk_tree_model_get_iter(v.native(), &iter, path.native())
	if !gobool(c) {
		return nil, errors.New("Unable to set iterator")
	}
	t := &TreeIter{iter}
	return t, nil
}

// GetIterFromString() is a wrapper around
// gtk_tree_model_get_iter_from_string(). path is a string path such as
// "3:2:5".
func (v *TreeModel) GetIterFromString(path string) (*TreeIter, error) {
	var iter C.GtkTreeIter
	cstr := C.CString(path)
	defer C.free(unsafe.Pointer(cstr))
	c := C.gtk_tree_model_get_iter_from_string(v.native(), &iter,
		(*C.gchar)(cstr))
	if !gobool(c) {
		return nil, errors.New("Unable to set iterator")
	}
	t := &TreeIter{iter}
	return t, nil
}

// GetIterFirst() is a wrapper around gtk_tree_model_get_iter_first(). The
// boolean result is false when the model is empty.
func (v *TreeModel) GetIterFirst() (*TreeIter, bool) {
	var iter C.GtkTreeIter
	c := C.gtk_tree_model_get_iter_first(v.native(), &iter)
	if !gobool(c) {
		return nil, false
	}
	t := &TreeIter{iter}
	return t, true
}

// GetPath() is a wrapper around gtk_tree_model_get_path(). The returned
// TreePath is freed via finalizer.
func (v *TreeModel) GetPath(iter *TreeIter) (*TreePath, error) {
	c := C.gtk_tree_model_get_path(v.native(), iter.native())
	if c == nil {
		return nil, nilPtrErr
	}
	p := &TreePath{c}
	runtime.SetFinalizer(p, (*TreePath).free)
	return p, nil
}

// GetValue() is a wrapper around gtk_tree_model_get_value().
func (v *TreeModel) GetValue(iter *TreeIter, column int) (*glib.Value, error) {
	val, err := glib.ValueAlloc()
	if err != nil {
		return nil, err
	}
	C.gtk_tree_model_get_value(
		(*C.GtkTreeModel)(unsafe.Pointer(v.native())),
		iter.native(),
		C.gint(column),
		(*C.GValue)(unsafe.Pointer(val.Native())))
	return val, nil
}

// IterNext() is a wrapper around gtk_tree_model_iter_next(). It advances
// iter in place and returns false when there is no next row.
func (v *TreeModel) IterNext(iter *TreeIter) bool {
	c := C.gtk_tree_model_iter_next(v.native(), iter.native())
	return gobool(c)
}
/*
 * GtkTreePath
 */

// TreePath is a representation of GTK's GtkTreePath.
type TreePath struct {
	// GtkTreePath is the owned C pointer; freed by free().
	GtkTreePath *C.GtkTreePath
}

// native returns a pointer to the underlying GtkTreePath.
func (v *TreePath) native() *C.GtkTreePath {
	if v == nil {
		return nil
	}
	return v.GtkTreePath
}

// marshalTreePath converts a boxed GtkTreePath GValue into a *TreePath.
func marshalTreePath(p uintptr) (interface{}, error) {
	c := C.g_value_get_boxed((*C.GValue)(unsafe.Pointer(p)))
	return &TreePath{(*C.GtkTreePath)(unsafe.Pointer(c))}, nil
}

// free releases the path with gtk_tree_path_free().
func (v *TreePath) free() {
	C.gtk_tree_path_free(v.native())
}

// String is a wrapper around gtk_tree_path_to_string(); it also satisfies
// fmt.Stringer.
func (v *TreePath) String() string {
	c := C.gtk_tree_path_to_string(v.native())
	return C.GoString((*C.char)(c))
}
/*
 * GtkTreeSelection
 */

// TreeSelection is a representation of GTK's GtkTreeSelection.
type TreeSelection struct {
	*glib.Object
}

// native returns a pointer to the underlying GtkTreeSelection.
func (v *TreeSelection) native() *C.GtkTreeSelection {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTreeSelection(p)
}

// marshalTreeSelection converts a GValue holding a GtkTreeSelection into a
// *TreeSelection.
func marshalTreeSelection(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTreeSelection(obj), nil
}

// wrapTreeSelection wraps a glib.Object as a TreeSelection.
func wrapTreeSelection(obj *glib.Object) *TreeSelection {
	return &TreeSelection{obj}
}
// GetSelected() is a wrapper around gtk_tree_selection_get_selected().
// It returns true when a row is selected, in which case iter is filled in
// with that row. When model is non-nil, the selection's tree model pointer
// is requested as well. (The original code tested the freshly declared
// pcmodel — always nil — instead of model, so the model branch was dead.)
func (v *TreeSelection) GetSelected(model *ITreeModel, iter *TreeIter) bool {
	var pcmodel **C.GtkTreeModel
	if model != nil {
		cmodel := (*model).toTreeModel()
		pcmodel = &cmodel
	}
	c := C.gtk_tree_selection_get_selected(v.native(),
		pcmodel, iter.native())
	return gobool(c)
}
/*
 * GtkTreeView
 */

// TreeView is a representation of GTK's GtkTreeView.
type TreeView struct {
	Container
}

// native returns a pointer to the underlying GtkTreeView.
func (v *TreeView) native() *C.GtkTreeView {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTreeView(p)
}

// marshalTreeView converts a GValue holding a GtkTreeView into a *TreeView.
func marshalTreeView(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTreeView(obj), nil
}

// wrapTreeView builds the Go wrapper hierarchy (Container > Widget) around obj.
func wrapTreeView(obj *glib.Object) *TreeView {
	return &TreeView{Container{Widget{glib.InitiallyUnowned{obj}}}}
}

// TreeViewNew() is a wrapper around gtk_tree_view_new().
func TreeViewNew() (*TreeView, error) {
	c := C.gtk_tree_view_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTreeView(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}

// TreeViewNewWithModel() is a wrapper around gtk_tree_view_new_with_model().
func TreeViewNewWithModel(model ITreeModel) (*TreeView, error) {
	c := C.gtk_tree_view_new_with_model(model.toTreeModel())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTreeView(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}

// GetModel() is a wrapper around gtk_tree_view_get_model(). It returns an
// error when the tree view has no model set.
func (v *TreeView) GetModel() (*TreeModel, error) {
	c := C.gtk_tree_view_get_model(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTreeModel(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}

// SetModel() is a wrapper around gtk_tree_view_set_model().
func (v *TreeView) SetModel(model ITreeModel) {
	C.gtk_tree_view_set_model(v.native(), model.toTreeModel())
}

// GetSelection() is a wrapper around gtk_tree_view_get_selection().
func (v *TreeView) GetSelection() (*TreeSelection, error) {
	c := C.gtk_tree_view_get_selection(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	s := wrapTreeSelection(obj)
	obj.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return s, nil
}

// AppendColumn() is a wrapper around gtk_tree_view_append_column(). It
// returns the number of columns after insertion.
func (v *TreeView) AppendColumn(column *TreeViewColumn) int {
	c := C.gtk_tree_view_append_column(v.native(), column.native())
	return int(c)
}
/*
 * GtkTreeViewColumn
 */

// TreeViewColumn is a representation of GTK's GtkTreeViewColumn.
type TreeViewColumn struct {
	glib.InitiallyUnowned
}

// native returns a pointer to the underlying GtkTreeViewColumn.
func (v *TreeViewColumn) native() *C.GtkTreeViewColumn {
	if v == nil || v.GObject == nil {
		return nil
	}
	p := unsafe.Pointer(v.GObject)
	return C.toGtkTreeViewColumn(p)
}

// marshalTreeViewColumn converts a GValue holding a GtkTreeViewColumn into
// a *TreeViewColumn.
func marshalTreeViewColumn(p uintptr) (interface{}, error) {
	c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	return wrapTreeViewColumn(obj), nil
}

// wrapTreeViewColumn wraps a glib.Object as a TreeViewColumn.
func wrapTreeViewColumn(obj *glib.Object) *TreeViewColumn {
	return &TreeViewColumn{glib.InitiallyUnowned{obj}}
}

// TreeViewColumnNew() is a wrapper around gtk_tree_view_column_new().
func TreeViewColumnNew() (*TreeViewColumn, error) {
	c := C.gtk_tree_view_column_new()
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTreeViewColumn(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}

// TreeViewColumnNewWithAttribute() is a wrapper around
// gtk_tree_view_column_new_with_attributes() that only sets one
// attribute for one column.
func TreeViewColumnNewWithAttribute(title string, renderer ICellRenderer, attribute string, column int) (*TreeViewColumn, error) {
	t_cstr := C.CString(title)
	defer C.free(unsafe.Pointer(t_cstr))
	a_cstr := C.CString(attribute)
	defer C.free(unsafe.Pointer(a_cstr))
	c := C._gtk_tree_view_column_new_with_attributes_one((*C.gchar)(t_cstr),
		renderer.toCellRenderer(), (*C.gchar)(a_cstr), C.gint(column))
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	t := wrapTreeViewColumn(obj)
	obj.RefSink()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return t, nil
}

// AddAttribute() is a wrapper around gtk_tree_view_column_add_attribute().
// It binds a renderer property (attribute) to a model column index.
func (v *TreeViewColumn) AddAttribute(renderer ICellRenderer, attribute string, column int) {
	cstr := C.CString(attribute)
	defer C.free(unsafe.Pointer(cstr))
	C.gtk_tree_view_column_add_attribute(v.native(),
		renderer.toCellRenderer(), (*C.gchar)(cstr), C.gint(column))
}

// SetExpand() is a wrapper around gtk_tree_view_column_set_expand().
func (v *TreeViewColumn) SetExpand(expand bool) {
	C.gtk_tree_view_column_set_expand(v.native(), gbool(expand))
}

// GetExpand() is a wrapper around gtk_tree_view_column_get_expand().
func (v *TreeViewColumn) GetExpand() bool {
	c := C.gtk_tree_view_column_get_expand(v.native())
	return gobool(c)
}

// SetMinWidth() is a wrapper around gtk_tree_view_column_set_min_width().
// minWidth is expressed in pixels.
func (v *TreeViewColumn) SetMinWidth(minWidth int) {
	C.gtk_tree_view_column_set_min_width(v.native(), C.gint(minWidth))
}

// GetMinWidth() is a wrapper around gtk_tree_view_column_get_min_width().
func (v *TreeViewColumn) GetMinWidth() int {
	c := C.gtk_tree_view_column_get_min_width(v.native())
	return int(c)
}
/*
* GtkWidget
*/
// Widget is a representation of GTK's GtkWidget.
type Widget struct {
glib.InitiallyUnowned
}
// IWidget is an interface type implemented by all structs
// embedding a Widget. It is meant to be used as an argument type
// for wrapper functions that wrap around a C GTK function taking a
// GtkWidget.
type IWidget interface {
toWidget() *C.GtkWidget
}
// native returns a pointer to the underlying GtkWidget.
func (v *Widget) native() *C.GtkWidget {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkWidget(p)
}
func (v *Widget) toWidget() *C.GtkWidget {
if v == nil {
return nil
}
return v.native()
}
func marshalWidget(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapWidget(obj), nil
}
func wrapWidget(obj *glib.Object) *Widget {
return &Widget{glib.InitiallyUnowned{obj}}
}
// Destroy is a wrapper around gtk_widget_destroy().
func (v *Widget) Destroy() {
C.gtk_widget_destroy(v.native())
}
// InDestruction is a wrapper around gtk_widget_in_destruction().
func (v *Widget) InDestruction() bool {
return gobool(C.gtk_widget_in_destruction(v.native()))
}
// TODO(jrick) this may require some rethinking
/*
func (v *Widget) Destroyed(widgetPointer **Widget) {
}
*/
// Unparent is a wrapper around gtk_widget_unparent().
func (v *Widget) Unparent() {
C.gtk_widget_unparent(v.native())
}
// Show is a wrapper around gtk_widget_show().
func (v *Widget) Show() {
C.gtk_widget_show(v.native())
}
// Hide is a wrapper around gtk_widget_hide().
func (v *Widget) Hide() {
C.gtk_widget_hide(v.native())
}
// GetCanFocus is a wrapper around gtk_widget_get_can_focus().
func (v *Widget) GetCanFocus() bool {
c := C.gtk_widget_get_can_focus(v.native())
return gobool(c)
}
// SetCanFocus is a wrapper around gtk_widget_set_can_focus().
func (v *Widget) SetCanFocus(canFocus bool) {
C.gtk_widget_set_can_focus(v.native(), gbool(canFocus))
}
// GetMapped is a wrapper around gtk_widget_get_mapped().
// Reports whether the widget is currently mapped (visible on screen with a
// GdkWindow). Note: the wrapped function is gtk_widget_get_mapped, not a
// GtkWindow function.
func (v *Widget) GetMapped() bool {
c := C.gtk_widget_get_mapped(v.native())
return gobool(c)
}
// SetMapped is a wrapper around gtk_widget_set_mapped().
//
// Bug fix: this previously called gtk_widget_set_can_focus (a copy-paste
// error), silently toggling focusability instead of the mapped flag.
// Typically only useful inside custom widget implementations.
func (v *Widget) SetMapped(mapped bool) {
	C.gtk_widget_set_mapped(v.native(), gbool(mapped))
}
// GetRealized is a wrapper around gtk_widget_get_realized().
// Reports whether the widget has an associated GdkWindow (note: this wraps
// the GtkWidget function, not a GtkWindow one).
func (v *Widget) GetRealized() bool {
c := C.gtk_widget_get_realized(v.native())
return gobool(c)
}
// SetRealized is a wrapper around gtk_widget_set_realized().
// Typically only useful inside custom widget implementations.
func (v *Widget) SetRealized(realized bool) {
C.gtk_widget_set_realized(v.native(), gbool(realized))
}
// GetDoubleBuffered is a wrapper around gtk_widget_get_double_buffered().
func (v *Widget) GetDoubleBuffered() bool {
c := C.gtk_widget_get_double_buffered(v.native())
return gobool(c)
}
// GetHasWindow is a wrapper around gtk_widget_get_has_window().
func (v *Widget) GetHasWindow() bool {
c := C.gtk_widget_get_has_window(v.native())
return gobool(c)
}
// SetHasWindow is a wrapper around gtk_widget_set_has_window().
func (v *Widget) SetHasWindow(hasWindow bool) {
C.gtk_widget_set_has_window(v.native(), gbool(hasWindow))
}
// ShowNow is a wrapper around gtk_widget_show_now().
func (v *Widget) ShowNow() {
C.gtk_widget_show_now(v.native())
}
// ShowAll is a wrapper around gtk_widget_show_all().
func (v *Widget) ShowAll() {
C.gtk_widget_show_all(v.native())
}
// SetNoShowAll is a wrapper around gtk_widget_set_no_show_all().
func (v *Widget) SetNoShowAll(noShowAll bool) {
C.gtk_widget_set_no_show_all(v.native(), gbool(noShowAll))
}
// GetNoShowAll is a wrapper around gtk_widget_get_no_show_all().
func (v *Widget) GetNoShowAll() bool {
c := C.gtk_widget_get_no_show_all(v.native())
return gobool(c)
}
// Map is a wrapper around gtk_widget_map().
func (v *Widget) Map() {
C.gtk_widget_map(v.native())
}
// Unmap is a wrapper around gtk_widget_unmap().
func (v *Widget) Unmap() {
C.gtk_widget_unmap(v.native())
}
//void gtk_widget_realize(GtkWidget *widget);
//void gtk_widget_unrealize(GtkWidget *widget);
//void gtk_widget_draw(GtkWidget *widget, cairo_t *cr);
//void gtk_widget_queue_resize(GtkWidget *widget);
//void gtk_widget_queue_resize_no_redraw(GtkWidget *widget);
//GdkFrameClock *gtk_widget_get_frame_clock(GtkWidget *widget);
//guint gtk_widget_add_tick_callback (GtkWidget *widget,
// GtkTickCallback callback,
// gpointer user_data,
// GDestroyNotify notify);
//void gtk_widget_remove_tick_callback(GtkWidget *widget, guint id);
// TODO(jrick) GtkAllocation
/*
func (v *Widget) SizeAllocate() {
}
*/
// TODO(jrick) GtkAccelGroup GdkModifierType GtkAccelFlags
/*
func (v *Widget) AddAccelerator() {
}
*/
// TODO(jrick) GtkAccelGroup GdkModifierType
/*
func (v *Widget) RemoveAccelerator() {
}
*/
// TODO(jrick) GtkAccelGroup
/*
func (v *Widget) SetAccelPath() {
}
*/
// TODO(jrick) GList
/*
func (v *Widget) ListAccelClosures() {
}
*/
// GetAllocatedWidth() is a wrapper around gtk_widget_get_allocated_width().
func (v *Widget) GetAllocatedWidth() int {
return int(C.gtk_widget_get_allocated_width(v.native()))
}
// GetAllocatedHeight() is a wrapper around gtk_widget_get_allocated_height().
func (v *Widget) GetAllocatedHeight() int {
return int(C.gtk_widget_get_allocated_height(v.native()))
}
//gboolean gtk_widget_can_activate_accel(GtkWidget *widget, guint signal_id);
// Event() is a wrapper around gtk_widget_event().
func (v *Widget) Event(event *gdk.Event) bool {
c := C.gtk_widget_event(v.native(),
(*C.GdkEvent)(unsafe.Pointer(event.Native())))
return gobool(c)
}
// Activate() is a wrapper around gtk_widget_activate().
func (v *Widget) Activate() bool {
return gobool(C.gtk_widget_activate(v.native()))
}
// Reparent() is a wrapper around gtk_widget_reparent().
func (v *Widget) Reparent(newParent IWidget) {
C.gtk_widget_reparent(v.native(), newParent.toWidget())
}
// TODO(jrick) GdkRectangle
/*
func (v *Widget) Intersect() {
}
*/
// IsFocus() is a wrapper around gtk_widget_is_focus().
func (v *Widget) IsFocus() bool {
return gobool(C.gtk_widget_is_focus(v.native()))
}
// GrabFocus() is a wrapper around gtk_widget_grab_focus().
func (v *Widget) GrabFocus() {
C.gtk_widget_grab_focus(v.native())
}
// GrabDefault() is a wrapper around gtk_widget_grab_default().
func (v *Widget) GrabDefault() {
C.gtk_widget_grab_default(v.native())
}
// SetName() is a wrapper around gtk_widget_set_name().
func (v *Widget) SetName(name string) {
cstr := C.CString(name)
defer C.free(unsafe.Pointer(cstr))
C.gtk_widget_set_name(v.native(), (*C.gchar)(cstr))
}
// GetName() is a wrapper around gtk_widget_get_name(). A non-nil
// error is returned in the case that gtk_widget_get_name returns NULL to
// differentiate between NULL and an empty string.
func (v *Widget) GetName() (string, error) {
c := C.gtk_widget_get_name(v.native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// GetSensitive is a wrapper around gtk_widget_get_sensitive().
func (v *Widget) GetSensitive() bool {
c := C.gtk_widget_get_sensitive(v.native())
return gobool(c)
}
// IsSensitive is a wrapper around gtk_widget_is_sensitive().
func (v *Widget) IsSensitive() bool {
c := C.gtk_widget_is_sensitive(v.native())
return gobool(c)
}
// SetSensitive is a wrapper around gtk_widget_set_sensitive().
func (v *Widget) SetSensitive(sensitive bool) {
C.gtk_widget_set_sensitive(v.native(), gbool(sensitive))
}
// GetVisible is a wrapper around gtk_widget_get_visible().
func (v *Widget) GetVisible() bool {
c := C.gtk_widget_get_visible(v.native())
return gobool(c)
}
// SetVisible is a wrapper around gtk_widget_set_visible().
func (v *Widget) SetVisible(visible bool) {
C.gtk_widget_set_visible(v.native(), gbool(visible))
}
// SetParent is a wrapper around gtk_widget_set_parent().
func (v *Widget) SetParent(parent IWidget) {
C.gtk_widget_set_parent(v.native(), parent.toWidget())
}
// GetParent is a wrapper around gtk_widget_get_parent().
func (v *Widget) GetParent() (*Widget, error) {
c := C.gtk_widget_get_parent(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
w := wrapWidget(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return w, nil
}
// SetSizeRequest is a wrapper around gtk_widget_set_size_request().
func (v *Widget) SetSizeRequest(width, height int) {
C.gtk_widget_set_size_request(v.native(), C.gint(width), C.gint(height))
}
// GetSizeRequest is a wrapper around gtk_widget_get_size_request().
func (v *Widget) GetSizeRequest() (width, height int) {
var w, h C.gint
C.gtk_widget_get_size_request(v.native(), &w, &h)
return int(w), int(h)
}
// SetParentWindow is a wrapper around gtk_widget_set_parent_window().
func (v *Widget) SetParentWindow(parentWindow *gdk.Window) {
C.gtk_widget_set_parent_window(v.native(),
(*C.GdkWindow)(unsafe.Pointer(parentWindow.Native())))
}
// GetParentWindow is a wrapper around gtk_widget_get_parent_window().
// A non-nil error is returned when the widget has no parent GdkWindow
// (for example, before it is realized).
//
// Bug fix: the nil check previously tested the receiver (v == nil) instead
// of the C result (c == nil), so a NULL parent window produced a *gdk.Window
// wrapping a nil object instead of an error.
func (v *Widget) GetParentWindow() (*gdk.Window, error) {
	c := C.gtk_widget_get_parent_window(v.native())
	if c == nil {
		return nil, nilPtrErr
	}
	obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
	w := &gdk.Window{obj}
	// GdkWindow is a plain GObject (no floating ref), so take a normal ref
	// and release it when the Go wrapper is finalized.
	w.Ref()
	runtime.SetFinalizer(obj, (*glib.Object).Unref)
	return w, nil
}
// SetEvents is a wrapper around gtk_widget_set_events().
func (v *Widget) SetEvents(events int) {
C.gtk_widget_set_events(v.native(), C.gint(events))
}
// GetEvents is a wrapper around gtk_widget_get_events().
func (v *Widget) GetEvents() int {
return int(C.gtk_widget_get_events(v.native()))
}
// AddEvents is a wrapper around gtk_widget_add_events().
func (v *Widget) AddEvents(events int) {
C.gtk_widget_add_events(v.native(), C.gint(events))
}
// HasDefault is a wrapper around gtk_widget_has_default().
func (v *Widget) HasDefault() bool {
c := C.gtk_widget_has_default(v.native())
return gobool(c)
}
// HasFocus is a wrapper around gtk_widget_has_focus().
func (v *Widget) HasFocus() bool {
c := C.gtk_widget_has_focus(v.native())
return gobool(c)
}
// HasVisibleFocus is a wrapper around gtk_widget_has_visible_focus().
func (v *Widget) HasVisibleFocus() bool {
c := C.gtk_widget_has_visible_focus(v.native())
return gobool(c)
}
// HasGrab is a wrapper around gtk_widget_has_grab().
func (v *Widget) HasGrab() bool {
c := C.gtk_widget_has_grab(v.native())
return gobool(c)
}
// IsDrawable is a wrapper around gtk_widget_is_drawable().
func (v *Widget) IsDrawable() bool {
c := C.gtk_widget_is_drawable(v.native())
return gobool(c)
}
// IsToplevel is a wrapper around gtk_widget_is_toplevel().
func (v *Widget) IsToplevel() bool {
c := C.gtk_widget_is_toplevel(v.native())
return gobool(c)
}
// TODO(jrick) GdkEventMask
/*
func (v *Widget) SetDeviceEvents() {
}
*/
// TODO(jrick) GdkEventMask
/*
func (v *Widget) GetDeviceEvents() {
}
*/
// TODO(jrick) GdkEventMask
/*
func (v *Widget) AddDeviceEvents() {
}
*/
// SetDeviceEnabled is a wrapper around gtk_widget_set_device_enabled().
func (v *Widget) SetDeviceEnabled(device *gdk.Device, enabled bool) {
C.gtk_widget_set_device_enabled(v.native(),
(*C.GdkDevice)(unsafe.Pointer(device.Native())), gbool(enabled))
}
// GetDeviceEnabled is a wrapper around gtk_widget_get_device_enabled().
func (v *Widget) GetDeviceEnabled(device *gdk.Device) bool {
c := C.gtk_widget_get_device_enabled(v.native(),
(*C.GdkDevice)(unsafe.Pointer(device.Native())))
return gobool(c)
}
// GetToplevel is a wrapper around gtk_widget_get_toplevel().
func (v *Widget) GetToplevel() (*Widget, error) {
c := C.gtk_widget_get_toplevel(v.native())
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
w := wrapWidget(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return w, nil
}
// GetTooltipText is a wrapper around gtk_widget_get_tooltip_text().
// A non-nil error is returned in the case that
// gtk_widget_get_tooltip_text returns NULL to differentiate between NULL
// and an empty string.
func (v *Widget) GetTooltipText() (string, error) {
c := C.gtk_widget_get_tooltip_text(v.native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
// SetTooltipText is a wrapper around gtk_widget_set_tooltip_text().
func (v *Widget) SetTooltipText(text string) {
cstr := C.CString(text)
defer C.free(unsafe.Pointer(cstr))
C.gtk_widget_set_tooltip_text(v.native(), (*C.gchar)(cstr))
}
// OverrideFont is a wrapper around gtk_widget_override_font().
func (v *Widget) OverrideFont(description string) {
cstr := C.CString(description)
defer C.free(unsafe.Pointer(cstr))
c := C.pango_font_description_from_string(cstr)
C.gtk_widget_override_font(v.native(), c)
}
// GetHAlign is a wrapper around gtk_widget_get_halign().
func (v *Widget) GetHAlign() Align {
c := C.gtk_widget_get_halign(v.native())
return Align(c)
}
// SetHAlign is a wrapper around gtk_widget_set_halign().
func (v *Widget) SetHAlign(align Align) {
C.gtk_widget_set_halign(v.native(), C.GtkAlign(align))
}
// GetVAlign is a wrapper around gtk_widget_get_valign().
func (v *Widget) GetVAlign() Align {
c := C.gtk_widget_get_valign(v.native())
return Align(c)
}
// SetVAlign is a wrapper around gtk_widget_set_valign().
func (v *Widget) SetVAlign(align Align) {
C.gtk_widget_set_valign(v.native(), C.GtkAlign(align))
}
// GetMarginTop is a wrapper around gtk_widget_get_margin_top().
func (v *Widget) GetMarginTop() int {
c := C.gtk_widget_get_margin_top(v.native())
return int(c)
}
// SetMarginTop is a wrapper around gtk_widget_set_margin_top().
func (v *Widget) SetMarginTop(margin int) {
C.gtk_widget_set_margin_top(v.native(), C.gint(margin))
}
// GetMarginBottom is a wrapper around gtk_widget_get_margin_bottom().
func (v *Widget) GetMarginBottom() int {
c := C.gtk_widget_get_margin_bottom(v.native())
return int(c)
}
// SetMarginBottom is a wrapper around gtk_widget_set_margin_bottom().
func (v *Widget) SetMarginBottom(margin int) {
C.gtk_widget_set_margin_bottom(v.native(), C.gint(margin))
}
// GetHExpand is a wrapper around gtk_widget_get_hexpand().
func (v *Widget) GetHExpand() bool {
c := C.gtk_widget_get_hexpand(v.native())
return gobool(c)
}
// SetHExpand is a wrapper around gtk_widget_set_hexpand().
func (v *Widget) SetHExpand(expand bool) {
C.gtk_widget_set_hexpand(v.native(), gbool(expand))
}
// GetVExpand is a wrapper around gtk_widget_get_vexpand().
func (v *Widget) GetVExpand() bool {
c := C.gtk_widget_get_vexpand(v.native())
return gobool(c)
}
// SetVExpand is a wrapper around gtk_widget_set_vexpand().
func (v *Widget) SetVExpand(expand bool) {
C.gtk_widget_set_vexpand(v.native(), gbool(expand))
}
/*
* GtkWindow
*/
// Window is a representation of GTK's GtkWindow.
type Window struct {
Bin
}
// IWindow is an interface type implemented by all structs embedding a
// Window. It is meant to be used as an argument type for wrapper
// functions that wrap around a C GTK function taking a GtkWindow.
type IWindow interface {
toWindow() *C.GtkWindow
}
// native returns a pointer to the underlying GtkWindow.
func (v *Window) native() *C.GtkWindow {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGtkWindow(p)
}
func (v *Window) toWindow() *C.GtkWindow {
if v == nil {
return nil
}
return v.native()
}
func marshalWindow(p uintptr) (interface{}, error) {
c := C.g_value_get_object((*C.GValue)(unsafe.Pointer(p)))
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
return wrapWindow(obj), nil
}
func wrapWindow(obj *glib.Object) *Window {
return &Window{Bin{Container{Widget{glib.InitiallyUnowned{obj}}}}}
}
// WindowNew is a wrapper around gtk_window_new().
func WindowNew(t WindowType) (*Window, error) {
c := C.gtk_window_new(C.GtkWindowType(t))
if c == nil {
return nil, nilPtrErr
}
obj := &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
w := wrapWindow(obj)
obj.RefSink()
runtime.SetFinalizer(obj, (*glib.Object).Unref)
return w, nil
}
// SetTitle is a wrapper around gtk_window_set_title().
func (v *Window) SetTitle(title string) {
cstr := C.CString(title)
defer C.free(unsafe.Pointer(cstr))
C.gtk_window_set_title(v.native(), (*C.gchar)(cstr))
}
// SetResizable is a wrapper around gtk_window_set_resizable().
func (v *Window) SetResizable(resizable bool) {
C.gtk_window_set_resizable(v.native(), gbool(resizable))
}
// GetResizable is a wrapper around gtk_window_get_resizable().
func (v *Window) GetResizable() bool {
c := C.gtk_window_get_resizable(v.native())
return gobool(c)
}
// TODO gtk_window_add_accel_group().
// ActivateFocus is a wrapper around gtk_window_activate_focus().
func (v *Window) ActivateFocus() bool {
c := C.gtk_window_activate_focus(v.native())
return gobool(c)
}
// ActivateDefault is a wrapper around gtk_window_activate_default().
func (v *Window) ActivateDefault() bool {
c := C.gtk_window_activate_default(v.native())
return gobool(c)
}
// SetModal is a wrapper around gtk_window_set_modal().
func (v *Window) SetModal(modal bool) {
C.gtk_window_set_modal(v.native(), gbool(modal))
}
// SetDefaultSize is a wrapper around gtk_window_set_default_size().
func (v *Window) SetDefaultSize(width, height int) {
C.gtk_window_set_default_size(v.native(), C.gint(width), C.gint(height))
}
// SetDefaultGeometry is a wrapper around gtk_window_set_default_geometry().
func (v *Window) SetDefaultGeometry(width, height int) {
C.gtk_window_set_default_geometry(v.native(), C.gint(width),
C.gint(height))
}
// TODO(jrick) GdkGeometry GdkWindowHints.
/*
func (v *Window) SetGeometryHints() {
}
*/
// TODO(jrick) GdkGravity.
/*
func (v *Window) SetGravity() {
}
*/
// TODO(jrick) GdkGravity.
/*
func (v *Window) GetGravity() {
}
*/
// SetPosition is a wrapper around gtk_window_set_position().
func (v *Window) SetPosition(position WindowPosition) {
C.gtk_window_set_position(v.native(), C.GtkWindowPosition(position))
}
// SetTransientFor is a wrapper around gtk_window_set_transient_for().
func (v *Window) SetTransientFor(parent IWindow) {
var pw *C.GtkWindow = nil
if parent != nil {
pw = parent.toWindow()
}
C.gtk_window_set_transient_for(v.native(), pw)
}
// TODO gtk_window_set_attached_to().
// SetDestroyWithParent is a wrapper around
// gtk_window_set_destroy_with_parent().
func (v *Window) SetDestroyWithParent(setting bool) {
C.gtk_window_set_destroy_with_parent(v.native(), gbool(setting))
}
// SetHideTitlebarWhenMaximized is a wrapper around
// gtk_window_set_hide_titlebar_when_maximized().
func (v *Window) SetHideTitlebarWhenMaximized(setting bool) {
C.gtk_window_set_hide_titlebar_when_maximized(v.native(),
gbool(setting))
}
// TODO gtk_window_set_screen().
// IsActive is a wrapper around gtk_window_is_active().
func (v *Window) IsActive() bool {
c := C.gtk_window_is_active(v.native())
return gobool(c)
}
// HasToplevelFocus is a wrapper around gtk_window_has_toplevel_focus().
func (v *Window) HasToplevelFocus() bool {
c := C.gtk_window_has_toplevel_focus(v.native())
return gobool(c)
}
// TODO gtk_window_list_toplevels().
// TODO gtk_window_add_mnemonic().
// TODO gtk_window_remove_mnemonic().
// TODO gtk_window_mnemonic_activate().
// TODO gtk_window_activate_key().
// TODO gtk_window_propogate_key_event().
// TODO gtk_window_get_focus().
// TODO gtk_window_set_focus().
// TODO gtk_window_get_default_widget().
// TODO gtk_window_set_default().
// Present is a wrapper around gtk_window_present().
func (v *Window) Present() {
C.gtk_window_present(v.native())
}
// PresentWithTime is a wrapper around gtk_window_present_with_time().
func (v *Window) PresentWithTime(ts uint32) {
C.gtk_window_present_with_time(v.native(), C.guint32(ts))
}
// Iconify is a wrapper around gtk_window_iconify().
func (v *Window) Iconify() {
C.gtk_window_iconify(v.native())
}
// Deiconify is a wrapper around gtk_window_deiconify().
func (v *Window) Deiconify() {
C.gtk_window_deiconify(v.native())
}
// Stick is a wrapper around gtk_window_stick().
func (v *Window) Stick() {
C.gtk_window_stick(v.native())
}
// Unstick is a wrapper around gtk_window_unstick().
func (v *Window) Unstick() {
C.gtk_window_unstick(v.native())
}
// Maximize is a wrapper around gtk_window_maximize().
func (v *Window) Maximize() {
C.gtk_window_maximize(v.native())
}
// Unmaximize is a wrapper around gtk_window_unmaximize().
func (v *Window) Unmaximize() {
C.gtk_window_unmaximize(v.native())
}
// Fullscreen is a wrapper around gtk_window_fullscreen().
func (v *Window) Fullscreen() {
C.gtk_window_fullscreen(v.native())
}
// Unfullscreen is a wrapper around gtk_window_unfullscreen().
func (v *Window) Unfullscreen() {
C.gtk_window_unfullscreen(v.native())
}
// SetKeepAbove is a wrapper around gtk_window_set_keep_above().
func (v *Window) SetKeepAbove(setting bool) {
C.gtk_window_set_keep_above(v.native(), gbool(setting))
}
// SetKeepBelow is a wrapper around gtk_window_set_keep_below().
func (v *Window) SetKeepBelow(setting bool) {
C.gtk_window_set_keep_below(v.native(), gbool(setting))
}
// TODO gtk_window_begin_resize_drag().
// TODO gtk_window_begin_move_drag().
// SetDecorated is a wrapper around gtk_window_set_decorated().
func (v *Window) SetDecorated(setting bool) {
C.gtk_window_set_decorated(v.native(), gbool(setting))
}
// SetDeletable is a wrapper around gtk_window_set_deletable().
func (v *Window) SetDeletable(setting bool) {
C.gtk_window_set_deletable(v.native(), gbool(setting))
}
// TODO gtk_window_set_mnemonic_modifier().
// TODO gtk_window_set_type_hint().
// SetSkipTaskbarHint is a wrapper around gtk_window_set_skip_taskbar_hint().
func (v *Window) SetSkipTaskbarHint(setting bool) {
C.gtk_window_set_skip_taskbar_hint(v.native(), gbool(setting))
}
// SetSkipPagerHint is a wrapper around gtk_window_set_skip_pager_hint().
func (v *Window) SetSkipPagerHint(setting bool) {
C.gtk_window_set_skip_pager_hint(v.native(), gbool(setting))
}
// SetUrgencyHint is a wrapper around gtk_window_set_urgency_hint().
func (v *Window) SetUrgencyHint(setting bool) {
C.gtk_window_set_urgency_hint(v.native(), gbool(setting))
}
// SetAcceptFocus is a wrapper around gtk_window_set_accept_focus().
func (v *Window) SetAcceptFocus(setting bool) {
C.gtk_window_set_accept_focus(v.native(), gbool(setting))
}
// SetFocusOnMap is a wrapper around gtk_window_set_focus_on_map().
func (v *Window) SetFocusOnMap(setting bool) {
C.gtk_window_set_focus_on_map(v.native(), gbool(setting))
}
// TODO gtk_window_set_startup_id().
// TODO gtk_window_set_role().
// GetDecorated is a wrapper around gtk_window_get_decorated().
func (v *Window) GetDecorated() bool {
c := C.gtk_window_get_decorated(v.native())
return gobool(c)
}
// GetDeletable is a wrapper around gtk_window_get_deletable().
func (v *Window) GetDeletable() bool {
c := C.gtk_window_get_deletable(v.native())
return gobool(c)
}
// TODO get_default_icon_list().
// TODO get_default_icon_name().
// GetDefaultSize is a wrapper around gtk_window_get_default_size().
func (v *Window) GetDefaultSize() (width, height int) {
var w, h C.gint
C.gtk_window_get_default_size(v.native(), &w, &h)
return int(w), int(h)
}
// GetDestroyWithParent is a wrapper around
// gtk_window_get_destroy_with_parent().
func (v *Window) GetDestroyWithParent() bool {
c := C.gtk_window_get_destroy_with_parent(v.native())
return gobool(c)
}
// GetHideTitlebarWhenMaximized is a wrapper around
// gtk_window_get_hide_titlebar_when_maximized().
func (v *Window) GetHideTitlebarWhenMaximized() bool {
c := C.gtk_window_get_hide_titlebar_when_maximized(v.native())
return gobool(c)
}
// TODO gtk_window_get_icon().
// TODO gtk_window_get_icon_list().
// TODO gtk_window_get_icon_name().
// TODO gtk_window_get_mnemonic_modifier().
// GetModal is a wrapper around gtk_window_get_modal().
func (v *Window) GetModal() bool {
c := C.gtk_window_get_modal(v.native())
return gobool(c)
}
// GetPosition is a wrapper around gtk_window_get_position().
func (v *Window) GetPosition() (root_x, root_y int) {
var x, y C.gint
C.gtk_window_get_position(v.native(), &x, &y)
return int(x), int(y)
}
// TODO gtk_window_get_role().
// GetSize is a wrapper around gtk_window_get_size().
func (v *Window) GetSize() (width, height int) {
var w, h C.gint
C.gtk_window_get_size(v.native(), &w, &h)
return int(w), int(h)
}
// TODO gtk_window_get_title().
// TODO gtk_window_get_transient_for().
// TODO gtk_window_get_attached_to().
// TODO gtk_window_get_type_hint().
// GetSkipTaskbarHint is a wrapper around gtk_window_get_skip_taskbar_hint().
func (v *Window) GetSkipTaskbarHint() bool {
c := C.gtk_window_get_skip_taskbar_hint(v.native())
return gobool(c)
}
// GetSkipPagerHint is a wrapper around gtk_window_get_skip_pager_hint().
//
// Bug fix: this previously called gtk_window_get_skip_taskbar_hint (a
// copy-paste error from the method above), returning the taskbar hint
// instead of the pager hint.
func (v *Window) GetSkipPagerHint() bool {
	c := C.gtk_window_get_skip_pager_hint(v.native())
	return gobool(c)
}
// GetUrgencyHint is a wrapper around gtk_window_get_urgency_hint().
func (v *Window) GetUrgencyHint() bool {
c := C.gtk_window_get_urgency_hint(v.native())
return gobool(c)
}
// GetAcceptFocus is a wrapper around gtk_window_get_accept_focus().
func (v *Window) GetAcceptFocus() bool {
c := C.gtk_window_get_accept_focus(v.native())
return gobool(c)
}
// GetFocusOnMap is a wrapper around gtk_window_get_focus_on_map().
func (v *Window) GetFocusOnMap() bool {
c := C.gtk_window_get_focus_on_map(v.native())
return gobool(c)
}
// TODO gtk_window_get_group().
// HasGroup is a wrapper around gtk_window_has_group().
func (v *Window) HasGroup() bool {
c := C.gtk_window_has_group(v.native())
return gobool(c)
}
// TODO gtk_window_get_window_type().
// Move is a wrapper around gtk_window_move().
func (v *Window) Move(x, y int) {
C.gtk_window_move(v.native(), C.gint(x), C.gint(y))
}
// TODO gtk_window_parse_geometry().
// Resize is a wrapper around gtk_window_resize().
func (v *Window) Resize(width, height int) {
C.gtk_window_resize(v.native(), C.gint(width), C.gint(height))
}
// ResizeToGeometry is a wrapper around gtk_window_resize_to_geometry().
func (v *Window) ResizeToGeometry(width, height int) {
C.gtk_window_resize_to_geometry(v.native(), C.gint(width), C.gint(height))
}
// TODO gtk_window_set_default_icon_list().
// TODO gtk_window_set_default_icon().
// TODO gtk_window_set_default_icon_from_file().
// TODO gtk_window_set_default_icon_name().
// TODO gtk_window_set_icon().
// TODO gtk_window_set_icon_list().
// SetIconFromFile is a wrapper around gtk_window_set_icon_from_file().
func (v *Window) SetIconFromFile(file string) error {
cstr := C.CString(file)
defer C.free(unsafe.Pointer(cstr))
var err *C.GError = nil
res := C.gtk_window_set_icon_from_file(v.native(), (*C.gchar)(cstr), &err)
if res == 0 {
defer C.g_error_free(err)
return errors.New(C.GoString((*C.char)(C.error_get_message(err))))
}
return nil
}
// TODO gtk_window_set_icon_name().
// SetAutoStartupNotification is a wrapper around
// gtk_window_set_auto_startup_notification().
// This doesn't seem right: the C function is a global setter with no
// GtkWindow argument, so a method receiver makes no sense. Needs a rethink.
/*
func (v *Window) SetAutoStartupNotification(setting bool) {
C.gtk_window_set_auto_startup_notification(gbool(setting))
}
*/
// GetMnemonicsVisible is a wrapper around
// gtk_window_get_mnemonics_visible().
func (v *Window) GetMnemonicsVisible() bool {
c := C.gtk_window_get_mnemonics_visible(v.native())
return gobool(c)
}
// SetMnemonicsVisible is a wrapper around
// gtk_window_set_mnemonics_visible().
func (v *Window) SetMnemonicsVisible(setting bool) {
C.gtk_window_set_mnemonics_visible(v.native(), gbool(setting))
}
// GetFocusVisible is a wrapper around gtk_window_get_focus_visible().
func (v *Window) GetFocusVisible() bool {
c := C.gtk_window_get_focus_visible(v.native())
return gobool(c)
}
// SetFocusVisible is a wrapper around gtk_window_set_focus_visible().
func (v *Window) SetFocusVisible(setting bool) {
C.gtk_window_set_focus_visible(v.native(), gbool(setting))
}
// SetHasResizeGrip is a wrapper around gtk_window_set_has_resize_grip().
func (v *Window) SetHasResizeGrip(setting bool) {
C.gtk_window_set_has_resize_grip(v.native(), gbool(setting))
}
// GetHasResizeGrip is a wrapper around gtk_window_get_has_resize_grip().
func (v *Window) GetHasResizeGrip() bool {
c := C.gtk_window_get_has_resize_grip(v.native())
return gobool(c)
}
// ResizeGripIsVisible is a wrapper around
// gtk_window_resize_grip_is_visible().
func (v *Window) ResizeGripIsVisible() bool {
c := C.gtk_window_resize_grip_is_visible(v.native())
return gobool(c)
}
// TODO gtk_window_get_resize_grip_area().
// TODO gtk_window_set_application().
// TODO gtk_window_get_application().
var cast_3_10_func func(string, *glib.Object) glib.IObject
// cast takes a native GObject and casts it to the appropriate Go struct.
func cast(c *C.GObject) (glib.IObject, error) {
var (
className = C.GoString((*C.char)(C.object_get_class_name(c)))
obj = &glib.Object{glib.ToGObject(unsafe.Pointer(c))}
g glib.IObject
)
runtime.SetFinalizer(obj, (*glib.Object).Unref)
switch className {
case "GtkAboutDialog":
g = wrapAboutDialog(obj)
case "GtkAdjustment":
g = wrapAdjustment(obj)
case "GtkAlignment":
g = wrapAlignment(obj)
case "GtkArrow":
g = wrapArrow(obj)
case "GtkBin":
g = wrapBin(obj)
case "GtkBox":
g = wrapBox(obj)
case "GtkButton":
g = wrapButton(obj)
case "GtkCalendar":
g = wrapCalendar(obj)
case "GtkCellLayout":
g = wrapCellLayout(obj)
case "GtkCellRenderer":
g = wrapCellRenderer(obj)
case "GtkCellRendererText":
g = wrapCellRendererText(obj)
case "GtkCellRendererToggle":
g = wrapCellRendererToggle(obj)
case "GtkCheckButton":
g = wrapCheckButton(obj)
case "GtkCheckMenuItem":
g = wrapCheckMenuItem(obj)
case "GtkClipboard":
g = wrapClipboard(obj)
case "GtkComboBox":
g = wrapComboBox(obj)
case "GtkContainer":
g = wrapContainer(obj)
case "GtkDialog":
g = wrapDialog(obj)
case "GtkDrawingArea":
g = wrapDrawingArea(obj)
case "GtkEditable":
g = wrapEditable(obj)
case "GtkEntry":
g = wrapEntry(obj)
case "GtkEntryBuffer":
g = wrapEntryBuffer(obj)
case "GtkEntryCompletion":
g = wrapEntryCompletion(obj)
case "GtkEventBox":
g = wrapEventBox(obj)
case "GtkFrame":
g = wrapFrame(obj)
case "GtkFileChooser":
g = wrapFileChooser(obj)
case "GtkFileChooserButton":
g = wrapFileChooserButton(obj)
case "GtkFileChooserWidget":
g = wrapFileChooserWidget(obj)
case "GtkGrid":
g = wrapGrid(obj)
case "GtkImage":
g = wrapImage(obj)
case "GtkLabel":
g = wrapLabel(obj)
case "GtkListStore":
g = wrapListStore(obj)
case "GtkMenu":
g = wrapMenu(obj)
case "GtkMenuBar":
g = wrapMenuBar(obj)
case "GtkMenuButton":
g = wrapMenuButton(obj)
case "GtkMenuItem":
g = wrapMenuItem(obj)
case "GtkMenuShell":
g = wrapMenuShell(obj)
case "GtkMessageDialog":
g = wrapMessageDialog(obj)
case "GtkMisc":
g = wrapMisc(obj)
case "GtkNotebook":
g = wrapNotebook(obj)
case "GtkOffscreenWindow":
g = wrapOffscreenWindow(obj)
case "GtkOrientable":
g = wrapOrientable(obj)
case "GtkProgressBar":
g = wrapProgressBar(obj)
case "GtkRadioButton":
g = wrapRadioButton(obj)
case "GtkRadioMenuItem":
g = wrapRadioMenuItem(obj)
case "GtkRange":
g = wrapRange(obj)
case "GtkScrollbar":
g = wrapScrollbar(obj)
case "GtkScrolledWindow":
g = wrapScrolledWindow(obj)
case "GtkSearchEntry":
g = wrapSearchEntry(obj)
case "GtkSeparator":
g = wrapSeparator(obj)
case "GtkSeparatorMenuItem":
g = wrapSeparatorMenuItem(obj)
case "GtkSeparatorToolItem":
g = wrapSeparatorToolItem(obj)
case "GtkSpinButton":
g = wrapSpinButton(obj)
case "GtkSpinner":
g = wrapSpinner(obj)
case "GtkStatusbar":
g = wrapStatusbar(obj)
case "GtkSwitch":
g = wrapSwitch(obj)
case "GtkTextView":
g = wrapTextView(obj)
case "GtkTextBuffer":
g = wrapTextBuffer(obj)
case "GtkTextTagTable":
g = wrapTextTagTable(obj)
case "GtkToggleButton":
g = wrapToggleButton(obj)
case "GtkToolbar":
g = wrapToolbar(obj)
case "GtkToolButton":
g = wrapToolButton(obj)
case "GtkToolItem":
g = wrapToolItem(obj)
case "GtkTreeModel":
g = wrapTreeModel(obj)
case "GtkTreeSelection":
g = wrapTreeSelection(obj)
case "GtkTreeView":
g = wrapTreeView(obj)
case "GtkTreeViewColumn":
g = wrapTreeViewColumn(obj)
case "GtkWidget":
g = wrapWidget(obj)
case "GtkWindow":
g = wrapWindow(obj)
default:
switch {
case cast_3_10_func != nil:
g = cast_3_10_func(className, obj)
if g != nil {
return g, nil
}
}
return nil, errors.New("unrecognized class name '" + className + "'")
}
return g, nil
}
|
package compress
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"github.com/klauspost/pgzip"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/config"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/template/interpolate"
"github.com/pierrec/lz4"
)
type Config struct {
common.PackerConfig `mapstructure:",squash"`
OutputPath string `mapstructure:"output"`
CompressionLevel int `mapstructure:"compression_level"`
KeepInputArtifact bool `mapstructure:"keep_input_artifact"`
Archive string
Algorithm string
UsingDefault bool
ctx *interpolate.Context
}
type PostProcessor struct {
config *Config
}
var (
// ErrInvalidCompressionLevel is returned when the compression level passed
// to gzip is not in the expected range. See compress/flate for details.
ErrInvalidCompressionLevel = fmt.Errorf(
"Invalid compression level. Expected an integer from -1 to 9.")
ErrWrongInputCount = fmt.Errorf(
"Can only have 1 input file when not using tar/zip")
filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)
// Configure decodes and validates the post-processor configuration.
//
// Fixes:
//   - the error returned by config.Decode was assigned but never checked;
//     a malformed config would previously be ignored and processing would
//     continue with a zero-valued Config.
//   - two leftover debug fmt.Printf calls (dumping CompressionLevel to
//     stdout) have been removed.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate: true,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{},
		},
	}, raws...)
	if err != nil {
		return err
	}
	errs := new(packer.MultiError)
	if p.config.OutputPath == "" {
		p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}"
	}
	if err = interpolate.Validate(p.config.OutputPath, p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing target template: %s", err))
	}
	templates := map[string]*string{
		"output": &p.config.OutputPath,
	}
	// Clamp the compression level to the range gzip/pgzip accepts.
	if p.config.CompressionLevel > pgzip.BestCompression {
		p.config.CompressionLevel = pgzip.BestCompression
	}
	// Technically 0 means "don't compress" but I don't know how to
	// differentiate between "user entered zero" and "user entered nothing".
	// Also, why bother creating a compressed file with zero compression?
	if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
		p.config.CompressionLevel = pgzip.DefaultCompression
	}
	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}
		*ptr, err = interpolate.Render(p.config.OutputPath, p.config.ctx)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", key, err))
		}
	}
	p.config.detectFromFilename()
	if len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// PostProcess writes the input artifact's files into the configured output
// path, optionally wrapped in a tar/zip archive and/or compressed with the
// configured algorithm. Returns the new artifact, whether to keep the input
// artifact, and any error.
//
// Fixes: the errors returned by makeLZ4Writer/makePgzipWriter were ignored,
// and output.Close() was deferred before checking them (a nil writer would
// have panicked on Close).
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	target := p.config.OutputPath
	keep := p.config.KeepInputArtifact
	newArtifact := &Artifact{Path: target}

	outputFile, err := os.Create(target)
	if err != nil {
		return nil, false, fmt.Errorf(
			"Unable to create archive %s: %s", target, err)
	}
	defer outputFile.Close()

	// Setup output interface. If we're using compression, output is a
	// compression writer. Otherwise it's just a file.
	var output io.WriteCloser
	switch p.config.Algorithm {
	case "lz4":
		ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target))
		output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Unable to create lz4 writer for %s: %s", target, err)
		}
		defer output.Close()
	case "pgzip":
		ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target))
		output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Unable to create gzip writer for %s: %s", target, err)
		}
		defer output.Close()
	default:
		// No compression: write straight to the file.
		output = outputFile
	}

	compression := p.config.Algorithm
	if compression == "" {
		compression = "no" // purely cosmetic, for the UI messages below
	}

	// Build an archive, if we're supposed to do that.
	switch p.config.Archive {
	case "tar":
		ui.Say(fmt.Sprintf("Tarring %s with %s compression", target, compression))
		err = createTarArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating tar: %s", err)
		}
	case "zip":
		ui.Say(fmt.Sprintf("Zipping %s", target))
		err = createZipArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating zip: %s", err)
		}
	default:
		ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression))
		// Filename indicates no tarball (just compress) so we'll do an io.Copy
		// into our compressor.
		if len(artifact.Files()) != 1 {
			return nil, keep, fmt.Errorf(
				"Can only have 1 input file when not using tar/zip. Found %d "+
					"files: %v", len(artifact.Files()), artifact.Files())
		}
		source, err := os.Open(artifact.Files()[0])
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Failed to open source file %s for reading: %s",
				artifact.Files()[0], err)
		}
		defer source.Close()
		if _, err = io.Copy(output, source); err != nil {
			return nil, keep, fmt.Errorf("Failed to compress %s: %s",
				artifact.Files()[0], err)
		}
	}

	ui.Say(fmt.Sprintf("Archive %s completed", target))

	return newArtifact, keep, nil
}
// detectFromFilename inspects the extension(s) of OutputPath to decide
// which archive format (Archive) and compression algorithm (Algorithm)
// to use. Unknown or missing extensions fall back to tar + pgzip.
func (config *Config) detectFromFilename() {
	byExtension := map[string]string{
		"tar": "tar",
		"zip": "zip",
		"gz":  "pgzip",
		"lz4": "lz4",
	}

	matches := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)

	// No extension at all: use the defaults.
	if len(matches) == 0 {
		config.Algorithm = "pgzip"
		config.Archive = "tar"
		return
	}

	// Look at the final extension and, when present, the one before it.
	last := matches[len(matches)-1][1]
	secondToLast := ""
	if len(matches) > 1 {
		secondToLast = matches[len(matches)-2][1]
	}

	// A trailing ".tar.<ext>" means we build a tarball first.
	if secondToLast == "tar" {
		config.Archive = "tar"
	}

	// A bare ".tar" or ".zip" is itself the final artifact; no compression step.
	if last == "zip" || last == "tar" {
		config.Archive = last
		return
	}

	// Otherwise the last extension picks the compression algorithm.
	if algo, known := byExtension[last]; known {
		config.Algorithm = algo
		return
	}

	// Unrecognized extension: fall back to tar + pgzip.
	config.Algorithm = "pgzip"
	config.Archive = "tar"
}
// makeLZ4Writer wraps output in an lz4 compression writer.
//
// NOTE(review): the comparison is against gzip.DefaultCompression, which is
// -1, so any level >= 0 enables lz4 high-compression mode. Presumably the
// intent is "any explicitly set level wants better compression" — confirm.
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	lzwriter := lz4.NewWriter(output)
	if compressionLevel > gzip.DefaultCompression {
		lzwriter.Header.HighCompression = true
	}
	return lzwriter, nil
}
// makePgzipWriter wraps output in a parallel gzip (pgzip) writer at the
// given compression level. Returns ErrInvalidCompressionLevel when pgzip
// rejects the level.
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
	if err != nil {
		return nil, ErrInvalidCompressionLevel
	}
	// 500000-byte blocks, one worker per available CPU.
	gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
	return gzipWriter, nil
}
func createTarArchive(files []string, output io.WriteCloser) error {
archive := tar.NewWriter(output)
defer archive.Close()
for _, path := range files {
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err)
}
header, err := tar.FileInfoHeader(fi, path)
if err != nil {
return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
}
if err := archive.WriteHeader(header); err != nil {
return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
}
if _, err := io.Copy(archive, file); err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}
func createZipArchive(files []string, output io.WriteCloser) error {
archive := zip.NewWriter(output)
defer archive.Close()
for _, path := range files {
path = filepath.ToSlash(path)
source, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer source.Close()
target, err := archive.Create(path)
if err != nil {
return fmt.Errorf("Failed to add zip header for %s: %s", path, err)
}
_, err = io.Copy(target, source)
if err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}
Clean up some debug code and reorganize the config struct
package compress
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"runtime"
"github.com/klauspost/pgzip"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/config"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/template/interpolate"
"github.com/pierrec/lz4"
)
// Config is the user configuration for the compress post-processor.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	// Fields from config file
	OutputPath        string `mapstructure:"output"`
	CompressionLevel  int    `mapstructure:"compression_level"`
	KeepInputArtifact bool   `mapstructure:"keep_input_artifact"`

	// Derived fields (populated by detectFromFilename)
	Archive   string
	Algorithm string

	ctx *interpolate.Context
}

// PostProcessor compresses and/or archives the files of an input artifact.
type PostProcessor struct {
	config *Config
}

var (
	// ErrInvalidCompressionLevel is returned when the compression level passed
	// to gzip is not in the expected range. See compress/flate for details.
	ErrInvalidCompressionLevel = fmt.Errorf(
		"Invalid compression level. Expected an integer from -1 to 9.")

	// ErrWrongInputCount is returned when multiple input files are given
	// without a tar/zip archive to hold them.
	ErrWrongInputCount = fmt.Errorf(
		"Can only have 1 input file when not using tar/zip")

	// filenamePattern extracts each ".ext" extension group from a path.
	filenamePattern = regexp.MustCompile(`(?:\.([a-z0-9]+))`)
)
// Configure decodes the raw configuration, applies defaults, renders the
// output-path template and derives the archive/compression settings from
// the target filename. Validation problems are accumulated and returned
// as a *packer.MultiError.
//
// Fix: the error returned by config.Decode was previously ignored; it is
// now checked before any further validation.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate: true,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{},
		},
	}, raws...)
	if err != nil {
		// Without a decoded config there is nothing further to validate.
		return err
	}

	errs := new(packer.MultiError)

	if p.config.OutputPath == "" {
		p.config.OutputPath = "packer_{{.BuildName}}_{{.Provider}}"
	}

	if err = interpolate.Validate(p.config.OutputPath, p.config.ctx); err != nil {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("Error parsing target template: %s", err))
	}

	templates := map[string]*string{
		"output": &p.config.OutputPath,
	}

	// Clamp the level into pgzip's supported range.
	if p.config.CompressionLevel > pgzip.BestCompression {
		p.config.CompressionLevel = pgzip.BestCompression
	}
	// Technically 0 means "don't compress" but I don't know how to
	// differentiate between "user entered zero" and "user entered nothing".
	// Also, why bother creating a compressed file with zero compression?
	if p.config.CompressionLevel == -1 || p.config.CompressionLevel == 0 {
		p.config.CompressionLevel = pgzip.DefaultCompression
	}

	for key, ptr := range templates {
		if *ptr == "" {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("%s must be set", key))
		}

		*ptr, err = interpolate.Render(p.config.OutputPath, p.config.ctx)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", key, err))
		}
	}

	p.config.detectFromFilename()

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
// PostProcess writes the input artifact's files into the configured output
// path, optionally wrapped in a tar/zip archive and/or compressed with the
// configured algorithm. Returns the new artifact, whether to keep the input
// artifact, and any error.
//
// Fixes: the errors returned by makeLZ4Writer/makePgzipWriter were ignored,
// and output.Close() was deferred before checking them (a nil writer would
// have panicked on Close).
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
	target := p.config.OutputPath
	keep := p.config.KeepInputArtifact
	newArtifact := &Artifact{Path: target}

	outputFile, err := os.Create(target)
	if err != nil {
		return nil, false, fmt.Errorf(
			"Unable to create archive %s: %s", target, err)
	}
	defer outputFile.Close()

	// Setup output interface. If we're using compression, output is a
	// compression writer. Otherwise it's just a file.
	var output io.WriteCloser
	switch p.config.Algorithm {
	case "lz4":
		ui.Say(fmt.Sprintf("Preparing lz4 compression for %s", target))
		output, err = makeLZ4Writer(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Unable to create lz4 writer for %s: %s", target, err)
		}
		defer output.Close()
	case "pgzip":
		ui.Say(fmt.Sprintf("Preparing gzip compression for %s", target))
		output, err = makePgzipWriter(outputFile, p.config.CompressionLevel)
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Unable to create gzip writer for %s: %s", target, err)
		}
		defer output.Close()
	default:
		// No compression: write straight to the file.
		output = outputFile
	}

	compression := p.config.Algorithm
	if compression == "" {
		compression = "no" // purely cosmetic, for the UI messages below
	}

	// Build an archive, if we're supposed to do that.
	switch p.config.Archive {
	case "tar":
		ui.Say(fmt.Sprintf("Tarring %s with %s compression", target, compression))
		err = createTarArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating tar: %s", err)
		}
	case "zip":
		ui.Say(fmt.Sprintf("Zipping %s", target))
		err = createZipArchive(artifact.Files(), output)
		if err != nil {
			return nil, keep, fmt.Errorf("Error creating zip: %s", err)
		}
	default:
		ui.Say(fmt.Sprintf("Copying %s with %s compression", target, compression))
		// Filename indicates no tarball (just compress) so we'll do an io.Copy
		// into our compressor.
		if len(artifact.Files()) != 1 {
			return nil, keep, fmt.Errorf(
				"Can only have 1 input file when not using tar/zip. Found %d "+
					"files: %v", len(artifact.Files()), artifact.Files())
		}
		source, err := os.Open(artifact.Files()[0])
		if err != nil {
			return nil, keep, fmt.Errorf(
				"Failed to open source file %s for reading: %s",
				artifact.Files()[0], err)
		}
		defer source.Close()
		if _, err = io.Copy(output, source); err != nil {
			return nil, keep, fmt.Errorf("Failed to compress %s: %s",
				artifact.Files()[0], err)
		}
	}

	ui.Say(fmt.Sprintf("Archive %s completed", target))

	return newArtifact, keep, nil
}
// detectFromFilename inspects the extension(s) of OutputPath to decide
// which archive format (Archive) and compression algorithm (Algorithm)
// to use. Unknown or missing extensions fall back to tar + pgzip.
func (config *Config) detectFromFilename() {
	byExtension := map[string]string{
		"tar": "tar",
		"zip": "zip",
		"gz":  "pgzip",
		"lz4": "lz4",
	}

	matches := filenamePattern.FindAllStringSubmatch(config.OutputPath, -1)

	// No extension at all: use the defaults.
	if len(matches) == 0 {
		config.Algorithm = "pgzip"
		config.Archive = "tar"
		return
	}

	// Look at the final extension and, when present, the one before it.
	last := matches[len(matches)-1][1]
	secondToLast := ""
	if len(matches) > 1 {
		secondToLast = matches[len(matches)-2][1]
	}

	// A trailing ".tar.<ext>" means we build a tarball first.
	if secondToLast == "tar" {
		config.Archive = "tar"
	}

	// A bare ".tar" or ".zip" is itself the final artifact; no compression step.
	if last == "zip" || last == "tar" {
		config.Archive = last
		return
	}

	// Otherwise the last extension picks the compression algorithm.
	if algo, known := byExtension[last]; known {
		config.Algorithm = algo
		return
	}

	// Unrecognized extension: fall back to tar + pgzip.
	config.Algorithm = "pgzip"
	config.Archive = "tar"
}
// makeLZ4Writer wraps output in an lz4 compression writer.
//
// NOTE(review): the comparison is against gzip.DefaultCompression, which is
// -1, so any level >= 0 enables lz4 high-compression mode. Presumably the
// intent is "any explicitly set level wants better compression" — confirm.
func makeLZ4Writer(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	lzwriter := lz4.NewWriter(output)
	if compressionLevel > gzip.DefaultCompression {
		lzwriter.Header.HighCompression = true
	}
	return lzwriter, nil
}
// makePgzipWriter wraps output in a parallel gzip (pgzip) writer at the
// given compression level. Returns ErrInvalidCompressionLevel when pgzip
// rejects the level.
func makePgzipWriter(output io.WriteCloser, compressionLevel int) (io.WriteCloser, error) {
	gzipWriter, err := pgzip.NewWriterLevel(output, compressionLevel)
	if err != nil {
		return nil, ErrInvalidCompressionLevel
	}
	// 500000-byte blocks, one worker per available CPU.
	gzipWriter.SetConcurrency(500000, runtime.GOMAXPROCS(-1))
	return gzipWriter, nil
}
func createTarArchive(files []string, output io.WriteCloser) error {
archive := tar.NewWriter(output)
defer archive.Close()
for _, path := range files {
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer file.Close()
fi, err := file.Stat()
if err != nil {
return fmt.Errorf("Unable to get fileinfo for %s: %s", path, err)
}
header, err := tar.FileInfoHeader(fi, path)
if err != nil {
return fmt.Errorf("Failed to create tar header for %s: %s", path, err)
}
if err := archive.WriteHeader(header); err != nil {
return fmt.Errorf("Failed to write tar header for %s: %s", path, err)
}
if _, err := io.Copy(archive, file); err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}
func createZipArchive(files []string, output io.WriteCloser) error {
archive := zip.NewWriter(output)
defer archive.Close()
for _, path := range files {
path = filepath.ToSlash(path)
source, err := os.Open(path)
if err != nil {
return fmt.Errorf("Unable to read file %s: %s", path, err)
}
defer source.Close()
target, err := archive.Create(path)
if err != nil {
return fmt.Errorf("Failed to add zip header for %s: %s", path, err)
}
_, err = io.Copy(target, source)
if err != nil {
return fmt.Errorf("Failed to copy %s data to archive: %s", path, err)
}
}
return nil
}
|
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tsdb implements a time series storage for float64 sample data.
package tsdb
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/wal"
"golang.org/x/sync/errgroup"
// Load the package into main to make sure minium Go version is met.
_ "github.com/prometheus/prometheus/tsdb/goversion"
)
const (
	// Default duration of a block in milliseconds (2 hours expressed in ms).
	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
)

var (
	// ErrNotReady is returned if the underlying storage is not ready yet.
	ErrNotReady = errors.New("TSDB not ready")
)
// DefaultOptions used for the DB. They are sane for setups using
// millisecond precision timestamps: 15 days retention, 2h blocks,
// a lock file, no WAL compression and no overlapping blocks.
func DefaultOptions() *Options {
	opts := &Options{}
	opts.WALSegmentSize = wal.DefaultSegmentSize
	opts.RetentionDuration = int64(15 * 24 * time.Hour / time.Millisecond)
	opts.MinBlockDuration = DefaultBlockDuration
	opts.MaxBlockDuration = DefaultBlockDuration
	opts.NoLockfile = false
	opts.AllowOverlappingBlocks = false
	opts.WALCompression = false
	opts.StripeSize = DefaultStripeSize
	return opts
}
// Options of the DB storage.
// See DefaultOptions for the values used when a nil Options is passed to Open.
type Options struct {
	// Segments (wal files) max size.
	// WALSegmentSize = 0, segment size is default size.
	// WALSegmentSize > 0, segment size is WALSegmentSize.
	// WALSegmentSize < 0, wal is disabled.
	WALSegmentSize int

	// Duration of persisted data to keep.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	RetentionDuration int64

	// Maximum number of bytes in blocks to be retained.
	// 0 or less means disabled.
	// NOTE: For proper storage calculations need to consider
	// the size of the WAL folder which is not added when calculating
	// the current size of the database.
	MaxBytes int64

	// NoLockfile disables creation and consideration of a lock file.
	NoLockfile bool

	// Overlapping blocks are allowed if AllowOverlappingBlocks is true.
	// This in-turn enables vertical compaction and vertical query merge.
	AllowOverlappingBlocks bool

	// WALCompression will turn on Snappy compression for records on the WAL.
	WALCompression bool

	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
	StripeSize int

	// The timestamp range of head blocks after which they get persisted.
	// It's the minimum duration of any persisted block.
	// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	MinBlockDuration int64

	// The maximum timestamp range of compacted blocks.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
	// Typically it is in milliseconds.
	MaxBlockDuration int64

	// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
	// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
	SeriesLifecycleCallback SeriesLifecycleCallback
}
// DB handles reads and writes of time series falling into
// a hashed partition of a seriedb.
type DB struct {
	dir       string            // root directory holding blocks, WAL and lock file
	lockf     fileutil.Releaser // directory lock, nil when Options.NoLockfile
	logger    log.Logger
	metrics   *dbMetrics
	opts      *Options
	chunkPool chunkenc.Pool // shared pool of chunk buffers
	compactor Compactor

	// Mutex for that must be held when modifying the general block layout.
	mtx    sync.RWMutex
	blocks []*Block

	head *Head

	compactc chan struct{} // signals that a compaction should be attempted
	donec    chan struct{} // closed when the run() goroutine exits
	stopc    chan struct{} // closed to ask run() to stop

	// cmtx ensures that compactions and deletions don't run simultaneously.
	cmtx sync.Mutex

	// autoCompactMtx ensures that no compaction gets triggered while
	// changing the autoCompact var.
	autoCompactMtx sync.Mutex
	autoCompact    bool

	// Cancel a running compaction when a shutdown is initiated.
	compactCancel context.CancelFunc
}
// dbMetrics bundles the Prometheus collectors exported by a DB instance.
// See newDBMetrics for the metric names and help strings.
type dbMetrics struct {
	loadedBlocks         prometheus.GaugeFunc
	symbolTableSize      prometheus.GaugeFunc
	reloads              prometheus.Counter
	reloadsFailed        prometheus.Counter
	compactionsFailed    prometheus.Counter
	compactionsTriggered prometheus.Counter
	compactionsSkipped   prometheus.Counter
	sizeRetentionCount   prometheus.Counter
	timeRetentionCount   prometheus.Counter
	startTime            prometheus.GaugeFunc
	tombCleanTimer       prometheus.Histogram
	blocksBytes          prometheus.Gauge
	maxBytes             prometheus.Gauge
}
// newDBMetrics builds the DB's Prometheus instrumentation and, when r is
// non-nil, registers every collector with it. The gauge functions read DB
// state under db.mtx, so they are safe to scrape concurrently.
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
	m := &dbMetrics{}

	m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_blocks_loaded",
		Help: "Number of currently loaded data blocks",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		return float64(len(db.blocks))
	})
	m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_symbol_table_size_bytes",
		Help: "Size of symbol table on disk (in bytes)",
	}, func() float64 {
		// Copy the slice under the lock, then sum without holding it.
		db.mtx.RLock()
		blocks := db.blocks[:]
		db.mtx.RUnlock()
		symTblSize := uint64(0)
		for _, b := range blocks {
			symTblSize += b.GetSymbolTableSize()
		}
		return float64(symTblSize)
	})
	m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_total",
		Help: "Number of times the database reloaded block data from disk.",
	})
	m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_failures_total",
		Help: "Number of times the database failed to reload block data from disk.",
	})
	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_triggered_total",
		Help: "Total number of triggered compactions for the partition.",
	})
	m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_failed_total",
		Help: "Total number of compactions that failed for the partition.",
	})
	m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_time_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.",
	})
	m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_skipped_total",
		Help: "Total number of skipped compactions due to disabled auto compaction.",
	})
	m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_lowest_timestamp",
		Help: "Lowest timestamp value stored in the database. The unit is decided by the library consumer.",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		// Fall back to the head's min time when no blocks are persisted yet.
		if len(db.blocks) == 0 {
			return float64(db.head.MinTime())
		}
		return float64(db.blocks[0].meta.MinTime)
	})
	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "prometheus_tsdb_tombstone_cleanup_seconds",
		Help: "The time taken to recompact blocks to remove tombstones.",
	})
	m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_storage_blocks_bytes",
		Help: "The number of bytes that are currently used for local storage by all blocks.",
	})
	m.maxBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_retention_limit_bytes",
		Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
	})
	m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_size_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
	})

	if r != nil {
		r.MustRegister(
			m.loadedBlocks,
			m.symbolTableSize,
			m.reloads,
			m.reloadsFailed,
			m.compactionsFailed,
			m.compactionsTriggered,
			m.compactionsSkipped,
			m.sizeRetentionCount,
			m.timeRetentionCount,
			m.startTime,
			m.tombCleanTimer,
			m.blocksBytes,
			m.maxBytes,
		)
	}
	return m
}
// ErrClosed is returned when the db is closed.
var ErrClosed = errors.New("db already closed")

// DBReadOnly provides APIs for read only operations on a database.
// Current implementation doesn't support concurrency so
// all API calls should happen in the same go routine.
type DBReadOnly struct {
	logger  log.Logger
	dir     string
	closers []io.Closer  // readers (blocks/head) to close on Close()
	closed  chan struct{} // closed once Close() has been called
}
// OpenDBReadOnly opens DB in the given directory for read only operations.
// It fails if the directory cannot be stat'ed; a nil logger is replaced
// with a no-op logger.
func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
	if _, err := os.Stat(dir); err != nil {
		return nil, errors.Wrap(err, "opening the db dir")
	}

	logger := l
	if logger == nil {
		logger = log.NewNopLogger()
	}

	db := &DBReadOnly{
		logger: logger,
		dir:    dir,
		closed: make(chan struct{}),
	}
	return db, nil
}
// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL.
// Samples that are in existing blocks will not be written to the new block.
// Note that if the read only database is running concurrently with a
// writable database then writing the WAL to the database directory can race.
func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
	blockReaders, err := db.Blocks()
	if err != nil {
		return errors.Wrap(err, "read blocks")
	}
	// Only ingest WAL samples newer than the max time of the last persisted block.
	maxBlockTime := int64(math.MinInt64)
	if len(blockReaders) > 0 {
		maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
	}
	w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
	if err != nil {
		return err
	}
	head, err := NewHead(nil, db.logger, w, 1, db.dir, nil, DefaultStripeSize, nil)
	if err != nil {
		return err
	}
	// Always close the head on exit; fold its close error into the named
	// return value so it is not lost.
	defer func() {
		var merr tsdb_errors.MultiError
		merr.Add(returnErr)
		merr.Add(errors.Wrap(head.Close(), "closing Head"))
		returnErr = merr.Err()
	}()
	// Set the min valid time for the ingested wal samples
	// to be no lower than the maxt of the last block.
	if err := head.Init(maxBlockTime); err != nil {
		return errors.Wrap(err, "read WAL")
	}
	mint := head.MinTime()
	maxt := head.MaxTime()
	rh := &RangeHead{
		head: head,
		mint: mint,
		maxt: maxt,
	}
	compactor, err := NewLeveledCompactor(
		context.Background(),
		nil,
		db.logger,
		ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
		chunkenc.NewPool(),
	)
	if err != nil {
		return errors.Wrap(err, "create leveled compactor")
	}
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	_, err = compactor.Write(dir, rh, mint, maxt+1, nil)
	return errors.Wrap(err, "writing WAL")
}
// Querier loads the wal and returns a new querier over the data partition for the given time range.
// Current implementation doesn't support multiple Queriers.
func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	// Fail fast if Close() has already been called.
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	blockReaders, err := db.Blocks()
	if err != nil {
		return nil, err
	}
	blocks := make([]*Block, len(blockReaders))
	for i, b := range blockReaders {
		// Blocks() returns BlockReader; the writable-DB querier below needs
		// concrete *Block values.
		b, ok := b.(*Block)
		if !ok {
			return nil, errors.New("unable to convert a read only block to a normal block")
		}
		blocks[i] = b
	}

	// Start with an empty head (no WAL attached).
	head, err := NewHead(nil, db.logger, nil, 1, db.dir, nil, DefaultStripeSize, nil)
	if err != nil {
		return nil, err
	}
	maxBlockTime := int64(math.MinInt64)
	if len(blocks) > 0 {
		maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
	}
	// Also add the WAL if the current blocks don't cover the requests time range.
	if maxBlockTime <= maxt {
		// Replace the empty head with one that replays the WAL.
		if err := head.Close(); err != nil {
			return nil, err
		}
		w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
		if err != nil {
			return nil, err
		}
		head, err = NewHead(nil, db.logger, w, 1, db.dir, nil, DefaultStripeSize, nil)
		if err != nil {
			return nil, err
		}
		// Set the min valid time for the ingested wal samples
		// to be no lower than the maxt of the last block.
		if err := head.Init(maxBlockTime); err != nil {
			return nil, errors.Wrap(err, "read WAL")
		}
		// Set the wal to nil to disable all wal operations.
		// This is mainly to avoid blocking when closing the head.
		head.wal = nil
	}

	// The head is closed together with the other readers on Close().
	db.closers = append(db.closers, head)

	// TODO: Refactor so that it is possible to obtain a Querier without initializing a writable DB instance.
	// Option 1: refactor DB to have the Querier implementation using the DBReadOnly.Querier implementation not the opposite.
	// Option 2: refactor Querier to use another independent func which
	// can than be used by a read only and writable db instances without any code duplication.
	dbWritable := &DB{
		dir:    db.dir,
		logger: db.logger,
		blocks: blocks,
		head:   head,
	}

	return dbWritable.Querier(ctx, mint, maxt)
}
// ChunkQuerier is not yet supported for read-only access; it always
// returns a "not implemented" error.
func (db *DBReadOnly) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
	// TODO(bwplotka): Implement in next PR.
	return nil, errors.New("not implemented")
}
// Blocks returns a slice of block readers for persisted blocks.
// It (re)opens the blocks on disk, refuses to proceed when unexplained
// corruption is found, and replaces any previously returned readers
// (the old ones are closed).
func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
	// Fail fast if Close() has already been called.
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil)
	if err != nil {
		return nil, err
	}

	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
	for _, block := range loadable {
		for _, b := range block.Meta().Compaction.Parents {
			delete(corrupted, b.ULID)
		}
	}
	if len(corrupted) > 0 {
		// Unexplained corruption remains: close everything we opened and
		// report every corrupted block.
		for _, b := range loadable {
			if err := b.Close(); err != nil {
				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b)
			}
		}
		var merr tsdb_errors.MultiError
		for ulid, err := range corrupted {
			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
		}
		return nil, merr.Err()
	}

	if len(loadable) == 0 {
		return nil, nil
	}

	// Order blocks by min time so callers see a consistent timeline.
	sort.Slice(loadable, func(i, j int) bool {
		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
	})

	blockMetas := make([]BlockMeta, 0, len(loadable))
	for _, b := range loadable {
		blockMetas = append(blockMetas, b.Meta())
	}
	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
		level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String())
	}

	// Close all previously open readers and add the new ones to the cache.
	for _, closer := range db.closers {
		closer.Close() // best-effort; errors are intentionally ignored here
	}

	blockClosers := make([]io.Closer, len(loadable))
	blockReaders := make([]BlockReader, len(loadable))
	for i, b := range loadable {
		blockClosers[i] = b
		blockReaders[i] = b
	}
	db.closers = blockClosers

	return blockReaders, nil
}
// Close all block readers. Subsequent calls (and all other DBReadOnly
// operations) return ErrClosed. Errors from individual readers are
// collected and returned together.
func (db *DBReadOnly) Close() error {
	select {
	case <-db.closed:
		return ErrClosed
	default:
	}
	close(db.closed)

	var errs tsdb_errors.MultiError
	for i := range db.closers {
		errs.Add(db.closers[i].Close())
	}
	return errs.Err()
}
// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (*DB, error) {
	validated, rngs := validateOpts(opts, nil)
	return open(dir, l, r, validated, rngs)
}
// validateOpts fills in defaults for a nil or partially set Options and
// derives the compaction block ranges when none are supplied.
func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
	if opts == nil {
		opts = DefaultOptions()
	}
	if opts.StripeSize <= 0 {
		opts.StripeSize = DefaultStripeSize
	}
	if opts.MinBlockDuration <= 0 {
		opts.MinBlockDuration = DefaultBlockDuration
	}
	// The maximum can never be smaller than the minimum.
	if opts.MaxBlockDuration < opts.MinBlockDuration {
		opts.MaxBlockDuration = opts.MinBlockDuration
	}

	if len(rngs) == 0 {
		// Start with smallest block duration and create exponential buckets
		// until the configured maximum block duration is exceeded.
		rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
	}
	return opts, rngs
}
// open initializes a writable DB rooted at dir. rngs are the compaction
// block-duration buckets (rngs[0] is used as the head chunk range); opts
// must already have been normalized by validateOpts.
//
// Fix: when the WAL is disabled (opts.WALSegmentSize < 0) wlog stays nil,
// and the original code dereferenced it via wlog.Repair on a head.Init
// failure — a nil-pointer panic. The repair path now guards on wlog != nil
// and surfaces the init error instead.
//
// NOTE(review): on error paths after the lock file is acquired, lockf is
// not released here — matches the original behavior; confirm upstream.
func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (db *DB, err error) {
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}
	if l == nil {
		l = log.NewNopLogger()
	}

	// Drop compaction ranges larger than the configured maximum block duration.
	for i, v := range rngs {
		if v > opts.MaxBlockDuration {
			rngs = rngs[:i]
			break
		}
	}

	// Fixup bad format written by Prometheus 2.1.
	if err := repairBadIndexVersion(l, dir); err != nil {
		return nil, err
	}
	// Migrate old WAL if one exists.
	if err := MigrateWAL(l, filepath.Join(dir, "wal")); err != nil {
		return nil, errors.Wrap(err, "migrate WAL")
	}

	db = &DB{
		dir:         dir,
		logger:      l,
		opts:        opts,
		compactc:    make(chan struct{}, 1),
		donec:       make(chan struct{}),
		stopc:       make(chan struct{}),
		autoCompact: true,
		chunkPool:   chunkenc.NewPool(),
	}
	db.metrics = newDBMetrics(db, r)

	maxBytes := opts.MaxBytes
	if maxBytes < 0 {
		maxBytes = 0
	}
	db.metrics.maxBytes.Set(float64(maxBytes))

	if !opts.NoLockfile {
		absdir, err := filepath.Abs(dir)
		if err != nil {
			return nil, err
		}
		lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock"))
		if err != nil {
			return nil, errors.Wrap(err, "lock DB directory")
		}
		db.lockf = lockf
	}

	ctx, cancel := context.WithCancel(context.Background())
	db.compactor, err = NewLeveledCompactor(ctx, r, l, rngs, db.chunkPool)
	if err != nil {
		cancel()
		return nil, errors.Wrap(err, "create leveled compactor")
	}
	db.compactCancel = cancel

	var wlog *wal.WAL
	segmentSize := wal.DefaultSegmentSize
	walDir := filepath.Join(dir, "wal")
	// Wal is enabled.
	if opts.WALSegmentSize >= 0 {
		// Wal is set to a custom size.
		if opts.WALSegmentSize > 0 {
			segmentSize = opts.WALSegmentSize
		}
		wlog, err = wal.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
		if err != nil {
			return nil, err
		}
	}

	db.head, err = NewHead(r, l, wlog, rngs[0], dir, db.chunkPool, opts.StripeSize, opts.SeriesLifecycleCallback)
	if err != nil {
		return nil, err
	}

	if err := db.reload(); err != nil {
		return nil, err
	}
	// Set the min valid time for the ingested samples
	// to be no lower than the maxt of the last block.
	blocks := db.Blocks()
	minValidTime := int64(math.MinInt64)
	if len(blocks) > 0 {
		minValidTime = blocks[len(blocks)-1].Meta().MaxTime
	}

	if initErr := db.head.Init(minValidTime); initErr != nil {
		db.head.metrics.walCorruptionsTotal.Inc()
		level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
		if wlog == nil {
			// WAL is disabled, so there is nothing to repair; surface the
			// error instead of panicking on a nil WAL below.
			return nil, errors.Wrap(initErr, "head init")
		}
		if err := wlog.Repair(initErr); err != nil {
			return nil, errors.Wrap(err, "repair corrupted WAL")
		}
	}

	go db.run()

	return db, nil
}
// StartTime implements the Storage interface.
// It returns the MinTime of the oldest persisted block, falling back to the
// head's MinTime when no blocks exist.
func (db *DB) StartTime() (int64, error) {
	db.mtx.RLock()
	defer db.mtx.RUnlock()

	if len(db.blocks) == 0 {
		return db.head.MinTime(), nil
	}
	return db.blocks[0].Meta().MinTime, nil
}
// Dir returns the directory of the database.
func (db *DB) Dir() string {
	return db.dir
}
// run is the DB's background loop. It triggers a compaction check once per
// minute (or sooner when signalled via db.compactc) and applies exponential
// backoff between failed compactions. It exits when db.stopc is closed and
// closes db.donec on return so Close can wait for it.
func (db *DB) run() {
	defer close(db.donec)

	backoff := time.Duration(0)

	for {
		// First select: honor shutdown, then wait out the current backoff.
		select {
		case <-db.stopc:
			return
		case <-time.After(backoff):
		}

		select {
		case <-time.After(1 * time.Minute):
			// Periodic tick: enqueue a compaction signal without blocking
			// (the channel has capacity 1, so a pending signal is enough).
			select {
			case db.compactc <- struct{}{}:
			default:
			}
		case <-db.compactc:
			db.metrics.compactionsTriggered.Inc()

			// Hold autoCompactMtx so DisableCompactions/EnableCompactions
			// cannot flip the flag mid-check.
			db.autoCompactMtx.Lock()
			if db.autoCompact {
				if err := db.Compact(); err != nil {
					level.Error(db.logger).Log("msg", "compaction failed", "err", err)
					backoff = exponential(backoff, 1*time.Second, 1*time.Minute)
				} else {
					backoff = 0
				}
			} else {
				db.metrics.compactionsSkipped.Inc()
			}
			db.autoCompactMtx.Unlock()
		case <-db.stopc:
			return
		}
	}
}
// Appender opens a new appender against the database.
// The returned appender writes into the head and triggers a compaction
// signal on Commit when the head becomes compactable (see dbAppender).
func (db *DB) Appender() storage.Appender {
	return dbAppender{db: db, Appender: db.head.Appender()}
}
// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
	storage.Appender // embedded head appender; all calls delegate to it
	db               *DB
}
// Commit delegates to the wrapped head appender and, when the head has grown
// past its compaction threshold, signals the background compaction loop.
// The commit error (if any) is returned regardless of the signalling outcome.
func (a dbAppender) Commit() error {
	commitErr := a.Appender.Commit()

	// We could just run this check every few minutes practically. But for benchmarks
	// and high frequency use cases this is the safer way.
	if !a.db.head.compactable() {
		return commitErr
	}
	select {
	case a.db.compactc <- struct{}{}:
	default:
	}
	return commitErr
}
// Compact data if possible. After successful compaction blocks are reloaded
// which will also trigger blocks to be deleted that fall out of the retention
// window.
// If no blocks are compacted, the retention window state doesn't change. Thus,
// this is sufficient to reliably delete old data.
// Old blocks are only deleted on reload based on the new block's parent information.
// See DB.reload documentation for further information.
func (db *DB) Compact() (err error) {
	// cmtx serializes compactions with deletions and snapshots.
	db.cmtx.Lock()
	defer db.cmtx.Unlock()
	defer func() {
		if err != nil {
			db.metrics.compactionsFailed.Inc()
		}
	}()
	// Check whether we have pending head blocks that are ready to be persisted.
	// They have the highest priority.
	for {
		// Abort between iterations if a shutdown was requested.
		select {
		case <-db.stopc:
			return nil
		default:
		}
		if !db.head.compactable() {
			break
		}
		mint := db.head.MinTime()
		maxt := rangeForTimestamp(mint, db.head.chunkRange)

		// Wrap head into a range that bounds all reads to it.
		// We remove 1 millisecond from maxt because block
		// intervals are half-open: [b.MinTime, b.MaxTime). But
		// chunk intervals are closed: [c.MinTime, c.MaxTime];
		// so in order to make sure that overlaps are evaluated
		// consistently, we explicitly remove the last value
		// from the block interval here.
		head := NewRangeHead(db.head, mint, maxt-1)
		if err := db.compactHead(head); err != nil {
			return err
		}
	}

	// Once the head is fully persisted, compact eligible on-disk blocks.
	return db.compactBlocks()
}
// CompactHead compacts the given RangeHead while holding the compaction
// mutex, serializing it against other compactions and deletions.
func (db *DB) CompactHead(head *RangeHead) (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	err = db.compactHead(head)
	return err
}
// compactHead compacts the given the RangeHead.
// The compaction mutex should be held before calling this method.
// On success the persisted block is picked up by reload and the head is
// truncated (either by reload via the new block, or manually when the
// compaction produced an empty block).
func (db *DB) compactHead(head *RangeHead) (err error) {
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	maxt := head.MaxTime() + 1
	uid, err := db.compactor.Write(db.dir, head, head.MinTime(), maxt, nil)
	if err != nil {
		return errors.Wrap(err, "persist head block")
	}

	// Release memory held by the compaction before reloading blocks.
	runtime.GC()

	if err := db.reload(); err != nil {
		// Best effort: remove the just-written block so it is not half-adopted.
		if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
			return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid)
		}
		return errors.Wrap(err, "reload blocks")
	}
	if (uid == ulid.ULID{}) {
		// Compaction resulted in an empty block.
		// Head truncating during db.reload() depends on the persisted blocks and
		// in this case no new block will be persisted so manually truncate the head.
		if err = db.head.Truncate(maxt); err != nil {
			return errors.Wrap(err, "head truncate failed (in compact)")
		}
	}
	runtime.GC()
	return nil
}
// compactBlocks compacts all the eligible on-disk blocks.
// The compaction mutex should be held before calling this method.
// It keeps asking the compactor for a plan and executing it until the plan
// is empty or a shutdown is requested.
func (db *DB) compactBlocks() (err error) {
	// Check for compactions of multiple blocks.
	for {
		plan, err := db.compactor.Plan(db.dir)
		if err != nil {
			return errors.Wrap(err, "plan compaction")
		}
		if len(plan) == 0 {
			break
		}

		// Abort between plans if a shutdown was requested.
		select {
		case <-db.stopc:
			return nil
		default:
		}

		uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
		if err != nil {
			return errors.Wrapf(err, "compact %s", plan)
		}
		runtime.GC()

		if err := db.reload(); err != nil {
			// Best effort: remove the freshly compacted block on a failed reload.
			if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
				return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid)
			}
			return errors.Wrap(err, "reload blocks")
		}
		runtime.GC()
	}

	return nil
}
// getBlock linearly scans allBlocks for the block with the given ULID.
// The boolean result reports whether a matching block was found.
func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
	for i := range allBlocks {
		if allBlocks[i].Meta().ULID == id {
			return allBlocks[i], true
		}
	}
	return nil, false
}
// reload blocks and trigger head truncation if new blocks appeared.
// Blocks that are obsolete due to replacement or retention will be deleted.
// The sequence is order-sensitive: open blocks -> compute deletables ->
// validate -> swap the in-memory block list -> delete obsolete blocks ->
// truncate the head.
func (db *DB) reload() (err error) {
	defer func() {
		if err != nil {
			db.metrics.reloadsFailed.Inc()
		}
		db.metrics.reloads.Inc()
	}()

	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool)
	if err != nil {
		return err
	}

	deletable := db.deletableBlocks(loadable)

	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
	// This makes it resilient against the process crashing towards the end of a compaction.
	// Creation of a new block and deletion of its parents cannot happen atomically.
	// By creating blocks with their parents, we can pick up the deletion where it left off during a crash.
	for _, block := range loadable {
		for _, b := range block.Meta().Compaction.Parents {
			delete(corrupted, b.ULID)
			// nil value: the parent is not loaded in memory, delete from disk only.
			deletable[b.ULID] = nil
		}
	}
	if len(corrupted) > 0 {
		// Close all new blocks to release the lock for windows.
		for _, block := range loadable {
			if _, open := getBlock(db.blocks, block.Meta().ULID); !open {
				block.Close()
			}
		}
		var merr tsdb_errors.MultiError
		for ulid, err := range corrupted {
			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
		}
		return merr.Err()
	}

	// All deletable blocks should not be loaded.
	var (
		bb         []*Block
		blocksSize int64
	)
	for _, block := range loadable {
		if _, ok := deletable[block.Meta().ULID]; ok {
			// Remember the loaded instance so deleteBlocks can close it first.
			deletable[block.Meta().ULID] = block
			continue
		}
		bb = append(bb, block)
		blocksSize += block.Size()
	}
	loadable = bb
	db.metrics.blocksBytes.Set(float64(blocksSize))

	sort.Slice(loadable, func(i, j int) bool {
		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
	})
	if !db.opts.AllowOverlappingBlocks {
		if err := validateBlockSequence(loadable); err != nil {
			return errors.Wrap(err, "invalid block sequence")
		}
	}

	// Swap new blocks first for subsequently created readers to be seen.
	db.mtx.Lock()
	oldBlocks := db.blocks
	db.blocks = loadable
	db.mtx.Unlock()

	blockMetas := make([]BlockMeta, 0, len(loadable))
	for _, b := range loadable {
		blockMetas = append(blockMetas, b.Meta())
	}
	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
		level.Warn(db.logger).Log("msg", "Overlapping blocks found during reload", "detail", overlaps.String())
	}

	// Old block instances that are scheduled for deletion must also be closed.
	for _, b := range oldBlocks {
		if _, ok := deletable[b.Meta().ULID]; ok {
			deletable[b.Meta().ULID] = b
		}
	}

	if err := db.deleteBlocks(deletable); err != nil {
		return err
	}

	// Garbage collect data in the head if the most recent persisted block
	// covers data of its current time range.
	if len(loadable) == 0 {
		return nil
	}

	maxt := loadable[len(loadable)-1].Meta().MaxTime

	return errors.Wrap(db.head.Truncate(maxt), "head truncate failed")
}
// openBlocks scans dir for block directories and returns the blocks that can
// be opened plus a map of ULIDs to open errors for corrupted ones.
// Blocks already present in loaded are reused instead of being reopened.
// Directories whose meta.json cannot be read are only logged and skipped —
// they are not reported as corrupted.
func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
	bDirs, err := blockDirs(dir)
	if err != nil {
		return nil, nil, errors.Wrap(err, "find blocks")
	}

	corrupted = make(map[ulid.ULID]error)
	for _, bDir := range bDirs {
		meta, _, err := readMetaFile(bDir)
		if err != nil {
			level.Error(l).Log("msg", "failed to read meta.json for a block", "dir", bDir, "err", err)
			continue
		}

		// See if we already have the block in memory or open it otherwise.
		block, open := getBlock(loaded, meta.ULID)
		if !open {
			block, err = OpenBlock(l, bDir, chunkPool)
			if err != nil {
				corrupted[meta.ULID] = err
				continue
			}
		}
		blocks = append(blocks, block)
	}
	return blocks, corrupted, nil
}
// deletableBlocks returns all blocks past retention policy.
// Note: it sorts the input slice in place (newest to oldest by MaxTime),
// which the retention helpers below rely on.
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
	// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
	// This ensures that the retentions will remove the oldest blocks.
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
	})

	deletable := make(map[ulid.ULID]*Block)

	// Blocks explicitly flagged deletable by compaction.
	for _, b := range blocks {
		if b.Meta().Compaction.Deletable {
			deletable[b.Meta().ULID] = b
		}
	}
	// Blocks past the time-based retention window.
	for id, b := range db.beyondTimeRetention(blocks) {
		deletable[id] = b
	}
	// Blocks past the size-based retention limit.
	for id, b := range db.beyondSizeRetention(blocks) {
		deletable[id] = b
	}

	return deletable
}
// beyondTimeRetention returns the blocks whose data is older than the
// configured RetentionDuration relative to the newest block.
// blocks must be sorted newest to oldest by MaxTime (see deletableBlocks).
// Returns a nil map (via the naked return) when retention is disabled or
// there are no blocks.
func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
	// Time retention is disabled or no blocks to work with.
	if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 {
		return
	}

	deletable = make(map[ulid.ULID]*Block)
	for i, block := range blocks {
		// The difference between the first block and this block is larger than
		// the retention period so any blocks after that are added as deletable.
		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration {
			// Everything from this block onward is older still (sorted input).
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = b
			}
			db.metrics.timeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
// beyondSizeRetention returns the oldest blocks that push the total on-disk
// size (WAL + head chunks + blocks) above opts.MaxBytes.
// blocks must be sorted newest to oldest by MaxTime (see deletableBlocks),
// so the newest data is kept and the oldest discarded first.
// Returns a nil map (via the naked return) when size retention is disabled.
func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
	// Size retention is disabled or no blocks to work with.
	if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 {
		return
	}

	deletable = make(map[ulid.ULID]*Block)

	// WAL size error is deliberately ignored; size 0 is used on failure.
	walSize, _ := db.Head().wal.Size()
	headChunksSize := db.Head().chunkDiskMapper.Size()
	// Initializing size counter with WAL size and Head chunks
	// written to disk, as that is part of the retention strategy.
	blocksSize := walSize + headChunksSize
	for i, block := range blocks {
		blocksSize += block.Size()
		if blocksSize > int64(db.opts.MaxBytes) {
			// Add this and all following blocks for deletion.
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = b
			}
			db.metrics.sizeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
// deleteBlocks closes and deletes blocks from the disk.
// When the map contains a non nil block object it means it is loaded in memory
// so needs to be closed first as it might need to wait for pending readers to complete.
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
	for id, b := range blocks {
		if b != nil {
			// Close failures are logged but do not abort the deletion.
			if closeErr := b.Close(); closeErr != nil {
				level.Warn(db.logger).Log("msg", "Closing block failed", "err", closeErr, "block", id)
			}
		}
		if rmErr := os.RemoveAll(filepath.Join(db.dir, id.String())); rmErr != nil {
			return errors.Wrapf(rmErr, "delete obsolete block %s", id)
		}
	}
	return nil
}
// validateBlockSequence returns error if given block meta files indicate that some blocks overlaps within sequence.
func validateBlockSequence(bs []*Block) error {
	if len(bs) <= 1 {
		return nil
	}

	metas := make([]BlockMeta, 0, len(bs))
	for _, b := range bs {
		metas = append(metas, b.meta)
	}

	if overlaps := OverlappingBlocks(metas); len(overlaps) > 0 {
		return errors.Errorf("block time ranges overlap: %s", overlaps)
	}
	return nil
}
// TimeRange specifies minTime and maxTime range.
type TimeRange struct {
	Min, Max int64
}

// Overlaps contains overlapping blocks aggregated by overlapping range.
// The key is the critical (narrowest common) time range of the group.
type Overlaps map[TimeRange][]BlockMeta
// String returns human readable string form of overlapped blocks.
// Group order follows Go's randomized map iteration, so repeated calls may
// list the groups in a different order.
func (o Overlaps) String() string {
	var parts []string
	for tr, metas := range o {
		descs := make([]string, 0, len(metas))
		for _, meta := range metas {
			descs = append(descs, fmt.Sprintf(
				"<ulid: %s, mint: %d, maxt: %d, range: %s>",
				meta.ULID.String(),
				meta.MinTime,
				meta.MaxTime,
				(time.Duration((meta.MaxTime-meta.MinTime)/1000)*time.Second).String(),
			))
		}
		parts = append(parts, fmt.Sprintf(
			"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
			tr.Min, tr.Max,
			(time.Duration((tr.Max-tr.Min)/1000)*time.Second).String(),
			len(metas),
			strings.Join(descs, ", ")),
		)
	}
	return strings.Join(parts, "\n")
}
// OverlappingBlocks returns all overlapping blocks from given meta files.
// Input must be sorted by MinTime (callers sort before invoking this).
// Returns nil when fewer than two blocks are given.
func OverlappingBlocks(bm []BlockMeta) Overlaps {
	if len(bm) <= 1 {
		return nil
	}
	var (
		overlaps [][]BlockMeta

		// pending contains not ended blocks in regards to "current" timestamp.
		pending = []BlockMeta{bm[0]}
		// continuousPending helps to aggregate same overlaps to single group.
		continuousPending = true
	)

	// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
	// We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current
	// timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending.
	for _, b := range bm[1:] {
		var newPending []BlockMeta

		for _, p := range pending {
			// "b.MinTime" is our current time.
			if b.MinTime >= p.MaxTime {
				// p ended before b started — no overlap, drop it from pending.
				continuousPending = false
				continue
			}

			// "p" overlaps with "b" and "p" is still pending.
			newPending = append(newPending, p)
		}

		// Our block "b" is now pending.
		pending = append(newPending, b)
		if len(newPending) == 0 {
			// No overlaps.
			continue
		}

		if continuousPending && len(overlaps) > 0 {
			// Same overlap group continues — extend the last group with b.
			overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
			continue
		}
		overlaps = append(overlaps, append(newPending, b))
		// Start new pendings.
		continuousPending = true
	}

	// Fetch the critical overlapped time range foreach overlap groups.
	// The critical range is the intersection: max of MinTimes, min of MaxTimes.
	// NOTE(review): Min starts at 0, so this assumes non-negative MinTimes —
	// presumably fine for millisecond epoch timestamps; confirm for exotic inputs.
	overlapGroups := Overlaps{}
	for _, overlap := range overlaps {
		minRange := TimeRange{Min: 0, Max: math.MaxInt64}
		for _, b := range overlap {
			if minRange.Max > b.MaxTime {
				minRange.Max = b.MaxTime
			}

			if minRange.Min < b.MinTime {
				minRange.Min = b.MinTime
			}
		}
		overlapGroups[minRange] = overlap
	}

	return overlapGroups
}
// String implements fmt.Stringer; the DB is always reported as "HEAD".
func (db *DB) String() string {
	return "HEAD"
}
// Blocks returns the databases persisted blocks.
func (db *DB) Blocks() []*Block {
	db.mtx.RLock()
	blocks := db.blocks
	db.mtx.RUnlock()
	return blocks
}
// Head returns the database's head.
func (db *DB) Head() *Head {
	return db.head
}
// Close the partition.
// Shutdown order matters: stop the run loop, cancel any running compaction,
// wait for the loop to exit, then close blocks, release the lock file and
// close the head. All close errors are collected and returned together.
func (db *DB) Close() error {
	close(db.stopc)
	db.compactCancel()
	// Wait for the background run loop to finish.
	<-db.donec

	db.mtx.Lock()
	defer db.mtx.Unlock()

	var g errgroup.Group

	// blocks also contains all head blocks.
	for _, pb := range db.blocks {
		g.Go(pb.Close)
	}

	var merr tsdb_errors.MultiError

	merr.Add(g.Wait())

	if db.lockf != nil {
		merr.Add(db.lockf.Release())
	}
	merr.Add(db.head.Close())
	return merr.Err()
}
// DisableCompactions disables auto compactions.
func (db *DB) DisableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = false
	level.Info(db.logger).Log("msg", "Compactions disabled")
}
// EnableCompactions enables auto compactions.
func (db *DB) EnableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = true
	level.Info(db.logger).Log("msg", "Compactions enabled")
}
// Snapshot writes the current data to the directory. If withHead is set to true it
// will create a new block containing all data that's currently in the memory buffer/WAL.
// It refuses to snapshot into the DB's own directory or a ULID-named directory
// (which would look like a block). Holds both cmtx and the read lock for the
// duration, so no compaction or deletion runs concurrently.
func (db *DB) Snapshot(dir string, withHead bool) error {
	if dir == db.dir {
		return errors.Errorf("cannot snapshot into base directory")
	}
	if _, err := ulid.ParseStrict(dir); err == nil {
		return errors.Errorf("dir must not be a valid ULID")
	}

	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		level.Info(db.logger).Log("msg", "Snapshotting block", "block", b)

		if err := b.Snapshot(dir); err != nil {
			return errors.Wrapf(err, "error snapshotting block: %s", b.Dir())
		}
	}
	if !withHead {
		return nil
	}

	mint := db.head.MinTime()
	maxt := db.head.MaxTime()
	head := &RangeHead{
		head: db.head,
		mint: mint,
		maxt: maxt,
	}
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
		return errors.Wrap(err, "snapshot head block")
	}
	return nil
}
// Querier returns a new querier over the data partition for the given time range.
// A goroutine must not handle more than one open Querier.
// Blocks overlapping [mint, maxt] are included, plus a RangeHead when the
// head covers part of the range. If opening any block querier fails, all
// previously opened ones are closed before returning the error.
func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
	var blocks []BlockReader
	var blockMetas []BlockMeta

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			blocks = append(blocks, b)
			blockMetas = append(blockMetas, b.Meta())
		}
	}
	if maxt >= db.head.MinTime() {
		blocks = append(blocks, &RangeHead{
			head: db.head,
			mint: mint,
			maxt: maxt,
		})
	}

	blockQueriers := make([]storage.Querier, 0, len(blocks))
	for _, b := range blocks {
		q, err := NewBlockQuerier(b, mint, maxt)
		if err == nil {
			blockQueriers = append(blockQueriers, q)
			continue
		}
		// If we fail, all previously opened queriers must be closed.
		for _, q := range blockQueriers {
			q.Close()
		}
		return nil, errors.Wrapf(err, "open querier for block %s", b)
	}

	// Overlapping blocks require the vertical querier for correct merging.
	if len(OverlappingBlocks(blockMetas)) > 0 {
		return &verticalQuerier{
			querier: querier{
				blocks: blockQueriers,
			},
		}, nil
	}

	return &querier{
		blocks: blockQueriers,
	}, nil
}
// ChunkQuerier is not yet supported and always returns an error.
func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
	// TODO(bwplotka): Implement in next PR.
	return nil, errors.New("not implemented")
}
// rangeForTimestamp returns the exclusive upper bound of the width-aligned
// window containing t: for non-negative t this is the smallest multiple of
// width strictly greater than t. Uses Go's truncating integer division.
func rangeForTimestamp(t int64, width int64) (maxt int64) {
	maxt = (t/width)*width + width
	return maxt
}
// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
// Deletions run concurrently across all overlapping blocks plus the head;
// the first error (if any) is returned by g.Wait.
func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	var g errgroup.Group

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			// The immediately-invoked wrapper captures b by value, avoiding the
			// classic loop-variable capture bug with goroutines.
			g.Go(func(b *Block) func() error {
				return func() error { return b.Delete(mint, maxt, ms...) }
			}(b))
		}
	}
	g.Go(func() error {
		return db.head.Delete(mint, maxt, ms...)
	})
	return g.Wait()
}
// CleanTombstones re-writes any blocks with tombstones.
// On failure, any newly created block directories are removed again so the
// on-disk state is not left half-migrated. Finishes with a reload so the
// rewritten blocks replace the originals.
func (db *DB) CleanTombstones() (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	start := time.Now()
	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())

	newUIDs := []ulid.ULID{}
	defer func() {
		// If any error is caused, we need to delete all the new directory created.
		if err != nil {
			for _, uid := range newUIDs {
				dir := filepath.Join(db.Dir(), uid.String())
				if err := os.RemoveAll(dir); err != nil {
					level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
				}
			}
		}
	}()

	// Snapshot the block list; iteration happens without holding the lock.
	db.mtx.RLock()
	blocks := db.blocks[:]
	db.mtx.RUnlock()

	for _, b := range blocks {
		if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
			err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
			return err
		} else if uid != nil { // New block was created.
			newUIDs = append(newUIDs, *uid)
		}
	}
	return errors.Wrap(db.reload(), "reload blocks")
}
// isBlockDir reports whether fi is a directory whose name parses as a
// valid ULID, i.e. whether it looks like a TSDB block directory.
func isBlockDir(fi os.FileInfo) bool {
	if !fi.IsDir() {
		return false
	}
	if _, err := ulid.ParseStrict(fi.Name()); err != nil {
		return false
	}
	return true
}
// blockDirs returns the full paths of all block directories (ULID-named
// subdirectories) directly under dir.
func blockDirs(dir string) ([]string, error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	var dirs []string
	for _, fi := range entries {
		if !isBlockDir(fi) {
			continue
		}
		dirs = append(dirs, filepath.Join(dir, fi.Name()))
	}
	return dirs, nil
}
func sequenceFiles(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func nextSequenceFile(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", 0, err
}
i := uint64(0)
for _, f := range files {
j, err := strconv.ParseUint(f.Name(), 10, 64)
if err != nil {
continue
}
i = j
}
return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
}
// closeAll closes every closer in cs, collecting all errors into one.
func closeAll(cs []io.Closer) error {
	var merr tsdb_errors.MultiError
	for i := range cs {
		merr.Add(cs[i].Close())
	}
	return merr.Err()
}
func exponential(d, min, max time.Duration) time.Duration {
d *= 2
if d < min {
d = min
}
if d > max {
d = max
}
return d
}
// Fix panic from db metrics (#7501)
// Signed-off-by: Ganesh Vernekar <cb5593e95c6f4f9d31a7d363cec8452e89af4967@iith.ac.in>
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tsdb implements a time series storage for float64 sample data.
package tsdb
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/wal"
"golang.org/x/sync/errgroup"
// Load the package into main to make sure minium Go version is met.
_ "github.com/prometheus/prometheus/tsdb/goversion"
)
const (
	// Default duration of a block in milliseconds.
	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
)

var (
	// ErrNotReady is returned if the underlying storage is not ready yet.
	ErrNotReady = errors.New("TSDB not ready")
)
// DefaultOptions used for the DB. They are sane for setups using
// millisecond precision timestamps.
func DefaultOptions() *Options {
	return &Options{
		// Keep 15 days of data by default.
		RetentionDuration:      int64(15 * 24 * time.Hour / time.Millisecond),
		MinBlockDuration:       DefaultBlockDuration,
		MaxBlockDuration:       DefaultBlockDuration,
		WALSegmentSize:         wal.DefaultSegmentSize,
		WALCompression:         false,
		NoLockfile:             false,
		AllowOverlappingBlocks: false,
		StripeSize:             DefaultStripeSize,
	}
}
// Options of the DB storage.
type Options struct {
	// Segments (wal files) max size.
	// WALSegmentSize = 0, segment size is default size.
	// WALSegmentSize > 0, segment size is WALSegmentSize.
	// WALSegmentSize < 0, wal is disabled.
	WALSegmentSize int

	// Duration of persisted data to keep.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	// 0 disables time-based retention (see beyondTimeRetention).
	RetentionDuration int64

	// Maximum number of bytes in blocks to be retained.
	// 0 or less means disabled.
	// NOTE: For proper storage calculations need to consider
	// the size of the WAL folder which is not added when calculating
	// the current size of the database.
	MaxBytes int64

	// NoLockfile disables creation and consideration of a lock file.
	NoLockfile bool

	// Overlapping blocks are allowed if AllowOverlappingBlocks is true.
	// This in-turn enables vertical compaction and vertical query merge.
	AllowOverlappingBlocks bool

	// WALCompression will turn on Snappy compression for records on the WAL.
	WALCompression bool

	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
	StripeSize int

	// The timestamp range of head blocks after which they get persisted.
	// It's the minimum duration of any persisted block.
	// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	MinBlockDuration int64

	// The maximum timestamp range of compacted blocks.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
	// Typically it is in milliseconds.
	MaxBlockDuration int64

	// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
	// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
	SeriesLifecycleCallback SeriesLifecycleCallback
}
// DB handles reads and writes of time series falling into
// a hashed partition of a seriedb.
type DB struct {
	dir   string
	lockf fileutil.Releaser // nil when Options.NoLockfile is set

	logger    log.Logger
	metrics   *dbMetrics
	opts      *Options
	chunkPool chunkenc.Pool
	compactor Compactor

	// Mutex for that must be held when modifying the general block layout.
	mtx    sync.RWMutex
	blocks []*Block

	head *Head

	compactc chan struct{} // capacity-1 signal channel for the compaction loop
	donec    chan struct{} // closed when the run loop exits
	stopc    chan struct{} // closed by Close to request shutdown

	// cmtx ensures that compactions and deletions don't run simultaneously.
	cmtx sync.Mutex

	// autoCompactMtx ensures that no compaction gets triggered while
	// changing the autoCompact var.
	autoCompactMtx sync.Mutex
	autoCompact    bool

	// Cancel a running compaction when a shutdown is initiated.
	compactCancel context.CancelFunc
}
// dbMetrics bundles all Prometheus metrics exposed by the DB.
// Created (and optionally registered) by newDBMetrics.
type dbMetrics struct {
	loadedBlocks         prometheus.GaugeFunc
	symbolTableSize      prometheus.GaugeFunc
	reloads              prometheus.Counter
	reloadsFailed        prometheus.Counter
	compactionsFailed    prometheus.Counter
	compactionsTriggered prometheus.Counter
	compactionsSkipped   prometheus.Counter
	sizeRetentionCount   prometheus.Counter
	timeRetentionCount   prometheus.Counter
	startTime            prometheus.GaugeFunc
	tombCleanTimer       prometheus.Histogram
	blocksBytes          prometheus.Gauge
	maxBytes             prometheus.Gauge
}
// newDBMetrics creates all DB metrics and, when r is non-nil, registers them.
// The gauge functions close over db and read its state lazily at scrape time,
// taking db.mtx themselves.
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
	m := &dbMetrics{}

	m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_blocks_loaded",
		Help: "Number of currently loaded data blocks",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		return float64(len(db.blocks))
	})
	m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_symbol_table_size_bytes",
		Help: "Size of symbol table on disk (in bytes)",
	}, func() float64 {
		// Copy the slice header under the lock; sizes are summed outside it.
		db.mtx.RLock()
		blocks := db.blocks[:]
		db.mtx.RUnlock()
		symTblSize := uint64(0)
		for _, b := range blocks {
			symTblSize += b.GetSymbolTableSize()
		}
		return float64(symTblSize)
	})
	m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_total",
		Help: "Number of times the database reloaded block data from disk.",
	})
	m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_failures_total",
		Help: "Number of times the database failed to reload block data from disk.",
	})
	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_triggered_total",
		Help: "Total number of triggered compactions for the partition.",
	})
	m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_failed_total",
		Help: "Total number of compactions that failed for the partition.",
	})
	m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_time_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.",
	})
	m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_skipped_total",
		Help: "Total number of skipped compactions due to disabled auto compaction.",
	})
	m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_lowest_timestamp",
		Help: "Lowest timestamp value stored in the database. The unit is decided by the library consumer.",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		if len(db.blocks) == 0 {
			return float64(db.head.MinTime())
		}
		return float64(db.blocks[0].meta.MinTime)
	})
	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "prometheus_tsdb_tombstone_cleanup_seconds",
		Help: "The time taken to recompact blocks to remove tombstones.",
	})
	m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_storage_blocks_bytes",
		Help: "The number of bytes that are currently used for local storage by all blocks.",
	})
	m.maxBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_retention_limit_bytes",
		Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
	})
	m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_size_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
	})

	// Registration is optional so callers can create a metrics-free DB.
	if r != nil {
		r.MustRegister(
			m.loadedBlocks,
			m.symbolTableSize,
			m.reloads,
			m.reloadsFailed,
			m.compactionsFailed,
			m.compactionsTriggered,
			m.compactionsSkipped,
			m.sizeRetentionCount,
			m.timeRetentionCount,
			m.startTime,
			m.tombCleanTimer,
			m.blocksBytes,
			m.maxBytes,
		)
	}
	return m
}
// ErrClosed is returned when the db is closed.
var ErrClosed = errors.New("db already closed")

// DBReadOnly provides APIs for read only operations on a database.
// Current implementation doesn't support concurrency so
// all API calls should happen in the same go routine.
type DBReadOnly struct {
	logger  log.Logger
	dir     string
	closers []io.Closer     // resources to release on Close
	closed  chan struct{}   // closed to signal the read-only DB is shut down
}
// OpenDBReadOnly opens DB in the given directory for read only operations.
// The directory must already exist; a nop logger is substituted when l is nil.
func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
	if _, err := os.Stat(dir); err != nil {
		return nil, errors.Wrap(err, "opening the db dir")
	}

	if l == nil {
		l = log.NewNopLogger()
	}

	db := &DBReadOnly{
		logger: l,
		dir:    dir,
		closed: make(chan struct{}),
	}
	return db, nil
}
// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL.
// Samples that are in existing blocks will not be written to the new block.
// Note that if the read only database is running concurrently with a
// writable database then writing the WAL to the database directory can race.
func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
	blockReaders, err := db.Blocks()
	if err != nil {
		return errors.Wrap(err, "read blocks")
	}
	// maxBlockTime bounds which WAL samples are considered valid during Init.
	maxBlockTime := int64(math.MinInt64)
	if len(blockReaders) > 0 {
		maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
	}
	w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
	if err != nil {
		return err
	}
	head, err := NewHead(nil, db.logger, w, 1, db.dir, nil, DefaultStripeSize, nil)
	if err != nil {
		return err
	}
	defer func() {
		// Always close the head; merge its close error with the return error.
		var merr tsdb_errors.MultiError
		merr.Add(returnErr)
		merr.Add(errors.Wrap(head.Close(), "closing Head"))
		returnErr = merr.Err()
	}()
	// Set the min valid time for the ingested wal samples
	// to be no lower than the maxt of the last block.
	if err := head.Init(maxBlockTime); err != nil {
		return errors.Wrap(err, "read WAL")
	}
	mint := head.MinTime()
	maxt := head.MaxTime()
	rh := &RangeHead{
		head: head,
		mint: mint,
		maxt: maxt,
	}
	compactor, err := NewLeveledCompactor(
		context.Background(),
		nil,
		db.logger,
		ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
		chunkenc.NewPool(),
	)
	if err != nil {
		return errors.Wrap(err, "create leveled compactor")
	}
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	_, err = compactor.Write(dir, rh, mint, maxt+1, nil)
	return errors.Wrap(err, "writing WAL")
}
// Querier loads the wal and returns a new querier over the data partition for the given time range.
// Current implementation doesn't support multiple Queriers.
func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	// Reject calls after Close.
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	blockReaders, err := db.Blocks()
	if err != nil {
		return nil, err
	}
	// Down-cast the readers to concrete *Block values so they can be handed
	// to the writable DB instance built below.
	blocks := make([]*Block, len(blockReaders))
	for i, b := range blockReaders {
		b, ok := b.(*Block)
		if !ok {
			return nil, errors.New("unable to convert a read only block to a normal block")
		}
		blocks[i] = b
	}
	// Start with an empty head that has no WAL attached.
	head, err := NewHead(nil, db.logger, nil, 1, db.dir, nil, DefaultStripeSize, nil)
	if err != nil {
		return nil, err
	}
	maxBlockTime := int64(math.MinInt64)
	if len(blocks) > 0 {
		// Blocks are MinTime-sorted; assumes the last also has the largest
		// MaxTime (non-overlapping blocks) — TODO confirm.
		maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
	}
	// Also add the WAL if the current blocks don't cover the requests time range.
	if maxBlockTime <= maxt {
		// Swap the empty head for one that replays the on-disk WAL.
		if err := head.Close(); err != nil {
			return nil, err
		}
		w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
		if err != nil {
			return nil, err
		}
		// NOTE(review): if NewHead or Init fails below, w is never closed —
		// possible resource leak; confirm.
		head, err = NewHead(nil, db.logger, w, 1, db.dir, nil, DefaultStripeSize, nil)
		if err != nil {
			return nil, err
		}
		// Set the min valid time for the ingested wal samples
		// to be no lower than the maxt of the last block.
		if err := head.Init(maxBlockTime); err != nil {
			return nil, errors.Wrap(err, "read WAL")
		}
		// Set the wal to nil to disable all wal operations.
		// This is mainly to avoid blocking when closing the head.
		head.wal = nil
	}
	// Track the head so DBReadOnly.Close releases it.
	db.closers = append(db.closers, head)

	// TODO: Refactor so that it is possible to obtain a Querier without initializing a writable DB instance.
	// Option 1: refactor DB to have the Querier implementation using the DBReadOnly.Querier implementation not the opposite.
	// Option 2: refactor Querier to use another independent func which
	// can than be used by a read only and writable db instances without any code duplication.
	dbWritable := &DB{
		dir:    db.dir,
		logger: db.logger,
		blocks: blocks,
		head:   head,
	}
	return dbWritable.Querier(ctx, mint, maxt)
}
// ChunkQuerier is not yet supported for read only databases.
func (db *DBReadOnly) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
	// TODO(bwplotka): Implement in next PR.
	err := errors.New("not implemented")
	return nil, err
}
// Blocks returns a slice of block readers for persisted blocks.
func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
	// Reject calls after Close.
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil)
	if err != nil {
		return nil, err
	}

	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
	for _, block := range loadable {
		for _, b := range block.Meta().Compaction.Parents {
			delete(corrupted, b.ULID)
		}
	}
	if len(corrupted) > 0 {
		// Release everything that was opened before reporting the corruption.
		for _, b := range loadable {
			if err := b.Close(); err != nil {
				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b)
			}
		}
		var merr tsdb_errors.MultiError
		for ulid, err := range corrupted {
			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
		}
		return nil, merr.Err()
	}

	if len(loadable) == 0 {
		return nil, nil
	}

	// Callers rely on MinTime order (see FlushWAL / Querier).
	sort.Slice(loadable, func(i, j int) bool {
		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
	})

	blockMetas := make([]BlockMeta, 0, len(loadable))
	for _, b := range loadable {
		blockMetas = append(blockMetas, b.Meta())
	}
	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
		level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String())
	}

	// Close all previously open readers and add the new ones to the cache.
	// Close errors are deliberately ignored here.
	for _, closer := range db.closers {
		closer.Close()
	}

	blockClosers := make([]io.Closer, len(loadable))
	blockReaders := make([]BlockReader, len(loadable))
	for i, b := range loadable {
		blockClosers[i] = b
		blockReaders[i] = b
	}
	db.closers = blockClosers

	return blockReaders, nil
}
// Close releases all block readers held by the read only database.
// Subsequent API calls fail with ErrClosed.
func (db *DBReadOnly) Close() error {
	select {
	case <-db.closed:
		return ErrClosed
	default:
	}
	close(db.closed)

	var merr tsdb_errors.MultiError
	for i := range db.closers {
		merr.Add(db.closers[i].Close())
	}
	return merr.Err()
}
// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db *DB, err error) {
	validated, rngs := validateOpts(opts, nil)
	return open(dir, l, r, validated, rngs)
}
// validateOpts fills in defaults for unset or out-of-range options and
// derives the compaction block ranges when none are supplied.
func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
	if opts == nil {
		opts = DefaultOptions()
	}
	if opts.StripeSize <= 0 {
		opts.StripeSize = DefaultStripeSize
	}
	if opts.MinBlockDuration <= 0 {
		opts.MinBlockDuration = DefaultBlockDuration
	}
	// The maximum can never be smaller than the minimum.
	if opts.MaxBlockDuration < opts.MinBlockDuration {
		opts.MaxBlockDuration = opts.MinBlockDuration
	}

	if len(rngs) == 0 {
		// Start with smallest block duration and create exponential buckets until the exceed the
		// configured maximum block duration.
		rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
	}
	return opts, rngs
}
// open returns a new DB in the given directory: it creates the directory if
// needed, acquires the lock file (unless disabled), opens the WAL, head and
// persisted blocks, and starts the background compaction loop.
func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (db *DB, err error) {
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}
	if l == nil {
		l = log.NewNopLogger()
	}

	// Drop compaction ranges above the configured maximum block duration.
	for i, v := range rngs {
		if v > opts.MaxBlockDuration {
			rngs = rngs[:i]
			break
		}
	}

	// Fixup bad format written by Prometheus 2.1.
	if err := repairBadIndexVersion(l, dir); err != nil {
		return nil, err
	}
	// Migrate old WAL if one exists.
	if err := MigrateWAL(l, filepath.Join(dir, "wal")); err != nil {
		return nil, errors.Wrap(err, "migrate WAL")
	}

	db = &DB{
		dir:         dir,
		logger:      l,
		opts:        opts,
		compactc:    make(chan struct{}, 1),
		donec:       make(chan struct{}),
		stopc:       make(chan struct{}),
		autoCompact: true,
		chunkPool:   chunkenc.NewPool(),
	}
	if !opts.NoLockfile {
		absdir, err := filepath.Abs(dir)
		if err != nil {
			return nil, err
		}
		lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock"))
		if err != nil {
			return nil, errors.Wrap(err, "lock DB directory")
		}
		db.lockf = lockf
	}

	ctx, cancel := context.WithCancel(context.Background())
	db.compactor, err = NewLeveledCompactor(ctx, r, l, rngs, db.chunkPool)
	if err != nil {
		cancel()
		return nil, errors.Wrap(err, "create leveled compactor")
	}
	db.compactCancel = cancel
	// NOTE(review): error returns below this point leak the lock file and the
	// compactor context — confirm whether cleanup is needed.

	var wlog *wal.WAL
	segmentSize := wal.DefaultSegmentSize
	walDir := filepath.Join(dir, "wal")
	// Wal is enabled.
	if opts.WALSegmentSize >= 0 {
		// Wal is set to a custom size.
		if opts.WALSegmentSize > 0 {
			segmentSize = opts.WALSegmentSize
		}
		wlog, err = wal.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
		if err != nil {
			return nil, err
		}
	}

	db.head, err = NewHead(r, l, wlog, rngs[0], dir, db.chunkPool, opts.StripeSize, opts.SeriesLifecycleCallback)
	if err != nil {
		return nil, err
	}

	// Register metrics after assigning the head block.
	db.metrics = newDBMetrics(db, r)
	maxBytes := opts.MaxBytes
	if maxBytes < 0 {
		maxBytes = 0
	}
	db.metrics.maxBytes.Set(float64(maxBytes))

	if err := db.reload(); err != nil {
		return nil, err
	}
	// Set the min valid time for the ingested samples
	// to be no lower than the maxt of the last block.
	blocks := db.Blocks()
	minValidTime := int64(math.MinInt64)
	if len(blocks) > 0 {
		minValidTime = blocks[len(blocks)-1].Meta().MaxTime
	}

	if initErr := db.head.Init(minValidTime); initErr != nil {
		db.head.metrics.walCorruptionsTotal.Inc()
		level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
		// BUGFIX: wlog is nil when the WAL is disabled (opts.WALSegmentSize < 0);
		// calling Repair on it would panic. Surface the init error instead.
		if wlog == nil {
			return nil, errors.Wrap(initErr, "read WAL")
		}
		if err := wlog.Repair(initErr); err != nil {
			return nil, errors.Wrap(err, "repair corrupted WAL")
		}
	}

	go db.run()

	return db, nil
}
// StartTime implements the Storage interface.
// It reports the MinTime of the oldest block, falling back to the head when
// no blocks are persisted.
func (db *DB) StartTime() (int64, error) {
	db.mtx.RLock()
	defer db.mtx.RUnlock()

	if len(db.blocks) == 0 {
		return db.head.MinTime(), nil
	}
	return db.blocks[0].Meta().MinTime, nil
}
// Dir returns the directory of the database.
// Block directories (named by ULID) and the "wal" sub-directory are created
// beneath it.
func (db *DB) Dir() string {
	return db.dir
}
// run is the DB's background loop: it checks for compaction work every minute
// (or immediately when signalled via compactc) until stopc is closed.
func (db *DB) run() {
	defer close(db.donec)

	// backoff delays the next iteration after a failed compaction; it grows
	// exponentially up to one minute and resets on success.
	backoff := time.Duration(0)

	for {
		select {
		case <-db.stopc:
			return
		case <-time.After(backoff):
		}

		select {
		case <-time.After(1 * time.Minute):
			// Periodic nudge; non-blocking send so a pending signal
			// is not duplicated.
			select {
			case db.compactc <- struct{}{}:
			default:
			}
		case <-db.compactc:
			db.metrics.compactionsTriggered.Inc()

			db.autoCompactMtx.Lock()
			if db.autoCompact {
				if err := db.Compact(); err != nil {
					level.Error(db.logger).Log("msg", "compaction failed", "err", err)
					backoff = exponential(backoff, 1*time.Second, 1*time.Minute)
				} else {
					backoff = 0
				}
			} else {
				// Auto compaction disabled via DisableCompactions.
				db.metrics.compactionsSkipped.Inc()
			}
			db.autoCompactMtx.Unlock()
		case <-db.stopc:
			return
		}
	}
}
// Appender opens a new appender against the database.
func (db *DB) Appender() storage.Appender {
	headAppender := db.head.Appender()
	return dbAppender{db: db, Appender: headAppender}
}
// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
	storage.Appender     // Head appender performing the actual writes.
	db               *DB // Used to signal compactc after Commit.
}
// Commit delegates to the head appender and, when the head has grown past
// its compactable threshold, nudges the background compaction loop.
func (a dbAppender) Commit() error {
	err := a.Appender.Commit()

	// We could just run this check every few minutes practically. But for benchmarks
	// and high frequency use cases this is the safer way.
	if !a.db.head.compactable() {
		return err
	}
	// Non-blocking send: an already-pending signal is sufficient.
	select {
	case a.db.compactc <- struct{}{}:
	default:
	}
	return err
}
// Compact data if possible. After successful compaction blocks are reloaded
// which will also trigger blocks to be deleted that fall out of the retention
// window.
// If no blocks are compacted, the retention window state doesn't change. Thus,
// this is sufficient to reliably delete old data.
// Old blocks are only deleted on reload based on the new block's parent information.
// See DB.reload documentation for further information.
func (db *DB) Compact() (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()
	// Count any failure, from either head or block compaction.
	defer func() {
		if err != nil {
			db.metrics.compactionsFailed.Inc()
		}
	}()
	// Check whether we have pending head blocks that are ready to be persisted.
	// They have the highest priority.
	for {
		// Bail out promptly on shutdown.
		select {
		case <-db.stopc:
			return nil
		default:
		}
		if !db.head.compactable() {
			break
		}
		mint := db.head.MinTime()
		maxt := rangeForTimestamp(mint, db.head.chunkRange)

		// Wrap head into a range that bounds all reads to it.
		// We remove 1 millisecond from maxt because block
		// intervals are half-open: [b.MinTime, b.MaxTime). But
		// chunk intervals are closed: [c.MinTime, c.MaxTime];
		// so in order to make sure that overlaps are evaluated
		// consistently, we explicitly remove the last value
		// from the block interval here.
		head := NewRangeHead(db.head, mint, maxt-1)
		if err := db.compactHead(head); err != nil {
			return err
		}
	}

	// With the head drained, compact eligible persisted blocks.
	return db.compactBlocks()
}
// CompactHead compacts the given RangeHead.
// It takes the compaction mutex and delegates to compactHead.
func (db *DB) CompactHead(head *RangeHead) (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	return db.compactHead(head)
}
// compactHead compacts the given RangeHead into a persisted block and reloads
// the block list.
// The compaction mutex should be held before calling this method.
func (db *DB) compactHead(head *RangeHead) (err error) {
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	maxt := head.MaxTime() + 1
	uid, err := db.compactor.Write(db.dir, head, head.MinTime(), maxt, nil)
	if err != nil {
		return errors.Wrap(err, "persist head block")
	}

	runtime.GC()

	if err := db.reload(); err != nil {
		// Roll back the just-written block so a retry starts from a clean state.
		if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
			return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid)
		}
		return errors.Wrap(err, "reload blocks")
	}
	if (uid == ulid.ULID{}) {
		// Compaction resulted in an empty block.
		// Head truncating during db.reload() depends on the persisted blocks and
		// in this case no new block will be persisted so manually truncate the head.
		if err = db.head.Truncate(maxt); err != nil {
			return errors.Wrap(err, "head truncate failed (in compact)")
		}
	}
	runtime.GC()
	return nil
}
// compactBlocks compacts all the eligible on-disk blocks.
// The compaction mutex should be held before calling this method.
func (db *DB) compactBlocks() (err error) {
	// Check for compactions of multiple blocks.
	for {
		plan, err := db.compactor.Plan(db.dir)
		if err != nil {
			return errors.Wrap(err, "plan compaction")
		}
		if len(plan) == 0 {
			// Nothing left to merge.
			break
		}

		// Bail out promptly on shutdown.
		select {
		case <-db.stopc:
			return nil
		default:
		}

		uid, err := db.compactor.Compact(db.dir, plan, db.blocks)
		if err != nil {
			return errors.Wrapf(err, "compact %s", plan)
		}
		runtime.GC()

		if err := db.reload(); err != nil {
			// Roll back the freshly compacted block on a failed reload.
			if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil {
				return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid)
			}
			return errors.Wrap(err, "reload blocks")
		}
		runtime.GC()
	}

	return nil
}
// getBlock iterates a given block range to find a block by a given id.
// If found it returns the block itself and a boolean to indicate that it was found.
func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
	for i := range allBlocks {
		if allBlocks[i].Meta().ULID == id {
			return allBlocks[i], true
		}
	}
	return nil, false
}
// reload blocks and trigger head truncation if new blocks appeared.
// Blocks that are obsolete due to replacement or retention will be deleted.
func (db *DB) reload() (err error) {
	// Track every reload attempt and every failure.
	defer func() {
		if err != nil {
			db.metrics.reloadsFailed.Inc()
		}
		db.metrics.reloads.Inc()
	}()

	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool)
	if err != nil {
		return err
	}

	deletable := db.deletableBlocks(loadable)

	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
	// This makes it resilient against the process crashing towards the end of a compaction.
	// Creation of a new block and deletion of its parents cannot happen atomically.
	// By creating blocks with their parents, we can pick up the deletion where it left off during a crash.
	for _, block := range loadable {
		for _, b := range block.Meta().Compaction.Parents {
			delete(corrupted, b.ULID)
			deletable[b.ULID] = nil
		}
	}
	if len(corrupted) > 0 {
		// Close all new blocks to release the lock for windows.
		for _, block := range loadable {
			if _, open := getBlock(db.blocks, block.Meta().ULID); !open {
				block.Close()
			}
		}
		var merr tsdb_errors.MultiError
		for ulid, err := range corrupted {
			merr.Add(errors.Wrapf(err, "corrupted block %s", ulid.String()))
		}
		return merr.Err()
	}

	// All deletable blocks should not be loaded.
	var (
		bb         []*Block
		blocksSize int64
	)
	for _, block := range loadable {
		if _, ok := deletable[block.Meta().ULID]; ok {
			// Keep the in-memory instance in the map so deleteBlocks
			// can close it before removing the directory.
			deletable[block.Meta().ULID] = block
			continue
		}
		bb = append(bb, block)
		blocksSize += block.Size()
	}
	loadable = bb
	db.metrics.blocksBytes.Set(float64(blocksSize))

	sort.Slice(loadable, func(i, j int) bool {
		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
	})
	if !db.opts.AllowOverlappingBlocks {
		if err := validateBlockSequence(loadable); err != nil {
			return errors.Wrap(err, "invalid block sequence")
		}
	}

	// Swap new blocks first for subsequently created readers to be seen.
	db.mtx.Lock()
	oldBlocks := db.blocks
	db.blocks = loadable
	db.mtx.Unlock()

	blockMetas := make([]BlockMeta, 0, len(loadable))
	for _, b := range loadable {
		blockMetas = append(blockMetas, b.Meta())
	}
	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
		level.Warn(db.logger).Log("msg", "Overlapping blocks found during reload", "detail", overlaps.String())
	}

	// Replace map placeholders with the previously loaded instances so they
	// are closed before deletion.
	for _, b := range oldBlocks {
		if _, ok := deletable[b.Meta().ULID]; ok {
			deletable[b.Meta().ULID] = b
		}
	}

	if err := db.deleteBlocks(deletable); err != nil {
		return err
	}

	// Garbage collect data in the head if the most recent persisted block
	// covers data of its current time range.
	if len(loadable) == 0 {
		return nil
	}

	maxt := loadable[len(loadable)-1].Meta().MaxTime

	return errors.Wrap(db.head.Truncate(maxt), "head truncate failed")
}
// openBlocks opens every block directory under dir, reusing any block already
// present in loaded. It returns the usable blocks plus a map, keyed by ULID,
// of blocks that failed to open.
func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
	bDirs, err := blockDirs(dir)
	if err != nil {
		return nil, nil, errors.Wrap(err, "find blocks")
	}

	corrupted = make(map[ulid.ULID]error)
	for _, bDir := range bDirs {
		meta, _, err := readMetaFile(bDir)
		if err != nil {
			// Without a readable meta file the ULID is unknown, so the
			// directory is logged and skipped rather than marked corrupted.
			level.Error(l).Log("msg", "failed to read meta.json for a block", "dir", bDir, "err", err)
			continue
		}

		// See if we already have the block in memory or open it otherwise.
		block, open := getBlock(loaded, meta.ULID)
		if !open {
			block, err = OpenBlock(l, bDir, chunkPool)
			if err != nil {
				corrupted[meta.ULID] = err
				continue
			}
		}
		blocks = append(blocks, block)
	}
	return blocks, corrupted, nil
}
// deletableBlocks returns all blocks past retention policy.
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block {
	deletable := make(map[ulid.ULID]*Block)

	// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
	// This ensures that the retentions will remove the oldest blocks.
	// NOTE: this sorts the caller's slice in place.
	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
	})

	// Blocks explicitly flagged deletable by compaction.
	for _, b := range blocks {
		if b.Meta().Compaction.Deletable {
			deletable[b.Meta().ULID] = b
		}
	}
	// Blocks past the time- and size-based retention limits.
	for id, b := range db.beyondTimeRetention(blocks) {
		deletable[id] = b
	}
	for id, b := range db.beyondSizeRetention(blocks) {
		deletable[id] = b
	}
	return deletable
}
// beyondTimeRetention returns the blocks whose data lies further in the past
// than RetentionDuration allows, measured against the newest block's MaxTime.
func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
	// Time retention is disabled or no blocks to work with.
	// NOTE(review): the emptiness check uses db.blocks rather than the blocks
	// argument — confirm this is intentional.
	if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 {
		return
	}

	deletable = make(map[ulid.ULID]*Block)
	// blocks is expected sorted newest-first (see deletableBlocks).
	for i, block := range blocks {
		// The difference between the first block and this block is larger than
		// the retention period so any blocks after that are added as deletable.
		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration {
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = b
			}
			db.metrics.timeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
// beyondSizeRetention returns the blocks that push the cumulative on-disk size
// (WAL + head chunks + blocks, accumulated newest-first) above MaxBytes.
func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) {
	// Size retention is disabled or no blocks to work with.
	if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 {
		return
	}

	deletable = make(map[ulid.ULID]*Block)

	// NOTE(review): assumes db.Head().wal is usable here even though the WAL
	// can be disabled in open() — confirm Size() tolerates that.
	walSize, _ := db.Head().wal.Size()
	headChunksSize := db.Head().chunkDiskMapper.Size()
	// Initializing size counter with WAL size and Head chunks
	// written to disk, as that is part of the retention strategy.
	blocksSize := walSize + headChunksSize
	// blocks is expected sorted newest-first (see deletableBlocks).
	for i, block := range blocks {
		blocksSize += block.Size()
		if blocksSize > int64(db.opts.MaxBytes) {
			// Add this and all following blocks for deletion.
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = b
			}
			db.metrics.sizeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
// deleteBlocks closes and deletes blocks from the disk.
// When the map contains a non nil block object it means it is loaded in memory
// so needs to be closed first as it might need to wait for pending readers to complete.
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
	for id, b := range blocks {
		if b != nil {
			// A close failure is logged but does not stop the deletion.
			if err := b.Close(); err != nil {
				level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", id)
			}
		}
		if err := os.RemoveAll(filepath.Join(db.dir, id.String())); err != nil {
			return errors.Wrapf(err, "delete obsolete block %s", id)
		}
	}
	return nil
}
// validateBlockSequence returns error if given block meta files indicate that some blocks overlaps within sequence.
func validateBlockSequence(bs []*Block) error {
	if len(bs) <= 1 {
		return nil
	}

	metas := make([]BlockMeta, 0, len(bs))
	for _, b := range bs {
		metas = append(metas, b.meta)
	}
	if overlaps := OverlappingBlocks(metas); len(overlaps) > 0 {
		return errors.Errorf("block time ranges overlap: %s", overlaps)
	}
	return nil
}
// TimeRange specifies minTime and maxTime range.
type TimeRange struct {
	Min, Max int64
}

// Overlaps contains overlapping blocks aggregated by overlapping range.
// The key is the critical time range shared by all blocks in the group
// (see OverlappingBlocks).
type Overlaps map[TimeRange][]BlockMeta
// String returns human readable string form of overlapped blocks.
// Note: map iteration makes the group order unspecified.
func (o Overlaps) String() string {
	var res []string
	for r, group := range o {
		var parts []string
		for _, m := range group {
			rangeStr := (time.Duration((m.MaxTime-m.MinTime)/1000) * time.Second).String()
			parts = append(parts, fmt.Sprintf(
				"<ulid: %s, mint: %d, maxt: %d, range: %s>",
				m.ULID.String(), m.MinTime, m.MaxTime, rangeStr,
			))
		}
		groupRange := (time.Duration((r.Max-r.Min)/1000) * time.Second).String()
		res = append(res, fmt.Sprintf(
			"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
			r.Min, r.Max, groupRange, len(group),
			strings.Join(parts, ", ")),
		)
	}
	return strings.Join(res, "\n")
}
// OverlappingBlocks returns all overlapping blocks from given meta files.
// It is a sweep over MinTime-sorted metas, carrying the set of blocks whose
// intervals are still open at the current timestamp.
func OverlappingBlocks(bm []BlockMeta) Overlaps {
	if len(bm) <= 1 {
		return nil
	}
	var (
		overlaps [][]BlockMeta

		// pending contains not ended blocks in regards to "current" timestamp.
		pending = []BlockMeta{bm[0]}
		// continuousPending helps to aggregate same overlaps to single group.
		continuousPending = true
	)

	// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
	// We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current
	// timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending.
	for _, b := range bm[1:] {
		var newPending []BlockMeta

		for _, p := range pending {
			// "b.MinTime" is our current time.
			if b.MinTime >= p.MaxTime {
				continuousPending = false
				continue
			}

			// "p" overlaps with "b" and "p" is still pending.
			newPending = append(newPending, p)
		}

		// Our block "b" is now pending.
		pending = append(newPending, b)
		if len(newPending) == 0 {
			// No overlaps.
			continue
		}

		if continuousPending && len(overlaps) > 0 {
			// Same uninterrupted overlap run: extend the last group.
			overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
			continue
		}
		overlaps = append(overlaps, append(newPending, b))
		// Start new pendings.
		continuousPending = true
	}

	// Fetch the critical overlapped time range foreach overlap groups.
	overlapGroups := Overlaps{}
	for _, overlap := range overlaps {
		// Shrink to the intersection of all block intervals in the group.
		// NOTE(review): Min starts at 0, not math.MinInt64 — a negative
		// MinTime would be clamped; confirm timestamps are non-negative here.
		minRange := TimeRange{Min: 0, Max: math.MaxInt64}
		for _, b := range overlap {
			if minRange.Max > b.MaxTime {
				minRange.Max = b.MaxTime
			}

			if minRange.Min < b.MinTime {
				minRange.Min = b.MinTime
			}
		}
		overlapGroups[minRange] = overlap
	}

	return overlapGroups
}
// String implements fmt.Stringer; a DB is always rendered as the fixed
// label "HEAD".
func (db *DB) String() string {
	return "HEAD"
}
// Blocks returns the databases persisted blocks.
func (db *DB) Blocks() []*Block {
	db.mtx.RLock()
	blocks := db.blocks
	db.mtx.RUnlock()
	return blocks
}
// Head returns the database's head.
func (db *DB) Head() *Head {
	return db.head
}
// Close the partition.
// Shutdown order: stop the background loop, close all blocks concurrently,
// release the lock file, close the head, merging every error.
func (db *DB) Close() error {
	close(db.stopc)    // Signal run() to exit.
	db.compactCancel() // Cancel any in-flight compaction context.
	<-db.donec         // Wait for run() to finish.

	db.mtx.Lock()
	defer db.mtx.Unlock()

	var g errgroup.Group

	// blocks also contains all head blocks.
	for _, pb := range db.blocks {
		g.Go(pb.Close)
	}

	var merr tsdb_errors.MultiError

	merr.Add(g.Wait())

	if db.lockf != nil {
		merr.Add(db.lockf.Release())
	}
	merr.Add(db.head.Close())
	return merr.Err()
}
// DisableCompactions disables auto compactions.
// The run() loop will count skipped compactions while disabled.
func (db *DB) DisableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = false
	level.Info(db.logger).Log("msg", "Compactions disabled")
}
// EnableCompactions enables auto compactions.
func (db *DB) EnableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = true
	level.Info(db.logger).Log("msg", "Compactions enabled")
}
// Snapshot writes the current data to the directory. If withHead is set to true it
// will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error {
	// Refuse in-place snapshots and ULID-named targets (a ULID name would
	// look like a block directory).
	if dir == db.dir {
		return errors.Errorf("cannot snapshot into base directory")
	}
	if _, err := ulid.ParseStrict(dir); err == nil {
		return errors.Errorf("dir must not be a valid ULID")
	}

	// Hold the compaction lock plus a read lock on the block list for a
	// consistent view while copying.
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		level.Info(db.logger).Log("msg", "Snapshotting block", "block", b)

		if err := b.Snapshot(dir); err != nil {
			return errors.Wrapf(err, "error snapshotting block: %s", b.Dir())
		}
	}
	if !withHead {
		return nil
	}

	mint := db.head.MinTime()
	maxt := db.head.MaxTime()
	head := &RangeHead{
		head: db.head,
		mint: mint,
		maxt: maxt,
	}
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this block intervals are always +1 than the total samples it includes.
	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
		return errors.Wrap(err, "snapshot head block")
	}
	return nil
}
// Querier returns a new querier over the data partition for the given time range.
// A goroutine must not handle more than one open Querier.
func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
	var blocks []BlockReader
	var blockMetas []BlockMeta

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	// Collect the persisted blocks that overlap [mint, maxt].
	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			blocks = append(blocks, b)
			blockMetas = append(blockMetas, b.Meta())
		}
	}
	// Include the head when the requested range reaches into it.
	if maxt >= db.head.MinTime() {
		blocks = append(blocks, &RangeHead{
			head: db.head,
			mint: mint,
			maxt: maxt,
		})
	}

	blockQueriers := make([]storage.Querier, 0, len(blocks))
	for _, b := range blocks {
		q, err := NewBlockQuerier(b, mint, maxt)
		if err == nil {
			blockQueriers = append(blockQueriers, q)
			continue
		}
		// If we fail, all previously opened queriers must be closed.
		for _, q := range blockQueriers {
			q.Close()
		}
		return nil, errors.Wrapf(err, "open querier for block %s", b)
	}

	// Overlapping selected blocks need the merge-aware vertical querier.
	if len(OverlappingBlocks(blockMetas)) > 0 {
		return &verticalQuerier{
			querier: querier{
				blocks: blockQueriers,
			},
		}, nil
	}

	return &querier{
		blocks: blockQueriers,
	}, nil
}
// ChunkQuerier is not yet supported.
func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) {
	// TODO(bwplotka): Implement in next PR.
	err := errors.New("not implemented")
	return nil, err
}
// rangeForTimestamp returns the exclusive upper bound of the width-sized
// bucket that contains t, i.e. (t/width)*width + width using Go's truncating
// integer division.
func rangeForTimestamp(t int64, width int64) (maxt int64) {
	bucket := t / width
	return (bucket + 1) * width
}
// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	var g errgroup.Group

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			// Pass b by value into the closure factory; a plain closure
			// would capture the loop variable.
			g.Go(func(b *Block) func() error {
				return func() error { return b.Delete(mint, maxt, ms...) }
			}(b))
		}
	}
	// Delete from the head in parallel with the blocks.
	g.Go(func() error {
		return db.head.Delete(mint, maxt, ms...)
	})
	return g.Wait()
}
// CleanTombstones re-writes any blocks with tombstones.
func (db *DB) CleanTombstones() (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	start := time.Now()
	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())

	// Track every block rewritten so a failure can roll all of them back.
	newUIDs := []ulid.ULID{}
	defer func() {
		// If any error is caused, we need to delete all the new directory created.
		if err != nil {
			for _, uid := range newUIDs {
				dir := filepath.Join(db.Dir(), uid.String())
				if err := os.RemoveAll(dir); err != nil {
					level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
				}
			}
		}
	}()

	// Snapshot the block list under the read lock; the per-block cleaning
	// below runs without it.
	db.mtx.RLock()
	blocks := db.blocks[:]
	db.mtx.RUnlock()

	for _, b := range blocks {
		if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil {
			err = errors.Wrapf(er, "clean tombstones: %s", b.Dir())
			return err
		} else if uid != nil { // New block was created.
			newUIDs = append(newUIDs, *uid)
		}
	}
	return errors.Wrap(db.reload(), "reload blocks")
}
// isBlockDir reports whether fi describes a directory whose name parses as a
// strict ULID, i.e. a TSDB block directory.
func isBlockDir(fi os.FileInfo) bool {
	if !fi.IsDir() {
		return false
	}
	if _, err := ulid.ParseStrict(fi.Name()); err != nil {
		return false
	}
	return true
}
// blockDirs returns the full paths of all block directories under dir.
func blockDirs(dir string) ([]string, error) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var dirs []string
	for _, fi := range entries {
		if !isBlockDir(fi) {
			continue
		}
		dirs = append(dirs, filepath.Join(dir, fi.Name()))
	}
	return dirs, nil
}
func sequenceFiles(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func nextSequenceFile(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", 0, err
}
i := uint64(0)
for _, f := range files {
j, err := strconv.ParseUint(f.Name(), 10, 64)
if err != nil {
continue
}
i = j
}
return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
}
// closeAll closes every closer and returns the combined error, if any.
func closeAll(cs []io.Closer) error {
	var merr tsdb_errors.MultiError
	for i := range cs {
		merr.Add(cs[i].Close())
	}
	return merr.Err()
}
func exponential(d, min, max time.Duration) time.Duration {
d *= 2
if d < min {
d = min
}
if d > max {
d = max
}
return d
}
|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
// Author: Jiang-Ming Yang (jiangming.yang@gmail.com)
// Author: Tobias Schottdorf (tobias.schottdorf@gmail.com)
// Author: Bram Gruneir (bram+code@cockroachlabs.com)
package storage
import (
"bytes"
"crypto/sha512"
"encoding/binary"
"fmt"
"math"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/coreos/etcd/raft/raftpb"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/build"
"github.com/cockroachdb/cockroach/internal/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/timeutil"
"github.com/cockroachdb/cockroach/util/uuid"
)
// errTransactionUnsupported is returned by commands that may not be executed
// within a transaction.
var errTransactionUnsupported = errors.New("not supported within a transaction")
// executeCmd switches over the method and multiplexes to execute the appropriate storage API
// command. It returns the response, an error, and a post commit trigger which
// may be actionable even in the case of an error.
// maxKeys is the number of scan results remaining for this batch
// (MaxInt64 for no limit).
func (r *Replica) executeCmd(
	ctx context.Context,
	raftCmdID storagebase.CmdIDKey, // command ID, forwarded to the testing filter
	index int, // position of this request within its batch, forwarded to the filter
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats, // stats delta; write commands update it in place
	h roachpb.Header,
	maxKeys int64,
	args roachpb.Request,
	reply roachpb.Response, // pre-allocated response of the type matching args
) (*PostCommitTrigger, *roachpb.Error) {
	ts := h.Timestamp

	// Noops carry no work; return immediately without touching the clock.
	if _, ok := args.(*roachpb.NoopRequest); ok {
		return nil, nil
	}

	if err := r.checkCmdHeader(args.Header()); err != nil {
		return nil, roachpb.NewErrorWithTxn(err, h.Txn)
	}

	// If a unittest filter was installed, check for an injected error; otherwise, continue.
	if filter := r.store.ctx.TestingKnobs.TestingCommandFilter; filter != nil {
		filterArgs := storagebase.FilterArgs{Ctx: ctx, CmdID: raftCmdID, Index: index,
			Sid: r.store.StoreID(), Req: args, Hdr: h}
		if pErr := filter(filterArgs); pErr != nil {
			log.Infof(ctx, "test injecting error: %s", pErr)
			return nil, pErr
		}
	}

	// Update the node clock with the serviced request. This maintains a
	// high water mark for all ops serviced, so that received ops
	// without a timestamp specified are guaranteed one higher than any
	// op already executed for overlapping keys.
	r.store.Clock().Update(ts)

	var err error
	var trigger *PostCommitTrigger
	var num int64          // NumKeys for the response header (scans, range deletes)
	var span *roachpb.Span // ResumeSpan for the response header, when maxKeys was hit

	// Note that responses are populated even when an error is returned.
	// TODO(tschottdorf): Change that. IIRC there is nontrivial use of it currently.
	switch tArgs := args.(type) {
	case *roachpb.GetRequest:
		resp := reply.(*roachpb.GetResponse)
		*resp, trigger, err = r.Get(ctx, batch, h, *tArgs)
	case *roachpb.PutRequest:
		resp := reply.(*roachpb.PutResponse)
		*resp, err = r.Put(ctx, batch, ms, h, *tArgs)
	case *roachpb.ConditionalPutRequest:
		resp := reply.(*roachpb.ConditionalPutResponse)
		*resp, err = r.ConditionalPut(ctx, batch, ms, h, *tArgs)
	case *roachpb.InitPutRequest:
		resp := reply.(*roachpb.InitPutResponse)
		*resp, err = r.InitPut(ctx, batch, ms, h, *tArgs)
	case *roachpb.IncrementRequest:
		resp := reply.(*roachpb.IncrementResponse)
		*resp, err = r.Increment(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeleteRequest:
		resp := reply.(*roachpb.DeleteResponse)
		*resp, err = r.Delete(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeleteRangeRequest:
		resp := reply.(*roachpb.DeleteRangeResponse)
		*resp, span, num, err = r.DeleteRange(ctx, batch, ms, h, maxKeys, *tArgs)
	case *roachpb.ScanRequest:
		resp := reply.(*roachpb.ScanResponse)
		*resp, span, num, trigger, err = r.Scan(ctx, batch, h, maxKeys, *tArgs)
	case *roachpb.ReverseScanRequest:
		resp := reply.(*roachpb.ReverseScanResponse)
		*resp, span, num, trigger, err = r.ReverseScan(ctx, batch, h, maxKeys, *tArgs)
	case *roachpb.BeginTransactionRequest:
		resp := reply.(*roachpb.BeginTransactionResponse)
		*resp, err = r.BeginTransaction(ctx, batch, ms, h, *tArgs)
	case *roachpb.EndTransactionRequest:
		resp := reply.(*roachpb.EndTransactionResponse)
		*resp, trigger, err = r.EndTransaction(ctx, batch, ms, h, *tArgs)
	case *roachpb.RangeLookupRequest:
		resp := reply.(*roachpb.RangeLookupResponse)
		*resp, trigger, err = r.RangeLookup(ctx, batch, h, *tArgs)
	case *roachpb.HeartbeatTxnRequest:
		resp := reply.(*roachpb.HeartbeatTxnResponse)
		*resp, err = r.HeartbeatTxn(ctx, batch, ms, h, *tArgs)
	case *roachpb.GCRequest:
		resp := reply.(*roachpb.GCResponse)
		*resp, trigger, err = r.GC(ctx, batch, ms, h, *tArgs)
	case *roachpb.PushTxnRequest:
		resp := reply.(*roachpb.PushTxnResponse)
		*resp, err = r.PushTxn(ctx, batch, ms, h, *tArgs)
	case *roachpb.ResolveIntentRequest:
		resp := reply.(*roachpb.ResolveIntentResponse)
		*resp, err = r.ResolveIntent(ctx, batch, ms, h, *tArgs)
	case *roachpb.ResolveIntentRangeRequest:
		resp := reply.(*roachpb.ResolveIntentRangeResponse)
		*resp, err = r.ResolveIntentRange(ctx, batch, ms, h, *tArgs)
	case *roachpb.MergeRequest:
		resp := reply.(*roachpb.MergeResponse)
		*resp, err = r.Merge(ctx, batch, ms, h, *tArgs)
	case *roachpb.TruncateLogRequest:
		resp := reply.(*roachpb.TruncateLogResponse)
		*resp, trigger, err = r.TruncateLog(ctx, batch, ms, h, *tArgs)
	case *roachpb.RequestLeaseRequest:
		resp := reply.(*roachpb.RequestLeaseResponse)
		*resp, trigger, err = r.RequestLease(ctx, batch, ms, h, *tArgs)
	case *roachpb.TransferLeaseRequest:
		// Note: TransferLease shares the RequestLeaseResponse type.
		resp := reply.(*roachpb.RequestLeaseResponse)
		*resp, trigger, err = r.TransferLease(ctx, batch, ms, h, *tArgs)
	case *roachpb.LeaseInfoRequest:
		resp := reply.(*roachpb.LeaseInfoResponse)
		*resp, err = r.LeaseInfo(ctx, *tArgs)
	case *roachpb.ComputeChecksumRequest:
		resp := reply.(*roachpb.ComputeChecksumResponse)
		*resp, trigger, err = r.ComputeChecksum(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeprecatedVerifyChecksumRequest:
		// Deprecated: intentionally a no-op.
	case *roachpb.ChangeFrozenRequest:
		resp := reply.(*roachpb.ChangeFrozenResponse)
		*resp, trigger, err = r.ChangeFrozen(ctx, batch, ms, h, *tArgs)
	default:
		err = errors.Errorf("unrecognized command %s", args.Method())
	}

	// Set the ResumeSpan and NumKeys.
	header := reply.Header()
	header.NumKeys = num
	header.ResumeSpan = span
	reply.SetHeader(header)

	if log.V(2) {
		log.Infof(ctx, "executed %s command %+v: %+v, err=%v", args.Method(), args, reply, err)
	}

	// Create a roachpb.Error by initializing txn from the request/response header.
	var pErr *roachpb.Error
	if err != nil {
		txn := reply.Header().Txn
		if txn == nil {
			txn = h.Txn
		}
		pErr = roachpb.NewErrorWithTxn(err, txn)
	}

	return trigger, pErr
}
// intentsToTrigger wraps any discovered intents, together with the request
// that discovered them, into a PostCommitTrigger for asynchronous
// resolution. It returns nil when there are no intents to hand off.
func intentsToTrigger(intents []roachpb.Intent, args roachpb.Request) *PostCommitTrigger {
	if len(intents) == 0 {
		return nil
	}
	withArg := intentsWithArg{args: args, intents: intents}
	return &PostCommitTrigger{intents: []intentsWithArg{withArg}}
}
// Get returns the value for a specified key.
func (r *Replica) Get(
	ctx context.Context, batch engine.ReadWriter, h roachpb.Header, args roachpb.GetRequest,
) (roachpb.GetResponse, *PostCommitTrigger, error) {
	consistent := h.ReadConsistency == roachpb.CONSISTENT
	value, foundIntents, readErr := engine.MVCCGet(ctx, batch, args.Key, h.Timestamp, consistent, h.Txn)
	reply := roachpb.GetResponse{Value: value}
	// Any intents encountered are handed back for async resolution.
	return reply, intentsToTrigger(foundIntents, &args), readErr
}
// Put sets the value for a specified key.
func (r *Replica) Put(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.PutRequest,
) (roachpb.PutResponse, error) {
	var reply roachpb.PutResponse

	// Inline values are written outside of MVCC time (zero timestamp);
	// everything else is written at the request's timestamp.
	writeTS := hlc.ZeroTimestamp
	if !args.Inline {
		writeTS = h.Timestamp
	}

	if h.DistinctSpans {
		if distinct, ok := batch.(engine.Batch); ok {
			// Use the distinct batch for both blind and normal ops so that we don't
			// accidentally flush mutations to make them visible to the distinct
			// batch.
			batch = distinct.Distinct()
			defer batch.Close()
		}
	}

	if args.Blind {
		return reply, engine.MVCCBlindPut(ctx, batch, ms, args.Key, writeTS, args.Value, h.Txn)
	}
	return reply, engine.MVCCPut(ctx, batch, ms, args.Key, writeTS, args.Value, h.Txn)
}
// ConditionalPut sets the value for a specified key only if
// the expected value matches. If not, the return value contains
// the actual value.
func (r *Replica) ConditionalPut(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ConditionalPutRequest,
) (roachpb.ConditionalPutResponse, error) {
	var reply roachpb.ConditionalPutResponse

	if h.DistinctSpans {
		if distinct, ok := batch.(engine.Batch); ok {
			// Use the distinct batch for both blind and normal ops so that we don't
			// accidentally flush mutations to make them visible to the distinct
			// batch.
			batch = distinct.Distinct()
			defer batch.Close()
		}
	}

	if args.Blind {
		return reply, engine.MVCCBlindConditionalPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, args.ExpValue, h.Txn)
	}
	return reply, engine.MVCCConditionalPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, args.ExpValue, h.Txn)
}
// InitPut sets the value for a specified key only if it doesn't exist. It
// returns an error if the key exists with an existing value that is different
// from the value provided.
func (r *Replica) InitPut(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.InitPutRequest,
) (roachpb.InitPutResponse, error) {
	err := engine.MVCCInitPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, h.Txn)
	return roachpb.InitPutResponse{}, err
}
// Increment increments the value (interpreted as varint64 encoded) and
// returns the newly incremented value (encoded as varint64). If no value
// exists for the key, zero is incremented.
func (r *Replica) Increment(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.IncrementRequest,
) (roachpb.IncrementResponse, error) {
	updated, incErr := engine.MVCCIncrement(ctx, batch, ms, args.Key, h.Timestamp, h.Txn, args.Increment)
	return roachpb.IncrementResponse{NewValue: updated}, incErr
}
// Delete deletes the key and value specified by key.
func (r *Replica) Delete(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.DeleteRequest,
) (roachpb.DeleteResponse, error) {
	delErr := engine.MVCCDelete(ctx, batch, ms, args.Key, h.Timestamp, h.Txn)
	return roachpb.DeleteResponse{}, delErr
}
// DeleteRange deletes the range of key/value pairs specified by
// start and end keys.
func (r *Replica) DeleteRange(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.DeleteRangeRequest,
) (roachpb.DeleteRangeResponse, *roachpb.Span, int64, error) {
	var reply roachpb.DeleteRangeResponse
	deleted, resumeKey, numDeleted, err := engine.MVCCDeleteRange(
		ctx, batch, ms, args.Key, args.EndKey, maxKeys, h.Timestamp, h.Txn, args.ReturnKeys,
	)
	if err != nil {
		// On error, no resume span and no txn modification.
		return reply, nil, numDeleted, err
	}
	reply.Keys = deleted
	var resume *roachpb.Span
	if resumeKey != nil {
		// maxKeys was exhausted; tell the caller where to pick up.
		resumeSpan := args.Span
		resumeSpan.Key = resumeKey
		resume = &resumeSpan
	}
	// DeleteRange requires that we retry on push to avoid the lost delete range anomaly.
	if h.Txn != nil {
		txnClone := h.Txn.Clone()
		txnClone.RetryOnPush = true
		reply.Txn = &txnClone
	}
	return reply, resume, numDeleted, nil
}
// Scan scans the key range specified by start key through end key in ascending order up to some
// maximum number of results. maxKeys stores the number of scan results remaining for this
// batch (MaxInt64 for no limit).
func (r *Replica) Scan(
	ctx context.Context,
	batch engine.ReadWriter,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.ScanRequest,
) (roachpb.ScanResponse, *roachpb.Span, int64, *PostCommitTrigger, error) {
	resumeSpan := args.Span
	if maxKeys == 0 {
		// No budget left in this batch: resume over the entire request span.
		return roachpb.ScanResponse{}, &resumeSpan, 0, nil, nil
	}
	rows, foundIntents, scanErr := engine.MVCCScan(ctx, batch, args.Key, args.EndKey, maxKeys,
		h.Timestamp, h.ReadConsistency == roachpb.CONSISTENT, h.Txn)
	count := int64(len(rows))
	var resume *roachpb.Span
	if count == maxKeys {
		// The limit was reached; resume just past the last returned row.
		resumeSpan.Key = rows[count-1].Key.Next()
		resume = &resumeSpan
	}
	return roachpb.ScanResponse{Rows: rows}, resume, count, intentsToTrigger(foundIntents, &args), scanErr
}
// ReverseScan scans the key range specified by start key through end key in descending order up to
// some maximum number of results. maxKeys stores the number of scan results remaining for
// this batch (MaxInt64 for no limit).
func (r *Replica) ReverseScan(
	ctx context.Context,
	batch engine.ReadWriter,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.ReverseScanRequest,
) (roachpb.ReverseScanResponse, *roachpb.Span, int64, *PostCommitTrigger, error) {
	resumeSpan := args.Span
	if maxKeys == 0 {
		// No budget left in this batch: resume over the entire request span.
		return roachpb.ReverseScanResponse{}, &resumeSpan, 0, nil, nil
	}
	rows, foundIntents, scanErr := engine.MVCCReverseScan(ctx, batch, args.Key, args.EndKey, maxKeys,
		h.Timestamp, h.ReadConsistency == roachpb.CONSISTENT, h.Txn)
	count := int64(len(rows))
	var resume *roachpb.Span
	if count == maxKeys {
		// The limit was reached; resume ends (exclusively) at the last row.
		resumeSpan.EndKey = rows[count-1].Key
		resume = &resumeSpan
	}
	return roachpb.ReverseScanResponse{Rows: rows}, resume, count, intentsToTrigger(foundIntents, &args), scanErr
}
// verifyTransaction checks that a transactional request carries a txn in its
// header and that the request key is anchored at the txn key.
func verifyTransaction(h roachpb.Header, args roachpb.Request) error {
	txn := h.Txn
	if txn == nil {
		return errors.Errorf("no transaction specified to %s", args.Method())
	}
	reqKey := args.Header().Key
	if !bytes.Equal(reqKey, txn.Key) {
		return errors.Errorf("request key %s should match txn key %s", reqKey, txn.Key)
	}
	return nil
}
// BeginTransaction writes the initial transaction record. Fails in
// the event that a transaction record is already written. This may
// occur if a transaction is started with a batch containing writes
// to different ranges, and the range containing the txn record fails
// to receive the write batch before a heartbeat or txn push is
// performed first and aborts the transaction.
func (r *Replica) BeginTransaction(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.BeginTransactionRequest,
) (roachpb.BeginTransactionResponse, error) {
	var reply roachpb.BeginTransactionResponse

	// The header must carry a txn anchored at the request key.
	if err := verifyTransaction(h, &args); err != nil {
		return reply, err
	}
	key := keys.TransactionKey(h.Txn.Key, h.Txn.ID)
	clonedTxn := h.Txn.Clone()
	reply.Txn = &clonedTxn

	// Verify transaction does not already exist.
	txn := roachpb.Transaction{}
	ok, err := engine.MVCCGetProto(ctx, batch, key, hlc.ZeroTimestamp, true, nil, &txn)
	if err != nil {
		return reply, err
	}
	if ok {
		switch txn.Status {
		case roachpb.ABORTED:
			// Check whether someone has come in ahead and already aborted the
			// txn.
			return reply, roachpb.NewTransactionAbortedError()
		case roachpb.PENDING:
			if h.Txn.Epoch > txn.Epoch {
				// On a transaction retry there will be an extant txn record
				// but this run should have an upgraded epoch. The extant txn
				// record may have been pushed or otherwise updated, so update
				// this command's txn and rewrite the record.
				reply.Txn.Update(&txn)
			} else {
				// A record already exists at the same (or a newer) epoch;
				// overwriting it would clobber concurrent state.
				return reply, roachpb.NewTransactionStatusError(
					fmt.Sprintf("BeginTransaction can't overwrite %s", txn))
			}
		case roachpb.COMMITTED:
			return reply, roachpb.NewTransactionStatusError(
				fmt.Sprintf("BeginTransaction can't overwrite %s", txn),
			)
		default:
			return reply, roachpb.NewTransactionStatusError(
				fmt.Sprintf("bad txn state: %s", txn),
			)
		}
	}

	r.mu.Lock()
	threshold := r.mu.state.TxnSpanGCThreshold
	r.mu.Unlock()

	// Disallow creation of a transaction record if it's at a timestamp before
	// the TxnSpanGCThreshold, as in that case our transaction may already have
	// been aborted by a concurrent actor which encountered one of our intents
	// (which may have been written before this entry).
	//
	// See #9265.
	if txn.LastActive().Less(threshold) {
		return reply, roachpb.NewTransactionAbortedError()
	}

	// Write the txn record.
	reply.Txn.Writing = true
	return reply, engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, reply.Txn)
}
// EndTransaction either commits or aborts (rolls back) an extant
// transaction according to the args.Commit parameter. Rolling back
// an already rolled-back txn is ok.
func (r *Replica) EndTransaction(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.EndTransactionRequest,
) (roachpb.EndTransactionResponse, *PostCommitTrigger, error) {
	var reply roachpb.EndTransactionResponse

	// The header must carry a txn anchored at the request key.
	if err := verifyTransaction(h, &args); err != nil {
		return reply, nil, err
	}
	key := keys.TransactionKey(h.Txn.Key, h.Txn.ID)

	// Fetch existing transaction.
	reply.Txn = &roachpb.Transaction{}
	if ok, err := engine.MVCCGetProto(
		ctx, batch, key, hlc.ZeroTimestamp, true, nil, reply.Txn,
	); err != nil {
		return reply, nil, err
	} else if !ok {
		// Return a fresh empty reply because there's an empty Transaction
		// proto in our existing one.
		return roachpb.EndTransactionResponse{},
			nil, roachpb.NewTransactionStatusError("does not exist")
	}

	// Verify that we can either commit it or abort it (according
	// to args.Commit), and also that the Timestamp and Epoch have
	// not suffered regression.
	switch reply.Txn.Status {
	case roachpb.COMMITTED:
		return reply, nil, roachpb.NewTransactionStatusError("already committed")

	case roachpb.ABORTED:
		if !args.Commit {
			// The transaction has already been aborted by other.
			// Do not return TransactionAbortedError since the client anyway
			// wanted to abort the transaction.
			externalIntents := r.resolveLocalIntents(ctx, batch, ms, args, reply.Txn)
			if err := updateTxnWithExternalIntents(
				ctx, batch, ms, args, reply.Txn, externalIntents,
			); err != nil {
				return reply, nil, err
			}
			return reply, intentsToTrigger(externalIntents, &args), nil
		}
		// If the transaction was previously aborted by a concurrent
		// writer's push, any intents written are still open. It's only now
		// that we know them, so we return them all for asynchronous
		// resolution (we're currently not able to write on error, but
		// see #1989).
		return reply,
			intentsToTrigger(roachpb.AsIntents(args.IntentSpans, reply.Txn), &args),
			roachpb.NewTransactionAbortedError()

	case roachpb.PENDING:
		if h.Txn.Epoch < reply.Txn.Epoch {
			// TODO(tschottdorf): this leaves the Txn record (and more
			// importantly, intents) dangling; we can't currently write on
			// error. Would panic, but that makes TestEndTransactionWithErrors
			// awkward.
			return reply, nil, roachpb.NewTransactionStatusError(
				fmt.Sprintf("epoch regression: %d", h.Txn.Epoch),
			)
		} else if h.Txn.Epoch == reply.Txn.Epoch && reply.Txn.Timestamp.Less(h.Txn.OrigTimestamp) {
			// The transaction record can only ever be pushed forward, so it's an
			// error if somehow the transaction record has an earlier timestamp
			// than the original transaction timestamp.
			// TODO(tschottdorf): see above comment on epoch regression.
			return reply, nil, roachpb.NewTransactionStatusError(
				fmt.Sprintf("timestamp regression: %s", h.Txn.OrigTimestamp),
			)
		}

	default:
		return reply, nil, roachpb.NewTransactionStatusError(
			fmt.Sprintf("bad txn status: %s", reply.Txn),
		)
	}

	// Take max of requested epoch and existing epoch. The requester
	// may have incremented the epoch on retries.
	if reply.Txn.Epoch < h.Txn.Epoch {
		reply.Txn.Epoch = h.Txn.Epoch
	}
	// Take max of requested priority and existing priority. This isn't
	// terribly useful, but we do it for completeness.
	if reply.Txn.Priority < h.Txn.Priority {
		reply.Txn.Priority = h.Txn.Priority
	}

	// Take max of supplied txn's timestamp and persisted txn's
	// timestamp. It may have been pushed by another transaction.
	// Note that we do not use the batch request timestamp, which for
	// a transaction is always set to the txn's original timestamp.
	reply.Txn.Timestamp.Forward(h.Txn.Timestamp)

	if isEndTransactionExceedingDeadline(reply.Txn.Timestamp, args) {
		reply.Txn.Status = roachpb.ABORTED
		// FIXME(#3037):
		// If the deadline has lapsed, return all the intents for
		// resolution. Unfortunately, since we're (a) returning an error,
		// and (b) not able to write on error (see #1989), we can't write
		// ABORTED into the master transaction record, which remains
		// PENDING, and that's pretty bad.
		return reply,
			intentsToTrigger(roachpb.AsIntents(args.IntentSpans, reply.Txn), &args),
			roachpb.NewTransactionAbortedError()
	}

	// Set transaction status to COMMITTED or ABORTED as per the
	// args.Commit parameter.
	if args.Commit {
		if isEndTransactionTriggeringRetryError(h.Txn, reply.Txn) {
			return reply, nil, roachpb.NewTransactionRetryError()
		}
		reply.Txn.Status = roachpb.COMMITTED
	} else {
		reply.Txn.Status = roachpb.ABORTED
	}

	// Resolve intents on this range synchronously; the remainder are
	// returned for async resolution via the post-commit trigger.
	externalIntents := r.resolveLocalIntents(ctx, batch, ms, args, reply.Txn)
	if err := updateTxnWithExternalIntents(ctx, batch, ms, args, reply.Txn, externalIntents); err != nil {
		return reply, nil, err
	}

	// Run triggers if successfully committed.
	var trigger *PostCommitTrigger
	if reply.Txn.Status == roachpb.COMMITTED {
		var err error
		if trigger, err = r.runCommitTrigger(ctx, batch.(engine.Batch), ms, args, reply.Txn); err != nil {
			return reply, nil, NewReplicaCorruptionError(err)
		}
	}

	// Note: there's no need to clear the abort cache state if we've
	// successfully finalized a transaction, as there's no way in
	// which an abort cache entry could have been written (the txn would
	// already have been in state=ABORTED).
	//
	// Summary of transaction replay protection after EndTransaction:
	// When a transactional write gets replayed over its own resolved
	// intents, the write will succeed but only as an intent with a
	// newer timestamp (with a WriteTooOldError). However, the replayed
	// intent cannot be resolved by a subsequent replay of this
	// EndTransaction call because the txn timestamp will be too
	// old. Replays which include a BeginTransaction never succeed
	// because EndTransaction inserts in the write timestamp cache,
	// forcing the BeginTransaction to fail with a transaction retry
	// error. If the replay didn't include a BeginTransaction, any push
	// will immediately succeed as a missing txn record on push sets the
	// transaction to aborted. In both cases, the txn will be GC'd on
	// the slow path.
	trigger = updateTrigger(trigger, intentsToTrigger(externalIntents, &args))
	return reply, trigger, nil
}
// isEndTransactionExceedingDeadline returns true if the transaction
// exceeded its deadline.
func isEndTransactionExceedingDeadline(
	t hlc.Timestamp,
	args roachpb.EndTransactionRequest,
) bool {
	deadline := args.Deadline
	if deadline == nil {
		// No deadline was set; it cannot be exceeded.
		return false
	}
	return deadline.Less(t)
}
// isEndTransactionTriggeringRetryError returns true if the
// EndTransactionRequest cannot be committed and needs to return a
// TransactionRetryError.
func isEndTransactionTriggeringRetryError(headerTxn, currentTxn *roachpb.Transaction) bool {
	// If we saw any WriteTooOldErrors, we must restart to avoid lost
	// update anomalies.
	if headerTxn.WriteTooOld {
		return true
	}

	pushed := !currentTxn.Timestamp.Equal(headerTxn.OrigTimestamp)
	if !pushed {
		return false
	}
	// The txn was pushed: retry if pushing mandates a retry, or if the
	// isolation level is SERIALIZABLE (the commit timestamp must equal
	// the original txn timestamp).
	return headerTxn.RetryOnPush || headerTxn.Isolation == enginepb.SERIALIZABLE
}
// resolveLocalIntents synchronously resolves any intents that are
// local to this range in the same batch. The remainder are collected
// and returned so that they can be handed off to asynchronous
// processing.
func (r *Replica) resolveLocalIntents(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
) []roachpb.Intent {
	desc := r.Desc()
	var preMergeDesc *roachpb.RangeDescriptor
	if mergeTrigger := args.InternalCommitTrigger.GetMergeTrigger(); mergeTrigger != nil {
		// If this is a merge, then use the post-merge descriptor to determine
		// which intents are local (note that for a split, we want to use the
		// pre-split one instead because it's larger).
		preMergeDesc = desc
		desc = &mergeTrigger.LeftDesc
	}

	// Shared iterator/buffer reused across all intent resolutions below.
	iterAndBuf := engine.GetIterAndBuf(batch)
	defer iterAndBuf.Cleanup()

	var externalIntents []roachpb.Intent
	for _, span := range args.IntentSpans {
		// Each span is processed inside a closure so a single error path
		// covers both the point-intent and range-intent cases.
		if err := func() error {
			intent := roachpb.Intent{Span: span, Txn: txn.TxnMeta, Status: txn.Status}
			if len(span.EndKey) == 0 {
				// For single-key intents, do a KeyAddress-aware check of
				// whether it's contained in our Range.
				if !containsKey(*desc, span.Key) {
					externalIntents = append(externalIntents, intent)
					return nil
				}
				resolveMS := ms
				if preMergeDesc != nil && !containsKey(*preMergeDesc, span.Key) {
					// If this transaction included a merge and the intents
					// are from the subsumed range, ignore the intent resolution
					// stats, as they will already be accounted for during the
					// merge trigger.
					resolveMS = nil
				}
				return engine.MVCCResolveWriteIntentUsingIter(ctx, batch, iterAndBuf, resolveMS, intent)
			}
			// For intent ranges, cut into parts inside and outside our key
			// range. Resolve locally inside, delegate the rest. In particular,
			// an intent range for range-local data is correctly considered local.
			inSpan, outSpans := intersectSpan(span, *desc)
			for _, span := range outSpans {
				outIntent := intent
				outIntent.Span = span
				externalIntents = append(externalIntents, outIntent)
			}
			if inSpan != nil {
				intent.Span = *inSpan
				_, err := engine.MVCCResolveWriteIntentRangeUsingIter(ctx, batch, iterAndBuf, ms, intent, math.MaxInt64)
				return err
			}
			return nil
		}(); err != nil {
			// TODO(tschottdorf): any legitimate reason for this to happen?
			// Figure that out and if not, should still be ReplicaCorruption
			// and not a panic.
			panic(fmt.Sprintf("error resolving intent at %s on end transaction [%s]: %s", span, txn.Status, err))
		}
	}
	return externalIntents
}
// updateTxnWithExternalIntents persists the transaction record with
// updated status (& possibly timestamp). If we've already resolved
// all intents locally, we actually delete the record right away - no
// use in keeping it around.
func updateTxnWithExternalIntents(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
	externalIntents []roachpb.Intent,
) error {
	key := keys.TransactionKey(txn.Key, txn.ID)
	if txnAutoGC && len(externalIntents) == 0 {
		// Everything was resolved locally: GC the record immediately.
		if log.V(2) {
			log.Infof(ctx, "auto-gc'ed %s (%d intents)", txn.ID.Short(), len(args.IntentSpans))
		}
		return engine.MVCCDelete(ctx, batch, ms, key, hlc.ZeroTimestamp, nil /* txn */)
	}
	// Record the outstanding (external) intents on the txn record so
	// they can be resolved later.
	spans := make([]roachpb.Span, 0, len(externalIntents))
	for _, intent := range externalIntents {
		spans = append(spans, intent.Span)
	}
	txn.Intents = spans
	return engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil /* txn */, txn)
}
// intersectSpan takes an intent and a descriptor. It then splits the
// intent's range into up to three pieces: A first piece which is contained in
// the Range, and a slice of up to two further intents which are outside of the
// key range. An intent for which [Key, EndKey) is empty does not result in any
// intents; thus intersectIntent only applies to intent ranges.
// A range-local intent range is never split: It's returned as either
// belonging to or outside of the descriptor's key range, and passing an intent
// which begins range-local but ends non-local results in a panic.
// TODO(tschottdorf) move to proto, make more gen-purpose - kv.truncate does
// some similar things.
func intersectSpan(
	span roachpb.Span,
	desc roachpb.RangeDescriptor,
) (middle *roachpb.Span, outside []roachpb.Span) {
	start, end := desc.StartKey.AsRawKey(), desc.EndKey.AsRawKey()
	if len(span.EndKey) == 0 {
		// A point span (no EndKey) is treated as wholly outside; callers
		// handle single-key intents separately.
		outside = append(outside, span)
		return
	}
	if bytes.Compare(span.Key, keys.LocalRangeMax) < 0 {
		// Range-local spans are all-or-nothing: never split them.
		if bytes.Compare(span.EndKey, keys.LocalRangeMax) >= 0 {
			log.Fatalf(context.Background(), "a local intent range may not have a non-local portion: %s", span)
		}
		if containsKeyRange(desc, span.Key, span.EndKey) {
			return &span, nil
		}
		return nil, append(outside, span)
	}
	// From now on, we're dealing with plain old key ranges - no more local
	// addressing.
	if bytes.Compare(span.Key, start) < 0 {
		// Intent spans a part to the left of [start, end).
		iCopy := span
		if bytes.Compare(start, span.EndKey) < 0 {
			iCopy.EndKey = start
		}
		// Trim the working span so the left piece isn't counted twice.
		span.Key = iCopy.EndKey
		outside = append(outside, iCopy)
	}
	if bytes.Compare(span.Key, span.EndKey) < 0 && bytes.Compare(end, span.EndKey) < 0 {
		// Intent spans a part to the right of [start, end).
		iCopy := span
		if bytes.Compare(iCopy.Key, end) < 0 {
			iCopy.Key = end
		}
		// Trim the working span so the right piece isn't counted twice.
		span.EndKey = iCopy.Key
		outside = append(outside, iCopy)
	}
	// Whatever remains (if non-empty and inside [start, end)) is the
	// locally-resolvable middle piece.
	if bytes.Compare(span.Key, span.EndKey) < 0 && bytes.Compare(span.Key, start) >= 0 && bytes.Compare(end, span.EndKey) >= 0 {
		middle = &span
	}
	return
}
// runCommitTrigger runs any commit trigger attached to the
// EndTransactionRequest (split, merge, change-replicas, modified-span)
// against the supplied batch and accumulates the resulting post-commit
// triggers. An error from any trigger aborts the whole operation.
func (r *Replica) runCommitTrigger(
	ctx context.Context,
	batch engine.Batch,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
) (*PostCommitTrigger, error) {
	var trigger *PostCommitTrigger
	ct := args.InternalCommitTrigger

	// A closure is used so a single error check covers all trigger kinds.
	if err := func() error {
		if ct.GetSplitTrigger() != nil {
			var err error
			var postSplit *PostCommitTrigger
			// splitTrigger replaces *ms wholesale with the post-split stats.
			if *ms, postSplit, err = r.splitTrigger(
				ctx, batch, *ms, ct.SplitTrigger, txn.Timestamp,
			); err != nil {
				return err
			}
			trigger = updateTrigger(trigger, postSplit)
		}
		if ct.GetMergeTrigger() != nil {
			postMerge, err := r.mergeTrigger(ctx, batch, ms, ct.MergeTrigger, txn.Timestamp)
			if err != nil {
				return err
			}
			trigger = updateTrigger(trigger, postMerge)
		}
		if crt := ct.GetChangeReplicasTrigger(); crt != nil {
			trigger = updateTrigger(trigger, r.changeReplicasTrigger(ctx, batch, crt))
		}
		if ct.GetModifiedSpanTrigger() != nil {
			if ct.ModifiedSpanTrigger.SystemConfigSpan {
				// Check if we need to gossip the system config.
				// NOTE: System config gossiping can only execute correctly if
				// the transaction record is located on the range that contains
				// the system span. If a transaction is created which modifies
				// both system *and* non-system data, it should be ensured that
				// the transaction record itself is on the system span. This can
				// be done by making sure a system key is the first key touched
				// in the transaction.
				if !r.ContainsKey(keys.SystemConfigSpan.Key) {
					log.Errorf(ctx, "System configuration span was modified, but the "+
						"modification trigger is executing on a non-system range. "+
						"Configuration changes will not be gossiped.")
				} else {
					trigger = updateTrigger(trigger, &PostCommitTrigger{
						maybeGossipSystemConfig: true,
					})
				}
			}
		}
		return nil
	}(); err != nil {
		return nil, err
	}
	return trigger, nil
}
// RangeLookup is used to look up RangeDescriptors - a RangeDescriptor
// is a metadata structure which describes the key range and replica locations
// of a distinct range in the cluster.
//
// RangeDescriptors are stored as values in the cockroach cluster's key-value
// store. However, they are always stored using special "Range Metadata keys",
// which are "ordinary" keys with a special prefix prepended. The Range Metadata
// Key for an ordinary key can be generated with the `keys.RangeMetaKey(key)`
// function. The RangeDescriptor for the range which contains a given key can be
// retrieved by generating its Range Metadata Key and dispatching it to
// RangeLookup.
//
// Note that the Range Metadata Key sent to RangeLookup is NOT the key
// at which the desired RangeDescriptor is stored. Instead, this method returns
// the RangeDescriptor stored at the _lowest_ existing key which is _greater_
// than the given key. The returned RangeDescriptor will thus contain the
// ordinary key which was originally used to generate the Range Metadata Key
// sent to RangeLookup.
//
// The "Range Metadata Key" for a range is built by appending the end key of
// the range to the respective meta prefix.
//
// Lookups for range metadata keys usually want to read inconsistently, but
// some callers need a consistent result; both are supported.
//
// This method has an important optimization in the inconsistent case: instead
// of just returning the request RangeDescriptor, it also returns a slice of
// additional range descriptors immediately consecutive to the desired
// RangeDescriptor. This is intended to serve as a sort of caching pre-fetch,
// so that the requesting nodes can aggressively cache RangeDescriptors which
// are likely to be desired by their current workload. The Reverse flag
// specifies whether descriptors are prefetched in descending or ascending
// order.
func (r *Replica) RangeLookup(
ctx context.Context,
batch engine.ReadWriter,
h roachpb.Header,
args roachpb.RangeLookupRequest,
) (roachpb.RangeLookupResponse, *PostCommitTrigger, error) {
log.Event(ctx, "RangeLookup")
var reply roachpb.RangeLookupResponse
ts := h.Timestamp // all we're going to use from the header.
key, err := keys.Addr(args.Key)
if err != nil {
return reply, nil, err
}
if !key.Equal(args.Key) {
return reply, nil, errors.Errorf("illegal lookup of range-local key")
}
rangeCount := int64(args.MaxRanges)
if rangeCount < 1 {
return reply, nil, errors.Errorf("range lookup specified invalid maximum range count %d: must be > 0", rangeCount)
}
consistent := h.ReadConsistency != roachpb.INCONSISTENT
if consistent && args.ConsiderIntents {
return reply, nil, errors.Errorf("can not read consistently and special-case intents")
}
if args.ConsiderIntents {
// Disable prefetching; the caller only cares about a single intent,
// and the code below simplifies considerably.
rangeCount = 1
}
var checkAndUnmarshal func(roachpb.Value) (*roachpb.RangeDescriptor, error)
var kvs []roachpb.KeyValue // kv descriptor pairs in scan order
var intents []roachpb.Intent
if !args.Reverse {
// If scanning forward, there's no special "checking": Just decode the
// descriptor and return it.
checkAndUnmarshal = func(v roachpb.Value) (*roachpb.RangeDescriptor, error) {
var rd roachpb.RangeDescriptor
if err := v.GetProto(&rd); err != nil {
return nil, err
}
return &rd, nil
}
// We want to search for the metadata key greater than
// args.Key. Scan for both the requested key and the keys immediately
// afterwards, up to MaxRanges.
startKey, endKey, err := keys.MetaScanBounds(key)
if err != nil {
return reply, nil, err
}
// Scan for descriptors.
kvs, intents, err = engine.MVCCScan(ctx, batch, startKey, endKey, rangeCount,
ts, consistent, h.Txn)
if err != nil {
// An error here is likely a WriteIntentError when reading consistently.
return reply, nil, err
}
} else {
// Use MVCCScan to get the first range. There are three cases:
// 1. args.Key is not an endpoint of the range and
// 2a. The args.Key is the start/end key of the range.
// 2b. Even worse, the body of args.Key is roachpb.KeyMax.
// In the first case, we need use the MVCCScan() to get the first
// range descriptor, because ReverseScan can't do the work. If we
// have ranges [a,c) and [c,f) and the reverse scan request's key
// range is [b,d), then d.Next() is less than "f", and so the meta
// row {f->[c,f)} would be ignored by MVCCReverseScan. In case 2a,
// the range descriptor received by MVCCScan will be filtered before
// results are returned: With ranges [c,f) and [f,z), reverse scan
// on [d,f) receives the descriptor {z->[f,z)}, which is discarded
// below since it's not being asked for. Finally, in case 2b, we
// don't even attempt the forward scan because it's neither defined
// nor required.
// Note that Meta1KeyMax is admissible: it means we're looking for
// the range descriptor that houses Meta2KeyMax, and a forward scan
// handles it correctly.
// In this case, checkAndUnmarshal is more complicated: It needs
// to weed out descriptors from the forward scan above, which could
// return a result or an intent we're not supposed to return.
checkAndUnmarshal = func(v roachpb.Value) (*roachpb.RangeDescriptor, error) {
var r roachpb.RangeDescriptor
if err := v.GetProto(&r); err != nil {
return nil, err
}
startKeyAddr, err := keys.Addr(keys.RangeMetaKey(r.StartKey))
if err != nil {
return nil, err
}
if !startKeyAddr.Less(key) {
// This is the case in which we've picked up an extra descriptor
// we don't want.
return nil, nil
}
// We actually want this descriptor.
return &r, nil
}
if key.Less(roachpb.RKey(keys.Meta2KeyMax)) {
startKey, endKey, err := keys.MetaScanBounds(key)
if err != nil {
return reply, nil, err
}
kvs, intents, err = engine.MVCCScan(ctx, batch, startKey, endKey, 1,
ts, consistent, h.Txn)
if err != nil {
return reply, nil, err
}
}
// We want to search for the metadata key just less or equal to
// args.Key. Scan in reverse order for both the requested key and the
// keys immediately backwards, up to MaxRanges.
startKey, endKey, err := keys.MetaReverseScanBounds(key)
if err != nil {
return reply, nil, err
}
// Reverse scan for descriptors.
revKvs, revIntents, err := engine.MVCCReverseScan(ctx, batch, startKey, endKey, rangeCount,
ts, consistent, h.Txn)
if err != nil {
// An error here is likely a WriteIntentError when reading consistently.
return reply, nil, err
}
// Merge the results, the total ranges may be bigger than rangeCount.
kvs = append(kvs, revKvs...)
intents = append(intents, revIntents...)
}
userKey := keys.UserKey(key)
containsFn := roachpb.RangeDescriptor.ContainsKey
if args.Reverse {
containsFn = roachpb.RangeDescriptor.ContainsExclusiveEndKey
}
// Decode all scanned range descriptors which haven't been unmarshaled yet.
for _, kv := range kvs {
// TODO(tschottdorf) Candidate for a ReplicaCorruptionError.
rd, err := checkAndUnmarshal(kv.Value)
if err != nil {
return reply, nil, err
}
if rd != nil {
// Add the first valid descriptor to the desired range descriptor
// list in the response, add all others to the prefetched list.
if len(reply.Ranges) == 0 && containsFn(*rd, userKey) {
reply.Ranges = append(reply.Ranges, *rd)
} else {
reply.PrefetchedRanges = append(reply.PrefetchedRanges, *rd)
}
}
}
if args.ConsiderIntents || len(reply.Ranges) == 0 {
// NOTE (subtle): dangling intents on meta records are peculiar: It's not
// clear whether the intent or the previous value point to the correct
// location of the Range. It gets even more complicated when there are
// split-related intents or a txn record co-located with a replica
// involved in the split. Since we cannot know the correct answer, we
// reply with both the pre- and post- transaction values when the
// ConsiderIntents flag is set.
//
// This does not count against a maximum range count because they are
// possible versions of the same descriptor. In other words, both the
// current live descriptor and a potentially valid descriptor from
// observed intents could be returned when MaxRanges is set to 1 and
// the ConsiderIntents flag is set.
for _, intent := range intents {
val, _, err := engine.MVCCGetAsTxn(ctx, batch, intent.Key, intent.Txn.Timestamp, intent.Txn)
if err != nil {
return reply, nil, err
}
if val == nil {
// Intent is a deletion.
continue
}
rd, err := checkAndUnmarshal(*val)
if err != nil {
return reply, nil, err
}
if rd != nil {
if containsFn(*rd, userKey) {
reply.Ranges = append(reply.Ranges, *rd)
break
}
}
}
}
if len(reply.Ranges) == 0 {
// No matching results were returned from the scan. This should
// never happen with the above logic.
log.Fatalf(ctx, "RangeLookup dispatched to correct range, but no matching RangeDescriptor was found: %q", args.Key)
} else if preCount := int64(len(reply.PrefetchedRanges)); 1+preCount > rangeCount {
// We've possibly picked up an extra descriptor if we're in reverse
// mode due to the initial forward scan.
//
// Here, we only count the desired range descriptors as a single
// descriptor against the rangeCount limit, even if multiple versions
// of the same descriptor were found in intents. In practice, we should
// only get multiple desired range descriptors when prefetching is disabled
// anyway (see above), so this should never actually matter.
reply.PrefetchedRanges = reply.PrefetchedRanges[:rangeCount-1]
}
for _, rd := range reply.Ranges {
if !containsFn(rd, userKey) {
log.Fatalf(ctx, "range lookup of meta key %q resulted in descriptor %s which does not contain non-meta key %q", key, rd, userKey)
}
}
return reply, intentsToTrigger(intents, &args), nil
}
// HeartbeatTxn refreshes the liveness of a transaction record in response
// to a heartbeat from the transaction's coordinator. It returns the
// (possibly updated) persisted transaction in the response.
func (r *Replica) HeartbeatTxn(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.HeartbeatTxnRequest,
) (roachpb.HeartbeatTxnResponse, error) {
	var reply roachpb.HeartbeatTxnResponse
	if err := verifyTransaction(h, &args); err != nil {
		return reply, err
	}

	txnKey := keys.TransactionKey(h.Txn.Key, h.Txn.ID)

	var txnRecord roachpb.Transaction
	ok, err := engine.MVCCGetProto(ctx, batch, txnKey, hlc.ZeroTimestamp, true, nil, &txnRecord)
	if err != nil {
		return reply, err
	}
	if !ok {
		// No persisted record: the heartbeat is either a delayed relic or the
		// BeginTransaction itself was delayed. Either way, don't materialize
		// a new record here.
		return reply, errors.Errorf("heartbeat for transaction %s failed; record not present", h.Txn)
	}

	if txnRecord.Status == roachpb.PENDING {
		// Only PENDING txns get their heartbeat bumped; Forward never moves
		// the timestamp backwards, so out-of-order heartbeats are harmless.
		if txnRecord.LastHeartbeat == nil {
			txnRecord.LastHeartbeat = &hlc.Timestamp{}
		}
		txnRecord.LastHeartbeat.Forward(args.Now)
		if err := engine.MVCCPutProto(ctx, batch, ms, txnKey, hlc.ZeroTimestamp, nil, &txnRecord); err != nil {
			return reply, err
		}
	}

	reply.Txn = &txnRecord
	return reply, nil
}
// GC garbage-collects the keys listed in the request via
// MVCCGarbageCollect, then ratchets the replica's GC thresholds forward
// and persists them, emitting a post-commit trigger carrying the new
// thresholds.
func (r *Replica) GC(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.GCRequest,
) (roachpb.GCResponse, *PostCommitTrigger, error) {
	// Silently drop keys that fall outside this replica's range; races with
	// range splits can route GC keys to the wrong replica, and dropped keys
	// are simply re-collected later on the correct one.
	gcKeys := make([]roachpb.GCRequest_GCKey, 0, len(args.Keys))
	for _, gcKey := range args.Keys {
		if r.ContainsKey(gcKey.Key) {
			gcKeys = append(gcKeys, gcKey)
		}
	}

	var reply roachpb.GCResponse
	// Garbage collect the retained keys by their expiration timestamps.
	if err := engine.MVCCGarbageCollect(ctx, batch, ms, gcKeys, h.Timestamp); err != nil {
		return reply, nil, err
	}

	// GC requests may arrive out of order; Forward keeps the thresholds
	// monotonic by only ever moving them ahead.
	r.mu.Lock()
	newThreshold := r.mu.state.GCThreshold
	newTxnSpanGCThreshold := r.mu.state.TxnSpanGCThreshold
	newThreshold.Forward(args.Threshold)
	newTxnSpanGCThreshold.Forward(args.TxnSpanGCThreshold)
	r.mu.Unlock()

	trigger := &PostCommitTrigger{
		gcThreshold:        &newThreshold,
		txnSpanGCThreshold: &newTxnSpanGCThreshold,
	}

	// Persist both thresholds alongside the GC itself.
	if err := setGCThreshold(ctx, batch, ms, r.Desc().RangeID, &newThreshold); err != nil {
		return reply, nil, err
	}
	if err := setTxnSpanGCThreshold(ctx, batch, ms, r.Desc().RangeID, &newTxnSpanGCThreshold); err != nil {
		return reply, nil, err
	}

	return reply, trigger, nil
}
// PushTxn resolves conflicts between concurrent txns (or
// between a non-transactional reader or writer and a txn) in several
// ways depending on the statuses and priorities of the conflicting
// transactions. The PushTxn operation is invoked by a
// "pusher" (the writer trying to abort a conflicting txn or the
// reader trying to push a conflicting txn's commit timestamp
// forward), who attempts to resolve a conflict with a "pushee"
// (args.PushTxn -- the pushee txn whose intent(s) caused the
// conflict). A pusher is either transactional, in which case
// PushTxn is completely initialized, or not, in which case the
// PushTxn has only the priority set.
//
// Txn already committed/aborted: If pushee txn is committed or
// aborted return success.
//
// Txn Timeout: If pushee txn entry isn't present or its LastHeartbeat
// timestamp isn't set, use its as LastHeartbeat. If current time -
// LastHeartbeat > 2 * DefaultHeartbeatInterval, then the pushee txn
// should be either pushed forward, aborted, or confirmed not pending,
// depending on value of Request.PushType.
//
// Old Txn Epoch: If persisted pushee txn entry has a newer Epoch than
// PushTxn.Epoch, return success, as older epoch may be removed.
//
// Lower Txn Priority: If pushee txn has a lower priority than pusher,
// adjust pushee's persisted txn depending on value of
// args.PushType. If args.PushType is PUSH_ABORT, set txn.Status to
// ABORTED, and priority to one less than the pusher's priority and
// return success. If args.PushType is PUSH_TIMESTAMP, set
// txn.Timestamp to just after PushTo.
//
// Higher Txn Priority: If pushee txn has a higher priority than
// pusher, return TransactionPushError. Transaction will be retried
// with priority one less than the pushee's higher priority.
//
// If the pusher is non-transactional, args.PusherTxn is an empty
// proto with only the priority set.
//
// If the pushee is aborted, its timestamp will be forwarded to match its last
// client activity timestamp (i.e. last heartbeat), if available. This is done
// so that the updated timestamp populates the abort cache, allowing the GC
// queue to purge entries for which the transaction coordinator must have found
// out via its heartbeats that the transaction has failed.
func (r *Replica) PushTxn(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.PushTxnRequest,
) (roachpb.PushTxnResponse, error) {
	var reply roachpb.PushTxnResponse

	// PushTxn is itself non-transactional; a request-level txn is rejected.
	if h.Txn != nil {
		return reply, errTransactionUnsupported
	}
	if args.Now.Equal(hlc.ZeroTimestamp) {
		return reply, errors.Errorf("the field Now must be provided")
	}

	// The request must be addressed to the pushee's txn record location.
	if !bytes.Equal(args.Key, args.PusheeTxn.Key) {
		return reply, errors.Errorf("request key %s should match pushee's txn key %s", args.Key, args.PusheeTxn.Key)
	}
	key := keys.TransactionKey(args.PusheeTxn.Key, args.PusheeTxn.ID)

	// Fetch existing transaction; if missing, we're allowed to abort.
	existTxn := &roachpb.Transaction{}
	ok, err := engine.MVCCGetProto(ctx, batch, key, hlc.ZeroTimestamp,
		true /* consistent */, nil /* txn */, existTxn)
	if err != nil {
		return reply, err
	}
	// There are three cases in which there is no transaction entry:
	//
	// * the pushee is still active but the BeginTransaction was delayed
	//   for long enough that a write intent from this txn to another
	//   range is causing another reader or writer to push.
	// * the pushee resolved its intents synchronously on successful commit;
	//   in this case, the transaction record of the pushee is also removed.
	//   Note that in this case, the intent which prompted this PushTxn
	//   doesn't exist any more.
	// * the pushee timed out or was aborted and the intent not cleaned up,
	//   but the transaction record was garbage collected.
	//
	// We currently make no attempt at guessing which one it is, though we
	// could (see #1939). Instead, a new aborted entry is always written.
	//
	// TODO(tschottdorf): we should actually improve this when we
	// garbage-collect aborted transactions, or we run the risk of a push
	// recreating a GC'ed transaction as PENDING, which is an error if it
	// has open intents (which is likely if someone pushes it).
	if !ok {
		// If getting an update for a transaction record which doesn't yet
		// exist, return empty Pushee, except when querying.
		//
		// Note that we *do* abort the transaction in PUSH_TOUCH mode. This
		// leaves transactions which write intents before their txn entry
		// vulnerable, but the alternative is having more intents never cleaned
		// up eagerly.
		if args.PushType == roachpb.PUSH_QUERY {
			return reply, nil
		}
		// The transaction doesn't exist on disk; we're allowed to abort it.
		// TODO(tschottdorf): especially for SNAPSHOT transactions, there's
		// something to win here by not aborting, but instead pushing the
		// timestamp. For SERIALIZABLE it's less important, but still better
		// to have them restart than abort. See #3344.
		// TODO(tschottdorf): double-check for problems emanating from
		// using a trivial Transaction proto here. Maybe some fields ought
		// to receive dummy values.
		reply.PusheeTxn.Status = roachpb.ABORTED
		reply.PusheeTxn.TxnMeta = args.PusheeTxn
		reply.PusheeTxn.Timestamp = args.Now // see method comment
		// Setting OrigTimestamp bumps LastActive(); see #9265.
		reply.PusheeTxn.OrigTimestamp = args.Now
		// Persist the synthesized ABORTED record inline (zero timestamp).
		return reply, engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, &reply.PusheeTxn)
	}
	// Start with the persisted transaction record as final transaction.
	reply.PusheeTxn = existTxn.Clone()

	// The pusher might be aware of a newer version of the pushee.
	reply.PusheeTxn.Timestamp.Forward(args.PusheeTxn.Timestamp)
	if reply.PusheeTxn.Epoch < args.PusheeTxn.Epoch {
		reply.PusheeTxn.Epoch = args.PusheeTxn.Epoch
	}

	// If already committed or aborted, return success.
	if reply.PusheeTxn.Status != roachpb.PENDING {
		// Trivial noop.
		return reply, nil
	}

	// If we're trying to move the timestamp forward, and it's already
	// far enough forward, return success.
	if args.PushType == roachpb.PUSH_TIMESTAMP && args.PushTo.Less(reply.PusheeTxn.Timestamp) {
		// Trivial noop.
		return reply, nil
	}

	// If getting an update for a transaction record, return now.
	if args.PushType == roachpb.PUSH_QUERY {
		return reply, nil
	}

	priority := args.PusherTxn.Priority

	// Decide who wins the push. The cases are evaluated in order, so the
	// expiration check takes precedence over everything else.
	var pusherWins bool
	var reason string

	switch {
	case reply.PusheeTxn.LastActive().Less(args.Now.Add(-2*base.DefaultHeartbeatInterval.Nanoseconds(), 0)):
		// No heartbeat for more than twice the heartbeat interval: the
		// pushee's coordinator is presumed dead.
		reason = "pushee is expired"
		// When cleaning up, actually clean up (as opposed to simply pushing
		// the garbage in the path of future writers).
		args.PushType = roachpb.PUSH_ABORT
		pusherWins = true
	case args.PushType == roachpb.PUSH_TOUCH:
		// If just attempting to cleanup old or already-committed txns,
		// pusher always fails.
		pusherWins = false
	case args.PushType == roachpb.PUSH_TIMESTAMP &&
		reply.PusheeTxn.Isolation == enginepb.SNAPSHOT:
		// Can always push a SNAPSHOT txn's timestamp.
		reason = "pushee is SNAPSHOT"
		pusherWins = true
	case reply.PusheeTxn.Priority != priority:
		reason = "priority"
		pusherWins = reply.PusheeTxn.Priority < priority
	case args.PusherTxn.ID == nil:
		reason = "equal priorities; pusher not transactional"
		pusherWins = false
	default:
		// Deterministic tie-break on txn ID bytes.
		reason = "equal priorities; greater ID wins"
		pusherWins = bytes.Compare(reply.PusheeTxn.ID.GetBytes(),
			args.PusherTxn.ID.GetBytes()) < 0
	}

	if log.V(1) && reason != "" {
		s := "pushed"
		if !pusherWins {
			s = "failed to push"
		}
		log.Infof(ctx, "%s "+s+" %s: %s (pushee last active: %s)",
			args.PusherTxn.ID.Short(), reply.PusheeTxn.ID.Short(), reason,
			reply.PusheeTxn.LastActive())
	}

	if !pusherWins {
		// Pusher loses: the caller retries with a TransactionPushError.
		err := roachpb.NewTransactionPushError(reply.PusheeTxn)
		if log.V(1) {
			log.Infof(ctx, "%v", err)
		}
		return reply, err
	}

	// Upgrade priority of pushed transaction to one less than pusher's.
	reply.PusheeTxn.UpgradePriority(priority - 1)

	// If aborting transaction, set new status and return success.
	if args.PushType == roachpb.PUSH_ABORT {
		reply.PusheeTxn.Status = roachpb.ABORTED
		// Forward the timestamp to accommodate abort cache GC. See method
		// comment for details.
		reply.PusheeTxn.Timestamp.Forward(reply.PusheeTxn.LastActive())
	} else if args.PushType == roachpb.PUSH_TIMESTAMP {
		// Otherwise, update timestamp to be one greater than the request's timestamp.
		reply.PusheeTxn.Timestamp = args.PushTo
		reply.PusheeTxn.Timestamp.Logical++
	}

	// Persist the pushed transaction using zero timestamp for inline value.
	if err := engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, &reply.PusheeTxn); err != nil {
		return reply, err
	}
	return reply, nil
}
// setAbortCache updates the abort cache for the given transaction: when
// poison is true it writes an entry so that future reads or writes by the
// txn on this range fail fast; when poison is false it deletes any
// existing entry for the txn.
func (r *Replica) setAbortCache(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	txn enginepb.TxnMeta,
	poison bool,
) error {
	if poison {
		// Record enough of the txn to poison subsequent activity on this range.
		entry := roachpb.AbortCacheEntry{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Priority:  txn.Priority,
		}
		return r.abortCache.Put(ctx, batch, ms, txn.ID, &entry)
	}
	// Not poisoning: clear any stale entry for this txn.
	return r.abortCache.Del(ctx, batch, ms, txn.ID)
}
// ResolveIntent resolves a write intent at the specified key according to
// the status of the transaction which created it, updating the abort
// cache when the intent's transaction aborted.
func (r *Replica) ResolveIntent(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ResolveIntentRequest,
) (roachpb.ResolveIntentResponse, error) {
	var reply roachpb.ResolveIntentResponse
	if h.Txn != nil {
		// Intent resolution may not itself run inside a transaction.
		return reply, errTransactionUnsupported
	}

	intent := roachpb.Intent{
		Span:   args.Span,
		Txn:    args.IntentTxn,
		Status: args.Status,
	}
	err := engine.MVCCResolveWriteIntent(ctx, batch, ms, intent)
	if err != nil {
		return reply, err
	}
	if intent.Status != roachpb.ABORTED {
		return reply, nil
	}
	// The intent's txn aborted: poison or clear the abort cache as requested.
	return reply, r.setAbortCache(ctx, batch, ms, args.IntentTxn, args.Poison)
}
// ResolveIntentRange resolves all write intents in the specified key range
// according to the status of the transaction which created them, updating
// the abort cache when that transaction aborted.
func (r *Replica) ResolveIntentRange(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ResolveIntentRangeRequest,
) (roachpb.ResolveIntentRangeResponse, error) {
	var reply roachpb.ResolveIntentRangeResponse
	if h.Txn != nil {
		// Intent resolution may not itself run inside a transaction.
		return reply, errTransactionUnsupported
	}

	intent := roachpb.Intent{
		Span:   args.Span,
		Txn:    args.IntentTxn,
		Status: args.Status,
	}
	// No cap on the number of intents resolved in the range.
	_, err := engine.MVCCResolveWriteIntentRange(ctx, batch, ms, intent, math.MaxInt64)
	if err != nil {
		return reply, err
	}
	if intent.Status != roachpb.ABORTED {
		return reply, nil
	}
	// The intents' txn aborted: poison or clear the abort cache as requested.
	return reply, r.setAbortCache(ctx, batch, ms, args.IntentTxn, args.Poison)
}
// Merge is used to merge a value into an existing key. Merge is an
// efficient accumulation operation which is exposed by RocksDB, used
// by CockroachDB for the efficient accumulation of certain
// values. Due to the difficulty of making these operations
// transactional, merges are not currently exposed directly to
// clients. Merged values are explicitly not MVCC data.
func (r *Replica) Merge(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.MergeRequest,
) (roachpb.MergeResponse, error) {
	// Delegate entirely to the engine-level merge operator.
	err := engine.MVCCMerge(ctx, batch, ms, args.Key, h.Timestamp, args.Value)
	return roachpb.MergeResponse{}, err
}
// TruncateLog discards the raft log entries preceding args.Index.
// Truncating an already-truncated prefix is a no-op, as is receiving a
// request whose RangeID does not match this replica (possible after a
// merge). Emits a trigger carrying the new truncated state and the
// adjusted raft log size.
func (r *Replica) TruncateLog(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.TruncateLogRequest,
) (roachpb.TruncateLogResponse, *PostCommitTrigger, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var reply roachpb.TruncateLogResponse

	// After a merge, it's possible that this request was sent to the wrong
	// range based on the start key; ignore such requests.
	if r.RangeID != args.RangeID {
		log.Infof(ctx, "attempting to truncate raft logs for another range %d. Normally this is due to a merge and can be ignored.",
			args.RangeID)
		return reply, nil, nil
	}

	// A truncation at or below the current first index has already happened.
	firstIndex, err := r.FirstIndex()
	if err != nil {
		return reply, nil, err
	}
	if firstIndex >= args.Index {
		if log.V(3) {
			log.Infof(ctx, "attempting to truncate previously truncated raft log. FirstIndex:%d, TruncateFrom:%d",
				firstIndex, args.Index)
		}
		return reply, nil, nil
	}

	// args.Index is the first index to keep; the truncated state records the
	// term of the last discarded entry.
	term, err := r.Term(args.Index - 1)
	if err != nil {
		return reply, nil, err
	}

	start := keys.RaftLogKey(r.RangeID, 0)
	end := keys.RaftLogKey(r.RangeID, args.Index)
	var diff enginepb.MVCCStats
	// Passing zero timestamp to MVCCDeleteRange is equivalent to a ranged clear
	// but it also computes stats.
	if _, _, _, err := engine.MVCCDeleteRange(ctx, batch, &diff, start, end, math.MaxInt64, /* max */
		hlc.ZeroTimestamp, nil /* txn */, false /* returnKeys */); err != nil {
		return reply, nil, err
	}

	// raftLogSize isn't persisted between server restarts, so it may have
	// drifted; clamp at zero rather than report a negative size.
	raftLogSize := r.mu.raftLogSize + diff.SysBytes
	if raftLogSize < 0 {
		raftLogSize = 0
	}

	truncState := &roachpb.RaftTruncatedState{
		Index: args.Index - 1,
		Term:  term,
	}
	trigger := &PostCommitTrigger{
		truncatedState: truncState,
		raftLogSize:    &raftLogSize,
	}
	return reply, trigger, engine.MVCCPutProto(ctx, batch, ms, keys.RaftTruncatedStateKey(r.RangeID), hlc.ZeroTimestamp, nil, truncState)
}
// newFailedLeaseTrigger builds the trigger returned alongside any lease
// request error; the pointed-to false records the failed attempt for the
// lease metrics.
func newFailedLeaseTrigger() *PostCommitTrigger {
	trigger := &PostCommitTrigger{
		leaseMetricsResult: new(bool), // *false: lease request failed
	}
	return trigger
}
// RequestLease sets the range lease for this range. The command fails
// only if the desired start timestamp collides with a previous lease.
// Otherwise, the start timestamp is wound back to right after the expiration
// of the previous lease (or zero). If this range replica is already the lease
// holder, the expiration will be extended or shortened as indicated. For a new
// lease, all duties required of the range lease holder are commenced, including
// clearing the command queue and timestamp cache.
func (r *Replica) RequestLease(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.RequestLeaseRequest,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	r.mu.Lock()
	defer r.mu.Unlock()
	prevLease := r.mu.state.Lease
	// Pre-build the rejection error; Message is filled in at whichever
	// failure site rejects the request.
	rErr := &roachpb.LeaseRejectedError{
		Existing:  *prevLease,
		Requested: args.Lease,
	}
	// MIGRATION(tschottdorf): needed to apply Raft commands which got proposed
	// before the StartStasis field was introduced.
	if args.Lease.StartStasis.Equal(hlc.ZeroTimestamp) {
		args.Lease.StartStasis = args.Lease.Expiration
	}
	// An extension is a request from the store that already holds the lease.
	isExtension := prevLease.Replica.StoreID == args.Lease.Replica.StoreID
	effectiveStart := args.Lease.Start
	// Wind the start timestamp back as far towards the previous lease as we
	// can. That'll make sure that when multiple leases are requested out of
	// order at the same replica (after all, they use the request timestamp,
	// which isn't straight out of our local clock), they all succeed unless
	// they have a "real" issue with a previous lease. Example: Assuming no
	// previous lease, one request for [5, 15) followed by one for [0, 15)
	// would fail without this optimization. With it, the first request
	// effectively gets the lease for [0, 15), which the second one can commit
	// again (even extending your own lease is possible; see below).
	//
	// If this is our lease (or no prior lease exists), we effectively absorb
	// the old lease. This allows multiple requests from the same replica to
	// merge without ticking away from the minimal common start timestamp. It
	// also has the positive side-effect of fixing #3561, which was caused by
	// the absence of replay protection.
	if prevLease.Replica.StoreID == 0 || isExtension {
		effectiveStart.Backward(prevLease.Start)
	} else {
		effectiveStart.Backward(prevLease.Expiration.Next())
	}

	if isExtension {
		if effectiveStart.Less(prevLease.Start) {
			rErr.Message = "extension moved start timestamp backwards"
			return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(), rErr
		}
		// Never shorten the lease below what the previous lease promised.
		args.Lease.Expiration.Forward(prevLease.Expiration)
	} else if effectiveStart.Less(prevLease.Expiration) {
		rErr.Message = "requested lease overlaps previous lease"
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(), rErr
	}
	args.Lease.Start = effectiveStart

	return r.applyNewLeaseLocked(ctx, batch, ms, args.Lease, isExtension)
}
// TransferLease sets the lease holder for the range.
// Unlike with RequestLease(), the new lease is allowed to overlap the old one,
// the contract being that the transfer must have been initiated by the (soon
// ex-) lease holder which must have dropped all of its lease holder powers
// before proposing.
func (r *Replica) TransferLease(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.TransferLeaseRequest,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	r.mu.Lock()
	defer r.mu.Unlock()
	if log.V(2) {
		// Verbose-only diagnostics; the transfer itself doesn't need prevLease.
		prevLease := r.mu.state.Lease
		log.Infof(ctx, "lease transfer: prev lease: %+v, new lease: %+v "+
			"old expiration: %s, new start: %s",
			prevLease, args.Lease, prevLease.Expiration, args.Lease.Start)
	}
	// Transfers always change the lease holder, so isExtension is false.
	return r.applyNewLeaseLocked(ctx, batch, ms, args.Lease, false /* isExtension */)
}
// applyNewLeaseLocked checks that the lease contains a valid interval and that
// the new lease holder is still a member of the replica set, and then proceeds
// to write the new lease to the batch, emitting an appropriate trigger.
//
// The new lease might be a lease for a range that didn't previously have an
// active lease, might be an extension or a lease transfer.
//
// isExtension should be set if the lease holder does not change with this
// lease. If it doesn't change, we don't need a PostCommitTrigger that
// synchronizes with reads.
//
// r.mu needs to be locked.
//
// TODO(tschottdorf): refactoring what's returned from the trigger here makes
// sense to minimize the amount of code intolerant of rolling updates.
func (r *Replica) applyNewLeaseLocked(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	lease roachpb.Lease,
	isExtension bool,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	prevLease := r.mu.state.Lease
	// Ensure Start < StartStasis <= Expiration.
	if !lease.Start.Less(lease.StartStasis) ||
		lease.Expiration.Less(lease.StartStasis) {
		// This amounts to a bug.
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(),
			&roachpb.LeaseRejectedError{
				Existing:  *prevLease,
				Requested: lease,
				Message: fmt.Sprintf("illegal lease interval: [%s, %s, %s]",
					lease.Start, lease.StartStasis, lease.Expiration),
			}
	}
	// Verify that requesting replica is part of the current replica set.
	if _, ok := r.mu.state.Desc.GetReplicaDescriptor(lease.Replica.StoreID); !ok {
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(),
			&roachpb.LeaseRejectedError{
				Existing:  *prevLease,
				Requested: lease,
				Message:   "replica not found",
			}
	}

	var reply roachpb.RequestLeaseResponse
	// Store the lease to disk & in-memory.
	if err := setLease(ctx, batch, ms, r.RangeID, &lease); err != nil {
		return reply, newFailedLeaseTrigger(), err
	}

	// t records a successful lease application for the lease metrics.
	t := true
	trigger := &PostCommitTrigger{
		// If we didn't block concurrent reads here, there'd be a chance that
		// reads could sneak in on a new lease holder between setting the lease
		// and updating the low water mark. This in itself isn't a consistency
		// violation, but it's a bit suspicious and did make
		// TestRangeTransferLease flaky. We err on the side of caution for now, but
		// at least we don't do it in case of an extension.
		//
		// TODO(tschottdorf): Maybe we shouldn't do this at all. Need to think
		// through potential consequences.
		noConcurrentReads: !isExtension,
		lease:              &lease,
		leaseMetricsResult: &t,
		// TODO(tschottdorf): having traced the origin of this call back to
		// rev 6281926, it seems that we should only be doing this when the
		// lease holder has changed. However, it's likely not a big deal to
		// do it always.
		maybeGossipSystemConfig: true,
	}
	return reply, trigger, nil
}
// CheckConsistency runs a consistency check on the range. It first applies
// a ComputeChecksum command on the range. It then issues CollectChecksum
// commands to the other replicas, compares their checksums against the
// local one, and logs (or, depending on settings and testing knobs,
// panics on) any inconsistency found.
func (r *Replica) CheckConsistency(
	ctx context.Context,
	args roachpb.CheckConsistencyRequest,
	desc *roachpb.RangeDescriptor,
) (roachpb.CheckConsistencyResponse, *roachpb.Error) {
	key := desc.StartKey.AsRawKey()
	endKey := desc.EndKey.AsRawKey()
	// id ties the local checksum computation to the remote collections below.
	id := uuid.MakeV4()
	// Send a ComputeChecksum to all the replicas of the range.
	{
		var ba roachpb.BatchRequest
		ba.RangeID = r.Desc().RangeID
		checkArgs := &roachpb.ComputeChecksumRequest{
			Span: roachpb.Span{
				Key:    key,
				EndKey: endKey,
			},
			Version:    replicaChecksumVersion,
			ChecksumID: id,
			Snapshot:   args.WithDiff,
		}
		ba.Add(checkArgs)
		ba.Timestamp = r.store.Clock().Now()
		_, pErr := r.Send(ctx, ba)
		if pErr != nil {
			return roachpb.CheckConsistencyResponse{}, pErr
		}
	}

	// Get local checksum. This might involve waiting for it to be computed.
	c, err := r.getChecksum(ctx, id)
	if err != nil {
		return roachpb.CheckConsistencyResponse{}, roachpb.NewError(
			errors.Wrapf(err, "could not compute checksum for range [%s, %s]", key, endKey))
	}

	// Get remote checksums.
	localReplica, err := r.GetReplicaDescriptor()
	if err != nil {
		return roachpb.CheckConsistencyResponse{},
			roachpb.NewError(errors.Wrap(err, "could not get replica descriptor"))
	}
	// inconsistencyCount is incremented atomically from the async tasks below.
	var inconsistencyCount uint32
	var wg sync.WaitGroup
	sp := r.store.ctx.StorePool
	for _, replica := range r.Desc().Replicas {
		if replica == localReplica {
			continue
		}
		wg.Add(1)
		replica := replica // per-iteration copy
		if err := r.store.Stopper().RunAsyncTask(ctx, func(ctx context.Context) {
			defer wg.Done()
			addr, err := sp.resolver(replica.NodeID)
			if err != nil {
				log.Error(ctx, errors.Wrapf(err, "could not resolve node ID %d", replica.NodeID))
				return
			}
			conn, err := sp.rpcContext.GRPCDial(addr.String())
			if err != nil {
				log.Error(ctx,
					errors.Wrapf(err, "could not dial node ID %d address %s", replica.NodeID, addr))
				return
			}
			client := NewConsistencyClient(conn)
			req := &CollectChecksumRequest{
				StoreRequestHeader{NodeID: replica.NodeID, StoreID: replica.StoreID},
				r.RangeID,
				id,
				c.checksum,
			}
			resp, err := client.CollectChecksum(ctx, req)
			if err != nil {
				log.Error(ctx, errors.Wrapf(err, "could not CollectChecksum from replica %s", replica))
				return
			}
			if bytes.Equal(c.checksum, resp.Checksum) {
				return
			}
			atomic.AddUint32(&inconsistencyCount, 1)
			var buf bytes.Buffer
			_, _ = fmt.Fprintf(&buf, "replica %s is inconsistent: expected checksum %x, got %x",
				replica, c.checksum, resp.Checksum)
			if c.snapshot != nil && resp.Snapshot != nil {
				diff := diffRange(c.snapshot, resp.Snapshot)
				if report := r.store.ctx.TestingKnobs.BadChecksumReportDiff; report != nil {
					report(r.store.Ident, diff)
				}
				for _, d := range diff {
					otherSide := "lease holder"
					if d.LeaseHolder {
						otherSide = "replica"
					}
					_, _ = fmt.Fprintf(&buf, "\nk:v = (%q (%x), %s, %.1024x) not present on %s",
						d.Key, d.Key, d.Timestamp, d.Value, otherSide)
				}
			}
			// BUGFIX: the buffer must be passed through a "%s" verb rather
			// than used as the format string itself; it contains %x-formatted
			// checksums and %q-quoted keys whose '%' characters would
			// otherwise be misinterpreted as formatting directives.
			log.Errorf(ctx, "%s", buf.String())
		}); err != nil {
			// The task never ran; balance the wg.Add above.
			log.Error(ctx, errors.Wrap(err, "could not run async CollectChecksum"))
			wg.Done()
		}
	}
	wg.Wait()

	if inconsistencyCount == 0 {
		// All replicas agree: nothing to do.
	} else if args.WithDiff {
		logFunc := log.Errorf
		if p := r.store.TestingKnobs().BadChecksumPanic; p != nil {
			p(r.store.Ident)
		} else if r.store.ctx.ConsistencyCheckPanicOnFailure {
			logFunc = log.Fatalf
		}
		logFunc(ctx, "consistency check failed with %d inconsistent replicas", inconsistencyCount)
	} else {
		// Inconsistency found but no diff was requested: rerun the check
		// asynchronously with withDiff=true to fetch the details.
		if err := r.store.stopper.RunAsyncTask(r.ctx, func(ctx context.Context) {
			log.Errorf(ctx, "consistency check failed with %d inconsistent replicas; fetching details",
				inconsistencyCount)
			// Keep the request from crossing the local->global boundary.
			if bytes.Compare(key, keys.LocalMax) < 0 {
				key = keys.LocalMax
			}
			if err := r.store.db.CheckConsistency(ctx, key, endKey, true /* withDiff */); err != nil {
				log.Error(ctx, errors.Wrap(err, "could not rerun consistency check"))
			}
		}); err != nil {
			log.Error(ctx, errors.Wrap(err, "could not rerun consistency check"))
		}
	}
	return roachpb.CheckConsistencyResponse{}, nil
}
const (
	// replicaChecksumVersion is the protocol version for checksum
	// computation; ComputeChecksum ignores requests whose Version field
	// does not match it.
	replicaChecksumVersion = 2
	// replicaChecksumGCInterval is how long a completed checksum entry is
	// retained: computeChecksumDone sets the entry's gcTimestamp this far
	// in the future.
	replicaChecksumGCInterval = time.Hour
)
// getChecksum waits for the result of ComputeChecksum and returns it.
// It returns an error if the wait is interrupted (store stopping or
// context cancellation), if the entry is missing from the map (e.g. it
// has already been GCed), or if the computation finished without
// producing a checksum.
func (r *Replica) getChecksum(
	ctx context.Context,
	id uuid.UUID,
) (replicaChecksum, error) {
	now := timeutil.Now()
	r.mu.Lock()
	r.gcOldChecksumEntriesLocked(now)
	c, ok := r.mu.checksums[id]
	if !ok {
		// No entry yet: create one so that computeChecksumDone has a
		// notification channel to close once the result is available.
		if d, dOk := ctx.Deadline(); dOk {
			c.gcTimestamp = d
		}
		c.notify = make(chan struct{})
		r.mu.checksums[id] = c
	}
	r.mu.Unlock()
	// Wait for the computation to complete (c.notify is closed by
	// computeChecksumDone), or for shutdown/cancellation.
	select {
	case <-r.store.Stopper().ShouldStop():
		return replicaChecksum{},
			errors.Errorf("store has stopped while waiting for compute checksum (ID = %s)", id)
	case <-ctx.Done():
		return replicaChecksum{},
			errors.Wrapf(ctx.Err(), "while waiting for compute checksum (ID = %s)", id)
	case <-c.notify:
	}
	if log.V(1) {
		log.Infof(ctx, "waited for compute checksum for %s", timeutil.Since(now))
	}
	// Re-read the entry under the lock: it should now carry the computed
	// checksum (and possibly a snapshot), unless it was GCed meanwhile.
	r.mu.Lock()
	c, ok = r.mu.checksums[id]
	r.mu.Unlock()
	if !ok {
		return replicaChecksum{}, errors.Errorf("no map entry for checksum (ID = %s)", id)
	}
	if c.checksum == nil {
		return replicaChecksum{}, errors.Errorf(
			"checksum is nil, most likely because the async computation could not be run (ID = %s)", id)
	}
	return c, nil
}
// computeChecksumDone records the computed checksum (and optional debug
// snapshot) for the given ID, schedules the entry for GC, and notifies any
// waiters blocked in getChecksum.
func (r *Replica) computeChecksumDone(
	ctx context.Context,
	id uuid.UUID,
	sha []byte,
	snapshot *roachpb.RaftSnapshotData,
) {
	r.mu.Lock()
	defer r.mu.Unlock()
	c, ok := r.mu.checksums[id]
	if !ok {
		// ComputeChecksum adds an entry into the map, and the entry can
		// only be GCed once its gcTimestamp is set (which happens here).
		// Something really bad happened.
		log.Errorf(ctx, "no map entry for checksum (ID = %s)", id)
		return
	}
	c.checksum = sha
	c.gcTimestamp = timeutil.Now().Add(replicaChecksumGCInterval)
	c.snapshot = snapshot
	r.mu.checksums[id] = c
	// Wake up everyone waiting on this checksum.
	close(c.notify)
}
// ComputeChecksum starts the process of computing a checksum on the replica at
// a particular snapshot. The checksum is later verified through a
// CollectChecksumRequest.
func (r *Replica) ComputeChecksum(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ComputeChecksumRequest,
) (roachpb.ComputeChecksumResponse, *PostCommitTrigger, error) {
	if args.Version == replicaChecksumVersion {
		// Versions match: hand the request off to the post-commit trigger,
		// which kicks off the actual checksum computation.
		return roachpb.ComputeChecksumResponse{}, &PostCommitTrigger{computeChecksum: &args}, nil
	}
	// Mismatched version: log and ignore the request (no trigger, no error).
	log.Errorf(ctx, "Incompatible versions: e=%d, v=%d", replicaChecksumVersion, args.Version)
	return roachpb.ComputeChecksumResponse{}, nil, nil
}
// sha512 computes the SHA512 hash of all replicated data in the range at the
// given snapshot. If snapshot is non-nil, every k:v pair is additionally
// dumped into it for later diffing.
func (r *Replica) sha512(
	desc roachpb.RangeDescriptor,
	snap engine.Reader,
	snapshot *roachpb.RaftSnapshotData,
) ([]byte, error) {
	h := sha512.New()
	// Walk every replicated key/value pair in the range.
	it := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		k := it.Key()
		v := it.Value()
		if snapshot != nil {
			// Record the raw pair in the debug snapshot.
			snapshot.KV = append(snapshot.KV, roachpb.RaftSnapshotData_KeyValue{
				Key:       k.Key,
				Value:     v,
				Timestamp: k.Timestamp,
			})
		}
		// Feed the hasher in a fixed order — key length, value length, key
		// bytes, marshaled timestamp, value bytes — so all replicas produce
		// comparable digests.
		if err := binary.Write(h, binary.LittleEndian, int64(len(k.Key))); err != nil {
			return nil, err
		}
		if err := binary.Write(h, binary.LittleEndian, int64(len(v))); err != nil {
			return nil, err
		}
		if _, err := h.Write(k.Key); err != nil {
			return nil, err
		}
		ts, err := protoutil.Marshal(&k.Timestamp)
		if err != nil {
			return nil, err
		}
		if _, err := h.Write(ts); err != nil {
			return nil, err
		}
		if _, err := h.Write(v); err != nil {
			return nil, err
		}
	}
	return h.Sum(make([]byte, 0, sha512.Size)), nil
}
// ChangeFrozen freezes or unfreezes the Replica idempotently. It only takes
// effect when the request key addresses the Range's start key (or, for the
// first range, the lowest addressable key), but it always records in the
// response the Stores on which it ran.
func (r *Replica) ChangeFrozen(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ChangeFrozenRequest,
) (roachpb.ChangeFrozenResponse, *PostCommitTrigger, error) {
	var resp roachpb.ChangeFrozenResponse
	resp.MinStartKey = roachpb.RKeyMax
	curStart, err := keys.Addr(args.Key)
	if err != nil {
		return resp, nil, err
	}
	// The request key must be a global key (i.e. its address equals itself).
	if !bytes.Equal(curStart, args.Key) {
		return resp, nil, errors.Errorf("unsupported range-local key")
	}
	desc := r.Desc()
	frozen, err := loadFrozenStatus(ctx, batch, desc.RangeID)
	if err != nil || frozen == args.Frozen {
		// Something went wrong or we're already in the right frozen state. In
		// the latter case, we avoid writing the "same thing" because "we"
		// might actually not be the same version of the code (picture a couple
		// of freeze requests lined up, but not all of them applied between
		// version changes).
		return resp, nil, err
	}
	if args.MustVersion == "" {
		return resp, nil, errors.Errorf("empty version tag")
	} else if bi := build.GetInfo(); !frozen && args.Frozen && args.MustVersion != bi.Tag {
		// Some earlier version tried to freeze but we never applied it until
		// someone restarted this node with another version. No bueno - have to
		// assume that integrity has already been compromised.
		// Note that we have extra hooks upstream which delay returning success
		// to the caller until it's reasonable to assume that all Replicas have
		// applied the freeze.
		// This is a classical candidate for returning replica corruption, but
		// we don't do it (yet); for now we'll assume that the update steps
		// are carried out in correct order.
		log.Warningf(ctx, "freeze %s issued from %s is applied by %s",
			desc, args.MustVersion, bi)
	}
	// Generally, we want to act only if the request hits the Range's StartKey.
	// The one case in which that behaves unexpectedly is if we're the first
	// range, which has StartKey equal to KeyMin, but the lowest curStart which
	// is feasible is LocalMax.
	if !desc.StartKey.Less(curStart) {
		resp.RangesAffected++
	} else if locMax, err := keys.Addr(keys.LocalMax); err != nil {
		return resp, nil, err
	} else if !locMax.Less(curStart) {
		resp.RangesAffected++
	}
	// Note down the Stores on which this request ran, even if the Range was
	// not affected.
	resp.Stores = make(map[roachpb.StoreID]roachpb.NodeID, len(desc.Replicas))
	for i := range desc.Replicas {
		resp.Stores[desc.Replicas[i].StoreID] = desc.Replicas[i].NodeID
	}
	if resp.RangesAffected == 0 {
		return resp, nil, nil
	}
	resp.MinStartKey = desc.StartKey
	// Persist the new frozen state; the post-commit trigger below propagates
	// it to the in-memory replica state.
	if err := setFrozenStatus(ctx, batch, ms, r.Desc().RangeID, args.Frozen); err != nil {
		return roachpb.ChangeFrozenResponse{}, nil, err
	}
	trigger := &PostCommitTrigger{
		frozen: &args.Frozen,
	}
	return resp, trigger, nil
}
// ReplicaSnapshotDiff is a part of a []ReplicaSnapshotDiff which represents a diff between
// two replica snapshots. For now it's only a diff between their KV pairs.
type ReplicaSnapshotDiff struct {
	// LeaseHolder is set to true if this k:v pair is only present on the lease
	// holder.
	LeaseHolder bool
	// Key is the key of the differing pair.
	Key roachpb.Key
	// Timestamp is the MVCC timestamp of the differing pair.
	Timestamp hlc.Timestamp
	// Value is the raw value of the differing pair.
	Value []byte
}
// diffRange diffs the two sorted k:v dumps between the lease holder (l) and
// the replica (r), returning the pairs present (or differing) on only one
// side. Returns nil if either dump is nil.
func diffRange(l, r *roachpb.RaftSnapshotData) []ReplicaSnapshotDiff {
	if l == nil || r == nil {
		return nil
	}
	var out []ReplicaSnapshotDiff
	i, j := 0, 0
	for {
		// Zero-valued KVs (nil Key) act as sentinels once a side is exhausted.
		var lhs, rhs roachpb.RaftSnapshotData_KeyValue
		if i < len(l.KV) {
			lhs = l.KV[i]
		}
		if j < len(r.KV) {
			rhs = r.KV[j]
		}
		takeLeft := func() {
			out = append(out, ReplicaSnapshotDiff{LeaseHolder: true, Key: lhs.Key, Timestamp: lhs.Timestamp, Value: lhs.Value})
			i++
		}
		takeRight := func() {
			out = append(out, ReplicaSnapshotDiff{LeaseHolder: false, Key: rhs.Key, Timestamp: rhs.Timestamp, Value: rhs.Value})
			j++
		}
		// Establish the key ordering, treating an exhausted side as larger
		// than any real key.
		var cmp int
		switch {
		case lhs.Key == nil && rhs.Key == nil:
			// Both dumps fully traversed. Done!
			return out
		case lhs.Key == nil:
			cmp = 1
		case rhs.Key == nil:
			cmp = -1
		default:
			cmp = bytes.Compare(lhs.Key, rhs.Key)
		}
		if cmp < 0 {
			// Key only on the lease holder side.
			takeLeft()
		} else if cmp > 0 {
			// Key only on the replica side.
			takeRight()
		} else if !lhs.Timestamp.Equal(rhs.Timestamp) {
			// Same key, different timestamps: emit the newer one first.
			if rhs.Timestamp.Less(lhs.Timestamp) {
				takeLeft()
			} else {
				takeRight()
			}
		} else if !bytes.Equal(lhs.Value, rhs.Value) {
			// Same key and timestamp but diverging values: emit both sides.
			takeLeft()
			takeRight()
		} else {
			// Identical pair; advance both cursors.
			i++
			j++
		}
	}
}
// AdminSplit divides the range into two ranges, using either
// args.SplitKey (if provided) or an internally computed key that aims
// to roughly equipartition the range by size. The split is done
// inside of a distributed txn which writes updated left and new right
// hand side range descriptors, and updates the range addressing
// metadata. The handover of responsibility for the reassigned key
// range is carried out seamlessly through a split trigger carried out
// as part of the commit of that transaction.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. An
// operation which might split a range should obtain a copy of the range's
// current descriptor before making the decision to split. If the decision is
// affirmative the descriptor is passed to AdminSplit, which performs a
// Conditional Put on the RangeDescriptor to ensure that no other operation has
// modified the range in the time the decision was being made.
// TODO(tschottdorf): should assert that split key is not a local key.
//
// See the comment on splitTrigger for details on the complexities.
func (r *Replica) AdminSplit(
	ctx context.Context, args roachpb.AdminSplitRequest, desc *roachpb.RangeDescriptor,
) (roachpb.AdminSplitResponse, *roachpb.Error) {
	var reply roachpb.AdminSplitResponse
	// Determine split key if not provided with args. This scan is
	// allowed to be relatively slow because admin commands don't block
	// other commands.
	log.Event(ctx, "split begins")
	var splitKey roachpb.RKey
	{
		foundSplitKey := args.SplitKey
		if len(foundSplitKey) == 0 {
			// No key supplied: scan for the key that divides the range's
			// data roughly in half by size.
			snap := r.store.NewSnapshot()
			defer snap.Close()
			var err error
			targetSize := r.GetMaxBytes() / 2
			foundSplitKey, err = engine.MVCCFindSplitKey(ctx, snap, desc.RangeID, desc.StartKey, desc.EndKey, targetSize, nil /* logFn */)
			if err != nil {
				return reply, roachpb.NewErrorf("unable to determine split key: %s", err)
			}
		} else if !r.ContainsKey(foundSplitKey) {
			return reply, roachpb.NewError(roachpb.NewRangeKeyMismatchError(args.SplitKey, args.SplitKey, desc))
		}
		// Refuse to split inside a multi-key structure (e.g. a SQL row).
		foundSplitKey, err := keys.EnsureSafeSplitKey(foundSplitKey)
		if err != nil {
			return reply, roachpb.NewErrorf("cannot split range at key %s: %v",
				args.SplitKey, err)
		}
		splitKey, err = keys.Addr(foundSplitKey)
		if err != nil {
			return reply, roachpb.NewError(err)
		}
		if !splitKey.Equal(foundSplitKey) {
			return reply, roachpb.NewErrorf("cannot split range at range-local key %s", splitKey)
		}
		if !engine.IsValidSplitKey(foundSplitKey) {
			return reply, roachpb.NewErrorf("cannot split range at key %s", splitKey)
		}
	}
	// First verify this condition so that it will not return
	// roachpb.NewRangeKeyMismatchError if splitKey equals desc.EndKey,
	// otherwise it will cause an infinite retry loop.
	if desc.StartKey.Equal(splitKey) || desc.EndKey.Equal(splitKey) {
		return reply, roachpb.NewErrorf("range is already split at key %s", splitKey)
	}
	log.Event(ctx, "found split key")
	// Create right hand side range descriptor with the newly-allocated Range ID.
	rightDesc, err := r.store.NewRangeDescriptor(splitKey, desc.EndKey, desc.Replicas)
	if err != nil {
		return reply, roachpb.NewErrorf("unable to allocate right hand side range descriptor: %s", err)
	}
	// Init updated version of existing range descriptor.
	leftDesc := *desc
	leftDesc.EndKey = splitKey
	log.Infof(ctx, "initiating a split of this range at key %s", splitKey)
	if err := r.store.DB().Txn(context.TODO(), func(txn *client.Txn) error {
		log.Event(ctx, "split closure begins")
		defer log.Event(ctx, "split closure ends")
		// Update existing range descriptor for left hand side of
		// split. Note that we mutate the descriptor for the left hand
		// side of the split first to locate the txn record there.
		{
			b := txn.NewBatch()
			leftDescKey := keys.RangeDescriptorKey(leftDesc.StartKey)
			if err := updateRangeDescriptor(b, leftDescKey, desc, &leftDesc); err != nil {
				return err
			}
			// Commit this batch first to ensure that the transaction record
			// is created in the right place (our triggers rely on this),
			// but also to ensure the transaction record is created _before_
			// intents for the RHS range descriptor or addressing records.
			// This prevents cases where splits are aborted early due to
			// conflicts with meta intents before the txn record has been
			// written (see #9265).
			log.Event(ctx, "updating left descriptor")
			if err := txn.Run(b); err != nil {
				if _, ok := err.(*roachpb.ConditionFailedError); ok {
					return errors.Errorf("conflict updating range descriptors")
				}
				return err
			}
		}
		// Log the split into the range event log.
		// TODO(spencer): event logging API should accept a batch
		// instead of a transaction; there's no reason this logging
		// shouldn't be done in parallel via the batch with the updated
		// range addressing.
		if err := r.store.logSplit(txn, leftDesc, *rightDesc); err != nil {
			return err
		}
		b := txn.NewBatch()
		// Create range descriptor for right hand side of the split.
		rightDescKey := keys.RangeDescriptorKey(rightDesc.StartKey)
		if err := updateRangeDescriptor(b, rightDescKey, nil, rightDesc); err != nil {
			return err
		}
		// Update range descriptor addressing record(s).
		if err := splitRangeAddressing(b, rightDesc, &leftDesc); err != nil {
			return err
		}
		// End the transaction manually, instead of letting RunTransaction
		// loop do it, in order to provide a split trigger.
		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				SplitTrigger: &roachpb.SplitTrigger{
					LeftDesc:  leftDesc,
					RightDesc: *rightDesc,
				},
			},
		})
		// Commit txn with final batch (RHS desc and meta).
		log.Event(ctx, "commit txn with batch containing RHS descriptor and meta records")
		if err := txn.Run(b); err != nil {
			if _, ok := err.(*roachpb.ConditionFailedError); ok {
				return errors.Errorf("conflict updating range descriptors")
			}
			return err
		}
		return nil
	}); err != nil {
		return reply, roachpb.NewErrorf("split at key %s failed: %s", splitKey, err)
	}
	return reply, nil
}
// splitTrigger is called on a successful commit of a transaction
// containing an AdminSplit operation. It copies the abort cache for
// the new range and recomputes stats for both the existing, left hand
// side (LHS) range and the right hand side (RHS) range. For
// performance it only computes the stats for the original range (the
// left hand side) and infers the RHS stats by subtracting from the
// original stats. We compute the LHS stats because the split key
// computation ensures that we do not create large LHS
// ranges. However, this optimization is only possible if the stats
// are fully accurate. If they contain estimates, stats for both the
// LHS and RHS are computed.
//
// Splits are complicated. A split is initiated when a replica receives an
// AdminSplit request. Note that this request (and other "admin" requests)
// differs from normal requests in that it doesn't go through Raft but instead
// allows the lease holder Replica to act as the orchestrator for the
// distributed transaction that performs the split. As such, this request is
// only executed on the lease holder replica and the request is redirected to
// the lease holder if the recipient is a follower.
//
// Splits do not require the lease for correctness (which is good, because we
// only check that the lease is held at the beginning of the operation, and
// have no way to ensure that it is continually held until the end). Followers
// could perform splits too, and the only downside would be that if two splits
// were attempted concurrently (or a split and a ChangeReplicas), one would
// fail. The lease is used to designate one replica for this role and avoid
// wasting time on splits that may fail.
//
// The processing of splits is divided into two phases. The first phase occurs
// in Replica.AdminSplit. In that phase, the split-point is computed, and a
// transaction is started which updates both the LHS and RHS range descriptors
// and the meta range addressing information. (If we're splitting a meta2 range
// we'll be updating the meta1 addressing, otherwise we'll be updating the
// meta2 addressing). That transaction includes a special SplitTrigger flag on
// the EndTransaction request. Like all transactions, the requests within the
// transaction are replicated via Raft, including the EndTransaction request.
//
// The second phase of split processing occurs when each replica for the range
// encounters the SplitTrigger. Processing of the SplitTrigger happens below,
// in Replica.splitTrigger. The processing of the SplitTrigger occurs in two
// stages. The first stage operates within the context of an engine.Batch and
// updates all of the on-disk state for the old and new ranges atomically. The
// second stage is invoked when the batch commits and updates the in-memory
// state, creating the new replica in memory and populating its timestamp cache
// and registering it with the store.
//
// There is lots of subtlety here. The easy scenario is that all of the
// replicas process the SplitTrigger before processing any Raft message for RHS
// (right hand side) of the newly split range. Something like:
//
// Node A Node B Node C
// ----------------------------------------------------
// range 1 | | |
// | | |
// SplitTrigger | |
// | SplitTrigger |
// | | SplitTrigger
// | | |
// ----------------------------------------------------
// split finished on A, B and C | |
// | | |
// range 2 | | |
// | ---- MsgVote --> | |
// | ---------------------- MsgVote ---> |
//
// But that ideal ordering is not guaranteed. The split is "finished" when two
// of the replicas have appended the end-txn request containing the
// SplitTrigger to their Raft log. The following scenario is possible:
//
// Node A Node B Node C
// ----------------------------------------------------
// range 1 | | |
// | | |
// SplitTrigger | |
// | SplitTrigger |
// | | |
// ----------------------------------------------------
// split finished on A and B | |
// | | |
// range 2 | | |
// | ---- MsgVote --> | |
// | --------------------- MsgVote ---> ???
// | | |
// | | SplitTrigger
//
// In this scenario, C will create range 2 upon reception of the MsgVote from
// A, though locally that span of keys is still part of range 1. This is
// possible because at the Raft level ranges are identified by integer IDs and
// it isn't until C receives a snapshot of range 2 from the leader that it
// discovers the span of keys it covers. In order to prevent C from fully
// initializing range 2 in this instance, we prohibit applying a snapshot to a
// range if the snapshot overlaps another range. See Store.canApplySnapshotLocked.
//
// But while a snapshot may not have been applied at C, an uninitialized
// Replica was created. An uninitialized Replica is one which belongs to a Raft
// group but for which the range descriptor has not been received. This Replica
// will have participated in the Raft elections. When we're creating the new
// Replica below we take control of this uninitialized Replica and stop it from
// responding to Raft messages by marking it "destroyed". Note that we use the
// Replica.mu.destroyed field for this, but we don't do everything that
// Replica.Destroy does (so we should probably rename that field in light of
// its new uses). In particular we don't touch any data on disk or leave a
// tombstone. This is especially important because leaving a tombstone would
// prevent the legitimate recreation of this replica.
//
// There is subtle synchronization here that is currently controlled by the
// Store.processRaft goroutine. In particular, the serial execution of
// Replica.handleRaftReady by Store.processRaft ensures that an uninitialized
// RHS won't be concurrently executing in Replica.handleRaftReady because we're
// currently running on that goroutine (i.e. Replica.splitTrigger is called on
// the processRaft goroutine).
//
// TODO(peter): The above synchronization needs to be fixed. Using a single
// goroutine for executing Replica.handleRaftReady is undesirable from a
// performance perspective. Likely we will have to add a mutex to Replica to
// protect handleRaftReady and to grab that mutex below when marking the
// uninitialized Replica as "destroyed". Hopefully we'll also be able to remove
// Store.processRaftMu.
//
// Note that in this more complex scenario, A (which performed the SplitTrigger
// first) will create the associated Raft group for range 2 and start
// campaigning immediately. It is possible for B to receive MsgVote requests
// before it has applied the SplitTrigger as well. Both B and C will vote for A
// (and preserve the records of that vote in their HardState). It is critically
// important for Raft correctness that we do not lose the records of these
// votes. After electing A the Raft leader for range 2, A will then attempt to
// send a snapshot to B and C and we'll fall into the situation above where a
// snapshot is received for a range before it has finished splitting from its
// sibling and is thus rejected. An interesting subtlety here: A will send a
// snapshot to B and C because when range 2 is initialized we were careful to
// synthesize its HardState to set its Raft log index to 10. If we had instead
// used log index 0, Raft would have believed the group to be empty, but the
// RHS has something. Using a non-zero initial log index causes Raft to believe
// that there is a discarded prefix to the log and will thus send a snapshot to
// followers.
//
// A final point of clarification: when we split a range we're splitting the
// data the range contains. But we're not forking or splitting the associated
// Raft group. Instead, we're creating a new Raft group to control the RHS of
// the split. That Raft group is starting from an empty Raft log (positioned at
// log entry 10) and a snapshot of the RHS of the split range.
//
// After the split trigger returns, the on-disk state of the right-hand side
// will be suitable for instantiating the right hand side Replica, and
// a suitable trigger is returned, along with the updated stats which represent
// the LHS delta caused by the split (i.e. all writes in the current batch
// which went to the left-hand side, minus the kv pairs which moved to the
// RHS).
//
// These stats are suitable for returning up the callstack like those for
// regular commands; the corresponding delta for the RHS is part of the
// returned trigger and is handled by the Store.
func (r *Replica) splitTrigger(
	ctx context.Context,
	batch engine.Batch,
	bothDeltaMS enginepb.MVCCStats, // stats for batch so far
	split *roachpb.SplitTrigger,
	ts hlc.Timestamp,
) (
	enginepb.MVCCStats, // LHS stats delta caused by the split
	*PostCommitTrigger,
	error,
) {
	// TODO(tschottdorf): should have an incoming context from the corresponding
	// EndTransaction, but the plumbing has not been done yet.
	sp := r.store.Tracer().StartSpan("split")
	defer sp.Finish()
	// Sanity-check that the trigger's descriptors cover exactly this range.
	desc := r.Desc()
	if !bytes.Equal(desc.StartKey, split.LeftDesc.StartKey) ||
		!bytes.Equal(desc.EndKey, split.RightDesc.EndKey) {
		return enginepb.MVCCStats{}, nil, errors.Errorf("range does not match splits: (%s-%s) + (%s-%s) != %s",
			split.LeftDesc.StartKey, split.LeftDesc.EndKey,
			split.RightDesc.StartKey, split.RightDesc.EndKey, r)
	}
	// Preserve stats for pre-split range, excluding the current batch.
	origBothMS := r.GetMVCCStats()
	// TODO(d4l3k): we should check which side of the split is smaller
	// and compute stats for it instead of having a constraint that the
	// left hand side is smaller.
	// Compute (absolute) stats for LHS range. This means that no more writes
	// to the LHS must happen below this point.
	leftMS, err := ComputeStatsForRange(&split.LeftDesc, batch, ts.WallTime)
	if err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to compute stats for LHS range after split")
	}
	log.Event(ctx, "computed stats for left hand side range")
	// Copy the last replica GC timestamp. This value is unreplicated,
	// which is why the MVCC stats are set to nil on calls to
	// MVCCPutProto.
	replicaGCTS, err := r.getLastReplicaGCTimestamp()
	if err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to fetch last replica GC timestamp")
	}
	if err := engine.MVCCPutProto(ctx, batch, nil, keys.RangeLastReplicaGCTimestampKey(split.RightDesc.RangeID), hlc.ZeroTimestamp, nil, &replicaGCTS); err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to copy last replica GC timestamp")
	}
	// Initialize the RHS range's abort cache by copying the LHS's.
	seqCount, err := r.abortCache.CopyInto(batch, &bothDeltaMS, split.RightDesc.RangeID)
	if err != nil {
		// TODO(tschottdorf): ReplicaCorruptionError.
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to copy abort cache to RHS split range")
	}
	log.Eventf(ctx, "copied abort cache (%d entries)", seqCount)
	// Initialize the right-hand lease to be the same as the left-hand lease.
	// This looks like an innocuous performance improvement, but it's more than
	// that - it ensures that we properly initialize the timestamp cache, which
	// is only populated on the lease holder, from that of the original Range.
	// We found out about a regression here the hard way in #7899. Prior to
	// this block, the following could happen:
	// - a client reads key 'd', leaving an entry in the timestamp cache on the
	//   lease holder of [a,e) at the time, node one.
	// - the range [a,e) splits at key 'c'. [c,e) starts out without a lease.
	// - the replicas of [a,e) on nodes one and two both process the split
	//   trigger and thus copy their timestamp caches to the new right-hand side
	//   Replica. However, only node one's timestamp cache contains information
	//   about the read of key 'd' in the first place.
	// - node two becomes the lease holder for [c,e). Its timestamp cache does
	//   not know about the read at 'd' which happened at the beginning.
	// - node two can illegally propose a write to 'd' at a lower timestamp.
	{
		leftLease, err := loadLease(ctx, r.store.Engine(), r.RangeID)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to load lease")
		}
		replica, found := split.RightDesc.GetReplicaDescriptor(leftLease.Replica.StoreID)
		if !found {
			return enginepb.MVCCStats{}, nil, errors.Errorf(
				"pre-split lease holder %+v not found in post-split descriptor %+v",
				leftLease.Replica, split.RightDesc,
			)
		}
		rightLease := leftLease
		rightLease.Replica = replica
		if err := setLease(
			ctx, batch, &bothDeltaMS, split.RightDesc.RangeID, rightLease,
		); err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to seed right-hand side lease")
		}
	}
	// Compute (absolute) stats for RHS range.
	var rightMS enginepb.MVCCStats
	if origBothMS.ContainsEstimates || bothDeltaMS.ContainsEstimates {
		// Because either the original stats or the delta stats contain
		// estimate values, we cannot perform arithmetic to determine the
		// new range's stats. Instead, we must recompute by iterating
		// over the keys and counting.
		rightMS, err = ComputeStatsForRange(&split.RightDesc, batch, ts.WallTime)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to compute stats for RHS range after split")
		}
	} else {
		// Because neither the original stats or the delta stats contain
		// estimate values, we can safely perform arithmetic to determine the
		// new range's stats. The calculation looks like:
		//   rhs_ms = orig_both_ms - orig_left_ms + right_delta_ms
		//          = orig_both_ms - left_ms + left_delta_ms + right_delta_ms
		//          = orig_both_ms - left_ms + delta_ms
		// where the following extra helper variables are used:
		// - orig_left_ms: the left-hand side key range, before the split
		// - (left|right)_delta_ms: the contributions to bothDeltaMS in this batch,
		//   itemized by the side of the split.
		//
		// Note that the result of that computation never has ContainsEstimates
		// set due to none of the inputs having it.
		// Start with the full stats before the split.
		rightMS = origBothMS
		// Remove stats from the left side of the split, at the same time adding
		// the batch contributions for the right-hand side.
		rightMS.Subtract(leftMS)
		rightMS.Add(bothDeltaMS)
	}
	// Now that we've computed the stats for the RHS so far, we persist them.
	// This looks a bit more complicated than it really is: updating the stats
	// also changes the stats, and we write not only the stats but a complete
	// initial state. Additionally, since bothDeltaMS is tracking writes to
	// both sides, we need to update it as well.
	{
		preRightMS := rightMS // for bothDeltaMS
		// Account for MVCCStats' own contribution to the RHS range's statistics.
		if err := engine.AccountForSelf(&rightMS, split.RightDesc.RangeID); err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to account for enginepb.MVCCStats's own stats impact")
		}
		// Writing the initial state is subtle since this also seeds the Raft
		// group. We are writing to the right hand side's Raft group state in this
		// batch so we need to synchronize with anything else that could be
		// touching that replica's Raft state. Specifically, we want to prohibit an
		// uninitialized Replica from receiving a message for the right hand side
		// range and performing raft processing. This is achieved by serializing
		// execution of uninitialized Replicas in Store.processRaft and ensuring
		// that no uninitialized Replica is being processed while an initialized
		// one (like the one currently being split) is being processed.
		//
		// Note also that it is crucial that writeInitialState *absorbs* an
		// existing HardState (which might contain a cast vote). We load the
		// existing HardState from the underlying engine instead of the batch
		// because batch reads are from a snapshot taken at the point in time when
		// the first read was performed on the batch. This last requirement is not
		// currently needed due to the uninitialized Replica synchronization
		// mentioned above, but future work will relax that synchronization, moving
		// it from before the point that batch was created to this method. We want
		// to see any writes to the hard state that were performed between the
		// creation of the batch and that synchronization point. The only drawback
		// to not reading from the batch is that we won't see any writes to the
		// right hand side's hard state that were previously made in the batch
		// (which should be impossible).
		oldHS, err := loadHardState(ctx, r.store.Engine(), split.RightDesc.RangeID)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to load hard state")
		}
		rightMS, err = writeInitialState(ctx, batch, rightMS, split.RightDesc, oldHS)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to write initial state")
		}
		bothDeltaMS.Subtract(preRightMS)
		bothDeltaMS.Add(rightMS)
	}
	// Compute how much data the left-hand side has shed by splitting.
	// We've already recomputed that in absolute terms, so all we need to do is
	// to turn it into a delta so the upstream machinery can digest it.
	leftDeltaMS := leftMS                  // start with new left-hand side absolute stats
	leftDeltaMS.Subtract(r.GetMVCCStats()) // subtract pre-split absolute stats
	leftDeltaMS.ContainsEstimates = false  // if there were any, recomputation removed them
	// Perform a similar computation for the right hand side. The difference
	// is that there isn't yet a Replica which could apply these stats, so
	// they will go into the trigger to make the Store (which keeps running
	// counters) aware.
	rightDeltaMS := bothDeltaMS
	rightDeltaMS.Subtract(leftDeltaMS)
	trigger := &PostCommitTrigger{
		// This makes sure that no reads are happening in parallel; see #3148.
		noConcurrentReads: true,
		split: &postCommitSplit{
			SplitTrigger: *split,
			RightDeltaMS: rightDeltaMS,
		},
	}
	return leftDeltaMS, trigger, nil
}
// AdminMerge extends this range to subsume the range that comes next
// in the key space. The merge is performed inside of a distributed
// transaction which writes the left hand side range descriptor (the
// subsuming range) and deletes the range descriptor for the right
// hand side range (the subsumed range). It also updates the range
// addressing metadata. The handover of responsibility for the
// reassigned key range is carried out seamlessly through a merge
// trigger carried out as part of the commit of that transaction. A
// merge requires that the two ranges are collocated on the same set
// of replicas.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. See the
// comment of "AdminSplit" for more information on this pattern.
func (r *Replica) AdminMerge(
	ctx context.Context, args roachpb.AdminMergeRequest, origLeftDesc *roachpb.RangeDescriptor,
) (roachpb.AdminMergeResponse, *roachpb.Error) {
	var reply roachpb.AdminMergeResponse

	if origLeftDesc.EndKey.Equal(roachpb.RKeyMax) {
		// Merging the final range doesn't make sense.
		return reply, roachpb.NewErrorf("cannot merge final range")
	}

	updatedLeftDesc := *origLeftDesc

	// Lookup right hand side range (subsumed). This really belongs
	// inside the transaction for consistency, but it is important (for
	// transaction record placement) that the first action inside the
	// transaction is the conditional put to change the left hand side's
	// descriptor end key. We look up the descriptor here only to get
	// the new end key and then repeat the lookup inside the
	// transaction.
	{
		rightRng := r.store.LookupReplica(origLeftDesc.EndKey, nil)
		if rightRng == nil {
			return reply, roachpb.NewErrorf("ranges not collocated")
		}

		updatedLeftDesc.EndKey = rightRng.Desc().EndKey
		log.Infof(ctx, "initiating a merge of %s into this range", rightRng)
	}

	// Pass the caller's ctx into the transaction (rather than context.TODO())
	// so log tags and tracing propagate into the merge txn, consistent with
	// the transaction run by ChangeReplicas.
	if err := r.store.DB().Txn(ctx, func(txn *client.Txn) error {
		log.Event(ctx, "merge closure begins")
		// Update the range descriptor for the receiving range. This must be
		// the first write of the transaction (see below).
		{
			b := txn.NewBatch()
			leftDescKey := keys.RangeDescriptorKey(updatedLeftDesc.StartKey)
			if err := updateRangeDescriptor(b, leftDescKey, origLeftDesc, &updatedLeftDesc); err != nil {
				return err
			}
			// Commit this batch on its own to ensure that the transaction record
			// is created in the right place (our triggers rely on this).
			log.Event(ctx, "updating left descriptor")
			if err := txn.Run(b); err != nil {
				return err
			}
		}

		// Do a consistent read of the right hand side's range descriptor.
		rightDescKey := keys.RangeDescriptorKey(origLeftDesc.EndKey)
		var rightDesc roachpb.RangeDescriptor
		if err := txn.GetProto(rightDescKey, &rightDesc); err != nil {
			return err
		}

		// Verify that the two ranges are mergeable: adjacent, unchanged since
		// the lookup above, and collocated on the same replica set.
		if !bytes.Equal(origLeftDesc.EndKey, rightDesc.StartKey) {
			// Should never happen, but just in case.
			return errors.Errorf("ranges are not adjacent; %s != %s", origLeftDesc.EndKey, rightDesc.StartKey)
		}
		if !bytes.Equal(rightDesc.EndKey, updatedLeftDesc.EndKey) {
			// This merge raced with a split of the right-hand range.
			// TODO(bdarnell): needs a test.
			return errors.Errorf("range changed during merge; %s != %s", rightDesc.EndKey, updatedLeftDesc.EndKey)
		}
		if !replicaSetsEqual(origLeftDesc.Replicas, rightDesc.Replicas) {
			return errors.Errorf("ranges not collocated")
		}

		b := txn.NewBatch()

		// Remove the range descriptor for the deleted range.
		b.Del(rightDescKey)

		if err := mergeRangeAddressing(b, origLeftDesc, &updatedLeftDesc); err != nil {
			return err
		}

		// End the transaction manually instead of letting RunTransaction
		// loop do it, in order to provide a merge trigger.
		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				MergeTrigger: &roachpb.MergeTrigger{
					LeftDesc:  updatedLeftDesc,
					RightDesc: rightDesc,
				},
			},
		})
		log.Event(ctx, "attempting commit")
		return txn.Run(b)
	}); err != nil {
		return reply, roachpb.NewErrorf("merge of range into %d failed: %s", origLeftDesc.RangeID, err)
	}

	return reply, nil
}
// mergeTrigger is called on a successful commit of an AdminMerge
// transaction. It recomputes stats for the receiving range, absorbs the
// RHS range's data and abort cache, removes the RHS range-ID-local
// metadata, and reports the resulting stats change to the caller via ms.
//
// TODO(tschottdorf): give mergeTrigger more idiomatic stats computation as
// in splitTrigger.
func (r *Replica) mergeTrigger(
	ctx context.Context,
	batch engine.Batch,
	ms *enginepb.MVCCStats,
	merge *roachpb.MergeTrigger,
	ts hlc.Timestamp,
) (*PostCommitTrigger, error) {
	// Sanity-check that the trigger's descriptors line up with this replica.
	desc := r.Desc()
	if !bytes.Equal(desc.StartKey, merge.LeftDesc.StartKey) {
		return nil, errors.Errorf("LHS range start keys do not match: %s != %s",
			desc.StartKey, merge.LeftDesc.StartKey)
	}

	if !desc.EndKey.Less(merge.LeftDesc.EndKey) {
		return nil, errors.Errorf("original LHS end key is not less than the post merge end key: %s >= %s",
			desc.EndKey, merge.LeftDesc.EndKey)
	}

	rightRangeID := merge.RightDesc.RangeID
	if rightRangeID <= 0 {
		return nil, errors.Errorf("RHS range ID must be provided: %d", rightRangeID)
	}

	{
		// TODO(peter,tschottdorf): This is necessary but likely not
		// sufficient. The right hand side of the merge can still race on
		// reads. See #8630.
		//
		// TODO(peter): We need to hold the subsumed range's raftMu until the
		// Store.MergeRange is invoked. Currently we release it when this method
		// returns which isn't correct.
		subsumedRng, err := r.store.GetReplica(rightRangeID)
		if err != nil {
			panic(err)
		}
		defer subsumedRng.raftUnlock(subsumedRng.raftLock())
	}

	// Compute stats for premerged range, including current transaction.
	var mergedMS = r.GetMVCCStats()
	mergedMS.Add(*ms)
	// We will recompute the stats below and update the state, so when the
	// batch commits it has already taken ms into account.
	*ms = enginepb.MVCCStats{}

	// Add in stats for right hand side of merge, excluding system-local
	// stats, which will need to be recomputed.
	var rightMS enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(ctx, batch, rightRangeID, &rightMS); err != nil {
		return nil, err
	}
	rightMS.SysBytes, rightMS.SysCount = 0, 0
	mergedMS.Add(rightMS)

	// Copy the RHS range's abort cache to the new LHS one.
	_, err := r.abortCache.CopyFrom(ctx, batch, &mergedMS, rightRangeID)
	if err != nil {
		return nil, errors.Errorf("unable to copy abort cache to merged range: %s", err)
	}

	// Remove the RHS range's metadata. Note that we don't need to
	// keep track of stats here, because we already set the right range's
	// system-local stats contribution to 0.
	localRangeIDKeyPrefix := keys.MakeRangeIDPrefix(rightRangeID)
	if _, _, _, err := engine.MVCCDeleteRange(ctx, batch, nil, localRangeIDKeyPrefix, localRangeIDKeyPrefix.PrefixEnd(), math.MaxInt64, hlc.ZeroTimestamp, nil, false); err != nil {
		return nil, errors.Errorf("cannot remove range metadata %s", err)
	}

	// Add in the stats for the RHS range's range keys.
	iter := batch.NewIterator(false)
	defer iter.Close()
	localRangeKeyStart := engine.MakeMVCCMetadataKey(keys.MakeRangeKeyPrefix(merge.RightDesc.StartKey))
	localRangeKeyEnd := engine.MakeMVCCMetadataKey(keys.MakeRangeKeyPrefix(merge.RightDesc.EndKey))
	msRange, err := iter.ComputeStats(localRangeKeyStart, localRangeKeyEnd, ts.WallTime)
	if err != nil {
		return nil, errors.Errorf("unable to compute RHS range's local stats: %s", err)
	}
	mergedMS.Add(msRange)

	// Set stats for updated range.
	if err := setMVCCStats(ctx, batch, r.RangeID, mergedMS); err != nil {
		return nil, errors.Errorf("unable to write MVCC stats: %s", err)
	}

	// Report the merged stats back to the caller as a delta against the
	// pre-merge absolute stats.
	mergedMS.Subtract(r.GetMVCCStats())
	*ms = mergedMS

	// Clear the timestamp cache. In case both the LHS and RHS replicas
	// held their respective range leases, we could merge the timestamp
	// caches for efficiency. But it's unlikely and not worth the extra
	// logic and potential for error.
	r.mu.Lock()
	r.mu.tsCache.Clear(r.store.Clock())
	r.mu.Unlock()

	trigger := &PostCommitTrigger{
		// This makes sure that no reads are happening in parallel; see #3148.
		noConcurrentReads: true,
		merge: &postCommitMerge{
			MergeTrigger: *merge,
		},
	}
	return trigger, nil
}
// changeReplicasTrigger assembles the post-commit work for a committed
// ChangeReplicasTrigger: queueing this replica for GC when it is the one
// being removed, otherwise re-queueing for a possible split, gossiping the
// first range if applicable, and installing the updated range descriptor.
func (r *Replica) changeReplicasTrigger(
	ctx context.Context,
	batch engine.Batch,
	change *roachpb.ChangeReplicasTrigger,
) *PostCommitTrigger {
	var result *PostCommitTrigger

	removingSelf := change.ChangeType == roachpb.REMOVE_REPLICA &&
		r.store.StoreID() == change.Replica.StoreID
	if removingSelf {
		// Schedule ourselves for garbage collection. This wants to run as
		// late as possible, maximizing the chances that the other nodes have
		// finished this command as well (since processing the removal from
		// the queue looks up the Range at the lease holder; being too early
		// here turns this into a no-op).
		result = updateTrigger(result, &PostCommitTrigger{
			addToReplicaGCQueue: true,
		})
	} else {
		// After a successful replica addition or removal, check whether the
		// range needs to be split. Splitting usually takes precedence over
		// replication via configuration of the split and replicate queues,
		// but if the split occurs concurrently with the replicas change the
		// split can fail and won't retry until the next scanner cycle.
		// Re-queuing the replica here removes that latency.
		result = updateTrigger(result, &PostCommitTrigger{
			maybeAddToSplitQueue: true,
		})
	}

	// Gossip the first range whenever its descriptor changes. We also gossip
	// it on lease-holder changes, but that might not have happened if a
	// replica was added or a non-lease-holder replica was removed. We attempt
	// the gossip even from a removed replica in case it was the lease holder
	// and still holds the lease.
	if r.IsFirstRange() {
		result = updateTrigger(result, &PostCommitTrigger{
			gossipFirstRange: true,
		})
	}

	// Hand the updated descriptor (new replica set and next replica ID) to
	// the post-commit machinery.
	updatedDesc := *r.Desc()
	updatedDesc.Replicas = change.UpdatedReplicas
	updatedDesc.NextReplicaID = change.NextReplicaID
	result = updateTrigger(result, &PostCommitTrigger{
		desc: &updatedDesc,
	})

	return result
}
// ChangeReplicas adds or removes a replica of a range. The change is performed
// in a distributed transaction and takes effect when that transaction is committed.
// When removing a replica, only the NodeID and StoreID fields of the Replica are used.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. See the
// comment of "AdminSplit" for more information on this pattern.
//
// Changing the replicas for a range is complicated. A change is initiated by
// the "replicate" queue when it encounters a range which has too many
// replicas, too few replicas or requires rebalancing. Addition and removal of
// a replica is divided into four phases. The first phase, which occurs in
// Replica.ChangeReplicas, is performed via a distributed transaction which
// updates the range descriptor and the meta range addressing information. This
// transaction includes a special ChangeReplicasTrigger on the EndTransaction
// request. A ConditionalPut of the RangeDescriptor implements the optimistic
// lock on the RangeDescriptor mentioned previously. Like all transactions, the
// requests within the transaction are replicated via Raft, including the
// EndTransaction request.
//
// The second phase of processing occurs when the batch containing the
// EndTransaction is proposed to raft. This proposing occurs on whatever
// replica received the batch, usually, but not always the range lease
// holder. defaultProposeRaftCommandLocked notices that the EndTransaction
// contains a ChangeReplicasTrigger and proposes a ConfChange to Raft (via
// raft.RawNode.ProposeConfChange).
//
// The ConfChange is propagated to all of the replicas similar to a normal Raft
// command, though additional processing is done inside of Raft. A Replica
// encounters the ConfChange in Replica.handleRaftReady and executes it using
// raft.RawNode.ApplyConfChange. If a new replica was added the Raft leader
// will start sending it heartbeat messages and attempting to bring it up to
// date. If a replica was removed, it is at this point that the Raft leader
// will stop communicating with it.
//
// The fourth phase of change replicas occurs when each replica for the range
// encounters the ChangeReplicasTrigger when applying the EndTransaction
// request. The replica will update its local range descriptor so as to contain
// the new set of replicas. If the replica is the one that is being removed, it
// will queue itself for removal with replicaGCQueue.
//
// Note that a removed replica may not see the EndTransaction containing the
// ChangeReplicasTrigger. The ConfChange operation will be applied as soon as a
// quorum of nodes have committed it. If the removed replica is down or the
// message is dropped for some reason the removed replica will not be
// notified. The replica GC queue will eventually discover and cleanup this
// state.
//
// When a new replica is added, it will have to catch up to the state of the
// other replicas. The Raft leader automatically handles this by either sending
// the new replica Raft log entries to apply, or by generating and sending a
// snapshot. See Replica.Snapshot and Replica.Entries.
//
// Note that Replica.ChangeReplicas returns when the distributed transaction
// has been committed to a quorum of replicas in the range. The actual
// replication of data occurs asynchronously via a snapshot or application of
// Raft log entries. This is important for the replicate queue to be aware
// of. A node can process hundreds or thousands of ChangeReplicas operations
// per second even though the actual replication of data proceeds at a much
// slower base. In order to avoid having this background replication overwhelm
// the system, replication is throttled via a reservation system. When
// allocating a new replica for a range, the replicate queue reserves space for
// that replica on the target store via a ReservationRequest. (See
// StorePool.reserve). The reservation is fulfilled when the snapshot is
// applied.
//
// TODO(peter): There is a rare scenario in which a replica can be brought up
// to date via Raft log replay. In this scenario, the reservation will be left
// dangling until it expires. See #7849.
//
// TODO(peter): Describe preemptive snapshots. Preemptive snapshots are needed
// for the replicate queue to function properly. Currently the replicate queue
// will fire off as many replica additions as possible until it starts getting
// reservations denied at which point it will ignore the replica until the next
// scanner cycle.
func (r *Replica) ChangeReplicas(
	ctx context.Context,
	changeType roachpb.ReplicaChangeType,
	repDesc roachpb.ReplicaDescriptor,
	desc *roachpb.RangeDescriptor,
) error {
	// Scan the existing replica set: remember the index of an exact
	// (NodeID, StoreID) match and whether the target node already hosts
	// any replica of this range.
	repDescIdx := -1  // tracks NodeID && StoreID
	nodeUsed := false // tracks NodeID only
	for i, existingRep := range desc.Replicas {
		nodeUsedByExistingRep := existingRep.NodeID == repDesc.NodeID
		nodeUsed = nodeUsed || nodeUsedByExistingRep
		if nodeUsedByExistingRep && existingRep.StoreID == repDesc.StoreID {
			repDescIdx = i
			repDesc.ReplicaID = existingRep.ReplicaID
			break
		}
	}

	rangeID := desc.RangeID
	// Work on a copy with its own replica slice so the caller's descriptor
	// is never mutated.
	updatedDesc := *desc
	updatedDesc.Replicas = append([]roachpb.ReplicaDescriptor(nil), desc.Replicas...)

	switch changeType {
	case roachpb.ADD_REPLICA:
		// If the replica exists on the remote node, no matter in which store,
		// abort the replica add.
		if nodeUsed {
			return errors.Errorf("%s: unable to add replica %v which is already present", r, repDesc)
		}

		log.Event(ctx, "requesting reservation")
		// Before we try to add a new replica, we first need to secure a
		// reservation for the replica on the receiving store.
		if err := r.store.allocator.storePool.reserve(
			r.store.Ident,
			repDesc.StoreID,
			rangeID,
			r.GetMVCCStats().Total(),
		); err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}
		log.Event(ctx, "reservation granted")

		// Prohibit premature raft log truncation. We set the pending index to 1
		// here until we determine what it is below. This removes a small window of
		// opportunity for the raft log to get truncated after the snapshot is
		// generated.
		if err := r.setPendingSnapshotIndex(1); err != nil {
			return err
		}
		defer r.clearPendingSnapshotIndex()

		// Send a pre-emptive snapshot. Note that the replica to which this
		// snapshot is addressed has not yet had its replica ID initialized; this
		// is intentional, and serves to avoid the following race with the replica
		// GC queue:
		//
		// - snapshot received, a replica is lazily created with the "real" replica ID
		// - the replica is eligible for GC because it is not yet a member of the range
		// - GC queue runs, creating a raft tombstone with the replica's ID
		// - the replica is added to the range
		// - lazy creation of the replica fails due to the raft tombstone
		//
		// Instead, the replica GC queue will create a tombstone with replica ID
		// zero, which is never legitimately used, and thus never interferes with
		// raft operations. Racing with the replica GC queue can still partially
		// negate the benefits of pre-emptive snapshots, but that is a recoverable
		// degradation, not a catastrophic failure.
		snap, err := r.GetSnapshot(ctx)
		log.Event(ctx, "generated snapshot")
		if err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}

		fromRepDesc, err := r.GetReplicaDescriptor()
		if err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}

		// A new replica must not carry a ReplicaID yet; it is assigned from
		// updatedDesc.NextReplicaID below, after the snapshot is sent.
		if repDesc.ReplicaID != 0 {
			return errors.Errorf(
				"must not specify a ReplicaID (%d) for new Replica",
				repDesc.ReplicaID,
			)
		}

		if err := r.setPendingSnapshotIndex(snap.Metadata.Index); err != nil {
			return err
		}

		req := &RaftMessageRequest{
			RangeID:     r.RangeID,
			FromReplica: fromRepDesc,
			ToReplica:   repDesc,
			Message: raftpb.Message{
				Type:     raftpb.MsgSnap,
				To:       0, // special cased ReplicaID for preemptive snapshots
				From:     uint64(fromRepDesc.ReplicaID),
				Term:     snap.Metadata.Term,
				Snapshot: snap,
			},
		}
		if err := r.store.ctx.Transport.SendSync(ctx, req); err != nil {
			return errors.Wrapf(err, "%s: change replicas aborted due to failed preemptive snapshot", r)
		}

		repDesc.ReplicaID = updatedDesc.NextReplicaID
		updatedDesc.NextReplicaID++
		updatedDesc.Replicas = append(updatedDesc.Replicas, repDesc)

	case roachpb.REMOVE_REPLICA:
		// If that exact node-store combination does not have the replica,
		// abort the removal.
		if repDescIdx == -1 {
			return errors.Errorf("%s: unable to remove replica %v which is not present", r, repDesc)
		}
		// Swap-remove: order of the replica slice is not significant.
		updatedDesc.Replicas[repDescIdx] = updatedDesc.Replicas[len(updatedDesc.Replicas)-1]
		updatedDesc.Replicas = updatedDesc.Replicas[:len(updatedDesc.Replicas)-1]
	}

	descKey := keys.RangeDescriptorKey(desc.StartKey)

	if err := r.store.DB().Txn(ctx, func(txn *client.Txn) error {
		log.Event(ctx, "attempting txn")
		txn.Proto.Name = replicaChangeTxnName
		// TODO(tschottdorf): oldDesc is used for sanity checks related to #7224.
		// Remove when that has been solved. The failure mode is likely based on
		// prior divergence of the Replica (in which case the check below does not
		// fire because everything reads from the local, diverged, set of data),
		// so we don't expect to see this fail in practice ever.
		oldDesc := new(roachpb.RangeDescriptor)
		if err := txn.GetProto(descKey, oldDesc); err != nil {
			return err
		}
		log.Infof(ctx, "change replicas: read existing descriptor %+v", oldDesc)

		{
			b := txn.NewBatch()

			// Important: the range descriptor must be the first thing touched in the transaction
			// so the transaction record is co-located with the range being modified.
			if err := updateRangeDescriptor(b, descKey, desc, &updatedDesc); err != nil {
				return err
			}

			// Run transaction up to this point to create txn record early (see #9265).
			if err := txn.Run(b); err != nil {
				return err
			}
		}

		// Log replica change into range event log.
		if err := r.store.logChange(txn, changeType, repDesc, updatedDesc); err != nil {
			return err
		}

		// End the transaction manually instead of letting RunTransaction
		// loop do it, in order to provide a commit trigger.
		b := txn.NewBatch()

		// Update range descriptor addressing record(s).
		if err := updateRangeAddressing(b, &updatedDesc); err != nil {
			return err
		}

		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				ChangeReplicasTrigger: &roachpb.ChangeReplicasTrigger{
					ChangeType:      changeType,
					Replica:         repDesc,
					UpdatedReplicas: updatedDesc.Replicas,
					NextReplicaID:   updatedDesc.NextReplicaID,
				},
			},
		})
		if err := txn.Run(b); err != nil {
			log.Event(ctx, err.Error())
			return err
		}

		// Sanity check (see #7224 above): the descriptor we read must match
		// the one the CPut was conditioned on (zero RangeID means it was
		// absent, which is tolerated).
		if oldDesc.RangeID != 0 && !reflect.DeepEqual(oldDesc, desc) {
			// We read the previous value, it wasn't what we supposedly used in
			// the CPut, but we still overwrote in the CPut above.
			panic(fmt.Sprintf("committed replica change, but oldDesc != assumedOldDesc:\n%+v\n%+v\nnew desc:\n%+v",
				oldDesc, desc, updatedDesc))
		}
		return nil
	}); err != nil {
		log.Event(ctx, err.Error())
		return errors.Wrapf(err, "change replicas of range %d failed", rangeID)
	}

	log.Event(ctx, "txn complete")
	return nil
}
// replicaSetsEqual is used in AdminMerge to ensure that the two ranges are
// collocated: it reports whether a and b reference the same multiset of
// store IDs, ignoring order.
func replicaSetsEqual(a, b []roachpb.ReplicaDescriptor) bool {
	if len(a) != len(b) {
		return false
	}

	// Count store occurrences in a, then cancel them out with b; every
	// counter must return to zero for the sets to match.
	counts := make(map[roachpb.StoreID]int, len(a))
	for _, rep := range a {
		counts[rep.StoreID]++
	}
	for _, rep := range b {
		counts[rep.StoreID]--
	}

	for _, remaining := range counts {
		if remaining != 0 {
			return false
		}
	}
	return true
}
// updateRangeDescriptor adds a ConditionalPut on the range descriptor. The
// conditional put verifies that changes to the range descriptor are made in a
// well-defined order, preventing a scenario where a wayward replica which is
// no longer part of the original Raft group comes back online to form a
// splinter group with a node which was also a former replica, and hijacks the
// range descriptor. This is a last line of defense; other mechanisms should
// prevent rogue replicas from getting this far (see #768).
//
// Note that in addition to using this method to update the on-disk range
// descriptor, a CommitTrigger must be used to update the in-memory
// descriptor; it will not automatically be copied from newDesc.
// TODO(bdarnell): store the entire RangeDescriptor in the CommitTrigger
// and load it automatically instead of reconstructing individual
// changes.
func updateRangeDescriptor(
	b *client.Batch,
	descKey roachpb.Key,
	oldDesc,
	newDesc *roachpb.RangeDescriptor,
) error {
	if err := newDesc.Validate(); err != nil {
		return err
	}

	// Subtle: CPut distinguishes []byte(nil) from interface{}(nil). The
	// former expects an existing-but-empty value; the latter expects the key
	// to be absent. Only populate the interface when there is an old
	// descriptor to condition on.
	var expValue interface{}
	if oldDesc != nil {
		expBytes, marshalErr := protoutil.Marshal(oldDesc)
		if marshalErr != nil {
			return marshalErr
		}
		expValue = expBytes
	}

	newValue, marshalErr := protoutil.Marshal(newDesc)
	if marshalErr != nil {
		return marshalErr
	}

	b.CPut(descKey, newValue, expValue)
	return nil
}
// LeaseInfo returns information about the lease holder for the range. A
// lease request that is still in flight takes precedence over the currently
// active lease.
func (r *Replica) LeaseInfo(
	ctx context.Context, args roachpb.LeaseInfoRequest,
) (roachpb.LeaseInfoResponse, error) {
	var reply roachpb.LeaseInfoResponse
	lease, nextLease := r.getLease()
	switch {
	case nextLease != nil:
		// A lease request is in progress; speculatively return that future
		// lease.
		reply.Lease = nextLease
	case lease != nil:
		reply.Lease = lease
	}
	return reply, nil
}
stability: respond to comment clarifications requested by peter
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
// Author: Jiang-Ming Yang (jiangming.yang@gmail.com)
// Author: Tobias Schottdorf (tobias.schottdorf@gmail.com)
// Author: Bram Gruneir (bram+code@cockroachlabs.com)
package storage
import (
"bytes"
"crypto/sha512"
"encoding/binary"
"fmt"
"math"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/coreos/etcd/raft/raftpb"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/cockroachdb/cockroach/base"
"github.com/cockroachdb/cockroach/build"
"github.com/cockroachdb/cockroach/internal/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/storage/storagebase"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/protoutil"
"github.com/cockroachdb/cockroach/util/timeutil"
"github.com/cockroachdb/cockroach/util/uuid"
)
// errTransactionUnsupported is returned by commands that must not be
// executed inside a client transaction.
var errTransactionUnsupported = errors.New("not supported within a transaction")
// executeCmd switches over the method and multiplexes to execute the appropriate storage API
// command. It returns the response, an error, and a post commit trigger which
// may be actionable even in the case of an error.
// maxKeys is the number of scan results remaining for this batch
// (MaxInt64 for no limit).
func (r *Replica) executeCmd(
	ctx context.Context,
	raftCmdID storagebase.CmdIDKey,
	index int,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.Request,
	reply roachpb.Response,
) (*PostCommitTrigger, *roachpb.Error) {
	ts := h.Timestamp

	// Noop requests carry no work at all.
	if _, ok := args.(*roachpb.NoopRequest); ok {
		return nil, nil
	}

	if err := r.checkCmdHeader(args.Header()); err != nil {
		return nil, roachpb.NewErrorWithTxn(err, h.Txn)
	}

	// If a unittest filter was installed, check for an injected error; otherwise, continue.
	if filter := r.store.ctx.TestingKnobs.TestingCommandFilter; filter != nil {
		filterArgs := storagebase.FilterArgs{Ctx: ctx, CmdID: raftCmdID, Index: index,
			Sid: r.store.StoreID(), Req: args, Hdr: h}
		if pErr := filter(filterArgs); pErr != nil {
			log.Infof(ctx, "test injecting error: %s", pErr)
			return nil, pErr
		}
	}

	// Update the node clock with the serviced request. This maintains a
	// high water mark for all ops serviced, so that received ops
	// without a timestamp specified are guaranteed one higher than any
	// op already executed for overlapping keys.
	r.store.Clock().Update(ts)

	var err error
	var trigger *PostCommitTrigger
	var num int64
	var span *roachpb.Span

	// Note that responses are populated even when an error is returned.
	// TODO(tschottdorf): Change that. IIRC there is nontrivial use of it currently.
	// Dispatch on the concrete request type; each arm unwraps the typed
	// response and delegates to the corresponding Replica method.
	switch tArgs := args.(type) {
	case *roachpb.GetRequest:
		resp := reply.(*roachpb.GetResponse)
		*resp, trigger, err = r.Get(ctx, batch, h, *tArgs)
	case *roachpb.PutRequest:
		resp := reply.(*roachpb.PutResponse)
		*resp, err = r.Put(ctx, batch, ms, h, *tArgs)
	case *roachpb.ConditionalPutRequest:
		resp := reply.(*roachpb.ConditionalPutResponse)
		*resp, err = r.ConditionalPut(ctx, batch, ms, h, *tArgs)
	case *roachpb.InitPutRequest:
		resp := reply.(*roachpb.InitPutResponse)
		*resp, err = r.InitPut(ctx, batch, ms, h, *tArgs)
	case *roachpb.IncrementRequest:
		resp := reply.(*roachpb.IncrementResponse)
		*resp, err = r.Increment(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeleteRequest:
		resp := reply.(*roachpb.DeleteResponse)
		*resp, err = r.Delete(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeleteRangeRequest:
		resp := reply.(*roachpb.DeleteRangeResponse)
		*resp, span, num, err = r.DeleteRange(ctx, batch, ms, h, maxKeys, *tArgs)
	case *roachpb.ScanRequest:
		resp := reply.(*roachpb.ScanResponse)
		*resp, span, num, trigger, err = r.Scan(ctx, batch, h, maxKeys, *tArgs)
	case *roachpb.ReverseScanRequest:
		resp := reply.(*roachpb.ReverseScanResponse)
		*resp, span, num, trigger, err = r.ReverseScan(ctx, batch, h, maxKeys, *tArgs)
	case *roachpb.BeginTransactionRequest:
		resp := reply.(*roachpb.BeginTransactionResponse)
		*resp, err = r.BeginTransaction(ctx, batch, ms, h, *tArgs)
	case *roachpb.EndTransactionRequest:
		resp := reply.(*roachpb.EndTransactionResponse)
		*resp, trigger, err = r.EndTransaction(ctx, batch, ms, h, *tArgs)
	case *roachpb.RangeLookupRequest:
		resp := reply.(*roachpb.RangeLookupResponse)
		*resp, trigger, err = r.RangeLookup(ctx, batch, h, *tArgs)
	case *roachpb.HeartbeatTxnRequest:
		resp := reply.(*roachpb.HeartbeatTxnResponse)
		*resp, err = r.HeartbeatTxn(ctx, batch, ms, h, *tArgs)
	case *roachpb.GCRequest:
		resp := reply.(*roachpb.GCResponse)
		*resp, trigger, err = r.GC(ctx, batch, ms, h, *tArgs)
	case *roachpb.PushTxnRequest:
		resp := reply.(*roachpb.PushTxnResponse)
		*resp, err = r.PushTxn(ctx, batch, ms, h, *tArgs)
	case *roachpb.ResolveIntentRequest:
		resp := reply.(*roachpb.ResolveIntentResponse)
		*resp, err = r.ResolveIntent(ctx, batch, ms, h, *tArgs)
	case *roachpb.ResolveIntentRangeRequest:
		resp := reply.(*roachpb.ResolveIntentRangeResponse)
		*resp, err = r.ResolveIntentRange(ctx, batch, ms, h, *tArgs)
	case *roachpb.MergeRequest:
		resp := reply.(*roachpb.MergeResponse)
		*resp, err = r.Merge(ctx, batch, ms, h, *tArgs)
	case *roachpb.TruncateLogRequest:
		resp := reply.(*roachpb.TruncateLogResponse)
		*resp, trigger, err = r.TruncateLog(ctx, batch, ms, h, *tArgs)
	case *roachpb.RequestLeaseRequest:
		resp := reply.(*roachpb.RequestLeaseResponse)
		*resp, trigger, err = r.RequestLease(ctx, batch, ms, h, *tArgs)
	case *roachpb.TransferLeaseRequest:
		// TransferLease shares RequestLease's response type.
		resp := reply.(*roachpb.RequestLeaseResponse)
		*resp, trigger, err = r.TransferLease(ctx, batch, ms, h, *tArgs)
	case *roachpb.LeaseInfoRequest:
		resp := reply.(*roachpb.LeaseInfoResponse)
		*resp, err = r.LeaseInfo(ctx, *tArgs)
	case *roachpb.ComputeChecksumRequest:
		resp := reply.(*roachpb.ComputeChecksumResponse)
		*resp, trigger, err = r.ComputeChecksum(ctx, batch, ms, h, *tArgs)
	case *roachpb.DeprecatedVerifyChecksumRequest:
		// Deliberately empty: this request type is deprecated and is
		// accepted as a no-op.
	case *roachpb.ChangeFrozenRequest:
		resp := reply.(*roachpb.ChangeFrozenResponse)
		*resp, trigger, err = r.ChangeFrozen(ctx, batch, ms, h, *tArgs)
	default:
		err = errors.Errorf("unrecognized command %s", args.Method())
	}

	// Set the ResumeSpan and NumKeys.
	header := reply.Header()
	header.NumKeys = num
	header.ResumeSpan = span
	reply.SetHeader(header)

	if log.V(2) {
		log.Infof(ctx, "executed %s command %+v: %+v, err=%v", args.Method(), args, reply, err)
	}

	// Create a roachpb.Error by initializing txn from the request/response header.
	var pErr *roachpb.Error
	if err != nil {
		txn := reply.Header().Txn
		if txn == nil {
			txn = h.Txn
		}
		pErr = roachpb.NewErrorWithTxn(err, txn)
	}

	return trigger, pErr
}
// intentsToTrigger wraps any discovered intents in a PostCommitTrigger,
// associating them with the request that encountered them so they can be
// resolved asynchronously after the command applies. Returns nil when
// there is nothing to resolve, allowing callers to pass the result
// through unconditionally.
func intentsToTrigger(intents []roachpb.Intent, args roachpb.Request) *PostCommitTrigger {
	if len(intents) == 0 {
		return nil
	}
	return &PostCommitTrigger{intents: []intentsWithArg{{args: args, intents: intents}}}
}
// Get returns the value for a specified key. Reads at h.Timestamp with
// the header's consistency level; any intents encountered are handed
// back via the PostCommitTrigger for asynchronous resolution.
func (r *Replica) Get(
	ctx context.Context, batch engine.ReadWriter, h roachpb.Header, args roachpb.GetRequest,
) (roachpb.GetResponse, *PostCommitTrigger, error) {
	value, intents, err := engine.MVCCGet(
		ctx, batch, args.Key, h.Timestamp, h.ReadConsistency == roachpb.CONSISTENT, h.Txn)
	reply := roachpb.GetResponse{Value: value}
	return reply, intentsToTrigger(intents, &args), err
}
// Put sets the value for a specified key. Inline values are written
// without an MVCC timestamp; all others use the header timestamp.
func (r *Replica) Put(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.PutRequest,
) (roachpb.PutResponse, error) {
	var reply roachpb.PutResponse
	timestamp := hlc.ZeroTimestamp
	if !args.Inline {
		timestamp = h.Timestamp
	}
	if h.DistinctSpans {
		if distinct, ok := batch.(engine.Batch); ok {
			// Use the distinct batch for both blind and normal ops so that we
			// don't accidentally flush mutations to make them visible to the
			// distinct batch.
			batch = distinct.Distinct()
			defer batch.Close()
		}
	}
	if args.Blind {
		return reply, engine.MVCCBlindPut(ctx, batch, ms, args.Key, timestamp, args.Value, h.Txn)
	}
	return reply, engine.MVCCPut(ctx, batch, ms, args.Key, timestamp, args.Value, h.Txn)
}
// ConditionalPut sets the value for a specified key only if
// the expected value matches. If not, the return value contains
// the actual value.
func (r *Replica) ConditionalPut(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ConditionalPutRequest,
) (roachpb.ConditionalPutResponse, error) {
	var reply roachpb.ConditionalPutResponse
	if h.DistinctSpans {
		if distinct, ok := batch.(engine.Batch); ok {
			// Use the distinct batch for both blind and normal ops so that we
			// don't accidentally flush mutations to make them visible to the
			// distinct batch.
			batch = distinct.Distinct()
			defer batch.Close()
		}
	}
	if args.Blind {
		// Blind variant requested by the caller; dispatch accordingly.
		return reply, engine.MVCCBlindConditionalPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, args.ExpValue, h.Txn)
	}
	return reply, engine.MVCCConditionalPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, args.ExpValue, h.Txn)
}
// InitPut sets the value for a specified key only if it doesn't exist. It
// returns an error if the key exists with an existing value that is different
// from the value provided.
func (r *Replica) InitPut(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.InitPutRequest,
) (roachpb.InitPutResponse, error) {
	err := engine.MVCCInitPut(ctx, batch, ms, args.Key, h.Timestamp, args.Value, h.Txn)
	return roachpb.InitPutResponse{}, err
}
// Increment increments the value (interpreted as varint64 encoded) and
// returns the newly incremented value (encoded as varint64). If no value
// exists for the key, zero is incremented.
func (r *Replica) Increment(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.IncrementRequest,
) (roachpb.IncrementResponse, error) {
	var reply roachpb.IncrementResponse
	var err error
	reply.NewValue, err = engine.MVCCIncrement(ctx, batch, ms, args.Key, h.Timestamp, h.Txn, args.Increment)
	return reply, err
}
// Delete deletes the key and value specified by key.
func (r *Replica) Delete(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.DeleteRequest,
) (roachpb.DeleteResponse, error) {
	err := engine.MVCCDelete(ctx, batch, ms, args.Key, h.Timestamp, h.Txn)
	return roachpb.DeleteResponse{}, err
}
// DeleteRange deletes the range of key/value pairs specified by
// start and end keys. A non-nil resume span is returned when maxKeys
// is reached before the end of the range.
func (r *Replica) DeleteRange(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.DeleteRangeRequest,
) (roachpb.DeleteRangeResponse, *roachpb.Span, int64, error) {
	var reply roachpb.DeleteRangeResponse
	deleted, resumeKey, num, err := engine.MVCCDeleteRange(
		ctx, batch, ms, args.Key, args.EndKey, maxKeys, h.Timestamp, h.Txn, args.ReturnKeys,
	)
	if err != nil {
		return reply, nil, num, err
	}
	reply.Keys = deleted
	var resumeSpan *roachpb.Span
	if resumeKey != nil {
		// The deletion stopped early; tell the caller where to resume.
		s := args.Span
		s.Key = resumeKey
		resumeSpan = &s
	}
	// DeleteRange requires that we retry on push to avoid the lost delete range anomaly.
	if h.Txn != nil {
		txnCopy := h.Txn.Clone()
		txnCopy.RetryOnPush = true
		reply.Txn = &txnCopy
	}
	return reply, resumeSpan, num, err
}
// Scan scans the key range specified by start key through end key in ascending order up to some
// maximum number of results. maxKeys stores the number of scan results remaining for this
// batch (MaxInt64 for no limit).
func (r *Replica) Scan(
	ctx context.Context,
	batch engine.ReadWriter,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.ScanRequest,
) (roachpb.ScanResponse, *roachpb.Span, int64, *PostCommitTrigger, error) {
	span := args.Span
	if maxKeys == 0 {
		// Budget exhausted before we started: return the whole span for resumption.
		return roachpb.ScanResponse{}, &span, 0, nil, nil
	}
	rows, intents, err := engine.MVCCScan(ctx, batch, args.Key, args.EndKey, maxKeys, h.Timestamp,
		h.ReadConsistency == roachpb.CONSISTENT, h.Txn)
	count := int64(len(rows))
	var resumeSpan *roachpb.Span
	if count == maxKeys {
		// The limit was hit; resume just past the last row returned.
		span.Key = rows[count-1].Key.Next()
		resumeSpan = &span
	}
	return roachpb.ScanResponse{Rows: rows}, resumeSpan, count, intentsToTrigger(intents, &args), err
}
// ReverseScan scans the key range specified by start key through end key in descending order up to
// some maximum number of results. maxKeys stores the number of scan results remaining for
// this batch (MaxInt64 for no limit).
func (r *Replica) ReverseScan(
	ctx context.Context,
	batch engine.ReadWriter,
	h roachpb.Header,
	maxKeys int64,
	args roachpb.ReverseScanRequest,
) (roachpb.ReverseScanResponse, *roachpb.Span, int64, *PostCommitTrigger, error) {
	span := args.Span
	if maxKeys == 0 {
		// Budget exhausted before we started: return the whole span for resumption.
		return roachpb.ReverseScanResponse{}, &span, 0, nil, nil
	}
	rows, intents, err := engine.MVCCReverseScan(ctx, batch, args.Key, args.EndKey, maxKeys,
		h.Timestamp, h.ReadConsistency == roachpb.CONSISTENT, h.Txn)
	count := int64(len(rows))
	var resumeSpan *roachpb.Span
	if count == maxKeys {
		// The limit was hit; resume ending at the last (smallest) key returned.
		span.EndKey = rows[count-1].Key
		resumeSpan = &span
	}
	return roachpb.ReverseScanResponse{Rows: rows}, resumeSpan, count, intentsToTrigger(intents, &args), err
}
// verifyTransaction checks that the request carries a transaction in its
// header and that the request key matches the transaction's anchor key.
// Used by the txn-record commands (Begin/End/HeartbeatTxn).
func verifyTransaction(h roachpb.Header, args roachpb.Request) error {
	txn := h.Txn
	if txn == nil {
		return errors.Errorf("no transaction specified to %s", args.Method())
	}
	if key := args.Header().Key; !bytes.Equal(key, txn.Key) {
		return errors.Errorf("request key %s should match txn key %s", key, txn.Key)
	}
	return nil
}
// BeginTransaction writes the initial transaction record. Fails in
// the event that a transaction record is already written. This may
// occur if a transaction is started with a batch containing writes
// to different ranges, and the range containing the txn record fails
// to receive the write batch before a heartbeat or txn push is
// performed first and aborts the transaction.
func (r *Replica) BeginTransaction(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.BeginTransactionRequest,
) (roachpb.BeginTransactionResponse, error) {
	var reply roachpb.BeginTransactionResponse

	// The header must carry a txn whose anchor key matches the request key.
	if err := verifyTransaction(h, &args); err != nil {
		return reply, err
	}
	key := keys.TransactionKey(h.Txn.Key, h.Txn.ID)
	// Work on a clone so the header's txn is never mutated.
	clonedTxn := h.Txn.Clone()
	reply.Txn = &clonedTxn

	// Verify transaction does not already exist.
	txn := roachpb.Transaction{}
	ok, err := engine.MVCCGetProto(ctx, batch, key, hlc.ZeroTimestamp, true, nil, &txn)
	if err != nil {
		return reply, err
	}
	if ok {
		switch txn.Status {
		case roachpb.ABORTED:
			// Check whether someone has come in ahead and already aborted the
			// txn.
			return reply, roachpb.NewTransactionAbortedError()
		case roachpb.PENDING:
			if h.Txn.Epoch > txn.Epoch {
				// On a transaction retry there will be an extant txn record
				// but this run should have an upgraded epoch. The extant txn
				// record may have been pushed or otherwise updated, so update
				// this command's txn and rewrite the record.
				reply.Txn.Update(&txn)
			} else {
				// A record already exists at the same (or a newer) epoch;
				// this looks like a replayed BeginTransaction.
				return reply, roachpb.NewTransactionStatusError(
					fmt.Sprintf("BeginTransaction can't overwrite %s", txn))
			}
		case roachpb.COMMITTED:
			return reply, roachpb.NewTransactionStatusError(
				fmt.Sprintf("BeginTransaction can't overwrite %s", txn),
			)
		default:
			return reply, roachpb.NewTransactionStatusError(
				fmt.Sprintf("bad txn state: %s", txn),
			)
		}
	}

	// Snapshot the GC threshold under the replica mutex.
	r.mu.Lock()
	threshold := r.mu.state.TxnSpanGCThreshold
	r.mu.Unlock()

	// Disallow creation of a transaction record if it's at a timestamp before
	// the TxnSpanGCThreshold, as in that case our transaction may already have
	// been aborted by a concurrent actor which encountered one of our intents
	// (which may have been written before this entry).
	//
	// See #9265.
	if txn.LastActive().Less(threshold) {
		return reply, roachpb.NewTransactionAbortedError()
	}

	// Write the txn record.
	reply.Txn.Writing = true
	return reply, engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, reply.Txn)
}
// EndTransaction either commits or aborts (rolls back) an extant
// transaction according to the args.Commit parameter. Rolling back
// an already rolled-back txn is ok.
//
// On success it resolves local intents synchronously and returns the
// remaining (external) intents via a PostCommitTrigger for asynchronous
// resolution. Several of the error paths below also return a trigger,
// since that is currently the only way to hand intents off on error
// (see #1989).
func (r *Replica) EndTransaction(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.EndTransactionRequest,
) (roachpb.EndTransactionResponse, *PostCommitTrigger, error) {
	var reply roachpb.EndTransactionResponse

	if err := verifyTransaction(h, &args); err != nil {
		return reply, nil, err
	}

	key := keys.TransactionKey(h.Txn.Key, h.Txn.ID)

	// Fetch existing transaction.
	reply.Txn = &roachpb.Transaction{}
	if ok, err := engine.MVCCGetProto(
		ctx, batch, key, hlc.ZeroTimestamp, true, nil, reply.Txn,
	); err != nil {
		return reply, nil, err
	} else if !ok {
		// Return a fresh empty reply because there's an empty Transaction
		// proto in our existing one.
		return roachpb.EndTransactionResponse{},
			nil, roachpb.NewTransactionStatusError("does not exist")
	}

	// Verify that we can either commit it or abort it (according
	// to args.Commit), and also that the Timestamp and Epoch have
	// not suffered regression.
	switch reply.Txn.Status {
	case roachpb.COMMITTED:
		return reply, nil, roachpb.NewTransactionStatusError("already committed")

	case roachpb.ABORTED:
		if !args.Commit {
			// The transaction has already been aborted by another actor.
			// Do not return TransactionAbortedError since the client anyway
			// wanted to abort the transaction.
			externalIntents := r.resolveLocalIntents(ctx, batch, ms, args, reply.Txn)
			if err := updateTxnWithExternalIntents(
				ctx, batch, ms, args, reply.Txn, externalIntents,
			); err != nil {
				return reply, nil, err
			}
			return reply, intentsToTrigger(externalIntents, &args), nil
		}
		// If the transaction was previously aborted by a concurrent
		// writer's push, any intents written are still open. It's only now
		// that we know them, so we return them all for asynchronous
		// resolution (we're currently not able to write on error, but
		// see #1989).
		return reply,
			intentsToTrigger(roachpb.AsIntents(args.IntentSpans, reply.Txn), &args),
			roachpb.NewTransactionAbortedError()

	case roachpb.PENDING:
		if h.Txn.Epoch < reply.Txn.Epoch {
			// TODO(tschottdorf): this leaves the Txn record (and more
			// importantly, intents) dangling; we can't currently write on
			// error. Would panic, but that makes TestEndTransactionWithErrors
			// awkward.
			return reply, nil, roachpb.NewTransactionStatusError(
				fmt.Sprintf("epoch regression: %d", h.Txn.Epoch),
			)
		} else if h.Txn.Epoch == reply.Txn.Epoch && reply.Txn.Timestamp.Less(h.Txn.OrigTimestamp) {
			// The transaction record can only ever be pushed forward, so it's an
			// error if somehow the transaction record has an earlier timestamp
			// than the original transaction timestamp.
			// TODO(tschottdorf): see above comment on epoch regression.
			return reply, nil, roachpb.NewTransactionStatusError(
				fmt.Sprintf("timestamp regression: %s", h.Txn.OrigTimestamp),
			)
		}

	default:
		return reply, nil, roachpb.NewTransactionStatusError(
			fmt.Sprintf("bad txn status: %s", reply.Txn),
		)
	}

	// Take max of requested epoch and existing epoch. The requester
	// may have incremented the epoch on retries.
	if reply.Txn.Epoch < h.Txn.Epoch {
		reply.Txn.Epoch = h.Txn.Epoch
	}

	// Take max of requested priority and existing priority. This isn't
	// terribly useful, but we do it for completeness.
	if reply.Txn.Priority < h.Txn.Priority {
		reply.Txn.Priority = h.Txn.Priority
	}

	// Take max of supplied txn's timestamp and persisted txn's
	// timestamp. It may have been pushed by another transaction.
	// Note that we do not use the batch request timestamp, which for
	// a transaction is always set to the txn's original timestamp.
	reply.Txn.Timestamp.Forward(h.Txn.Timestamp)

	if isEndTransactionExceedingDeadline(reply.Txn.Timestamp, args) {
		reply.Txn.Status = roachpb.ABORTED
		// FIXME(#3037):
		// If the deadline has lapsed, return all the intents for
		// resolution. Unfortunately, since we're (a) returning an error,
		// and (b) not able to write on error (see #1989), we can't write
		// ABORTED into the master transaction record, which remains
		// PENDING, and that's pretty bad.
		return reply,
			intentsToTrigger(roachpb.AsIntents(args.IntentSpans, reply.Txn), &args),
			roachpb.NewTransactionAbortedError()
	}

	// Set transaction status to COMMITTED or ABORTED as per the
	// args.Commit parameter.
	if args.Commit {
		if isEndTransactionTriggeringRetryError(h.Txn, reply.Txn) {
			return reply, nil, roachpb.NewTransactionRetryError()
		}
		reply.Txn.Status = roachpb.COMMITTED
	} else {
		reply.Txn.Status = roachpb.ABORTED
	}

	// Resolve intents on this range synchronously; collect the rest.
	externalIntents := r.resolveLocalIntents(ctx, batch, ms, args, reply.Txn)
	if err := updateTxnWithExternalIntents(ctx, batch, ms, args, reply.Txn, externalIntents); err != nil {
		return reply, nil, err
	}

	// Run triggers if successfully committed.
	var trigger *PostCommitTrigger
	if reply.Txn.Status == roachpb.COMMITTED {
		var err error
		if trigger, err = r.runCommitTrigger(ctx, batch.(engine.Batch), ms, args, reply.Txn); err != nil {
			return reply, nil, NewReplicaCorruptionError(err)
		}
	}

	// Note: there's no need to clear the abort cache state if we've
	// successfully finalized a transaction, as there's no way in
	// which an abort cache entry could have been written (the txn would
	// already have been in state=ABORTED).
	//
	// Summary of transaction replay protection after EndTransaction:
	// When a transactional write gets replayed over its own resolved
	// intents, the write will succeed but only as an intent with a
	// newer timestamp (with a WriteTooOldError). However, the replayed
	// intent cannot be resolved by a subsequent replay of this
	// EndTransaction call because the txn timestamp will be too
	// old. Replays which include a BeginTransaction never succeed
	// because EndTransaction inserts in the write timestamp cache,
	// forcing the BeginTransaction to fail with a transaction retry
	// error. If the replay didn't include a BeginTransaction, any push
	// will immediately succeed as a missing txn record on push sets the
	// transaction to aborted. In both cases, the txn will be GC'd on
	// the slow path.
	trigger = updateTrigger(trigger, intentsToTrigger(externalIntents, &args))

	return reply, trigger, nil
}
// isEndTransactionExceedingDeadline returns true if the transaction
// exceeded its deadline. A nil deadline never triggers.
func isEndTransactionExceedingDeadline(
	t hlc.Timestamp,
	args roachpb.EndTransactionRequest,
) bool {
	deadline := args.Deadline
	return deadline != nil && deadline.Less(t)
}
// isEndTransactionTriggeringRetryError returns true if the
// EndTransactionRequest cannot be committed and needs to return a
// TransactionRetryError.
func isEndTransactionTriggeringRetryError(headerTxn, currentTxn *roachpb.Transaction) bool {
	// If we saw any WriteTooOldErrors, we must restart to avoid lost
	// update anomalies.
	if headerTxn.WriteTooOld {
		return true
	}
	// A txn whose record timestamp moved past its original timestamp
	// has been pushed; otherwise there's nothing further to check.
	pushed := !currentTxn.Timestamp.Equal(headerTxn.OrigTimestamp)
	if !pushed {
		return false
	}
	// A pushed txn must retry when it explicitly requested so, or when
	// it runs at SERIALIZABLE isolation (where commit timestamp must
	// equal the original timestamp).
	return headerTxn.RetryOnPush || headerTxn.Isolation == enginepb.SERIALIZABLE
}
// resolveLocalIntents synchronously resolves any intents that are
// local to this range in the same batch. The remainder are collected
// and returned so that they can be handed off to asynchronous
// processing.
func (r *Replica) resolveLocalIntents(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
) []roachpb.Intent {
	desc := r.Desc()
	var preMergeDesc *roachpb.RangeDescriptor
	if mergeTrigger := args.InternalCommitTrigger.GetMergeTrigger(); mergeTrigger != nil {
		// If this is a merge, then use the post-merge descriptor to determine
		// which intents are local (note that for a split, we want to use the
		// pre-split one instead because it's larger).
		preMergeDesc = desc
		desc = &mergeTrigger.LeftDesc
	}

	// Reuse one iterator/buffer for all resolutions in this batch.
	iterAndBuf := engine.GetIterAndBuf(batch)
	defer iterAndBuf.Cleanup()

	var externalIntents []roachpb.Intent
	for _, span := range args.IntentSpans {
		// Each span is handled in a closure so that a single error can be
		// funneled to the panic below while still using early returns.
		if err := func() error {
			intent := roachpb.Intent{Span: span, Txn: txn.TxnMeta, Status: txn.Status}
			if len(span.EndKey) == 0 {
				// For single-key intents, do a KeyAddress-aware check of
				// whether it's contained in our Range.
				if !containsKey(*desc, span.Key) {
					// Not ours: hand off for async resolution.
					externalIntents = append(externalIntents, intent)
					return nil
				}
				resolveMS := ms
				if preMergeDesc != nil && !containsKey(*preMergeDesc, span.Key) {
					// If this transaction included a merge and the intents
					// are from the subsumed range, ignore the intent resolution
					// stats, as they will already be accounted for during the
					// merge trigger.
					resolveMS = nil
				}
				return engine.MVCCResolveWriteIntentUsingIter(ctx, batch, iterAndBuf, resolveMS, intent)
			}
			// For intent ranges, cut into parts inside and outside our key
			// range. Resolve locally inside, delegate the rest. In particular,
			// an intent range for range-local data is correctly considered local.
			inSpan, outSpans := intersectSpan(span, *desc)
			for _, span := range outSpans {
				outIntent := intent
				outIntent.Span = span
				externalIntents = append(externalIntents, outIntent)
			}
			if inSpan != nil {
				intent.Span = *inSpan
				_, err := engine.MVCCResolveWriteIntentRangeUsingIter(ctx, batch, iterAndBuf, ms, intent, math.MaxInt64)
				return err
			}
			return nil
		}(); err != nil {
			// TODO(tschottdorf): any legitimate reason for this to happen?
			// Figure that out and if not, should still be ReplicaCorruption
			// and not a panic.
			panic(fmt.Sprintf("error resolving intent at %s on end transaction [%s]: %s", span, txn.Status, err))
		}
	}
	return externalIntents
}
// updateTxnWithExternalIntents persists the transaction record with
// updated status (& possibly timestamp). If we've already resolved
// all intents locally, we actually delete the record right away - no
// use in keeping it around.
func updateTxnWithExternalIntents(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
	externalIntents []roachpb.Intent,
) error {
	key := keys.TransactionKey(txn.Key, txn.ID)
	if txnAutoGC && len(externalIntents) == 0 {
		// Everything was resolved locally: drop the record entirely.
		if log.V(2) {
			log.Infof(ctx, "auto-gc'ed %s (%d intents)", txn.ID.Short(), len(args.IntentSpans))
		}
		return engine.MVCCDelete(ctx, batch, ms, key, hlc.ZeroTimestamp, nil /* txn */)
	}
	// Record the unresolved spans on the txn record for later cleanup.
	spans := make([]roachpb.Span, 0, len(externalIntents))
	for _, intent := range externalIntents {
		spans = append(spans, intent.Span)
	}
	txn.Intents = spans
	return engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil /* txn */, txn)
}
// intersectSpan takes an intent and a descriptor. It then splits the
// intent's range into up to three pieces: A first piece which is contained in
// the Range, and a slice of up to two further intents which are outside of the
// key range. An intent for which [Key, EndKey) is empty does not result in any
// intents; thus intersectIntent only applies to intent ranges.
// A range-local intent range is never split: It's returned as either
// belonging to or outside of the descriptor's key range, and passing an intent
// which begins range-local but ends non-local results in a panic.
// TODO(tschottdorf) move to proto, make more gen-purpose - kv.truncate does
// some similar things.
func intersectSpan(
	span roachpb.Span,
	desc roachpb.RangeDescriptor,
) (middle *roachpb.Span, outside []roachpb.Span) {
	start, end := desc.StartKey.AsRawKey(), desc.EndKey.AsRawKey()
	if len(span.EndKey) == 0 {
		// Point spans aren't handled here (see function comment); treat as
		// entirely outside.
		outside = append(outside, span)
		return
	}
	if bytes.Compare(span.Key, keys.LocalRangeMax) < 0 {
		// Range-local spans: all-or-nothing, never split.
		if bytes.Compare(span.EndKey, keys.LocalRangeMax) >= 0 {
			log.Fatalf(context.Background(), "a local intent range may not have a non-local portion: %s", span)
		}
		if containsKeyRange(desc, span.Key, span.EndKey) {
			return &span, nil
		}
		return nil, append(outside, span)
	}
	// From now on, we're dealing with plain old key ranges - no more local
	// addressing.
	if bytes.Compare(span.Key, start) < 0 {
		// Intent spans a part to the left of [start, end).
		// NB: span.Key is advanced in place so the remaining checks below
		// operate on the truncated span.
		iCopy := span
		if bytes.Compare(start, span.EndKey) < 0 {
			iCopy.EndKey = start
		}
		span.Key = iCopy.EndKey
		outside = append(outside, iCopy)
	}
	if bytes.Compare(span.Key, span.EndKey) < 0 && bytes.Compare(end, span.EndKey) < 0 {
		// Intent spans a part to the right of [start, end).
		// NB: span.EndKey is pulled in so the final containment check sees
		// only the middle piece.
		iCopy := span
		if bytes.Compare(iCopy.Key, end) < 0 {
			iCopy.Key = end
		}
		span.EndKey = iCopy.Key
		outside = append(outside, iCopy)
	}
	if bytes.Compare(span.Key, span.EndKey) < 0 && bytes.Compare(span.Key, start) >= 0 && bytes.Compare(end, span.EndKey) >= 0 {
		// What remains (if non-empty) lies entirely within [start, end).
		middle = &span
	}
	return
}
// runCommitTrigger executes any InternalCommitTrigger attached to the
// EndTransaction request after a successful commit: split, merge,
// change-replicas, and modified-span triggers. The resulting
// PostCommitTriggers are accumulated via updateTrigger and returned for
// post-apply processing. An error from any trigger aborts the whole
// operation.
func (r *Replica) runCommitTrigger(
	ctx context.Context,
	batch engine.Batch,
	ms *enginepb.MVCCStats,
	args roachpb.EndTransactionRequest,
	txn *roachpb.Transaction,
) (*PostCommitTrigger, error) {
	var trigger *PostCommitTrigger
	ct := args.InternalCommitTrigger

	// The closure lets each trigger bail out with an early return while
	// funneling all errors to a single exit path.
	if err := func() error {
		if ct.GetSplitTrigger() != nil {
			var err error
			var postSplit *PostCommitTrigger
			// splitTrigger replaces *ms wholesale with the post-split stats.
			if *ms, postSplit, err = r.splitTrigger(
				ctx, batch, *ms, ct.SplitTrigger, txn.Timestamp,
			); err != nil {
				return err
			}
			trigger = updateTrigger(trigger, postSplit)
		}
		if ct.GetMergeTrigger() != nil {
			postMerge, err := r.mergeTrigger(ctx, batch, ms, ct.MergeTrigger, txn.Timestamp)
			if err != nil {
				return err
			}
			trigger = updateTrigger(trigger, postMerge)
		}
		if crt := ct.GetChangeReplicasTrigger(); crt != nil {
			trigger = updateTrigger(trigger, r.changeReplicasTrigger(ctx, batch, crt))
		}
		if ct.GetModifiedSpanTrigger() != nil {
			if ct.ModifiedSpanTrigger.SystemConfigSpan {
				// Check if we need to gossip the system config.
				// NOTE: System config gossiping can only execute correctly if
				// the transaction record is located on the range that contains
				// the system span. If a transaction is created which modifies
				// both system *and* non-system data, it should be ensured that
				// the transaction record itself is on the system span. This can
				// be done by making sure a system key is the first key touched
				// in the transaction.
				if !r.ContainsKey(keys.SystemConfigSpan.Key) {
					log.Errorf(ctx, "System configuration span was modified, but the "+
						"modification trigger is executing on a non-system range. "+
						"Configuration changes will not be gossiped.")
				} else {
					trigger = updateTrigger(trigger, &PostCommitTrigger{
						maybeGossipSystemConfig: true,
					})
				}
			}
		}
		return nil
	}(); err != nil {
		return nil, err
	}
	return trigger, nil
}
// RangeLookup is used to look up RangeDescriptors - a RangeDescriptor
// is a metadata structure which describes the key range and replica locations
// of a distinct range in the cluster.
//
// RangeDescriptors are stored as values in the cockroach cluster's key-value
// store. However, they are always stored using special "Range Metadata keys",
// which are "ordinary" keys with a special prefix prepended. The Range Metadata
// Key for an ordinary key can be generated with the `keys.RangeMetaKey(key)`
// function. The RangeDescriptor for the range which contains a given key can be
// retrieved by generating its Range Metadata Key and dispatching it to
// RangeLookup.
//
// Note that the Range Metadata Key sent to RangeLookup is NOT the key
// at which the desired RangeDescriptor is stored. Instead, this method returns
// the RangeDescriptor stored at the _lowest_ existing key which is _greater_
// than the given key. The returned RangeDescriptor will thus contain the
// ordinary key which was originally used to generate the Range Metadata Key
// sent to RangeLookup.
//
// The "Range Metadata Key" for a range is built by appending the end key of
// the range to the respective meta prefix.
//
// Lookups for range metadata keys usually want to read inconsistently, but
// some callers need a consistent result; both are supported.
//
// This method has an important optimization in the inconsistent case: instead
// of just returning the request RangeDescriptor, it also returns a slice of
// additional range descriptors immediately consecutive to the desired
// RangeDescriptor. This is intended to serve as a sort of caching pre-fetch,
// so that the requesting nodes can aggressively cache RangeDescriptors which
// are likely to be desired by their current workload. The Reverse flag
// specifies whether descriptors are prefetched in descending or ascending
// order.
func (r *Replica) RangeLookup(
	ctx context.Context,
	batch engine.ReadWriter,
	h roachpb.Header,
	args roachpb.RangeLookupRequest,
) (roachpb.RangeLookupResponse, *PostCommitTrigger, error) {
	log.Event(ctx, "RangeLookup")
	var reply roachpb.RangeLookupResponse
	ts := h.Timestamp // all we're going to use from the header.

	// The lookup key must be a plain (addressable) key, not range-local.
	key, err := keys.Addr(args.Key)
	if err != nil {
		return reply, nil, err
	}
	if !key.Equal(args.Key) {
		return reply, nil, errors.Errorf("illegal lookup of range-local key")
	}

	// Validate the caller-supplied prefetch budget.
	rangeCount := int64(args.MaxRanges)
	if rangeCount < 1 {
		return reply, nil, errors.Errorf("range lookup specified invalid maximum range count %d: must be > 0", rangeCount)
	}
	consistent := h.ReadConsistency != roachpb.INCONSISTENT
	if consistent && args.ConsiderIntents {
		return reply, nil, errors.Errorf("can not read consistently and special-case intents")
	}
	if args.ConsiderIntents {
		// Disable prefetching; the caller only cares about a single intent,
		// and the code below simplifies considerably.
		rangeCount = 1
	}

	// checkAndUnmarshal decodes a scanned value into a RangeDescriptor, or
	// returns (nil, nil) for descriptors we're not supposed to return
	// (reverse-mode only, see below).
	var checkAndUnmarshal func(roachpb.Value) (*roachpb.RangeDescriptor, error)

	var kvs []roachpb.KeyValue // kv descriptor pairs in scan order
	var intents []roachpb.Intent
	if !args.Reverse {
		// If scanning forward, there's no special "checking": Just decode the
		// descriptor and return it.
		checkAndUnmarshal = func(v roachpb.Value) (*roachpb.RangeDescriptor, error) {
			var rd roachpb.RangeDescriptor
			if err := v.GetProto(&rd); err != nil {
				return nil, err
			}
			return &rd, nil
		}

		// We want to search for the metadata key greater than
		// args.Key. Scan for both the requested key and the keys immediately
		// afterwards, up to MaxRanges.
		startKey, endKey, err := keys.MetaScanBounds(key)
		if err != nil {
			return reply, nil, err
		}

		// Scan for descriptors.
		kvs, intents, err = engine.MVCCScan(ctx, batch, startKey, endKey, rangeCount,
			ts, consistent, h.Txn)
		if err != nil {
			// An error here is likely a WriteIntentError when reading consistently.
			return reply, nil, err
		}
	} else {
		// Use MVCCScan to get the first range. There are three cases:
		//  1. args.Key is not an endpoint of the range and
		//  2a. The args.Key is the start/end key of the range.
		//  2b. Even worse, the body of args.Key is roachpb.KeyMax.
		// In the first case, we need use the MVCCScan() to get the first
		// range descriptor, because ReverseScan can't do the work. If we
		// have ranges [a,c) and [c,f) and the reverse scan request's key
		// range is [b,d), then d.Next() is less than "f", and so the meta
		// row {f->[c,f)} would be ignored by MVCCReverseScan. In case 2a,
		// the range descriptor received by MVCCScan will be filtered before
		// results are returned: With ranges [c,f) and [f,z), reverse scan
		// on [d,f) receives the descriptor {z->[f,z)}, which is discarded
		// below since it's not being asked for. Finally, in case 2b, we
		// don't even attempt the forward scan because it's neither defined
		// nor required.
		// Note that Meta1KeyMax is admissible: it means we're looking for
		// the range descriptor that houses Meta2KeyMax, and a forward scan
		// handles it correctly.
		// In this case, checkAndUnmarshal is more complicated: It needs
		// to weed out descriptors from the forward scan above, which could
		// return a result or an intent we're not supposed to return.
		checkAndUnmarshal = func(v roachpb.Value) (*roachpb.RangeDescriptor, error) {
			var r roachpb.RangeDescriptor
			if err := v.GetProto(&r); err != nil {
				return nil, err
			}
			startKeyAddr, err := keys.Addr(keys.RangeMetaKey(r.StartKey))
			if err != nil {
				return nil, err
			}
			if !startKeyAddr.Less(key) {
				// This is the case in which we've picked up an extra descriptor
				// we don't want.
				return nil, nil
			}
			// We actually want this descriptor.
			return &r, nil
		}

		if key.Less(roachpb.RKey(keys.Meta2KeyMax)) {
			// Forward-scan a single descriptor to cover case 1 above.
			startKey, endKey, err := keys.MetaScanBounds(key)
			if err != nil {
				return reply, nil, err
			}

			kvs, intents, err = engine.MVCCScan(ctx, batch, startKey, endKey, 1,
				ts, consistent, h.Txn)
			if err != nil {
				return reply, nil, err
			}
		}
		// We want to search for the metadata key just less or equal to
		// args.Key. Scan in reverse order for both the requested key and the
		// keys immediately backwards, up to MaxRanges.
		startKey, endKey, err := keys.MetaReverseScanBounds(key)
		if err != nil {
			return reply, nil, err
		}

		// Reverse scan for descriptors.
		revKvs, revIntents, err := engine.MVCCReverseScan(ctx, batch, startKey, endKey, rangeCount,
			ts, consistent, h.Txn)
		if err != nil {
			// An error here is likely a WriteIntentError when reading consistently.
			return reply, nil, err
		}

		// Merge the results, the total ranges may be bigger than rangeCount.
		kvs = append(kvs, revKvs...)
		intents = append(intents, revIntents...)
	}

	userKey := keys.UserKey(key)
	// Forward lookups match by containment; reverse lookups match the range
	// whose exclusive end key equals the sought key.
	containsFn := roachpb.RangeDescriptor.ContainsKey
	if args.Reverse {
		containsFn = roachpb.RangeDescriptor.ContainsExclusiveEndKey
	}

	// Decode all scanned range descriptors which haven't been unmarshaled yet.
	for _, kv := range kvs {
		// TODO(tschottdorf) Candidate for a ReplicaCorruptionError.
		rd, err := checkAndUnmarshal(kv.Value)
		if err != nil {
			return reply, nil, err
		}
		if rd != nil {
			// Add the first valid descriptor to the desired range descriptor
			// list in the response, add all others to the prefetched list.
			if len(reply.Ranges) == 0 && containsFn(*rd, userKey) {
				reply.Ranges = append(reply.Ranges, *rd)
			} else {
				reply.PrefetchedRanges = append(reply.PrefetchedRanges, *rd)
			}
		}
	}

	// NOTE (subtle): dangling intents on meta records are peculiar: It's not
	// clear whether the intent or the previous value point to the correct
	// location of the Range. It gets even more complicated when there are
	// split-related intents or a txn record co-located with a replica
	// involved in the split. Since we cannot know the correct answer, we
	// reply with both the pre- and post- transaction values when the
	// ConsiderIntents flag is set.
	//
	// This does not count against a maximum range count because they are
	// possible versions of the same descriptor. In other words, both the
	// current live descriptor and a potentially valid descriptor from
	// observed intents could be returned when MaxRanges is set to 1 and
	// the ConsiderIntents flag is set.
	if args.ConsiderIntents || len(reply.Ranges) == 0 {
		for _, intent := range intents {
			// Read the intent's provisional value as its own transaction.
			val, _, err := engine.MVCCGetAsTxn(ctx, batch, intent.Key, intent.Txn.Timestamp, intent.Txn)
			if err != nil {
				return reply, nil, err
			}

			if val == nil {
				// Intent is a deletion.
				continue
			}
			rd, err := checkAndUnmarshal(*val)
			if err != nil {
				return reply, nil, err
			}
			if rd != nil {
				if containsFn(*rd, userKey) {
					reply.Ranges = append(reply.Ranges, *rd)
					break
				}
			}
		}
	}

	if len(reply.Ranges) == 0 {
		// No matching results were returned from the scan. This should
		// never happen with the above logic.
		log.Fatalf(ctx, "RangeLookup dispatched to correct range, but no matching RangeDescriptor was found: %q", args.Key)
	} else if preCount := int64(len(reply.PrefetchedRanges)); 1+preCount > rangeCount {
		// We've possibly picked up an extra descriptor if we're in reverse
		// mode due to the initial forward scan.
		//
		// Here, we only count the desired range descriptors as a single
		// descriptor against the rangeCount limit, even if multiple versions
		// of the same descriptor were found in intents. In practice, we should
		// only get multiple desired range descriptors when prefetching is disabled
		// anyway (see above), so this should never actually matter.
		reply.PrefetchedRanges = reply.PrefetchedRanges[:rangeCount-1]
	}

	// Sanity check: every returned descriptor must actually match the key.
	for _, rd := range reply.Ranges {
		if !containsFn(rd, userKey) {
			log.Fatalf(ctx, "range lookup of meta key %q resulted in descriptor %s which does not contain non-meta key %q", key, rd, userKey)
		}
	}

	return reply, intentsToTrigger(intents, &args), nil
}
// HeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from
// coordinator. Returns the updated transaction.
func (r *Replica) HeartbeatTxn(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.HeartbeatTxnRequest,
) (roachpb.HeartbeatTxnResponse, error) {
	var reply roachpb.HeartbeatTxnResponse
	// The header's txn must be set and consistent with the request.
	if err := verifyTransaction(h, &args); err != nil {
		return reply, err
	}
	key := keys.TransactionKey(h.Txn.Key, h.Txn.ID)
	var txn roachpb.Transaction
	// Read the existing transaction record inconsistently-timestamped
	// (inline, zero timestamp) from the batch.
	if ok, err := engine.MVCCGetProto(ctx, batch, key, hlc.ZeroTimestamp, true, nil, &txn); err != nil {
		return reply, err
	} else if !ok {
		// If no existing transaction record was found, skip heartbeat.
		// This could mean the heartbeat is a delayed relic or it could
		// mean that the BeginTransaction call was delayed. In either
		// case, there's no reason to persist a new transaction record.
		return reply, errors.Errorf("heartbeat for transaction %s failed; record not present", h.Txn)
	}
	if txn.Status == roachpb.PENDING {
		// Ratchet LastHeartbeat forward to the coordinator-supplied time;
		// Forward never moves it backwards, so out-of-order heartbeats
		// are harmless.
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &hlc.Timestamp{}
		}
		txn.LastHeartbeat.Forward(args.Now)
		if err := engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, &txn); err != nil {
			return reply, err
		}
	}
	// Return the (possibly updated) transaction record to the coordinator.
	reply.Txn = &txn
	return reply, nil
}
// GC iterates through the list of keys to garbage collect
// specified in the arguments. MVCCGarbageCollect is invoked on each
// listed key along with the expiration timestamp. The GC metadata
// specified in the args is persisted after GC.
func (r *Replica) GC(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.GCRequest,
) (roachpb.GCResponse, *PostCommitTrigger, error) {
	// All keys must be inside the current replica range. Keys outside
	// of this range in the GC request are dropped silently, which is
	// safe because they can simply be re-collected later on the correct
	// replica. Discrepancies here can arise from race conditions during
	// range splitting.
	keys := make([]roachpb.GCRequest_GCKey, 0, len(args.Keys))
	for _, k := range args.Keys {
		if r.ContainsKey(k.Key) {
			keys = append(keys, k)
		}
	}
	var reply roachpb.GCResponse
	// Garbage collect the specified keys by expiration timestamps.
	err := engine.MVCCGarbageCollect(ctx, batch, ms, keys, h.Timestamp)
	if err != nil {
		return reply, nil, err
	}
	// Compute the new thresholds under the replica mutex; only the
	// in-memory state reads happen under the lock, the persistent
	// writes below do not.
	r.mu.Lock()
	newThreshold := r.mu.state.GCThreshold
	newTxnSpanGCThreshold := r.mu.state.TxnSpanGCThreshold
	// Protect against multiple GC requests arriving out of order; we track
	// the maximum timestamps.
	newThreshold.Forward(args.Threshold)
	newTxnSpanGCThreshold.Forward(args.TxnSpanGCThreshold)
	r.mu.Unlock()
	// The trigger carries the new thresholds so the in-memory state can
	// be updated after the batch commits.
	trigger := &PostCommitTrigger{
		gcThreshold:        &newThreshold,
		txnSpanGCThreshold: &newTxnSpanGCThreshold,
	}
	if err := setGCThreshold(ctx, batch, ms, r.Desc().RangeID, &newThreshold); err != nil {
		return reply, nil, err
	}
	if err := setTxnSpanGCThreshold(ctx, batch, ms, r.Desc().RangeID, &newTxnSpanGCThreshold); err != nil {
		return reply, nil, err
	}
	return reply, trigger, nil
}
// PushTxn resolves conflicts between concurrent txns (or
// between a non-transactional reader or writer and a txn) in several
// ways depending on the statuses and priorities of the conflicting
// transactions. The PushTxn operation is invoked by a
// "pusher" (the writer trying to abort a conflicting txn or the
// reader trying to push a conflicting txn's commit timestamp
// forward), who attempts to resolve a conflict with a "pushee"
// (args.PushTxn -- the pushee txn whose intent(s) caused the
// conflict). A pusher is either transactional, in which case
// PushTxn is completely initialized, or not, in which case the
// PushTxn has only the priority set.
//
// Txn already committed/aborted: If pushee txn is committed or
// aborted return success.
//
// Txn Timeout: If pushee txn entry isn't present or its LastHeartbeat
// timestamp isn't set, use its last active timestamp as LastHeartbeat.
// If current time - LastHeartbeat > 2 * DefaultHeartbeatInterval, then
// the pushee txn should be either pushed forward, aborted, or confirmed
// not pending, depending on value of Request.PushType.
//
// Old Txn Epoch: If persisted pushee txn entry has a newer Epoch than
// PushTxn.Epoch, return success, as older epoch may be removed.
//
// Lower Txn Priority: If pushee txn has a lower priority than pusher,
// adjust pushee's persisted txn depending on value of
// args.PushType. If args.PushType is PUSH_ABORT, set txn.Status to
// ABORTED, and priority to one less than the pusher's priority and
// return success. If args.PushType is PUSH_TIMESTAMP, set
// txn.Timestamp to just after PushTo.
//
// Higher Txn Priority: If pushee txn has a higher priority than
// pusher, return TransactionPushError. Transaction will be retried
// with priority one less than the pushee's higher priority.
//
// If the pusher is non-transactional, args.PusherTxn is an empty
// proto with only the priority set.
//
// If the pushee is aborted, its timestamp will be forwarded to match its last
// client activity timestamp (i.e. last heartbeat), if available. This is done
// so that the updated timestamp populates the abort cache, allowing the GC
// queue to purge entries for which the transaction coordinator must have found
// out via its heartbeats that the transaction has failed.
func (r *Replica) PushTxn(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.PushTxnRequest,
) (roachpb.PushTxnResponse, error) {
	var reply roachpb.PushTxnResponse
	// PushTxn is always sent as a non-transactional request.
	if h.Txn != nil {
		return reply, errTransactionUnsupported
	}
	if args.Now.Equal(hlc.ZeroTimestamp) {
		return reply, errors.Errorf("the field Now must be provided")
	}
	if !bytes.Equal(args.Key, args.PusheeTxn.Key) {
		return reply, errors.Errorf("request key %s should match pushee's txn key %s", args.Key, args.PusheeTxn.Key)
	}
	key := keys.TransactionKey(args.PusheeTxn.Key, args.PusheeTxn.ID)
	// Fetch existing transaction; if missing, we're allowed to abort.
	existTxn := &roachpb.Transaction{}
	ok, err := engine.MVCCGetProto(ctx, batch, key, hlc.ZeroTimestamp,
		true /* consistent */, nil /* txn */, existTxn)
	if err != nil {
		return reply, err
	}
	// There are three cases in which there is no transaction entry:
	//
	// * the pushee is still active but the BeginTransaction was delayed
	//   for long enough that a write intent from this txn to another
	//   range is causing another reader or writer to push.
	// * the pushee resolved its intents synchronously on successful commit;
	//   in this case, the transaction record of the pushee is also removed.
	//   Note that in this case, the intent which prompted this PushTxn
	//   doesn't exist any more.
	// * the pushee timed out or was aborted and the intent not cleaned up,
	//   but the transaction record was garbage collected.
	//
	// We currently make no attempt at guessing which one it is, though we
	// could (see #1939). Instead, a new aborted entry is always written.
	//
	// TODO(tschottdorf): we should actually improve this when we
	// garbage-collect aborted transactions, or we run the risk of a push
	// recreating a GC'ed transaction as PENDING, which is an error if it
	// has open intents (which is likely if someone pushes it).
	if !ok {
		// If getting an update for a transaction record which doesn't yet
		// exist, return empty Pushee, except when querying.
		//
		// Note that we *do* abort the transaction in PUSH_TOUCH mode. This
		// leaves transactions which write intents before their txn entry
		// vulnerable, but the alternative is having more intents never cleaned
		// up eagerly.
		if args.PushType == roachpb.PUSH_QUERY {
			return reply, nil
		}
		// The transaction doesn't exist on disk; we're allowed to abort it.
		// TODO(tschottdorf): especially for SNAPSHOT transactions, there's
		// something to win here by not aborting, but instead pushing the
		// timestamp. For SERIALIZABLE it's less important, but still better
		// to have them restart than abort. See #3344.
		// TODO(tschottdorf): double-check for problems emanating from
		// using a trivial Transaction proto here. Maybe some fields ought
		// to receive dummy values.
		reply.PusheeTxn.Status = roachpb.ABORTED
		reply.PusheeTxn.TxnMeta = args.PusheeTxn
		reply.PusheeTxn.Timestamp = args.Now // see method comment
		// Setting OrigTimestamp bumps LastActive(); see #9265.
		reply.PusheeTxn.OrigTimestamp = args.Now
		// Persist the synthesized aborted record so future pushers see it.
		return reply, engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, &reply.PusheeTxn)
	}
	// Start with the persisted transaction record as final transaction.
	reply.PusheeTxn = existTxn.Clone()
	// The pusher might be aware of a newer version of the pushee.
	reply.PusheeTxn.Timestamp.Forward(args.PusheeTxn.Timestamp)
	if reply.PusheeTxn.Epoch < args.PusheeTxn.Epoch {
		reply.PusheeTxn.Epoch = args.PusheeTxn.Epoch
	}
	// If already committed or aborted, return success.
	if reply.PusheeTxn.Status != roachpb.PENDING {
		// Trivial noop.
		return reply, nil
	}
	// If we're trying to move the timestamp forward, and it's already
	// far enough forward, return success.
	if args.PushType == roachpb.PUSH_TIMESTAMP && args.PushTo.Less(reply.PusheeTxn.Timestamp) {
		// Trivial noop.
		return reply, nil
	}
	// If getting an update for a transaction record, return now.
	if args.PushType == roachpb.PUSH_QUERY {
		return reply, nil
	}
	priority := args.PusherTxn.Priority
	// Decide whether the push succeeds; exactly one case below applies.
	var pusherWins bool
	var reason string
	switch {
	case reply.PusheeTxn.LastActive().Less(args.Now.Add(-2*base.DefaultHeartbeatInterval.Nanoseconds(), 0)):
		// Pushee has missed its heartbeat window and is considered expired.
		reason = "pushee is expired"
		// When cleaning up, actually clean up (as opposed to simply pushing
		// the garbage in the path of future writers).
		args.PushType = roachpb.PUSH_ABORT
		pusherWins = true
	case args.PushType == roachpb.PUSH_TOUCH:
		// If just attempting to cleanup old or already-committed txns,
		// pusher always fails.
		pusherWins = false
	case args.PushType == roachpb.PUSH_TIMESTAMP &&
		reply.PusheeTxn.Isolation == enginepb.SNAPSHOT:
		// Can always push a SNAPSHOT txn's timestamp.
		reason = "pushee is SNAPSHOT"
		pusherWins = true
	case reply.PusheeTxn.Priority != priority:
		reason = "priority"
		pusherWins = reply.PusheeTxn.Priority < priority
	case args.PusherTxn.ID == nil:
		reason = "equal priorities; pusher not transactional"
		pusherWins = false
	default:
		// Deterministic tie-break on txn ID bytes so concurrent pushes
		// between the same pair of txns resolve consistently.
		reason = "equal priorities; greater ID wins"
		pusherWins = bytes.Compare(reply.PusheeTxn.ID.GetBytes(),
			args.PusherTxn.ID.GetBytes()) < 0
	}
	if log.V(1) && reason != "" {
		s := "pushed"
		if !pusherWins {
			s = "failed to push"
		}
		log.Infof(ctx, "%s "+s+" %s: %s (pushee last active: %s)",
			args.PusherTxn.ID.Short(), reply.PusheeTxn.ID.Short(), reason,
			reply.PusheeTxn.LastActive())
	}
	if !pusherWins {
		err := roachpb.NewTransactionPushError(reply.PusheeTxn)
		if log.V(1) {
			log.Infof(ctx, "%v", err)
		}
		return reply, err
	}
	// Upgrade priority of pushed transaction to one less than pusher's.
	reply.PusheeTxn.UpgradePriority(priority - 1)
	// If aborting transaction, set new status and return success.
	if args.PushType == roachpb.PUSH_ABORT {
		reply.PusheeTxn.Status = roachpb.ABORTED
		// Forward the timestamp to accommodate abort cache GC. See method
		// comment for details.
		reply.PusheeTxn.Timestamp.Forward(reply.PusheeTxn.LastActive())
	} else if args.PushType == roachpb.PUSH_TIMESTAMP {
		// Otherwise, update timestamp to be one greater than the request's timestamp.
		reply.PusheeTxn.Timestamp = args.PushTo
		reply.PusheeTxn.Timestamp.Logical++
	}
	// Persist the pushed transaction using zero timestamp for inline value.
	if err := engine.MVCCPutProto(ctx, batch, ms, key, hlc.ZeroTimestamp, nil, &reply.PusheeTxn); err != nil {
		return reply, err
	}
	return reply, nil
}
// setAbortCache clears any abort cache entry if poison is false.
// Otherwise, if poison is true, creates an entry for this transaction
// in the abort cache to prevent future reads or writes from
// spuriously succeeding on this range.
func (r *Replica) setAbortCache(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	txn enginepb.TxnMeta,
	poison bool,
) error {
	if poison {
		// Record the txn's key, timestamp and priority so that future
		// operations by this txn on this range can be rejected.
		entry := roachpb.AbortCacheEntry{
			Key:       txn.Key,
			Timestamp: txn.Timestamp,
			Priority:  txn.Priority,
		}
		return r.abortCache.Put(ctx, batch, ms, txn.ID, &entry)
	}
	// Not poisoning: remove any existing entry for this txn.
	return r.abortCache.Del(ctx, batch, ms, txn.ID)
}
// ResolveIntent resolves a write intent from the specified key
// according to the status of the transaction which created it.
func (r *Replica) ResolveIntent(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ResolveIntentRequest,
) (roachpb.ResolveIntentResponse, error) {
	var reply roachpb.ResolveIntentResponse
	// This command is always issued non-transactionally.
	if h.Txn != nil {
		return reply, errTransactionUnsupported
	}
	intent := roachpb.Intent{
		Span:   args.Span,
		Txn:    args.IntentTxn,
		Status: args.Status,
	}
	if err := engine.MVCCResolveWriteIntent(ctx, batch, ms, intent); err != nil {
		return reply, err
	}
	if intent.Status != roachpb.ABORTED {
		return reply, nil
	}
	// The txn was aborted: update the abort cache (poison or clear).
	return reply, r.setAbortCache(ctx, batch, ms, args.IntentTxn, args.Poison)
}
// ResolveIntentRange resolves write intents in the specified
// key range according to the status of the transaction which created it.
func (r *Replica) ResolveIntentRange(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ResolveIntentRangeRequest,
) (roachpb.ResolveIntentRangeResponse, error) {
	var reply roachpb.ResolveIntentRangeResponse
	// This command is always issued non-transactionally.
	if h.Txn != nil {
		return reply, errTransactionUnsupported
	}
	intent := roachpb.Intent{
		Span:   args.Span,
		Txn:    args.IntentTxn,
		Status: args.Status,
	}
	// No cap on the number of intents resolved in the span.
	if _, err := engine.MVCCResolveWriteIntentRange(ctx, batch, ms, intent, math.MaxInt64); err != nil {
		return reply, err
	}
	if intent.Status != roachpb.ABORTED {
		return reply, nil
	}
	// The txn was aborted: update the abort cache (poison or clear).
	return reply, r.setAbortCache(ctx, batch, ms, args.IntentTxn, args.Poison)
}
// Merge is used to merge a value into an existing key. Merge is an
// efficient accumulation operation which is exposed by RocksDB, used
// by CockroachDB for the efficient accumulation of certain
// values. Due to the difficulty of making these operations
// transactional, merges are not currently exposed directly to
// clients. Merged values are explicitly not MVCC data.
func (r *Replica) Merge(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.MergeRequest,
) (roachpb.MergeResponse, error) {
	// Delegate directly to the engine-level merge; the response carries
	// no payload beyond the error.
	err := engine.MVCCMerge(ctx, batch, ms, args.Key, h.Timestamp, args.Value)
	return roachpb.MergeResponse{}, err
}
// TruncateLog discards a prefix of the raft log. Truncating part of a log that
// has already been truncated has no effect. If this range is not the one
// specified within the request body, the request will also be ignored.
func (r *Replica) TruncateLog(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.TruncateLogRequest,
) (roachpb.TruncateLogResponse, *PostCommitTrigger, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var reply roachpb.TruncateLogResponse
	// After a merge, it's possible that this request was sent to the wrong
	// range based on the start key. This will cancel the request if this is not
	// the range specified in the request body.
	if r.RangeID != args.RangeID {
		log.Infof(ctx, "attempting to truncate raft logs for another range %d. Normally this is due to a merge and can be ignored.",
			args.RangeID)
		return reply, nil, nil
	}
	// Have we already truncated this log? If so, just return without an error.
	firstIndex, err := r.FirstIndex()
	if err != nil {
		return reply, nil, err
	}
	if firstIndex >= args.Index {
		if log.V(3) {
			log.Infof(ctx, "attempting to truncate previously truncated raft log. FirstIndex:%d, TruncateFrom:%d",
				firstIndex, args.Index)
		}
		return reply, nil, nil
	}
	// args.Index is the first index to keep. The truncated state records
	// the index and term of the last discarded entry (args.Index - 1).
	term, err := r.Term(args.Index - 1)
	if err != nil {
		return reply, nil, err
	}
	// Delete raft log entries [0, args.Index), i.e. everything being discarded.
	start := keys.RaftLogKey(r.RangeID, 0)
	end := keys.RaftLogKey(r.RangeID, args.Index)
	var diff enginepb.MVCCStats
	// Passing zero timestamp to MVCCDeleteRange is equivalent to a ranged clear
	// but it also computes stats.
	if _, _, _, err := engine.MVCCDeleteRange(ctx, batch, &diff, start, end, math.MaxInt64, /* max */
		hlc.ZeroTimestamp, nil /* txn */, false /* returnKeys */); err != nil {
		return reply, nil, err
	}
	// diff.SysBytes is negative for the deleted entries, shrinking the
	// tracked log size.
	raftLogSize := r.mu.raftLogSize + diff.SysBytes
	// Check raftLogSize since it isn't persisted between server restarts.
	if raftLogSize < 0 {
		raftLogSize = 0
	}
	tState := &roachpb.RaftTruncatedState{
		Index: args.Index - 1,
		Term:  term,
	}
	// The trigger publishes the new truncated state and log size to the
	// in-memory replica state after the batch commits.
	trigger := &PostCommitTrigger{
		truncatedState: tState,
		raftLogSize:    &raftLogSize,
	}
	return reply, trigger, engine.MVCCPutProto(ctx, batch, ms, keys.RaftTruncatedStateKey(r.RangeID), hlc.ZeroTimestamp, nil, tState)
}
// newFailedLeaseTrigger returns a trigger whose leaseMetricsResult is a
// pointer to false, recording a failed lease attempt in the lease metrics
// with no other side effects.
func newFailedLeaseTrigger() *PostCommitTrigger {
	var succeeded bool // stays false: the lease request did not succeed
	return &PostCommitTrigger{leaseMetricsResult: &succeeded}
}
// RequestLease sets the range lease for this range. The command fails
// only if the desired start timestamp collides with a previous lease.
// Otherwise, the start timestamp is wound back to right after the expiration
// of the previous lease (or zero). If this range replica is already the lease
// holder, the expiration will be extended or shortened as indicated. For a new
// lease, all duties required of the range lease holder are commenced, including
// clearing the command queue and timestamp cache.
func (r *Replica) RequestLease(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.RequestLeaseRequest,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	r.mu.Lock()
	defer r.mu.Unlock()
	prevLease := r.mu.state.Lease
	// Pre-built rejection error, reused by both failure paths below.
	rErr := &roachpb.LeaseRejectedError{
		Existing:  *prevLease,
		Requested: args.Lease,
	}
	// MIGRATION(tschottdorf): needed to apply Raft commands which got proposed
	// before the StartStasis field was introduced.
	if args.Lease.StartStasis.Equal(hlc.ZeroTimestamp) {
		args.Lease.StartStasis = args.Lease.Expiration
	}
	// An extension is a request from the store already holding the lease.
	isExtension := prevLease.Replica.StoreID == args.Lease.Replica.StoreID
	effectiveStart := args.Lease.Start
	// Wind the start timestamp back as far towards the previous lease as we
	// can. That'll make sure that when multiple leases are requested out of
	// order at the same replica (after all, they use the request timestamp,
	// which isn't straight out of our local clock), they all succeed unless
	// they have a "real" issue with a previous lease. Example: Assuming no
	// previous lease, one request for [5, 15) followed by one for [0, 15)
	// would fail without this optimization. With it, the first request
	// effectively gets the lease for [0, 15), which the second one can commit
	// again (even extending your own lease is possible; see below).
	//
	// If this is our lease (or no prior lease exists), we effectively absorb
	// the old lease. This allows multiple requests from the same replica to
	// merge without ticking away from the minimal common start timestamp. It
	// also has the positive side-effect of fixing #3561, which was caused by
	// the absence of replay protection.
	if prevLease.Replica.StoreID == 0 || isExtension {
		effectiveStart.Backward(prevLease.Start)
	} else {
		effectiveStart.Backward(prevLease.Expiration.Next())
	}
	if isExtension {
		// An extension may never move its own start timestamp backwards.
		if effectiveStart.Less(prevLease.Start) {
			rErr.Message = "extension moved start timestamp backwards"
			return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(), rErr
		}
		// Never shorten the lease below the previous expiration.
		args.Lease.Expiration.Forward(prevLease.Expiration)
	} else if effectiveStart.Less(prevLease.Expiration) {
		rErr.Message = "requested lease overlaps previous lease"
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(), rErr
	}
	args.Lease.Start = effectiveStart
	return r.applyNewLeaseLocked(ctx, batch, ms, args.Lease, isExtension)
}
// TransferLease sets the lease holder for the range.
// Unlike with RequestLease(), the new lease is allowed to overlap the old one,
// the contract being that the transfer must have been initiated by the (soon
// ex-) lease holder which must have dropped all of its lease holder powers
// before proposing.
func (r *Replica) TransferLease(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.TransferLeaseRequest,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	r.mu.Lock()
	defer r.mu.Unlock()
	if log.V(2) {
		// Emit a detailed trace of the transfer at verbosity >= 2.
		curLease := r.mu.state.Lease
		log.Infof(ctx, "lease transfer: prev lease: %+v, new lease: %+v "+
			"old expiration: %s, new start: %s",
			curLease, args.Lease, curLease.Expiration, args.Lease.Start)
	}
	// A transfer is never an extension: the lease holder changes.
	return r.applyNewLeaseLocked(ctx, batch, ms, args.Lease, false /* isExtension */)
}
// applyNewLeaseLocked checks that the lease contains a valid interval and that
// the new lease holder is still a member of the replica set, and then proceeds
// to write the new lease to the batch, emitting an appropriate trigger.
//
// The new lease might be a lease for a range that didn't previously have an
// active lease, might be an extension or a lease transfer.
//
// isExtension should be set if the lease holder does not change with this
// lease. If it doesn't change, we don't need a PostCommitTrigger that
// synchronizes with reads.
//
// r.mu needs to be locked.
//
// TODO(tschottdorf): refactoring what's returned from the trigger here makes
// sense to minimize the amount of code intolerant of rolling updates.
func (r *Replica) applyNewLeaseLocked(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	lease roachpb.Lease,
	isExtension bool,
) (roachpb.RequestLeaseResponse, *PostCommitTrigger, error) {
	// When returning an error from this method, must always return
	// a newFailedLeaseTrigger() to satisfy stats.
	prevLease := r.mu.state.Lease
	// Ensure Start < StartStasis <= Expiration.
	if !lease.Start.Less(lease.StartStasis) ||
		lease.Expiration.Less(lease.StartStasis) {
		// This amounts to a bug.
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(),
			&roachpb.LeaseRejectedError{
				Existing:  *prevLease,
				Requested: lease,
				Message: fmt.Sprintf("illegal lease interval: [%s, %s, %s]",
					lease.Start, lease.StartStasis, lease.Expiration),
			}
	}
	// Verify that requesting replica is part of the current replica set.
	if _, ok := r.mu.state.Desc.GetReplicaDescriptor(lease.Replica.StoreID); !ok {
		return roachpb.RequestLeaseResponse{}, newFailedLeaseTrigger(),
			&roachpb.LeaseRejectedError{
				Existing:  *prevLease,
				Requested: lease,
				Message:   "replica not found",
			}
	}
	var reply roachpb.RequestLeaseResponse
	// Store the lease to disk & in-memory.
	if err := setLease(ctx, batch, ms, r.RangeID, &lease); err != nil {
		return reply, newFailedLeaseTrigger(), err
	}
	// t is pointed to by leaseMetricsResult to record a successful lease op.
	t := true
	trigger := &PostCommitTrigger{
		// If we didn't block concurrent reads here, there'd be a chance that
		// reads could sneak in on a new lease holder between setting the lease
		// and updating the low water mark. This in itself isn't a consistency
		// violation, but it's a bit suspicious and did make
		// TestRangeTransferLease flaky. We err on the side of caution for now, but
		// at least we don't do it in case of an extension.
		//
		// TODO(tschottdorf): Maybe we shouldn't do this at all. Need to think
		// through potential consequences.
		noConcurrentReads:  !isExtension,
		lease:              &lease,
		leaseMetricsResult: &t,
		// TODO(tschottdorf): having traced the origin of this call back to
		// rev 6281926, it seems that we should only be doing this when the
		// lease holder has changed. However, it's likely not a big deal to
		// do it always.
		maybeGossipSystemConfig: true,
	}
	return reply, trigger, nil
}
// CheckConsistency runs a consistency check on the range. It first applies a
// ComputeChecksum command on the range. It then issues CollectChecksum commands
// to the other replicas, comparing their checksums against the local one and
// logging (or panicking on, per testing knobs / settings) any mismatch.
func (r *Replica) CheckConsistency(
	ctx context.Context,
	args roachpb.CheckConsistencyRequest,
	desc *roachpb.RangeDescriptor,
) (roachpb.CheckConsistencyResponse, *roachpb.Error) {
	key := desc.StartKey.AsRawKey()
	endKey := desc.EndKey.AsRawKey()
	// Fresh ID ties this check's ComputeChecksum and CollectChecksum calls together.
	id := uuid.MakeV4()
	// Send a ComputeChecksum to all the replicas of the range.
	{
		var ba roachpb.BatchRequest
		ba.RangeID = r.Desc().RangeID
		checkArgs := &roachpb.ComputeChecksumRequest{
			Span: roachpb.Span{
				Key:    key,
				EndKey: endKey,
			},
			Version:    replicaChecksumVersion,
			ChecksumID: id,
			// Only capture a full KV snapshot (for diffing) when a diff was requested.
			Snapshot: args.WithDiff,
		}
		ba.Add(checkArgs)
		ba.Timestamp = r.store.Clock().Now()
		_, pErr := r.Send(ctx, ba)
		if pErr != nil {
			return roachpb.CheckConsistencyResponse{}, pErr
		}
	}
	// Get local checksum. This might involve waiting for it.
	c, err := r.getChecksum(ctx, id)
	if err != nil {
		return roachpb.CheckConsistencyResponse{}, roachpb.NewError(
			errors.Wrapf(err, "could not compute checksum for range [%s, %s]", key, endKey))
	}
	// Get remote checksums.
	localReplica, err := r.GetReplicaDescriptor()
	if err != nil {
		return roachpb.CheckConsistencyResponse{},
			roachpb.NewError(errors.Wrap(err, "could not get replica descriptor"))
	}
	var inconsistencyCount uint32 // updated atomically from the async tasks
	var wg sync.WaitGroup
	sp := r.store.ctx.StorePool
	// Fan out one CollectChecksum per remote replica; each task compares
	// the remote checksum against the local one.
	for _, replica := range r.Desc().Replicas {
		if replica == localReplica {
			continue
		}
		wg.Add(1)
		replica := replica // per-iteration copy
		if err := r.store.Stopper().RunAsyncTask(ctx, func(ctx context.Context) {
			defer wg.Done()
			addr, err := sp.resolver(replica.NodeID)
			if err != nil {
				log.Error(ctx, errors.Wrapf(err, "could not resolve node ID %d", replica.NodeID))
				return
			}
			conn, err := sp.rpcContext.GRPCDial(addr.String())
			if err != nil {
				log.Error(ctx,
					errors.Wrapf(err, "could not dial node ID %d address %s", replica.NodeID, addr))
				return
			}
			client := NewConsistencyClient(conn)
			req := &CollectChecksumRequest{
				StoreRequestHeader{NodeID: replica.NodeID, StoreID: replica.StoreID},
				r.RangeID,
				id,
				c.checksum,
			}
			resp, err := client.CollectChecksum(ctx, req)
			if err != nil {
				log.Error(ctx, errors.Wrapf(err, "could not CollectChecksum from replica %s", replica))
				return
			}
			if bytes.Equal(c.checksum, resp.Checksum) {
				return
			}
			atomic.AddUint32(&inconsistencyCount, 1)
			var buf bytes.Buffer
			_, _ = fmt.Fprintf(&buf, "replica %s is inconsistent: expected checksum %x, got %x",
				replica, c.checksum, resp.Checksum)
			if c.snapshot != nil && resp.Snapshot != nil {
				diff := diffRange(c.snapshot, resp.Snapshot)
				if report := r.store.ctx.TestingKnobs.BadChecksumReportDiff; report != nil {
					report(r.store.Ident, diff)
				}
				for _, d := range diff {
					otherSide := "lease holder"
					if d.LeaseHolder {
						otherSide = "replica"
					}
					_, _ = fmt.Fprintf(&buf, "\nk:v = (%q (%x), %s, %.1024x) not present on %s",
						d.Key, d.Key, d.Timestamp, d.Value, otherSide)
				}
			}
			// Pass buf through "%s": the buffer contains %q/%x-rendered key
			// and value bytes which may themselves contain '%' runes; using
			// it directly as the format string would garble the report.
			log.Errorf(ctx, "%s", buf.String())
		}); err != nil {
			log.Error(ctx, errors.Wrap(err, "could not run async CollectChecksum"))
			wg.Done()
		}
	}
	wg.Wait()
	if inconsistencyCount == 0 {
	} else if args.WithDiff {
		// We already have the diff in the log output above; decide whether
		// mismatches are fatal.
		logFunc := log.Errorf
		if p := r.store.TestingKnobs().BadChecksumPanic; p != nil {
			p(r.store.Ident)
		} else if r.store.ctx.ConsistencyCheckPanicOnFailure {
			logFunc = log.Fatalf
		}
		logFunc(ctx, "consistency check failed with %d inconsistent replicas", inconsistencyCount)
	} else {
		// No diff was requested; re-run asynchronously with withDiff=true to
		// fetch details.
		// NOTE(review): this branch uses r.store.stopper / r.ctx rather than
		// the accessor and request ctx used above — presumably to detach the
		// rerun from the request's lifetime; confirm before changing.
		if err := r.store.stopper.RunAsyncTask(r.ctx, func(ctx context.Context) {
			log.Errorf(ctx, "consistency check failed with %d inconsistent replicas; fetching details",
				inconsistencyCount)
			// Keep the request from crossing the local->global boundary.
			if bytes.Compare(key, keys.LocalMax) < 0 {
				key = keys.LocalMax
			}
			if err := r.store.db.CheckConsistency(ctx, key, endKey, true /* withDiff */); err != nil {
				log.Error(ctx, errors.Wrap(err, "could not rerun consistency check"))
			}
		}); err != nil {
			log.Error(ctx, errors.Wrap(err, "could not rerun consistency check"))
		}
	}
	return roachpb.CheckConsistencyResponse{}, nil
}
const (
	// replicaChecksumVersion guards against comparing checksums computed by
	// incompatible code: ComputeChecksum rejects requests whose Version
	// doesn't match this value.
	replicaChecksumVersion = 2
	// replicaChecksumGCInterval is how long a completed checksum entry is
	// retained (its gcTimestamp is set this far in the future in
	// computeChecksumDone).
	replicaChecksumGCInterval = time.Hour
)
// getChecksum waits for the result of ComputeChecksum and returns it.
// It returns an error if the store stops or the context is canceled while
// waiting, or if no checksum ended up being computed for the id (e.g. it
// was already GCed).
func (r *Replica) getChecksum(
	ctx context.Context,
	id uuid.UUID,
) (replicaChecksum, error) {
	now := timeutil.Now()
	r.mu.Lock()
	r.gcOldChecksumEntriesLocked(now)
	c, ok := r.mu.checksums[id]
	if !ok {
		// No entry yet: register a placeholder with a notification channel
		// so computeChecksumDone can wake us up when the result arrives.
		if d, dOk := ctx.Deadline(); dOk {
			// Don't keep the placeholder around past the caller's deadline.
			c.gcTimestamp = d
		}
		c.notify = make(chan struct{})
		r.mu.checksums[id] = c
	}
	r.mu.Unlock()
	// Wait for the computation, the store stopping, or ctx cancellation.
	select {
	case <-r.store.Stopper().ShouldStop():
		return replicaChecksum{},
			errors.Errorf("store has stopped while waiting for compute checksum (ID = %s)", id)
	case <-ctx.Done():
		return replicaChecksum{},
			errors.Wrapf(ctx.Err(), "while waiting for compute checksum (ID = %s)", id)
	case <-c.notify:
	}
	if log.V(1) {
		log.Infof(ctx, "waited for compute checksum for %s", timeutil.Since(now))
	}
	// Re-read the entry: computeChecksumDone replaces the map value.
	r.mu.Lock()
	c, ok = r.mu.checksums[id]
	r.mu.Unlock()
	if !ok {
		return replicaChecksum{}, errors.Errorf("no map entry for checksum (ID = %s)", id)
	}
	if c.checksum == nil {
		return replicaChecksum{}, errors.Errorf(
			"checksum is nil, most likely because the async computation could not be run (ID = %s)", id)
	}
	return c, nil
}
// computeChecksumDone adds the computed checksum, sets a deadline for GCing the
// checksum, and sends out a notification.
func (r *Replica) computeChecksumDone(
	ctx context.Context,
	id uuid.UUID,
	sha []byte,
	snapshot *roachpb.RaftSnapshotData,
) {
	r.mu.Lock()
	defer r.mu.Unlock()
	c, ok := r.mu.checksums[id]
	if !ok {
		// ComputeChecksum adds an entry into the map, and the entry can
		// only be GCed once its gcTimestamp is set (done below). Something
		// really bad happened.
		log.Errorf(ctx, "no map entry for checksum (ID = %s)", id)
		return
	}
	// Fill in the result and push the GC deadline out.
	c.checksum = sha
	c.gcTimestamp = timeutil.Now().Add(replicaChecksumGCInterval)
	c.snapshot = snapshot
	r.mu.checksums[id] = c
	// Wake up any waiter blocked in getChecksum.
	close(c.notify)
}
// ComputeChecksum starts the process of computing a checksum on the replica at
// a particular snapshot. The checksum is later verified through a
// CollectChecksumRequest.
func (r *Replica) ComputeChecksum(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ComputeChecksumRequest,
) (roachpb.ComputeChecksumResponse, *PostCommitTrigger, error) {
	// Only requests produced by the same checksum version are comparable;
	// silently drop mismatched ones (logged, but not an error).
	if args.Version == replicaChecksumVersion {
		return roachpb.ComputeChecksumResponse{}, &PostCommitTrigger{computeChecksum: &args}, nil
	}
	log.Errorf(ctx, "Incompatible versions: e=%d, v=%d", replicaChecksumVersion, args.Version)
	return roachpb.ComputeChecksumResponse{}, nil, nil
}
// sha512 computes the SHA512 hash of all the replica data at the snapshot.
// It will dump all the k:v data into snapshot if it is provided.
//
// The hash input is, per key/value pair: little-endian int64 key length,
// little-endian int64 value length, raw key bytes, marshaled timestamp
// proto, raw value bytes. Any change to this framing must be accompanied
// by a bump of replicaChecksumVersion, since replicas only compare
// checksums of equal version.
func (r *Replica) sha512(
	desc roachpb.RangeDescriptor,
	snap engine.Reader,
	snapshot *roachpb.RaftSnapshotData,
) ([]byte, error) {
	hasher := sha512.New()
	// Iterate over all the data in the range.
	iter := NewReplicaDataIterator(&desc, snap, true /* replicatedOnly */)
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		key := iter.Key()
		value := iter.Value()
		if snapshot != nil {
			// Add the k:v into the debug message.
			snapshot.KV = append(snapshot.KV, roachpb.RaftSnapshotData_KeyValue{Key: key.Key, Value: value, Timestamp: key.Timestamp})
		}
		// Encode the length of the key and value.
		if err := binary.Write(hasher, binary.LittleEndian, int64(len(key.Key))); err != nil {
			return nil, err
		}
		if err := binary.Write(hasher, binary.LittleEndian, int64(len(value))); err != nil {
			return nil, err
		}
		if _, err := hasher.Write(key.Key); err != nil {
			return nil, err
		}
		timestamp, err := protoutil.Marshal(&key.Timestamp)
		if err != nil {
			return nil, err
		}
		if _, err := hasher.Write(timestamp); err != nil {
			return nil, err
		}
		if _, err := hasher.Write(value); err != nil {
			return nil, err
		}
	}
	// Sum appends to the provided slice; pre-size it to avoid reallocation.
	sha := make([]byte, 0, sha512.Size)
	return hasher.Sum(sha), nil
}
// ChangeFrozen freezes or unfreezes the Replica idempotently.
//
// The frozen status is only written when the request addresses this Range's
// start (see the first-range caveat below); in all cases the response records
// the Stores this request visited so the caller can track coverage.
func (r *Replica) ChangeFrozen(
	ctx context.Context,
	batch engine.ReadWriter,
	ms *enginepb.MVCCStats,
	h roachpb.Header,
	args roachpb.ChangeFrozenRequest,
) (roachpb.ChangeFrozenResponse, *PostCommitTrigger, error) {
	var resp roachpb.ChangeFrozenResponse
	resp.MinStartKey = roachpb.RKeyMax
	curStart, err := keys.Addr(args.Key)
	if err != nil {
		return resp, nil, err
	}
	// Addressing must be a no-op here: range-local keys are not supported.
	if !bytes.Equal(curStart, args.Key) {
		return resp, nil, errors.Errorf("unsupported range-local key")
	}
	desc := r.Desc()
	frozen, err := loadFrozenStatus(ctx, batch, desc.RangeID)
	if err != nil || frozen == args.Frozen {
		// Something went wrong or we're already in the right frozen state. In
		// the latter case, we avoid writing the "same thing" because "we"
		// might actually not be the same version of the code (picture a couple
		// of freeze requests lined up, but not all of them applied between
		// version changes).
		return resp, nil, err
	}
	if args.MustVersion == "" {
		return resp, nil, errors.Errorf("empty version tag")
	} else if bi := build.GetInfo(); !frozen && args.Frozen && args.MustVersion != bi.Tag {
		// Some earlier version tried to freeze but we never applied it until
		// someone restarted this node with another version. No bueno - have to
		// assume that integrity has already been compromised.
		// Note that we have extra hooks upstream which delay returning success
		// to the caller until it's reasonable to assume that all Replicas have
		// applied the freeze.
		// This is a classical candidate for returning replica corruption, but
		// we don't do it (yet); for now we'll assume that the update steps
		// are carried out in correct order.
		log.Warningf(ctx, "freeze %s issued from %s is applied by %s",
			desc, args.MustVersion, bi)
	}
	// Generally, we want to act only if the request hits the Range's StartKey.
	// The one case in which that behaves unexpectedly is if we're the first
	// range, which has StartKey equal to KeyMin, but the lowest curStart which
	// is feasible is LocalMax.
	if !desc.StartKey.Less(curStart) {
		resp.RangesAffected++
	} else if locMax, err := keys.Addr(keys.LocalMax); err != nil {
		return resp, nil, err
	} else if !locMax.Less(curStart) {
		resp.RangesAffected++
	}
	// Note down the Stores on which this request ran, even if the Range was
	// not affected.
	resp.Stores = make(map[roachpb.StoreID]roachpb.NodeID, len(desc.Replicas))
	for i := range desc.Replicas {
		resp.Stores[desc.Replicas[i].StoreID] = desc.Replicas[i].NodeID
	}
	if resp.RangesAffected == 0 {
		return resp, nil, nil
	}
	// This Range is affected: persist the new frozen status and report our
	// start key so the caller can aggregate progress.
	resp.MinStartKey = desc.StartKey
	if err := setFrozenStatus(ctx, batch, ms, r.Desc().RangeID, args.Frozen); err != nil {
		return roachpb.ChangeFrozenResponse{}, nil, err
	}
	// The in-memory frozen state is updated via the post-commit trigger once
	// the batch commits.
	trigger := &PostCommitTrigger{
		frozen: &args.Frozen,
	}
	return resp, trigger, nil
}
// ReplicaSnapshotDiff is a part of a []ReplicaSnapshotDiff which represents a diff between
// two replica snapshots. For now it's only a diff between their KV pairs.
type ReplicaSnapshotDiff struct {
	// LeaseHolder is set to true if this k:v pair is only present on the lease
	// holder.
	LeaseHolder bool
	// Key is the key at which the two snapshots differ.
	Key roachpb.Key
	// Timestamp is the MVCC timestamp of the differing entry.
	Timestamp hlc.Timestamp
	// Value is the raw value of the differing entry.
	Value []byte
}
// diffRange compares the k:v dumps taken from the lease holder (l) and a
// replica (r) and returns every entry that is missing or differs on either
// side. Both inputs are expected to hold key-sorted KV slices; if either is
// nil, no diff is produced.
func diffRange(l, r *roachpb.RaftSnapshotData) []ReplicaSnapshotDiff {
	if l == nil || r == nil {
		return nil
	}
	var diff []ReplicaSnapshotDiff
	i, j := 0, 0
	for {
		// A zero-value entry (nil Key) marks an exhausted side.
		var e, v roachpb.RaftSnapshotData_KeyValue
		if i < len(l.KV) {
			e = l.KV[i]
		}
		if j < len(r.KV) {
			v = r.KV[j]
		}
		if e.Key == nil && v.Key == nil {
			// Both sides fully consumed.
			break
		}
		// comp < 0: entry exists only (or first) on the lease holder side;
		// comp > 0: only (or first) on the replica side; comp == 0: same key.
		var comp int
		switch {
		case e.Key == nil:
			comp = 1
		case v.Key == nil:
			comp = -1
		default:
			comp = bytes.Compare(e.Key, v.Key)
		}
		takeLeader, takeReplica := false, false
		switch {
		case comp < 0:
			takeLeader = true
		case comp > 0:
			takeReplica = true
		default:
			// Same key: disambiguate by timestamp, then by value.
			if !e.Timestamp.Equal(v.Timestamp) {
				if v.Timestamp.Less(e.Timestamp) {
					takeLeader = true
				} else {
					takeReplica = true
				}
			} else if !bytes.Equal(e.Value, v.Value) {
				// Same key and timestamp but different values: report both.
				takeLeader = true
				takeReplica = true
			} else {
				// Identical entries; advance both cursors, no diff.
				i++
				j++
			}
		}
		if takeLeader {
			diff = append(diff, ReplicaSnapshotDiff{LeaseHolder: true, Key: e.Key, Timestamp: e.Timestamp, Value: e.Value})
			i++
		}
		if takeReplica {
			diff = append(diff, ReplicaSnapshotDiff{LeaseHolder: false, Key: v.Key, Timestamp: v.Timestamp, Value: v.Value})
			j++
		}
	}
	return diff
}
// AdminSplit divides the range into two ranges, using either
// args.SplitKey (if provided) or an internally computed key that aims
// to roughly equipartition the range by size. The split is done
// inside of a distributed txn which writes updated left and new right
// hand side range descriptors, and updates the range addressing
// metadata. The handover of responsibility for the reassigned key
// range is carried out seamlessly through a split trigger carried out
// as part of the commit of that transaction.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. An
// operation which might split a range should obtain a copy of the range's
// current descriptor before making the decision to split. If the decision is
// affirmative the descriptor is passed to AdminSplit, which performs a
// Conditional Put on the RangeDescriptor to ensure that no other operation has
// modified the range in the time the decision was being made.
// TODO(tschottdorf): should assert that split key is not a local key.
//
// See the comment on splitTrigger for details on the complexities.
func (r *Replica) AdminSplit(
	ctx context.Context, args roachpb.AdminSplitRequest, desc *roachpb.RangeDescriptor,
) (roachpb.AdminSplitResponse, *roachpb.Error) {
	var reply roachpb.AdminSplitResponse
	// Determine split key if not provided with args. This scan is
	// allowed to be relatively slow because admin commands don't block
	// other commands.
	log.Event(ctx, "split begins")
	var splitKey roachpb.RKey
	{
		foundSplitKey := args.SplitKey
		if len(foundSplitKey) == 0 {
			// No key given: pick one that roughly halves the range by size.
			snap := r.store.NewSnapshot()
			defer snap.Close()
			var err error
			targetSize := r.GetMaxBytes() / 2
			foundSplitKey, err = engine.MVCCFindSplitKey(ctx, snap, desc.RangeID, desc.StartKey, desc.EndKey, targetSize, nil /* logFn */)
			if err != nil {
				return reply, roachpb.NewErrorf("unable to determine split key: %s", err)
			}
		} else if !r.ContainsKey(foundSplitKey) {
			// An explicit split key must fall within this range.
			return reply, roachpb.NewError(roachpb.NewRangeKeyMismatchError(args.SplitKey, args.SplitKey, desc))
		}
		// Adjust the candidate so it doesn't fall inside a multi-key
		// structure (e.g. mid-row) that must not be split.
		foundSplitKey, err := keys.EnsureSafeSplitKey(foundSplitKey)
		if err != nil {
			return reply, roachpb.NewErrorf("cannot split range at key %s: %v",
				args.SplitKey, err)
		}
		splitKey, err = keys.Addr(foundSplitKey)
		if err != nil {
			return reply, roachpb.NewError(err)
		}
		if !splitKey.Equal(foundSplitKey) {
			return reply, roachpb.NewErrorf("cannot split range at range-local key %s", splitKey)
		}
		if !engine.IsValidSplitKey(foundSplitKey) {
			return reply, roachpb.NewErrorf("cannot split range at key %s", splitKey)
		}
	}
	// First verify this condition so that it will not return
	// roachpb.NewRangeKeyMismatchError if splitKey equals to desc.EndKey,
	// otherwise it will cause infinite retry loop.
	if desc.StartKey.Equal(splitKey) || desc.EndKey.Equal(splitKey) {
		return reply, roachpb.NewErrorf("range is already split at key %s", splitKey)
	}
	log.Event(ctx, "found split key")
	// Create right hand side range descriptor with the newly-allocated Range ID.
	rightDesc, err := r.store.NewRangeDescriptor(splitKey, desc.EndKey, desc.Replicas)
	if err != nil {
		return reply, roachpb.NewErrorf("unable to allocate right hand side range descriptor: %s", err)
	}
	// Init updated version of existing range descriptor.
	leftDesc := *desc
	leftDesc.EndKey = splitKey
	log.Infof(ctx, "initiating a split of this range at key %s", splitKey)
	if err := r.store.DB().Txn(context.TODO(), func(txn *client.Txn) error {
		log.Event(ctx, "split closure begins")
		defer log.Event(ctx, "split closure ends")
		// Update existing range descriptor for left hand side of
		// split. Note that we mutate the descriptor for the left hand
		// side of the split first to locate the txn record there.
		{
			b := txn.NewBatch()
			leftDescKey := keys.RangeDescriptorKey(leftDesc.StartKey)
			if err := updateRangeDescriptor(b, leftDescKey, desc, &leftDesc); err != nil {
				return err
			}
			// Commit this batch first to ensure that the transaction record
			// is created in the right place (split trigger relies on this),
			// but also to ensure the transaction record is created _before_
			// intents for the RHS range descriptor or addressing records.
			// Keep in mind that the BeginTransaction request is injected
			// to accompany the first write request, but if part of a batch
			// which spans ranges, the dist sender does not guarantee the
			// order which parts of the split batch arrive.
			//
			// Sending the batch containing only the first write guarantees
			// the transaction record is written first, preventing cases
			// where splits are aborted early due to conflicts with meta
			// intents (see #9265).
			log.Event(ctx, "updating LHS descriptor")
			if err := txn.Run(b); err != nil {
				if _, ok := err.(*roachpb.ConditionFailedError); ok {
					// The optimistic lock (expected descriptor value) failed:
					// another operation modified the range concurrently.
					return errors.Errorf("conflict updating range descriptors")
				}
				return err
			}
		}
		// Log the split into the range event log.
		// TODO(spencer): event logging API should accept a batch
		// instead of a transaction; there's no reason this logging
		// shouldn't be done in parallel via the batch with the updated
		// range addressing.
		if err := r.store.logSplit(txn, leftDesc, *rightDesc); err != nil {
			return err
		}
		b := txn.NewBatch()
		// Create range descriptor for right hand side of the split.
		rightDescKey := keys.RangeDescriptorKey(rightDesc.StartKey)
		if err := updateRangeDescriptor(b, rightDescKey, nil, rightDesc); err != nil {
			return err
		}
		// Update range descriptor addressing record(s).
		if err := splitRangeAddressing(b, rightDesc, &leftDesc); err != nil {
			return err
		}
		// End the transaction manually, instead of letting RunTransaction
		// loop do it, in order to provide a split trigger.
		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				SplitTrigger: &roachpb.SplitTrigger{
					LeftDesc:  leftDesc,
					RightDesc: *rightDesc,
				},
			},
		})
		// Commit txn with final batch (RHS descriptor and meta).
		log.Event(ctx, "commit txn with batch containing RHS descriptor and meta records")
		if err := txn.Run(b); err != nil {
			if _, ok := err.(*roachpb.ConditionFailedError); ok {
				return errors.Errorf("conflict updating range descriptors")
			}
			return err
		}
		return nil
	}); err != nil {
		return reply, roachpb.NewErrorf("split at key %s failed: %s", splitKey, err)
	}
	return reply, nil
}
// splitTrigger is called on a successful commit of a transaction
// containing an AdminSplit operation. It copies the abort cache for
// the new range and recomputes stats for both the existing, left hand
// side (LHS) range and the right hand side (RHS) range. For
// performance it only computes the stats for the original range (the
// left hand side) and infers the RHS stats by subtracting from the
// original stats. We compute the LHS stats because the split key
// computation ensures that we do not create large LHS
// ranges. However, this optimization is only possible if the stats
// are fully accurate. If they contain estimates, stats for both the
// LHS and RHS are computed.
//
// Splits are complicated. A split is initiated when a replica receives an
// AdminSplit request. Note that this request (and other "admin" requests)
// differs from normal requests in that it doesn't go through Raft but instead
// allows the lease holder Replica to act as the orchestrator for the
// distributed transaction that performs the split. As such, this request is
// only executed on the lease holder replica and the request is redirected to
// the lease holder if the recipient is a follower.
//
// Splits do not require the lease for correctness (which is good, because we
// only check that the lease is held at the beginning of the operation, and
// have no way to ensure that it is continually held until the end). Followers
// could perform splits too, and the only downside would be that if two splits
// were attempted concurrently (or a split and a ChangeReplicas), one would
// fail. The lease is used to designate one replica for this role and avoid
// wasting time on splits that may fail.
//
// The processing of splits is divided into two phases. The first phase occurs
// in Replica.AdminSplit. In that phase, the split-point is computed, and a
// transaction is started which updates both the LHS and RHS range descriptors
// and the meta range addressing information. (If we're splitting a meta2 range
// we'll be updating the meta1 addressing, otherwise we'll be updating the
// meta2 addressing). That transaction includes a special SplitTrigger flag on
// the EndTransaction request. Like all transactions, the requests within the
// transaction are replicated via Raft, including the EndTransaction request.
//
// The second phase of split processing occurs when each replica for the range
// encounters the SplitTrigger. Processing of the SplitTrigger happens below,
// in Replica.splitTrigger. The processing of the SplitTrigger occurs in two
// stages. The first stage operates within the context of an engine.Batch and
// updates all of the on-disk state for the old and new ranges atomically. The
// second stage is invoked when the batch commits and updates the in-memory
// state, creating the new replica in memory and populating its timestamp cache
// and registering it with the store.
//
// There is lots of subtlety here. The easy scenario is that all of the
// replicas process the SplitTrigger before processing any Raft message for RHS
// (right hand side) of the newly split range. Something like:
//
// Node A Node B Node C
// ----------------------------------------------------
// range 1 | | |
// | | |
// SplitTrigger | |
// | SplitTrigger |
// | | SplitTrigger
// | | |
// ----------------------------------------------------
// split finished on A, B and C | |
// | | |
// range 2 | | |
// | ---- MsgVote --> | |
// | ---------------------- MsgVote ---> |
//
// But that ideal ordering is not guaranteed. The split is "finished" when two
// of the replicas have appended the end-txn request containing the
// SplitTrigger to their Raft log. The following scenario is possible:
//
// Node A Node B Node C
// ----------------------------------------------------
// range 1 | | |
// | | |
// SplitTrigger | |
// | SplitTrigger |
// | | |
// ----------------------------------------------------
// split finished on A and B | |
// | | |
// range 2 | | |
// | ---- MsgVote --> | |
// | --------------------- MsgVote ---> ???
// | | |
// | | SplitTrigger
//
// In this scenario, C will create range 2 upon reception of the MsgVote from
// A, though locally that span of keys is still part of range 1. This is
// possible because at the Raft level ranges are identified by integer IDs and
// it isn't until C receives a snapshot of range 2 from the leader that it
// discovers the span of keys it covers. In order to prevent C from fully
// initializing range 2 in this instance, we prohibit applying a snapshot to a
// range if the snapshot overlaps another range. See Store.canApplySnapshotLocked.
//
// But while a snapshot may not have been applied at C, an uninitialized
// Replica was created. An uninitialized Replica is one which belongs to a Raft
// group but for which the range descriptor has not been received. This Replica
// will have participated in the Raft elections. When we're creating the new
// Replica below we take control of this uninitialized Replica and stop it from
// responding to Raft messages by marking it "destroyed". Note that we use the
// Replica.mu.destroyed field for this, but we don't do everything that
// Replica.Destroy does (so we should probably rename that field in light of
// its new uses). In particular we don't touch any data on disk or leave a
// tombstone. This is especially important because leaving a tombstone would
// prevent the legitimate recreation of this replica.
//
// There is subtle synchronization here that is currently controlled by the
// Store.processRaft goroutine. In particular, the serial execution of
// Replica.handleRaftReady by Store.processRaft ensures that an uninitialized
// RHS won't be concurrently executing in Replica.handleRaftReady because we're
// currently running on that goroutine (i.e. Replica.splitTrigger is called on
// the processRaft goroutine).
//
// TODO(peter): The above synchronization needs to be fixed. Using a single
// goroutine for executing Replica.handleRaftReady is undesirable from a
// performance perspective. Likely we will have to add a mutex to Replica to
// protect handleRaftReady and to grab that mutex below when marking the
// uninitialized Replica as "destroyed". Hopefully we'll also be able to remove
// Store.processRaftMu.
//
// Note that in this more complex scenario, A (which performed the SplitTrigger
// first) will create the associated Raft group for range 2 and start
// campaigning immediately. It is possible for B to receive MsgVote requests
// before it has applied the SplitTrigger as well. Both B and C will vote for A
// (and preserve the records of that vote in their HardState). It is critically
// important for Raft correctness that we do not lose the records of these
// votes. After electing A the Raft leader for range 2, A will then attempt to
// send a snapshot to B and C and we'll fall into the situation above where a
// snapshot is received for a range before it has finished splitting from its
// sibling and is thus rejected. An interesting subtlety here: A will send a
// snapshot to B and C because when range 2 is initialized we were careful to
// synthesize its HardState, setting its Raft log index to 10. If we had instead
// used log index 0, Raft would have believed the group to be empty, but the
// RHS has something. Using a non-zero initial log index causes Raft to believe
// that there is a discarded prefix to the log and will thus send a snapshot to
// followers.
//
// A final point of clarification: when we split a range we're splitting the
// data the range contains. But we're not forking or splitting the associated
// Raft group. Instead, we're creating a new Raft group to control the RHS of
// the split. That Raft group is starting from an empty Raft log (positioned at
// log entry 10) and a snapshot of the RHS of the split range.
//
// After the split trigger returns, the on-disk state of the right-hand side
// will be suitable for instantiating the right hand side Replica, and
// a suitable trigger is returned, along with the updated stats which represent
// the LHS delta caused by the split (i.e. all writes in the current batch
// which went to the left-hand side, minus the kv pairs which moved to the
// RHS).
//
// These stats are suitable for returning up the callstack like those for
// regular commands; the corresponding delta for the RHS is part of the
// returned trigger and is handled by the Store.
func (r *Replica) splitTrigger(
	ctx context.Context,
	batch engine.Batch,
	bothDeltaMS enginepb.MVCCStats, // stats for batch so far
	split *roachpb.SplitTrigger,
	ts hlc.Timestamp,
) (
	enginepb.MVCCStats,
	*PostCommitTrigger,
	error,
) {
	// TODO(tschottdorf): should have an incoming context from the corresponding
	// EndTransaction, but the plumbing has not been done yet.
	sp := r.store.Tracer().StartSpan("split")
	defer sp.Finish()
	desc := r.Desc()
	// Sanity check: the trigger's left and right descriptors must exactly
	// cover this replica's current span.
	if !bytes.Equal(desc.StartKey, split.LeftDesc.StartKey) ||
		!bytes.Equal(desc.EndKey, split.RightDesc.EndKey) {
		return enginepb.MVCCStats{}, nil, errors.Errorf("range does not match splits: (%s-%s) + (%s-%s) != %s",
			split.LeftDesc.StartKey, split.LeftDesc.EndKey,
			split.RightDesc.StartKey, split.RightDesc.EndKey, r)
	}
	// Preserve stats for pre-split range, excluding the current batch.
	origBothMS := r.GetMVCCStats()
	// TODO(d4l3k): we should check which side of the split is smaller
	// and compute stats for it instead of having a constraint that the
	// left hand side is smaller.
	// Compute (absolute) stats for LHS range. This means that no more writes
	// to the LHS must happen below this point.
	leftMS, err := ComputeStatsForRange(&split.LeftDesc, batch, ts.WallTime)
	if err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to compute stats for LHS range after split")
	}
	log.Event(ctx, "computed stats for left hand side range")
	// Copy the last replica GC timestamp. This value is unreplicated,
	// which is why the MVCC stats are set to nil on calls to
	// MVCCPutProto.
	replicaGCTS, err := r.getLastReplicaGCTimestamp()
	if err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to fetch last replica GC timestamp")
	}
	if err := engine.MVCCPutProto(ctx, batch, nil, keys.RangeLastReplicaGCTimestampKey(split.RightDesc.RangeID), hlc.ZeroTimestamp, nil, &replicaGCTS); err != nil {
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to copy last replica GC timestamp")
	}
	// Initialize the RHS range's abort cache by copying the LHS's.
	seqCount, err := r.abortCache.CopyInto(batch, &bothDeltaMS, split.RightDesc.RangeID)
	if err != nil {
		// TODO(tschottdorf): ReplicaCorruptionError.
		return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to copy abort cache to RHS split range")
	}
	log.Eventf(ctx, "copied abort cache (%d entries)", seqCount)
	// Initialize the right-hand lease to be the same as the left-hand lease.
	// This looks like an innocuous performance improvement, but it's more than
	// that - it ensures that we properly initialize the timestamp cache, which
	// is only populated on the lease holder, from that of the original Range.
	// We found out about a regression here the hard way in #7899. Prior to
	// this block, the following could happen:
	// - a client reads key 'd', leaving an entry in the timestamp cache on the
	//   lease holder of [a,e) at the time, node one.
	// - the range [a,e) splits at key 'c'. [c,e) starts out without a lease.
	// - the replicas of [a,e) on nodes one and two both process the split
	//   trigger and thus copy their timestamp caches to the new right-hand side
	//   Replica. However, only node one's timestamp cache contains information
	//   about the read of key 'd' in the first place.
	// - node two becomes the lease holder for [c,e). Its timestamp cache does
	//   not know about the read at 'd' which happened at the beginning.
	// - node two can illegally propose a write to 'd' at a lower timestamp.
	{
		leftLease, err := loadLease(ctx, r.store.Engine(), r.RangeID)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to load lease")
		}
		// Find our pre-split lease holder's descriptor within the post-split
		// RHS; the lease must stay with the same store.
		replica, found := split.RightDesc.GetReplicaDescriptor(leftLease.Replica.StoreID)
		if !found {
			return enginepb.MVCCStats{}, nil, errors.Errorf(
				"pre-split lease holder %+v not found in post-split descriptor %+v",
				leftLease.Replica, split.RightDesc,
			)
		}
		rightLease := leftLease
		rightLease.Replica = replica
		if err := setLease(
			ctx, batch, &bothDeltaMS, split.RightDesc.RangeID, rightLease,
		); err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to seed right-hand side lease")
		}
	}
	// Compute (absolute) stats for RHS range.
	var rightMS enginepb.MVCCStats
	if origBothMS.ContainsEstimates || bothDeltaMS.ContainsEstimates {
		// Because either the original stats or the delta stats contain
		// estimate values, we cannot perform arithmetic to determine the
		// new range's stats. Instead, we must recompute by iterating
		// over the keys and counting.
		rightMS, err = ComputeStatsForRange(&split.RightDesc, batch, ts.WallTime)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to compute stats for RHS range after split")
		}
	} else {
		// Because neither the original stats or the delta stats contain
		// estimate values, we can safely perform arithmetic to determine the
		// new range's stats. The calculation looks like:
		//   rhs_ms = orig_both_ms - orig_left_ms + right_delta_ms
		//          = orig_both_ms - left_ms + left_delta_ms + right_delta_ms
		//          = orig_both_ms - left_ms + delta_ms
		// where the following extra helper variables are used:
		// - orig_left_ms: the left-hand side key range, before the split
		// - (left|right)_delta_ms: the contributions to bothDeltaMS in this batch,
		//   itemized by the side of the split.
		//
		// Note that the result of that computation never has ContainsEstimates
		// set due to none of the inputs having it.
		// Start with the full stats before the split.
		rightMS = origBothMS
		// Remove stats from the left side of the split, at the same time adding
		// the batch contributions for the right-hand side.
		rightMS.Subtract(leftMS)
		rightMS.Add(bothDeltaMS)
	}
	// Now that we've computed the stats for the RHS so far, we persist them.
	// This looks a bit more complicated than it really is: updating the stats
	// also changes the stats, and we write not only the stats but a complete
	// initial state. Additionally, since bothDeltaMS is tracking writes to
	// both sides, we need to update it as well.
	{
		preRightMS := rightMS // for bothDeltaMS
		// Account for MVCCStats' own contribution to the RHS range's statistics.
		if err := engine.AccountForSelf(&rightMS, split.RightDesc.RangeID); err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to account for enginepb.MVCCStats's own stats impact")
		}
		// Writing the initial state is subtle since this also seeds the Raft
		// group. We are writing to the right hand side's Raft group state in this
		// batch so we need to synchronize with anything else that could be
		// touching that replica's Raft state. Specifically, we want to prohibit an
		// uninitialized Replica from receiving a message for the right hand side
		// range and performing raft processing. This is achieved by serializing
		// execution of uninitialized Replicas in Store.processRaft and ensuring
		// that no uninitialized Replica is being processed while an initialized
		// one (like the one currently being split) is being processed.
		//
		// Note also that it is crucial that writeInitialState *absorbs* an
		// existing HardState (which might contain a cast vote). We load the
		// existing HardState from the underlying engine instead of the batch
		// because batch reads are from a snapshot taken at the point in time when
		// the first read was performed on the batch. This last requirement is not
		// currently needed due to the uninitialized Replica synchronization
		// mentioned above, but future work will relax that synchronization, moving
		// it from before the point that batch was created to this method. We want
		// to see any writes to the hard state that were performed between the
		// creation of the batch and that synchronization point. The only drawback
		// to not reading from the batch is that we won't see any writes to the
		// right hand side's hard state that were previously made in the batch
		// (which should be impossible).
		oldHS, err := loadHardState(ctx, r.store.Engine(), split.RightDesc.RangeID)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to load hard state")
		}
		rightMS, err = writeInitialState(ctx, batch, rightMS, split.RightDesc, oldHS)
		if err != nil {
			return enginepb.MVCCStats{}, nil, errors.Wrap(err, "unable to write initial state")
		}
		// Fold the stats impact of writing the initial state into the
		// batch-wide delta.
		bothDeltaMS.Subtract(preRightMS)
		bothDeltaMS.Add(rightMS)
	}
	// Compute how much data the left-hand side has shed by splitting.
	// We've already recomputed that in absolute terms, so all we need to do is
	// to turn it into a delta so the upstream machinery can digest it.
	leftDeltaMS := leftMS // start with new left-hand side absolute stats
	leftDeltaMS.Subtract(r.GetMVCCStats())  // subtract pre-split absolute stats
	leftDeltaMS.ContainsEstimates = false   // if there were any, recomputation removed them
	// Perform a similar computation for the right hand side. The difference
	// is that there isn't yet a Replica which could apply these stats, so
	// they will go into the trigger to make the Store (which keeps running
	// counters) aware.
	rightDeltaMS := bothDeltaMS
	rightDeltaMS.Subtract(leftDeltaMS)
	trigger := &PostCommitTrigger{
		// This makes sure that no reads are happening in parallel; see #3148.
		noConcurrentReads: true,
		split: &postCommitSplit{
			SplitTrigger: *split,
			RightDeltaMS: rightDeltaMS,
		},
	}
	return leftDeltaMS, trigger, nil
}
// AdminMerge extends this range to subsume the range that comes next
// in the key space. The merge is performed inside of a distributed
// transaction which writes the left hand side range descriptor (the
// subsuming range) and deletes the range descriptor for the right
// hand side range (the subsumed range). It also updates the range
// addressing metadata. The handover of responsibility for the
// reassigned key range is carried out seamlessly through a merge
// trigger carried out as part of the commit of that transaction. A
// merge requires that the two ranges are collocated on the same set
// of replicas.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. See the
// comment of "AdminSplit" for more information on this pattern.
func (r *Replica) AdminMerge(
	ctx context.Context, args roachpb.AdminMergeRequest, origLeftDesc *roachpb.RangeDescriptor,
) (roachpb.AdminMergeResponse, *roachpb.Error) {
	var reply roachpb.AdminMergeResponse
	if origLeftDesc.EndKey.Equal(roachpb.RKeyMax) {
		// Merging the final range doesn't make sense.
		return reply, roachpb.NewErrorf("cannot merge final range")
	}
	updatedLeftDesc := *origLeftDesc
	// Lookup right hand side range (subsumed). This really belongs
	// inside the transaction for consistency, but it is important (for
	// transaction record placement) that the first action inside the
	// transaction is the conditional put to change the left hand side's
	// descriptor end key. We look up the descriptor here only to get
	// the new end key and then repeat the lookup inside the
	// transaction.
	{
		rightRng := r.store.LookupReplica(origLeftDesc.EndKey, nil)
		if rightRng == nil {
			return reply, roachpb.NewErrorf("ranges not collocated")
		}
		updatedLeftDesc.EndKey = rightRng.Desc().EndKey
		log.Infof(ctx, "initiating a merge of %s into this range", rightRng)
	}
	if err := r.store.DB().Txn(context.TODO(), func(txn *client.Txn) error {
		log.Event(ctx, "merge closure begins")
		// Update the range descriptor for the receiving range.
		{
			b := txn.NewBatch()
			leftDescKey := keys.RangeDescriptorKey(updatedLeftDesc.StartKey)
			// Conditional put doubles as the optimistic lock: it fails if the
			// descriptor changed since origLeftDesc was read.
			if err := updateRangeDescriptor(b, leftDescKey, origLeftDesc, &updatedLeftDesc); err != nil {
				return err
			}
			// Commit this batch on its own to ensure that the transaction record
			// is created in the right place (our triggers rely on this).
			log.Event(ctx, "updating left descriptor")
			if err := txn.Run(b); err != nil {
				return err
			}
		}
		// Do a consistent read of the right hand side's range descriptor.
		rightDescKey := keys.RangeDescriptorKey(origLeftDesc.EndKey)
		var rightDesc roachpb.RangeDescriptor
		if err := txn.GetProto(rightDescKey, &rightDesc); err != nil {
			return err
		}
		// Verify that the two ranges are mergeable.
		if !bytes.Equal(origLeftDesc.EndKey, rightDesc.StartKey) {
			// Should never happen, but just in case.
			return errors.Errorf("ranges are not adjacent; %s != %s", origLeftDesc.EndKey, rightDesc.StartKey)
		}
		if !bytes.Equal(rightDesc.EndKey, updatedLeftDesc.EndKey) {
			// This merge raced with a split of the right-hand range.
			// TODO(bdarnell): needs a test.
			return errors.Errorf("range changed during merge; %s != %s", rightDesc.EndKey, updatedLeftDesc.EndKey)
		}
		if !replicaSetsEqual(origLeftDesc.Replicas, rightDesc.Replicas) {
			// A merge requires both ranges to live on the same set of stores.
			return errors.Errorf("ranges not collocated")
		}
		b := txn.NewBatch()
		// Remove the range descriptor for the deleted range.
		b.Del(rightDescKey)
		if err := mergeRangeAddressing(b, origLeftDesc, &updatedLeftDesc); err != nil {
			return err
		}
		// End the transaction manually instead of letting RunTransaction
		// loop do it, in order to provide a merge trigger.
		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				MergeTrigger: &roachpb.MergeTrigger{
					LeftDesc:  updatedLeftDesc,
					RightDesc: rightDesc,
				},
			},
		})
		log.Event(ctx, "attempting commit")
		return txn.Run(b)
	}); err != nil {
		return reply, roachpb.NewErrorf("merge of range into %d failed: %s", origLeftDesc.RangeID, err)
	}
	return reply, nil
}
// mergeTrigger is called on a successful commit of an AdminMerge
// transaction. It recomputes stats for the receiving range.
//
// TODO(tschottdorf): give mergeTrigger more idiomatic stats computation as
// in splitTrigger.
func (r *Replica) mergeTrigger(
	ctx context.Context,
	batch engine.Batch,
	ms *enginepb.MVCCStats,
	merge *roachpb.MergeTrigger,
	ts hlc.Timestamp,
) (*PostCommitTrigger, error) {
	// Sanity checks: this replica must be the LHS of the merge and the merge
	// must actually extend our end key.
	desc := r.Desc()
	if !bytes.Equal(desc.StartKey, merge.LeftDesc.StartKey) {
		return nil, errors.Errorf("LHS range start keys do not match: %s != %s",
			desc.StartKey, merge.LeftDesc.StartKey)
	}
	if !desc.EndKey.Less(merge.LeftDesc.EndKey) {
		return nil, errors.Errorf("original LHS end key is not less than the post merge end key: %s >= %s",
			desc.EndKey, merge.LeftDesc.EndKey)
	}
	rightRangeID := merge.RightDesc.RangeID
	if rightRangeID <= 0 {
		return nil, errors.Errorf("RHS range ID must be provided: %d", rightRangeID)
	}
	{
		// TODO(peter,tschottdorf): This is necessary but likely not
		// sufficient. The right hand side of the merge can still race on
		// reads. See #8630.
		//
		// TODO(peter): We need to hold the subsumed range's raftMu until the
		// Store.MergeRange is invoked. Currently we release it when this method
		// returns which isn't correct.
		subsumedRng, err := r.store.GetReplica(rightRangeID)
		if err != nil {
			panic(err)
		}
		// raftLock is acquired here; the matching unlock is deferred to the
		// end of mergeTrigger (defers are function-scoped, not block-scoped).
		defer subsumedRng.raftUnlock(subsumedRng.raftLock())
	}
	// Compute stats for premerged range, including current transaction.
	var mergedMS = r.GetMVCCStats()
	mergedMS.Add(*ms)
	// We will recompute the stats below and update the state, so when the
	// batch commits it has already taken ms into account.
	*ms = enginepb.MVCCStats{}
	// Add in stats for right hand side of merge, excluding system-local
	// stats, which will need to be recomputed.
	var rightMS enginepb.MVCCStats
	if err := engine.MVCCGetRangeStats(ctx, batch, rightRangeID, &rightMS); err != nil {
		return nil, err
	}
	rightMS.SysBytes, rightMS.SysCount = 0, 0
	mergedMS.Add(rightMS)
	// Copy the RHS range's abort cache to the new LHS one.
	// NOTE(review): the error message below says "split range" inside merge
	// code — it reads like a copy/paste from the split path; consider
	// rewording in a follow-up.
	_, err := r.abortCache.CopyFrom(ctx, batch, &mergedMS, rightRangeID)
	if err != nil {
		return nil, errors.Errorf("unable to copy abort cache to new split range: %s", err)
	}
	// Remove the RHS range's metadata. Note that we don't need to
	// keep track of stats here, because we already set the right range's
	// system-local stats contribution to 0.
	localRangeIDKeyPrefix := keys.MakeRangeIDPrefix(rightRangeID)
	if _, _, _, err := engine.MVCCDeleteRange(ctx, batch, nil, localRangeIDKeyPrefix, localRangeIDKeyPrefix.PrefixEnd(), math.MaxInt64, hlc.ZeroTimestamp, nil, false); err != nil {
		return nil, errors.Errorf("cannot remove range metadata %s", err)
	}
	// Add in the stats for the RHS range's range keys.
	iter := batch.NewIterator(false)
	defer iter.Close()
	localRangeKeyStart := engine.MakeMVCCMetadataKey(keys.MakeRangeKeyPrefix(merge.RightDesc.StartKey))
	localRangeKeyEnd := engine.MakeMVCCMetadataKey(keys.MakeRangeKeyPrefix(merge.RightDesc.EndKey))
	msRange, err := iter.ComputeStats(localRangeKeyStart, localRangeKeyEnd, ts.WallTime)
	if err != nil {
		return nil, errors.Errorf("unable to compute RHS range's local stats: %s", err)
	}
	mergedMS.Add(msRange)
	// Set stats for updated range.
	if err := setMVCCStats(ctx, batch, r.RangeID, mergedMS); err != nil {
		return nil, errors.Errorf("unable to write MVCC stats: %s", err)
	}
	// Clear the timestamp cache. In case both the LHS and RHS replicas
	// held their respective range leases, we could merge the timestamp
	// caches for efficiency. But it's unlikely and not worth the extra
	// logic and potential for error.
	// NOTE(review): the next assignment to *ms is overwritten two lines
	// below without being read — it looks like a dead store; verify that
	// GetMVCCStats has no side effects before removing it.
	*ms = r.GetMVCCStats()
	mergedMS.Subtract(r.GetMVCCStats())
	*ms = mergedMS
	r.mu.Lock()
	r.mu.tsCache.Clear(r.store.Clock())
	r.mu.Unlock()
	trigger := &PostCommitTrigger{
		// This makes sure that no reads are happening in parallel; see #3148.
		noConcurrentReads: true,
		merge: &postCommitMerge{
			MergeTrigger: *merge,
		},
	}
	return trigger, nil
}
// changeReplicasTrigger builds the post-commit work for an applied
// ChangeReplicasTrigger: GC scheduling when this replica is being removed,
// split-queue re-check otherwise, first-range gossip, and publication of the
// updated range descriptor.
func (r *Replica) changeReplicasTrigger(
	ctx context.Context,
	batch engine.Batch,
	change *roachpb.ChangeReplicasTrigger,
) *PostCommitTrigger {
	var trigger *PostCommitTrigger
	removingSelf := change.ChangeType == roachpb.REMOVE_REPLICA &&
		r.store.StoreID() == change.Replica.StoreID
	if removingSelf {
		// Schedule our own garbage collection. Running this as late as
		// possible maximizes the chance that the other nodes have finished
		// this command as well (processing the removal looks up the range at
		// the lease holder; being too early turns it into a no-op).
		trigger = updateTrigger(trigger, &PostCommitTrigger{
			addToReplicaGCQueue: true,
		})
	} else {
		// After a successful replica addition or removal, re-check whether
		// the range needs splitting. Splits normally take precedence via the
		// split/replicate queue configuration, but a split racing with this
		// replica change can fail and would otherwise wait for the next
		// scanner cycle; re-queuing here removes that latency.
		trigger = updateTrigger(trigger, &PostCommitTrigger{
			maybeAddToSplitQueue: true,
		})
	}
	// Gossip the first range whenever its descriptor changes. Lease-holder
	// changes gossip it too, but this change may not have moved the lease;
	// even a removed replica attempts the gossip in case it was — and still
	// is — the lease holder.
	if r.IsFirstRange() {
		trigger = updateTrigger(trigger, &PostCommitTrigger{
			gossipFirstRange: true,
		})
	}
	// Publish the post-change descriptor through the trigger.
	updated := *r.Desc()
	updated.Replicas = change.UpdatedReplicas
	updated.NextReplicaID = change.NextReplicaID
	trigger = updateTrigger(trigger, &PostCommitTrigger{
		desc: &updated,
	})
	return trigger
}
// ChangeReplicas adds or removes a replica of a range. The change is performed
// in a distributed transaction and takes effect when that transaction is committed.
// When removing a replica, only the NodeID and StoreID fields of the Replica are used.
//
// The supplied RangeDescriptor is used as a form of optimistic lock. See the
// comment of "AdminSplit" for more information on this pattern.
//
// Changing the replicas for a range is complicated. A change is initiated by
// the "replicate" queue when it encounters a range which has too many
// replicas, too few replicas or requires rebalancing. Addition and removal of
// a replica is divided into four phases. The first phase, which occurs in
// Replica.ChangeReplicas, is performed via a distributed transaction which
// updates the range descriptor and the meta range addressing information. This
// transaction includes a special ChangeReplicasTrigger on the EndTransaction
// request. A ConditionalPut of the RangeDescriptor implements the optimistic
// lock on the RangeDescriptor mentioned previously. Like all transactions, the
// requests within the transaction are replicated via Raft, including the
// EndTransaction request.
//
// The second phase of processing occurs when the batch containing the
// EndTransaction is proposed to raft. This proposing occurs on whatever
// replica received the batch, usually, but not always the range lease
// holder. defaultProposeRaftCommandLocked notices that the EndTransaction
// contains a ChangeReplicasTrigger and proposes a ConfChange to Raft (via
// raft.RawNode.ProposeConfChange).
//
// The ConfChange is propagated to all of the replicas similar to a normal Raft
// command, though additional processing is done inside of Raft. A Replica
// encounters the ConfChange in Replica.handleRaftReady and executes it using
// raft.RawNode.ApplyConfChange. If a new replica was added the Raft leader
// will start sending it heartbeat messages and attempting to bring it up to
// date. If a replica was removed, it is at this point that the Raft leader
// will stop communicating with it.
//
// The fourth phase of change replicas occurs when each replica for the range
// encounters the ChangeReplicasTrigger when applying the EndTransaction
// request. The replica will update its local range descriptor so as to contain
// the new set of replicas. If the replica is the one that is being removed, it
// will queue itself for removal with replicaGCQueue.
//
// Note that a removed replica may not see the EndTransaction containing the
// ChangeReplicasTrigger. The ConfChange operation will be applied as soon as a
// quorum of nodes have committed it. If the removed replica is down or the
// message is dropped for some reason the removed replica will not be
// notified. The replica GC queue will eventually discover and cleanup this
// state.
//
// When a new replica is added, it will have to catch up to the state of the
// other replicas. The Raft leader automatically handles this by either sending
// the new replica Raft log entries to apply, or by generating and sending a
// snapshot. See Replica.Snapshot and Replica.Entries.
//
// Note that Replica.ChangeReplicas returns when the distributed transaction
// has been committed to a quorum of replicas in the range. The actual
// replication of data occurs asynchronously via a snapshot or application of
// Raft log entries. This is important for the replicate queue to be aware
// of. A node can process hundreds or thousands of ChangeReplicas operations
// per second even though the actual replication of data proceeds at a much
// slower base. In order to avoid having this background replication overwhelm
// the system, replication is throttled via a reservation system. When
// allocating a new replica for a range, the replicate queue reserves space for
// that replica on the target store via a ReservationRequest. (See
// StorePool.reserve). The reservation is fulfilled when the snapshot is
// applied.
//
// TODO(peter): There is a rare scenario in which a replica can be brought up
// to date via Raft log replay. In this scenario, the reservation will be left
// dangling until it expires. See #7849.
//
// TODO(peter): Describe preemptive snapshots. Preemptive snapshots are needed
// for the replicate queue to function properly. Currently the replicate queue
// will fire off as many replica additions as possible until it starts getting
// reservations denied at which point it will ignore the replica until the next
// scanner cycle.
func (r *Replica) ChangeReplicas(
	ctx context.Context,
	changeType roachpb.ReplicaChangeType,
	repDesc roachpb.ReplicaDescriptor,
	desc *roachpb.RangeDescriptor,
) error {
	repDescIdx := -1  // tracks NodeID && StoreID
	nodeUsed := false // tracks NodeID only
	for i, existingRep := range desc.Replicas {
		nodeUsedByExistingRep := existingRep.NodeID == repDesc.NodeID
		nodeUsed = nodeUsed || nodeUsedByExistingRep
		if nodeUsedByExistingRep && existingRep.StoreID == repDesc.StoreID {
			// Exact node+store match: adopt the existing ReplicaID so a
			// removal below targets the right replica.
			repDescIdx = i
			repDesc.ReplicaID = existingRep.ReplicaID
			break
		}
	}
	rangeID := desc.RangeID
	// Copy the descriptor and clone its Replicas slice so mutations below
	// don't alias the caller's descriptor.
	updatedDesc := *desc
	updatedDesc.Replicas = append([]roachpb.ReplicaDescriptor(nil), desc.Replicas...)
	switch changeType {
	case roachpb.ADD_REPLICA:
		// If the replica exists on the remote node, no matter in which store,
		// abort the replica add.
		if nodeUsed {
			return errors.Errorf("%s: unable to add replica %v which is already present", r, repDesc)
		}
		log.Event(ctx, "requesting reservation")
		// Before we try to add a new replica, we first need to secure a
		// reservation for the replica on the receiving store.
		if err := r.store.allocator.storePool.reserve(
			r.store.Ident,
			repDesc.StoreID,
			rangeID,
			r.GetMVCCStats().Total(),
		); err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}
		log.Event(ctx, "reservation granted")
		// Prohibit premature raft log truncation. We set the pending index to 1
		// here until we determine what it is below. This removes a small window of
		// opportunity for the raft log to get truncated after the snapshot is
		// generated.
		if err := r.setPendingSnapshotIndex(1); err != nil {
			return err
		}
		defer r.clearPendingSnapshotIndex()
		// Send a pre-emptive snapshot. Note that the replica to which this
		// snapshot is addressed has not yet had its replica ID initialized; this
		// is intentional, and serves to avoid the following race with the replica
		// GC queue:
		//
		// - snapshot received, a replica is lazily created with the "real" replica ID
		// - the replica is eligible for GC because it is not yet a member of the range
		// - GC queue runs, creating a raft tombstone with the replica's ID
		// - the replica is added to the range
		// - lazy creation of the replica fails due to the raft tombstone
		//
		// Instead, the replica GC queue will create a tombstone with replica ID
		// zero, which is never legitimately used, and thus never interferes with
		// raft operations. Racing with the replica GC queue can still partially
		// negate the benefits of pre-emptive snapshots, but that is a recoverable
		// degradation, not a catastrophic failure.
		snap, err := r.GetSnapshot(ctx)
		log.Event(ctx, "generated snapshot")
		if err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}
		fromRepDesc, err := r.GetReplicaDescriptor()
		if err != nil {
			return errors.Wrapf(err, "%s: change replicas failed", r)
		}
		// A new replica must not arrive with a pre-assigned ReplicaID; the ID
		// is allocated from NextReplicaID below.
		if repDesc.ReplicaID != 0 {
			return errors.Errorf(
				"must not specify a ReplicaID (%d) for new Replica",
				repDesc.ReplicaID,
			)
		}
		if err := r.setPendingSnapshotIndex(snap.Metadata.Index); err != nil {
			return err
		}
		req := &RaftMessageRequest{
			RangeID:     r.RangeID,
			FromReplica: fromRepDesc,
			ToReplica:   repDesc,
			Message: raftpb.Message{
				Type:     raftpb.MsgSnap,
				To:       0, // special cased ReplicaID for preemptive snapshots
				From:     uint64(fromRepDesc.ReplicaID),
				Term:     snap.Metadata.Term,
				Snapshot: snap,
			},
		}
		if err := r.store.ctx.Transport.SendSync(ctx, req); err != nil {
			return errors.Wrapf(err, "%s: change replicas aborted due to failed preemptive snapshot", r)
		}
		// Allocate the new replica's ID and append it to the updated set.
		repDesc.ReplicaID = updatedDesc.NextReplicaID
		updatedDesc.NextReplicaID++
		updatedDesc.Replicas = append(updatedDesc.Replicas, repDesc)
	case roachpb.REMOVE_REPLICA:
		// If that exact node-store combination does not have the replica,
		// abort the removal.
		if repDescIdx == -1 {
			return errors.Errorf("%s: unable to remove replica %v which is not present", r, repDesc)
		}
		// Swap-remove: order of the Replicas slice is not significant.
		updatedDesc.Replicas[repDescIdx] = updatedDesc.Replicas[len(updatedDesc.Replicas)-1]
		updatedDesc.Replicas = updatedDesc.Replicas[:len(updatedDesc.Replicas)-1]
	}
	descKey := keys.RangeDescriptorKey(desc.StartKey)
	if err := r.store.DB().Txn(ctx, func(txn *client.Txn) error {
		log.Event(ctx, "attempting txn")
		txn.Proto.Name = replicaChangeTxnName
		// TODO(tschottdorf): oldDesc is used for sanity checks related to #7224.
		// Remove when that has been solved. The failure mode is likely based on
		// prior divergence of the Replica (in which case the check below does not
		// fire because everything reads from the local, diverged, set of data),
		// so we don't expect to see this fail in practice ever.
		oldDesc := new(roachpb.RangeDescriptor)
		if err := txn.GetProto(descKey, oldDesc); err != nil {
			return err
		}
		log.Infof(ctx, "change replicas: read existing descriptor %+v", oldDesc)
		{
			b := txn.NewBatch()
			// Important: the range descriptor must be the first thing touched in the transaction
			// so the transaction record is co-located with the range being modified.
			if err := updateRangeDescriptor(b, descKey, desc, &updatedDesc); err != nil {
				return err
			}
			// Run transaction up to this point to create txn record early (see #9265).
			if err := txn.Run(b); err != nil {
				return err
			}
		}
		// Log replica change into range event log.
		if err := r.store.logChange(txn, changeType, repDesc, updatedDesc); err != nil {
			return err
		}
		// End the transaction manually instead of letting RunTransaction
		// loop do it, in order to provide a commit trigger.
		b := txn.NewBatch()
		// Update range descriptor addressing record(s).
		if err := updateRangeAddressing(b, &updatedDesc); err != nil {
			return err
		}
		b.AddRawRequest(&roachpb.EndTransactionRequest{
			Commit: true,
			InternalCommitTrigger: &roachpb.InternalCommitTrigger{
				ChangeReplicasTrigger: &roachpb.ChangeReplicasTrigger{
					ChangeType:      changeType,
					Replica:         repDesc,
					UpdatedReplicas: updatedDesc.Replicas,
					NextReplicaID:   updatedDesc.NextReplicaID,
				},
			},
		})
		if err := txn.Run(b); err != nil {
			log.Event(ctx, err.Error())
			return err
		}
		if oldDesc.RangeID != 0 && !reflect.DeepEqual(oldDesc, desc) {
			// We read the previous value, it wasn't what we supposedly used in
			// the CPut, but we still overwrote in the CPut above.
			panic(fmt.Sprintf("committed replica change, but oldDesc != assumedOldDesc:\n%+v\n%+v\nnew desc:\n%+v",
				oldDesc, desc, updatedDesc))
		}
		return nil
	}); err != nil {
		log.Event(ctx, err.Error())
		return errors.Wrapf(err, "change replicas of range %d failed", rangeID)
	}
	log.Event(ctx, "txn complete")
	return nil
}
// replicaSetsEqual is used in AdminMerge to ensure that the two ranges are
// collocated on the same set of replicas.
// replicaSetsEqual reports whether a and b describe the same multiset of
// stores, ignoring order.
func replicaSetsEqual(a, b []roachpb.ReplicaDescriptor) bool {
	if len(a) != len(b) {
		return false
	}
	// Count store occurrences in a, then cancel them out with b; every
	// counter must return to zero for the sets to match.
	counts := make(map[roachpb.StoreID]int, len(a))
	for _, rep := range a {
		counts[rep.StoreID]++
	}
	for _, rep := range b {
		counts[rep.StoreID]--
	}
	for _, c := range counts {
		if c != 0 {
			return false
		}
	}
	return true
}
// updateRangeDescriptor adds a ConditionalPut on the range descriptor. The
// conditional put verifies that changes to the range descriptor are made in a
// well-defined order, preventing a scenario where a wayward replica which is
// no longer part of the original Raft group comes back online to form a
// splinter group with a node which was also a former replica, and hijacks the
// range descriptor. This is a last line of defense; other mechanisms should
// prevent rogue replicas from getting this far (see #768).
//
// Note that in addition to using this method to update the on-disk range
// descriptor, a CommitTrigger must be used to update the in-memory
// descriptor; it will not automatically be copied from newDesc.
// TODO(bdarnell): store the entire RangeDescriptor in the CommitTrigger
// and load it automatically instead of reconstructing individual
// changes.
// updateRangeDescriptor appends a ConditionalPut of newDesc at descKey to b,
// expecting the current value to equal oldDesc (or to be absent when oldDesc
// is nil). The CPut acts as an optimistic lock on the descriptor.
func updateRangeDescriptor(
	b *client.Batch,
	descKey roachpb.Key,
	oldDesc,
	newDesc *roachpb.RangeDescriptor,
) error {
	if err := newDesc.Validate(); err != nil {
		return err
	}
	// Subtle: []byte(nil) != interface{}(nil). A []byte(nil) means "expect an
	// empty value" while interface{}(nil) means "expect no value at all", so
	// the expected value is only populated when oldDesc is non-nil.
	var expValue interface{}
	if oldDesc != nil {
		encodedOld, err := protoutil.Marshal(oldDesc)
		if err != nil {
			return err
		}
		expValue = encodedOld
	}
	encodedNew, err := protoutil.Marshal(newDesc)
	if err != nil {
		return err
	}
	b.CPut(descKey, encodedNew, expValue)
	return nil
}
// LeaseInfo returns information about the lease holder for the range.
// LeaseInfo returns information about the lease holder for the range. A
// pending lease request, if any, is reported speculatively in preference to
// the current lease.
func (r *Replica) LeaseInfo(
	ctx context.Context, args roachpb.LeaseInfoRequest,
) (roachpb.LeaseInfoResponse, error) {
	var reply roachpb.LeaseInfoResponse
	lease, nextLease := r.getLease()
	switch {
	case nextLease != nil:
		// A lease request is in progress; return that future lease.
		reply.Lease = nextLease
	case lease != nil:
		reply.Lease = lease
	}
	return reply, nil
}
|
package parser
import (
"fmt"
"io"
"net"
"net/mail"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/scanner"
"unicode"
"github.com/yuuki/gokc/log"
)
const (
	// EOF (0) is returned by Lex once the pre-computed token stream is
	// exhausted; yacc-generated parsers treat token value 0 as end of input.
	EOF = 0
)
// SYMBOL_TABLES maps keepalived configuration keywords (plus the two brace
// delimiters) to their parser token values. tokenize consults it last, so a
// keyword match overrides any earlier classification (STRING, NUMBER, ...).
var SYMBOL_TABLES = map[string]int{
	"{": LB,
	"}": RB,
	"global_defs": GLOBALDEFS,
	"notification_email": NOTIFICATION_EMAIL,
	"notification_email_from": NOTIFICATION_EMAIL_FROM,
	"smtp_server": SMTP_SERVER,
	"smtp_connect_timeout": SMTP_CONNECT_TIMEOUT,
	"router_id": ROUTER_ID,
	"lvs_id": LVS_ID,
	"vrrp_mcast_group4": VRRP_MCAST_GROUP4,
	"vrrp_mcast_group6": VRRP_MCAST_GROUP6,
	"vrrp_garp_master_delay": VRRP_GARP_MASTER_DELAY,
	"vrrp_garp_master_repeat": VRRP_GARP_MASTER_REPEAT,
	"vrrp_garp_master_refresh": VRRP_GARP_MASTER_REFRESH,
	"vrrp_garp_master_refresh_repeat": VRRP_GARP_MASTER_REFRESH_REPEAT,
	"vrrp_version": VRRP_VERSION,
	"static_ipaddress": STATIC_IPADDRESS,
	"static_routes": STATIC_ROUTES,
	"static_rules": STATIC_RULES,
	"vrrp_sync_group": VRRP_SYNC_GROUP,
	"group": GROUP,
	"vrrp_instance": VRRP_INSTANCE,
	"use_vmac": USE_VMAC,
	"version": VERSION,
	"vmac_xmit_base": VMAC_XMIT_BASE,
	"native_ipv6": NATIVE_IPV6,
	"interface": INTERFACE,
	"mcast_src_ip": MCAST_SRC_IP,
	"unicast_src_ip": UNICAST_SRC_IP,
	"unicast_peer": UNICAST_PEER,
	"lvs_sync_daemon_interface": LVS_SYNC_DAEMON_INTERFACE,
	"virtual_router_id": VIRTUAL_ROUTER_ID,
	"nopreempt": NOPREEMPT,
	"priority": PRIORITY,
	"advert_int": ADVERT_INT,
	"virtual_ipaddress": VIRTUAL_IPADDRESS,
	"virtual_ipaddress_excluded": VIRTUAL_IPADDRESS_EXCLUDED,
	"virtual_routes": VIRTUAL_ROUTES,
	"state": STATE,
	"MASTER": MASTER,
	"BACKUP": BACKUP,
	"garp_master_delay": GARP_MASTER_DELAY,
	"smtp_alert": SMTP_ALERT,
	"authentication": AUTHENTICATION,
	"auth_type": AUTH_TYPE,
	"auth_pass": AUTH_PASS,
	"PASS": PASS,
	"AH": AH,
	"label": LABEL,
	"dev": DEV,
	"scope": SCOPE,
	"site": SITE,
	"link": LINK,
	"host": HOST,
	"nowhere": NOWHERE,
	"global": GLOBAL,
	"brd": BRD,
	"src": SRC,
	"from": FROM,
	"to": TO,
	"via": VIA,
	"gw": GW,
	"or": OR,
	"table": TABLE,
	"metric": METRIC,
	"blackhole": BLACKHOLE,
	"track_interface": TRACK_INTERFACE,
	"track_script": TRACK_SCRIPT,
	"dont_track_primary": DONT_TRACK_PRIMARY,
	"notify_master": NOTIFY_MASTER,
	"notify_backup": NOTIFY_BACKUP,
	"notify_fault": NOTIFY_FAULT,
	"notify_stop": NOTIFY_STOP,
	"notify": NOTIFY,
	"vrrp_script": VRRP_SCRIPT,
	"script": SCRIPT,
	"interval": INTERVAL,
	"timeout": TIMEOUT,
	"fall": FALL,
	"rise": RISE,
	"virtual_server_group": VIRTUAL_SERVER_GROUP,
	"fwmark": FWMARK,
	"virtual_server": VIRTUAL_SERVER,
	"delay_loop": DELAY_LOOP,
	"lb_algo": LB_ALGO,
	"lb_kind": LB_KIND,
	"lvs_sched": LVS_SCHED,
	"lvs_method": LVS_METHOD,
	"rr": RR,
	"wrr": WRR,
	"lc": LC,
	"wlc": WLC,
	"fo": FO,
	"ovf": OVF,
	"lblc": LBLC,
	"lblcr": LBLCR,
	"sh": SH,
	"dh": DH,
	"sed": SED,
	"nq": NQ,
	"NAT": NAT,
	"DR": DR,
	"TUN": TUN,
	"persistence_timeout": PERSISTENCE_TIMEOUT,
	"protocol": PROTOCOL,
	"TCP": TCP,
	"UDP": UDP,
	"sorry_server": SORRY_SERVER,
	"real_server": REAL_SERVER,
	"weight": WEIGHT,
	"inhibit_on_failure": INHIBIT_ON_FAILURE,
	"TCP_CHECK": TCP_CHECK,
	"HTTP_GET": HTTP_GET,
	"SSL_GET": SSL_GET,
	"SMTP_CHECK": SMTP_CHECK,
	"DNS_CHECK": DNS_CHECK,
	"MISC_CHECK": MISC_CHECK,
	"url": URL,
	"path": PATH,
	"digest": DIGEST,
	"status_code": STATUS_CODE,
	"connect_timeout": CONNECT_TIMEOUT,
	"connect_port": CONNECT_PORT,
	"connect_ip": CONNECT_IP,
	"bindto": BINDTO,
	"bind_port": BIND_PORT,
	"retry": RETRY,
	"helo_name": HELO_NAME,
	"delay_before_retry": DELAY_BEFORE_RETRY,
	"type": TYPE,
	"name": NAME,
	"misc_path": MISC_PATH,
	"misc_timeout": MISC_TIMEOUT,
	"warmup": WARMUP,
	"misc_dynamic": MISC_DYNAMIC,
	"nb_get_retry": NB_GET_RETRY,
	"virtualhost": VIRTUALHOST,
	"alpha": ALPHA,
	"omega": OMEGA,
	"quorum": QUORUM,
	"hysteresis": HYSTERESIS,
	"quorum_up": QUORUM_UP,
	"quorum_down": QUORUM_DOWN,
}
// Lexer adapts the token stream produced by tokenize to the interface the
// yacc-generated parser expects (Lex/Error).
type Lexer struct {
	scanner  scanner.Scanner // scanner over the current input
	tokens   []int           // parser token values filled in by tokenize
	pos      int             // index of the next token Lex will hand out
	filename string          // current file name, used when recording errors
	e        error           // last error recorded via Error
}
// Error describes a parse failure together with where it was recorded.
type Error struct {
	Message  string // human-readable description
	Filename string // file being parsed when the error was recorded
	Line     int    // scanner line at the time the error was recorded
	Column   int    // scanner column at the time the error was recorded
}
// Error implements the error interface.
// NOTE(review): Filename/Line/Column are carried but never surfaced — callers
// only ever see Message; consider including the location in the text.
func (e *Error) Error() string {
	return e.Message
}
// NewLexer returns a Lexer reading from src; filename is retained for
// include resolution and error reporting.
func NewLexer(src io.Reader, filename string) *Lexer {
	l := &Lexer{
		filename: filename,
		tokens:   []int{},
	}
	l.scanner.Init(src)
	// Disable every scanner mode except identifier/string scanning; numbers,
	// addresses, keywords etc. are classified later by tokenize, and comment
	// handling ('!'/'#') is done by scanNextToken.
	l.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | scanner.ScanRawStrings | scanner.ScanComments | scanner.SkipComments
	l.scanner.IsIdentRune = isIdentRune
	return l
}
// isIdentRune reports whether ch may appear in an identifier token. Besides
// letters and digits, keepalived-style configs allow punctuation used in
// paths, IPv6 addresses, globs and e-mail addresses. The position i is
// ignored: the same characters are valid anywhere in the token.
func isIdentRune(ch rune, i int) bool {
	switch ch {
	case '_', '.', '/', ':', '-', '+', '*', '?', '=', '&', '@':
		return true
	}
	return unicode.IsLetter(ch) || unicode.IsDigit(ch)
}
// scanNextToken returns the next raw scanner token and its text,
// transparently skipping keepalived comments, which are introduced by '!'
// or '#' and run to end of line.
func (l *Lexer) scanNextToken() (int, string) {
	tok := int(l.scanner.Scan())
	text := l.scanner.TokenText()
	for text == "!" || text == "#" {
		skipComments(&l.scanner)
		tok = int(l.scanner.Scan())
		text = l.scanner.TokenText()
	}
	log.Debugf("token text: %s\n", text)
	return tok, text
}
func skipComments(scanner *scanner.Scanner) {
ch := scanner.Next()
for ch != '\n' && ch >= 0 {
ch = scanner.Next()
}
}
// scanInclude handles an "include <glob>" directive: it resolves the glob
// relative to the including file's directory and tokenizes every matching
// file by re-initializing the scanner over it and recursing into tokenize.
// The previous scanner state and filename are restored afterwards so
// scanning of the including file can resume where it left off.
func (l *Lexer) scanInclude(rawfilename string) error {
	curDir, err := filepath.Abs(".")
	if err != nil {
		return err
	}
	// Globs are relative to the including file's directory; chdir there and
	// restore the working directory on return.
	// NOTE(review): os.Chdir is process-global, so concurrent parses would
	// race on the working directory — verify single-threaded use.
	baseDir := filepath.Dir(l.filename)
	os.Chdir(baseDir)
	defer os.Chdir(curDir)
	rawpaths, err := filepath.Glob(rawfilename)
	if err != nil {
		return err
	}
	if len(rawpaths) < 1 {
		return fmt.Errorf("warning: %s: No such file or directory", rawfilename)
	}
	// Save and restore the scanner and filename so the caller's position in
	// the including file survives the recursion below.
	prevScanner := l.scanner
	defer func() { l.scanner = prevScanner }()
	prevFilename := l.filename
	defer func() { l.filename = prevFilename }()
	for _, rawpath := range rawpaths {
		l.filename = rawpath
		log.Verbosef("--> Parsing ... %s\n", rawpath)
		f, err := os.Open(rawpath)
		if err != nil {
			return err
		}
		// Same scanner configuration as NewLexer: only identifier/string
		// scanning enabled, custom identifier rune set.
		l.scanner.Init(f)
		l.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | scanner.ScanRawStrings | scanner.ScanComments | scanner.SkipComments
		l.scanner.IsIdentRune = isIdentRune
		l.tokenize()
		f.Close()
	}
	return nil
}
// Lex implements the yacc yyLexer interface: it hands the parser the next
// pre-computed token value, or EOF once the token slice is exhausted. lval
// is left untouched because these tokens carry no semantic payload.
func (l *Lexer) Lex(lval *yySymType) int {
	if l.pos >= len(l.tokens) {
		return EOF
	}
	tok := l.tokens[l.pos]
	l.pos++
	return tok
}
// tokenize drains the scanner, classifying each raw token into a parser
// token value and appending it to l.tokens. Classification is a chain of
// checks in which later matches override earlier ones; a SYMBOL_TABLES
// keyword match always wins.
func (l *Lexer) tokenize() {
	for {
		token, s := l.scanNextToken()
		// Expand "include <glob>" directives in place; the included files
		// are tokenized recursively via scanInclude.
		for s == "include" {
			token, s = l.scanNextToken()
			if err := l.scanInclude(s); err != nil {
				l.Error(err.Error())
			}
			token, s = l.scanNextToken()
		}
		if token == scanner.EOF {
			break
		}
		// Base case: any identifier or quoted string is a generic STRING.
		if token == scanner.Ident || token == scanner.String {
			token = STRING
		}
		if _, err := strconv.Atoi(s); err == nil {
			token = NUMBER
		}
		if ip := net.ParseIP(s); ip != nil {
			if ip.To4() != nil {
				token = IPV4
			} else if ip.To16() != nil {
				token = IPV6
			} else {
				log.Infof("warning: %s may be IP address?", s)
			}
		}
		if _, _, err := net.ParseCIDR(s); err == nil {
			token = IP_CIDR
		}
		// IPADDR_RANGE(XXX.YYY.ZZZ.WWW-VVV)
		if ss := strings.Split(s, "-"); len(ss) == 2 {
			if net.ParseIP(ss[0]) != nil {
				if ok, _ := regexp.MatchString(`^[\d]{1,3}$`, ss[1]); ok {
					token = IPADDR_RANGE
				}
			}
		}
		if ok, _ := regexp.MatchString(`^[[:xdigit:]]{32}$`, s); ok {
			token = HEX32
		}
		// NOTE(review): this pattern looks broken — the leading "/^" requires
		// a literal '/' immediately followed by a start-of-text anchor, which
		// can never match, so PATHSTR appears unreachable here. It reads like
		// a Perl-style /.../ delimiter crept into the Go pattern; verify the
		// intended expression before changing token classification.
		if ok, _ := regexp.MatchString(`/^([[:alnum:]./-_])*`, s); ok {
			token = PATHSTR
		}
		if _, err := mail.ParseAddress(s); err == nil {
			token = EMAIL
		}
		// Keywords override every classification above.
		if _, ok := SYMBOL_TABLES[s]; ok {
			token = SYMBOL_TABLES[s]
		}
		l.tokens = append(l.tokens, token)
	}
}
// Error implements the yacc yyLexer interface by recording msg, together
// with the scanner's current position, as the lexer's error.
// NOTE(review): tokenize runs to completion before yyParse consumes any
// tokens, so by the time the parser calls Error the scanner already sits at
// end of input — Line/Column likely point at EOF rather than at the
// offending token; verify before relying on them.
func (l *Lexer) Error(msg string) {
	l.e = &Error{
		Filename: l.filename,
		Line:     l.scanner.Line,
		Column:   l.scanner.Column,
		Message:  msg,
	}
}
// Parse tokenizes src and runs the yacc-generated parser over the result.
// It returns the last error recorded by the lexer, which is nil when
// nothing went wrong.
func Parse(src io.Reader, filename string) error {
	yyErrorVerbose = true
	lexer := NewLexer(src, filename)
	lexer.tokenize()
	// Success and failure both surface lexer.e: yyParse's return code adds
	// nothing beyond what the recorded error (or its absence) conveys.
	yyParse(lexer)
	return lexer.e
}
Split the Lexer into a Tokenizer (token scanning and classification) and a Lexer (parser-facing token stream)
package parser
import (
"fmt"
"io"
"net"
"net/mail"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"text/scanner"
"unicode"
"github.com/yuuki/gokc/log"
)
const (
	// EOF (0) is the end-of-input token value; yacc-generated parsers treat
	// token value 0 as end of input.
	EOF = 0
)
// SYMBOL_TABLES maps keepalived configuration keywords (plus the two brace
// delimiters) to their parser token values. NextAll consults it last, so a
// keyword match overrides any earlier classification (STRING, NUMBER, ...).
var SYMBOL_TABLES = map[string]int{
	"{": LB,
	"}": RB,
	"global_defs": GLOBALDEFS,
	"notification_email": NOTIFICATION_EMAIL,
	"notification_email_from": NOTIFICATION_EMAIL_FROM,
	"smtp_server": SMTP_SERVER,
	"smtp_connect_timeout": SMTP_CONNECT_TIMEOUT,
	"router_id": ROUTER_ID,
	"lvs_id": LVS_ID,
	"vrrp_mcast_group4": VRRP_MCAST_GROUP4,
	"vrrp_mcast_group6": VRRP_MCAST_GROUP6,
	"vrrp_garp_master_delay": VRRP_GARP_MASTER_DELAY,
	"vrrp_garp_master_repeat": VRRP_GARP_MASTER_REPEAT,
	"vrrp_garp_master_refresh": VRRP_GARP_MASTER_REFRESH,
	"vrrp_garp_master_refresh_repeat": VRRP_GARP_MASTER_REFRESH_REPEAT,
	"vrrp_version": VRRP_VERSION,
	"static_ipaddress": STATIC_IPADDRESS,
	"static_routes": STATIC_ROUTES,
	"static_rules": STATIC_RULES,
	"vrrp_sync_group": VRRP_SYNC_GROUP,
	"group": GROUP,
	"vrrp_instance": VRRP_INSTANCE,
	"use_vmac": USE_VMAC,
	"version": VERSION,
	"vmac_xmit_base": VMAC_XMIT_BASE,
	"native_ipv6": NATIVE_IPV6,
	"interface": INTERFACE,
	"mcast_src_ip": MCAST_SRC_IP,
	"unicast_src_ip": UNICAST_SRC_IP,
	"unicast_peer": UNICAST_PEER,
	"lvs_sync_daemon_interface": LVS_SYNC_DAEMON_INTERFACE,
	"virtual_router_id": VIRTUAL_ROUTER_ID,
	"nopreempt": NOPREEMPT,
	"priority": PRIORITY,
	"advert_int": ADVERT_INT,
	"virtual_ipaddress": VIRTUAL_IPADDRESS,
	"virtual_ipaddress_excluded": VIRTUAL_IPADDRESS_EXCLUDED,
	"virtual_routes": VIRTUAL_ROUTES,
	"state": STATE,
	"MASTER": MASTER,
	"BACKUP": BACKUP,
	"garp_master_delay": GARP_MASTER_DELAY,
	"smtp_alert": SMTP_ALERT,
	"authentication": AUTHENTICATION,
	"auth_type": AUTH_TYPE,
	"auth_pass": AUTH_PASS,
	"PASS": PASS,
	"AH": AH,
	"label": LABEL,
	"dev": DEV,
	"scope": SCOPE,
	"site": SITE,
	"link": LINK,
	"host": HOST,
	"nowhere": NOWHERE,
	"global": GLOBAL,
	"brd": BRD,
	"src": SRC,
	"from": FROM,
	"to": TO,
	"via": VIA,
	"gw": GW,
	"or": OR,
	"table": TABLE,
	"metric": METRIC,
	"blackhole": BLACKHOLE,
	"track_interface": TRACK_INTERFACE,
	"track_script": TRACK_SCRIPT,
	"dont_track_primary": DONT_TRACK_PRIMARY,
	"notify_master": NOTIFY_MASTER,
	"notify_backup": NOTIFY_BACKUP,
	"notify_fault": NOTIFY_FAULT,
	"notify_stop": NOTIFY_STOP,
	"notify": NOTIFY,
	"vrrp_script": VRRP_SCRIPT,
	"script": SCRIPT,
	"interval": INTERVAL,
	"timeout": TIMEOUT,
	"fall": FALL,
	"rise": RISE,
	"virtual_server_group": VIRTUAL_SERVER_GROUP,
	"fwmark": FWMARK,
	"virtual_server": VIRTUAL_SERVER,
	"delay_loop": DELAY_LOOP,
	"lb_algo": LB_ALGO,
	"lb_kind": LB_KIND,
	"lvs_sched": LVS_SCHED,
	"lvs_method": LVS_METHOD,
	"rr": RR,
	"wrr": WRR,
	"lc": LC,
	"wlc": WLC,
	"fo": FO,
	"ovf": OVF,
	"lblc": LBLC,
	"lblcr": LBLCR,
	"sh": SH,
	"dh": DH,
	"sed": SED,
	"nq": NQ,
	"NAT": NAT,
	"DR": DR,
	"TUN": TUN,
	"persistence_timeout": PERSISTENCE_TIMEOUT,
	"protocol": PROTOCOL,
	"TCP": TCP,
	"UDP": UDP,
	"sorry_server": SORRY_SERVER,
	"real_server": REAL_SERVER,
	"weight": WEIGHT,
	"inhibit_on_failure": INHIBIT_ON_FAILURE,
	"TCP_CHECK": TCP_CHECK,
	"HTTP_GET": HTTP_GET,
	"SSL_GET": SSL_GET,
	"SMTP_CHECK": SMTP_CHECK,
	"DNS_CHECK": DNS_CHECK,
	"MISC_CHECK": MISC_CHECK,
	"url": URL,
	"path": PATH,
	"digest": DIGEST,
	"status_code": STATUS_CODE,
	"connect_timeout": CONNECT_TIMEOUT,
	"connect_port": CONNECT_PORT,
	"connect_ip": CONNECT_IP,
	"bindto": BINDTO,
	"bind_port": BIND_PORT,
	"retry": RETRY,
	"helo_name": HELO_NAME,
	"delay_before_retry": DELAY_BEFORE_RETRY,
	"type": TYPE,
	"name": NAME,
	"misc_path": MISC_PATH,
	"misc_timeout": MISC_TIMEOUT,
	"warmup": WARMUP,
	"misc_dynamic": MISC_DYNAMIC,
	"nb_get_retry": NB_GET_RETRY,
	"virtualhost": VIRTUALHOST,
	"alpha": ALPHA,
	"omega": OMEGA,
	"quorum": QUORUM,
	"hysteresis": HYSTERESIS,
	"quorum_up": QUORUM_UP,
	"quorum_down": QUORUM_DOWN,
}
// Tokenizer scans a keepalived configuration source and classifies the raw
// scanner tokens into parser token values.
type Tokenizer struct {
	scanner  scanner.Scanner
	filename string
}
// NewTokenizer returns a Tokenizer reading from src; filename is retained
// for include resolution and for tagging the produced tokens.
func NewTokenizer(src io.Reader, filename string) *Tokenizer {
	t := &Tokenizer{filename: filename}
	t.scanner.Init(src)
	// Leave only identifier/string scanning enabled; numbers, addresses and
	// keywords are classified downstream, and comments ('!'/'#') are handled
	// by scanNextToken.
	t.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | scanner.ScanRawStrings | scanner.ScanComments | scanner.SkipComments
	t.scanner.IsIdentRune = isIdentRune
	return t
}
// NextAll scans the entire input and returns the full token stream,
// classifying each lexeme (number, IP, CIDR, keyword, ...) and expanding
// "include" directives as they are encountered.
func (t *Tokenizer) NextAll() ([]*Token, error) {
	var tokens []*Token
	for {
		token, s := t.scanNextToken()
		// An "include" directive is followed by a (possibly glob) filename;
		// process the included file(s), then continue with the next token.
		for s == "include" {
			token, s = t.scanNextToken()
			if err := t.scanInclude(s); err != nil {
				return nil, err
			}
			token, s = t.scanNextToken()
		}
		if token == scanner.EOF {
			break
		}
		if token == scanner.Ident || token == scanner.String {
			token = STRING
		}
		// Reclassify the raw text by pattern; later matches override
		// earlier ones, so ordering here is significant.
		if _, err := strconv.Atoi(s); err == nil {
			token = NUMBER
		}
		if ip := net.ParseIP(s); ip != nil {
			if ip.To4() != nil {
				token = IPV4
			} else if ip.To16() != nil {
				token = IPV6
			} else {
				log.Infof("warning: %s may be IP address?", s)
			}
		}
		if _, _, err := net.ParseCIDR(s); err == nil {
			token = IP_CIDR
		}
		// IPADDR_RANGE(XXX.YYY.ZZZ.WWW-VVV)
		if ss := strings.Split(s, "-"); len(ss) == 2 {
			if net.ParseIP(ss[0]) != nil {
				if ok, _ := regexp.MatchString(`^[\d]{1,3}$`, ss[1]); ok {
					token = IPADDR_RANGE
				}
			}
		}
		if ok, _ := regexp.MatchString(`^[[:xdigit:]]{32}$`, s); ok {
			token = HEX32
		}
		// NOTE(review): this pattern can never match — the literal '/' must be
		// immediately followed by '^' (start of text), which is impossible, so
		// PATHSTR is never produced. Confirm the intended pattern before
		// changing it: an anchored `^[[:alnum:]./\-_]+$` would also match bare
		// digits and shadow the NUMBER classification above.
		if ok, _ := regexp.MatchString(`/^([[:alnum:]./-_])*`, s); ok {
			token = PATHSTR
		}
		if _, err := mail.ParseAddress(s); err == nil {
			token = EMAIL
		}
		// Reserved keywords take precedence over all pattern-based classes.
		if _, ok := SYMBOL_TABLES[s]; ok {
			token = SYMBOL_TABLES[s]
		}
		tokens = append(tokens, &Token{
			value:    token,
			filename: t.filename,
			line:     t.scanner.Line,
			column:   t.scanner.Column,
		})
	}
	return tokens, nil
}
func skipComments(scanner *scanner.Scanner) {
ch := scanner.Next()
for ch != '\n' && ch >= 0 {
ch = scanner.Next()
}
}
// scanNextToken returns the next raw scanner token and its text, skipping
// over comment lines entirely.
func (t *Tokenizer) scanNextToken() (int, string) {
	tok := int(t.scanner.Scan())
	text := t.scanner.TokenText()
	// Both '!' and '#' introduce a comment running to end of line.
	for text == "!" || text == "#" {
		skipComments(&t.scanner)
		tok = int(t.scanner.Scan())
		text = t.scanner.TokenText()
	}
	log.Debugf("token text: %s\n", text)
	return tok, text
}
// scanInclude processes an "include" directive: rawfilename is a path or
// glob pattern resolved relative to the directory of the current file.
// The tokenizer's scanner and filename are restored before returning.
func (t *Tokenizer) scanInclude(rawfilename string) error {
	curDir, err := filepath.Abs(".")
	if err != nil {
		return err
	}
	// Change into the including file's directory so the glob resolves
	// relative paths the way keepalived does.
	// NOTE(review): os.Chdir errors are silently ignored here — confirm that
	// is acceptable when baseDir does not exist.
	baseDir := filepath.Dir(t.filename)
	os.Chdir(baseDir)
	defer os.Chdir(curDir)
	rawpaths, err := filepath.Glob(rawfilename)
	if err != nil {
		return err
	}
	if len(rawpaths) < 1 {
		return fmt.Errorf("warning: %s: No such file or directory", rawfilename)
	}
	// Save and restore scanner state around scanning the included files.
	prevScanner := t.scanner
	defer func() { t.scanner = prevScanner }()
	prevFilename := t.filename
	defer func() { t.filename = prevFilename }()
	for _, rawpath := range rawpaths {
		t.filename = rawpath
		log.Verbosef("--> Parsing ... %s\n", rawpath)
		f, err := os.Open(rawpath)
		if err != nil {
			return err
		}
		t.scanner.Init(f)
		t.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars | scanner.ScanRawStrings | scanner.ScanComments | scanner.SkipComments
		t.scanner.IsIdentRune = isIdentRune
		// NOTE(review): the token slice (and error) returned by NextAll is
		// discarded here, so tokens scanned from included files never reach
		// the caller — confirm whether included content is expected to be
		// delivered to the parser.
		t.NextAll()
		f.Close()
	}
	return nil
}
// Token is a single lexical token together with the source position it
// was read from, used for error reporting.
type Token struct {
	value    int    // parser token class (STRING, NUMBER, IPV4, keyword, ...)
	filename string // file the token was read from
	line     int    // 1-based line within filename
	column   int    // 1-based column within the line
}
// Lexer feeds a pre-scanned token stream to the generated yacc parser and
// records the first syntax error the parser reports.
type Lexer struct {
	tokens []*Token // full token stream produced by Tokenizer.NextAll
	pos    int      // index of the next token to hand to the parser
	e      error    // first error reported via Error, nil on success
}
// Error is a syntax error annotated with the source position at which the
// parser detected it.
type Error struct {
	Message  string
	Filename string
	Line     int
	Column   int
}
// Error implements the error interface; it returns the bare message
// (the position fields are carried separately).
func (e *Error) Error() string {
	return e.Message
}
// NewLexer wraps a token stream for consumption by the generated parser,
// starting at the first token.
func NewLexer(tokens []*Token) *Lexer {
	l := Lexer{tokens: tokens}
	return &l
}
// isIdentRune reports whether ch may appear in a configuration word.
// Besides letters and digits, keepalived values may contain punctuation
// used in paths, ranges, URLs and email addresses. The position i is
// irrelevant: all positions accept the same rune set.
func isIdentRune(ch rune, i int) bool {
	if unicode.IsLetter(ch) || unicode.IsDigit(ch) {
		return true
	}
	return strings.ContainsRune("_./:-+*?=&@", ch)
}
// curToken returns the token at the current read position.
// NOTE(review): if called after the stream is exhausted (l.pos equal to
// len(l.tokens), as happens when Lex has already returned EOF), this
// indexes out of range — confirm Error() cannot be reached in that state.
func (l *Lexer) curToken() *Token {
	return l.tokens[l.pos]
}
// Lex hands the next token's class to the generated parser, or EOF when
// the stream is exhausted. It implements the yyLexer interface.
func (l *Lexer) Lex(lval *yySymType) int {
	if l.pos >= len(l.tokens) {
		return EOF
	}
	tok := l.tokens[l.pos]
	l.pos++
	return tok.value
}
// Error records a syntax error reported by the generated parser, attaching
// the position of the token at the current read offset.
//
// The parser invokes Error after Lex has already advanced, so l.pos may
// equal len(l.tokens) when the error is detected at end of input; the
// previous implementation (via curToken) indexed out of range there, so
// clamp to the last token instead of panicking.
func (l *Lexer) Error(msg string) {
	if len(l.tokens) == 0 {
		// No position information available at all.
		l.e = &Error{Message: msg}
		return
	}
	pos := l.pos
	if pos >= len(l.tokens) {
		pos = len(l.tokens) - 1
	}
	token := l.tokens[pos]
	l.e = &Error{
		Filename: token.filename,
		Line:     token.line,
		Column:   token.column,
		Message:  msg,
	}
}
// Parse tokenizes src (resolving includes relative to filename) and runs
// the generated parser over the resulting token stream. It returns the
// tokenizer error or the recorded parse error; nil means success.
func Parse(src io.Reader, filename string) error {
	yyErrorVerbose = true
	tokens, err := NewTokenizer(src, filename).NextAll()
	if err != nil {
		return err
	}
	lexer := NewLexer(tokens)
	// The original returned lexer.e on both parser outcomes, so the return
	// code of yyParse carries no extra information here.
	yyParse(lexer)
	return lexer.e
}
|
// Package passwordhash implements safe password hashing and comparison.
//
// Hashes are derived using PBKDF2-HMAC-SHA256 function with 5000 iterations
// (by default), 32-byte salt and 64-byte output.
//
// Note: you must not allow users to change parameters of PasswordHash, such as
// the number of iterations, directly. If a malicious user can change the
// number of iterations, he can set it too high, and it will lead to DoS.
//
// Example usage:
//
// ph := passwordhash.New("hello, world")
// // Store ph somewhere...
// // Later, when user provides a password:
// if ph.EqualToPassword("hello, world") {
// // Password's okay, user authorized...
// }
//
package passwordhash
import (
"crypto/rand"
"crypto/sha256"
"crypto/subtle"
"fmt"
"github.com/dchest/pbkdf2"
"io"
)
// PasswordHash stores hash, salt, and number of iterations.
type PasswordHash struct {
Iter int
Salt []byte
Hash []byte
}
const (
	// DefaultIterations is the default PBKDF2 iteration count.
	DefaultIterations = 5000
	// SaltLen is the length in bytes of generated salts.
	SaltLen = 32
	// HashLen is the length in bytes of derived hashes.
	HashLen = 64
)
// getSalt returns a new random salt.
// The function causes runtime panic if it fails to read from random source.
func getSalt() []byte {
salt := make([]byte, SaltLen)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("error reading from random source: " + err.String())
}
return salt
}
// New returns a new password hash derived from the provided password,
// a random salt, and the default number of iterations.
// The function causes runtime panic if it fails to get random salt.
func New(password string) *PasswordHash {
return NewSaltIter(password, getSalt(), DefaultIterations)
}
// NewIter returns a new password hash derived from the provided password,
// the number of iterations, and a random salt.
// The function causes runtime panic if it fails to get random salt.
func NewIter(password string, iter int) *PasswordHash {
return NewSaltIter(password, getSalt(), iter)
}
// NewSaltIter creates a new password hash from the provided password, salt,
// and the number of iterations.
func NewSaltIter(password string, salt []byte, iter int) *PasswordHash {
return &PasswordHash{iter, salt,
pbkdf2.WithHMAC(sha256.New, []byte(password), salt, iter, HashLen)}
}
// EqualToPassword returns true if the password hash was derived from the provided password.
// This function uses constant time comparison.
func (ph *PasswordHash) EqualToPassword(password string) bool {
provided := NewSaltIter(password, ph.Salt, ph.Iter)
if len(ph.Hash) != len(provided.Hash) {
return false
}
return subtle.ConstantTimeCompare(ph.Hash, provided.Hash) == 1
}
// String returns a human-readable representation of the password hash,
// with salt and hash rendered as hex.
func (ph *PasswordHash) String() string {
	return fmt.Sprintf("&PasswordHash{Iter: %d, Salt: %x, Hash: %x}",
		ph.Iter, ph.Salt, ph.Hash)
}
Fix for Go weekly.2012-01-15: the error interface's String method was renamed to Error, so err.String() becomes err.Error().
// Package passwordhash implements safe password hashing and comparison.
//
// Hashes are derived using PBKDF2-HMAC-SHA256 function with 5000 iterations
// (by default), 32-byte salt and 64-byte output.
//
// Note: you must not allow users to change parameters of PasswordHash, such as
// the number of iterations, directly. If a malicious user can change the
// number of iterations, he can set it too high, and it will lead to DoS.
//
// Example usage:
//
// ph := passwordhash.New("hello, world")
// // Store ph somewhere...
// // Later, when user provides a password:
// if ph.EqualToPassword("hello, world") {
// // Password's okay, user authorized...
// }
//
package passwordhash
import (
"crypto/rand"
"crypto/sha256"
"crypto/subtle"
"fmt"
"github.com/dchest/pbkdf2"
"io"
)
// PasswordHash stores hash, salt, and number of iterations.
type PasswordHash struct {
Iter int
Salt []byte
Hash []byte
}
const (
	// DefaultIterations is the default PBKDF2 iteration count.
	DefaultIterations = 5000
	// SaltLen is the length in bytes of generated salts.
	SaltLen = 32
	// HashLen is the length in bytes of derived hashes.
	HashLen = 64
)
// getSalt returns SaltLen bytes of cryptographically random salt.
// It panics if the system random source cannot be read.
func getSalt() []byte {
	buf := make([]byte, SaltLen)
	_, err := io.ReadFull(rand.Reader, buf)
	if err != nil {
		panic("error reading from random source: " + err.Error())
	}
	return buf
}
// New returns a password hash for password using a fresh random salt and
// DefaultIterations rounds of PBKDF2.
// The function causes runtime panic if it fails to get random salt.
func New(password string) *PasswordHash {
	salt := getSalt()
	return NewSaltIter(password, salt, DefaultIterations)
}
// NewIter returns a password hash for password using a fresh random salt
// and the given number of PBKDF2 iterations.
// The function causes runtime panic if it fails to get random salt.
func NewIter(password string, iter int) *PasswordHash {
	salt := getSalt()
	return NewSaltIter(password, salt, iter)
}
// NewSaltIter derives a password hash from password with the given salt and
// iteration count, producing HashLen bytes of PBKDF2-HMAC-SHA256 output.
func NewSaltIter(password string, salt []byte, iter int) *PasswordHash {
	hash := pbkdf2.WithHMAC(sha256.New, []byte(password), salt, iter, HashLen)
	return &PasswordHash{Iter: iter, Salt: salt, Hash: hash}
}
// EqualToPassword reports whether ph was derived from the provided password,
// re-deriving a candidate hash with the stored salt and iteration count.
// The hash comparison runs in constant time.
func (ph *PasswordHash) EqualToPassword(password string) bool {
	candidate := NewSaltIter(password, ph.Salt, ph.Iter)
	if len(candidate.Hash) != len(ph.Hash) {
		return false
	}
	return subtle.ConstantTimeCompare(ph.Hash, candidate.Hash) == 1
}
// String returns a human-readable representation of the password hash,
// with salt and hash rendered as hex.
func (ph *PasswordHash) String() string {
	return fmt.Sprintf("&PasswordHash{Iter: %d, Salt: %x, Hash: %x}",
		ph.Iter, ph.Salt, ph.Hash)
}
|
package suggest_test
import (
"bytes"
"testing"
"github.com/mdempsky/gocode/suggest"
)
// TestFormatters runs every registered output formatter over a fixed
// candidate list and compares the produced text byte-for-byte against
// golden strings (one subtest table entry per formatter name).
func TestFormatters(t *testing.T) {
	// TODO(mdempsky): More comprehensive test.
	num := len("client")
	candidates := []suggest.Candidate{{
		Class: "func",
		Name:  "client_auto_complete",
		Type:  "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)",
	}, {
		Class: "func",
		Name:  "client_close",
		Type:  "func(cli *rpc.Client, Arg0 int) int",
	}, {
		Class: "func",
		Name:  "client_cursor_type_pkg",
		Type:  "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)",
	}, {
		Class: "func",
		Name:  "client_drop_cache",
		Type:  "func(cli *rpc.Client, Arg0 int) int",
	}, {
		Class: "func",
		Name:  "client_highlight",
		Type:  "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)",
	}, {
		Class: "func",
		Name:  "client_set",
		Type:  "func(cli *rpc.Client, Arg0, Arg1 string) string",
	}, {
		Class: "func",
		Name:  "client_status",
		Type:  "func(cli *rpc.Client, Arg0 int) string",
	}}
	// Golden outputs; `[1:]` strips the leading newline used for layout.
	var tests = [...]struct {
		name string
		want string
	}{
		{"json", `[6, [{"class": "func", "name": "client_auto_complete", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)"}, {"class": "func", "name": "client_close", "type": "func(cli *rpc.Client, Arg0 int) int"}, {"class": "func", "name": "client_cursor_type_pkg", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)"}, {"class": "func", "name": "client_drop_cache", "type": "func(cli *rpc.Client, Arg0 int) int"}, {"class": "func", "name": "client_highlight", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)"}, {"class": "func", "name": "client_set", "type": "func(cli *rpc.Client, Arg0, Arg1 string) string"}, {"class": "func", "name": "client_status", "type": "func(cli *rpc.Client, Arg0 int) string"}]]`},
		{"nice", `Found 7 candidates:
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)
func client_close(cli *rpc.Client, Arg0 int) int
func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)
func client_drop_cache(cli *rpc.Client, Arg0 int) int
func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)
func client_set(cli *rpc.Client, Arg0, Arg1 string) string
func client_status(cli *rpc.Client, Arg0 int) string
`},
		{"vim", `[6, [{'word': 'client_auto_complete(', 'abbr': 'func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)', 'info': 'func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)'}, {'word': 'client_close(', 'abbr': 'func client_close(cli *rpc.Client, Arg0 int) int', 'info': 'func client_close(cli *rpc.Client, Arg0 int) int'}, {'word': 'client_cursor_type_pkg(', 'abbr': 'func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)', 'info': 'func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)'}, {'word': 'client_drop_cache(', 'abbr': 'func client_drop_cache(cli *rpc.Client, Arg0 int) int', 'info': 'func client_drop_cache(cli *rpc.Client, Arg0 int) int'}, {'word': 'client_highlight(', 'abbr': 'func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)', 'info': 'func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)'}, {'word': 'client_set(', 'abbr': 'func client_set(cli *rpc.Client, Arg0, Arg1 string) string', 'info': 'func client_set(cli *rpc.Client, Arg0, Arg1 string) string'}, {'word': 'client_status(', 'abbr': 'func client_status(cli *rpc.Client, Arg0 int) string', 'info': 'func client_status(cli *rpc.Client, Arg0 int) string'}]]`},
		{"godit", `6,,7
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int),,client_auto_complete(
func client_close(cli *rpc.Client, Arg0 int) int,,client_close(
func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string),,client_cursor_type_pkg(
func client_drop_cache(cli *rpc.Client, Arg0 int) int,,client_drop_cache(
func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int),,client_highlight(
func client_set(cli *rpc.Client, Arg0, Arg1 string) string,,client_set(
func client_status(cli *rpc.Client, Arg0 int) string,,client_status(
`},
		{"emacs", `
client_auto_complete,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)
client_close,,func(cli *rpc.Client, Arg0 int) int
client_cursor_type_pkg,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)
client_drop_cache,,func(cli *rpc.Client, Arg0 int) int
client_highlight,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)
client_set,,func(cli *rpc.Client, Arg0, Arg1 string) string
client_status,,func(cli *rpc.Client, Arg0 int) string
`[1:]},
		{"csv", `
func,,client_auto_complete,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)
func,,client_close,,func(cli *rpc.Client, Arg0 int) int
func,,client_cursor_type_pkg,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)
func,,client_drop_cache,,func(cli *rpc.Client, Arg0 int) int
func,,client_highlight,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)
func,,client_set,,func(cli *rpc.Client, Arg0, Arg1 string) string
func,,client_status,,func(cli *rpc.Client, Arg0 int) string
`[1:]},
	}
	for _, test := range tests {
		var out bytes.Buffer
		suggest.Formatters[test.name](&out, candidates, num)
		if got := out.String(); got != test.want {
			t.Errorf("Format %s:\nGot:\n%s\nWant:\n%s\n", test.name, got, test.want)
		}
	}
}
suggest: fix tests — populate the new Candidate.PkgPath field and update the json/csv golden outputs, which now include the package.
package suggest_test
import (
"bytes"
"testing"
"github.com/mdempsky/gocode/suggest"
)
// TestFormatters runs every registered output formatter over a fixed
// candidate list (each candidate carrying a PkgPath) and compares the
// produced text byte-for-byte against golden strings.
func TestFormatters(t *testing.T) {
	// TODO(mdempsky): More comprehensive test.
	num := len("client")
	candidates := []suggest.Candidate{{
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_auto_complete",
		Type:    "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_close",
		Type:    "func(cli *rpc.Client, Arg0 int) int",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_cursor_type_pkg",
		Type:    "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_drop_cache",
		Type:    "func(cli *rpc.Client, Arg0 int) int",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_highlight",
		Type:    "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_set",
		Type:    "func(cli *rpc.Client, Arg0, Arg1 string) string",
	}, {
		Class:   "func",
		PkgPath: "gocode",
		Name:    "client_status",
		Type:    "func(cli *rpc.Client, Arg0 int) string",
	}}
	// Golden outputs; `[1:]` strips the leading newline used for layout.
	// The json and csv formatters include the package; the others do not.
	var tests = [...]struct {
		name string
		want string
	}{
		{"json", `[6, [{"class": "func", "name": "client_auto_complete", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)", "package": "gocode"}, {"class": "func", "name": "client_close", "type": "func(cli *rpc.Client, Arg0 int) int", "package": "gocode"}, {"class": "func", "name": "client_cursor_type_pkg", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)", "package": "gocode"}, {"class": "func", "name": "client_drop_cache", "type": "func(cli *rpc.Client, Arg0 int) int", "package": "gocode"}, {"class": "func", "name": "client_highlight", "type": "func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)", "package": "gocode"}, {"class": "func", "name": "client_set", "type": "func(cli *rpc.Client, Arg0, Arg1 string) string", "package": "gocode"}, {"class": "func", "name": "client_status", "type": "func(cli *rpc.Client, Arg0 int) string", "package": "gocode"}]]`},
		{"nice", `Found 7 candidates:
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)
func client_close(cli *rpc.Client, Arg0 int) int
func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)
func client_drop_cache(cli *rpc.Client, Arg0 int) int
func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)
func client_set(cli *rpc.Client, Arg0, Arg1 string) string
func client_status(cli *rpc.Client, Arg0 int) string
`},
		{"vim", `[6, [{'word': 'client_auto_complete(', 'abbr': 'func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)', 'info': 'func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)'}, {'word': 'client_close(', 'abbr': 'func client_close(cli *rpc.Client, Arg0 int) int', 'info': 'func client_close(cli *rpc.Client, Arg0 int) int'}, {'word': 'client_cursor_type_pkg(', 'abbr': 'func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)', 'info': 'func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)'}, {'word': 'client_drop_cache(', 'abbr': 'func client_drop_cache(cli *rpc.Client, Arg0 int) int', 'info': 'func client_drop_cache(cli *rpc.Client, Arg0 int) int'}, {'word': 'client_highlight(', 'abbr': 'func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)', 'info': 'func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)'}, {'word': 'client_set(', 'abbr': 'func client_set(cli *rpc.Client, Arg0, Arg1 string) string', 'info': 'func client_set(cli *rpc.Client, Arg0, Arg1 string) string'}, {'word': 'client_status(', 'abbr': 'func client_status(cli *rpc.Client, Arg0 int) string', 'info': 'func client_status(cli *rpc.Client, Arg0 int) string'}]]`},
		{"godit", `6,,7
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int),,client_auto_complete(
func client_close(cli *rpc.Client, Arg0 int) int,,client_close(
func client_cursor_type_pkg(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string),,client_cursor_type_pkg(
func client_drop_cache(cli *rpc.Client, Arg0 int) int,,client_drop_cache(
func client_highlight(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int),,client_highlight(
func client_set(cli *rpc.Client, Arg0, Arg1 string) string,,client_set(
func client_status(cli *rpc.Client, Arg0 int) string,,client_status(
`},
		{"emacs", `
client_auto_complete,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int)
client_close,,func(cli *rpc.Client, Arg0 int) int
client_cursor_type_pkg,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string)
client_drop_cache,,func(cli *rpc.Client, Arg0 int) int
client_highlight,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int)
client_set,,func(cli *rpc.Client, Arg0, Arg1 string) string
client_status,,func(cli *rpc.Client, Arg0 int) string
`[1:]},
		{"csv", `
func,,client_auto_complete,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 gocode_env) (c []candidate, d int),,gocode
func,,client_close,,func(cli *rpc.Client, Arg0 int) int,,gocode
func,,client_cursor_type_pkg,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int) (typ, pkg string),,gocode
func,,client_drop_cache,,func(cli *rpc.Client, Arg0 int) int,,gocode
func,,client_highlight,,func(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 gocode_env) (c []highlight_range, d int),,gocode
func,,client_set,,func(cli *rpc.Client, Arg0, Arg1 string) string,,gocode
func,,client_status,,func(cli *rpc.Client, Arg0 int) string,,gocode
`[1:]},
	}
	for _, test := range tests {
		var out bytes.Buffer
		suggest.Formatters[test.name](&out, candidates, num)
		if got := out.String(); got != test.want {
			t.Errorf("Format %s:\nGot:\n%s\nWant:\n%s\n", test.name, got, test.want)
		}
	}
}
|
package gdoc
import (
"testing"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/html"
)
// nodeWithStyle returns a <p> node whose style attribute is s.
func nodeWithStyle(s string) *html.Node {
	return nodeWithAttrs(map[string]string{"style": s})
}
// nodeWithAttrs returns a <p> node carrying the given attributes.
// Attribute order follows map iteration order and is therefore unspecified.
func nodeWithAttrs(attrs map[string]string) *html.Node {
	node := makePNode()
	for key, val := range attrs {
		node.Attr = append(node.Attr, html.Attribute{Key: key, Val: val})
	}
	return node
}
// TODO: test parseStyle
// TestClassList checks that classList returns a node's class names as a
// sorted slice, including its behavior when the attribute is absent.
func TestClassList(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		out    []string
	}{
		{
			name:   "Simple",
			inNode: nodeWithAttrs(map[string]string{"class": "foo"}),
			out:    []string{"foo"},
		},
		{
			name:   "MultipleClassesPresorted",
			inNode: nodeWithAttrs(map[string]string{"class": "bar baz foo"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			name:   "MultipleClassesUnsorted",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			name:   "OtherAttrs",
			inNode: nodeWithAttrs(map[string]string{"style": "margin-left: 2em", "class": "bar baz foo", "data-something": "a value"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			// TODO should this just return nil?
			name:   "NoAttrs",
			inNode: makePNode(),
			out:    []string{""},
		},
		{
			// TODO should this just return nil?
			// TODO should capitalization be handled?
			name:   "CapitalizationKey",
			inNode: nodeWithAttrs(map[string]string{"Class": "bar baz foo"}),
			out:    []string{""},
		},
		{
			// TODO should this just return nil?
			name:   "NoClass",
			inNode: nodeWithAttrs(map[string]string{"data-whatever": "lol"}),
			out:    []string{""},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if diff := cmp.Diff(tc.out, classList(tc.inNode)); diff != "" {
				t.Errorf("classList(%+v) got diff (-want +got):\n%s", tc.inNode, diff)
			}
		})
	}
}
// TestHasClass checks hasClass membership lookups, including its
// case-sensitivity for both the queried name and the class attribute.
func TestHasClass(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		inName string
		out    bool
	}{
		{
			name:   "Simple",
			inNode: nodeWithAttrs(map[string]string{"class": "foo"}),
			inName: "foo",
			out:    true,
		},
		{
			name:   "Multiple",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			inName: "bar",
			out:    true,
		},
		{
			name:   "NotFound",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			inName: "qux",
		},
		{
			name:   "NoClasses",
			inNode: makePNode(),
			inName: "foo",
		},
		{
			name:   "CapitalizationInput",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			inName: "Foo",
		},
		{
			name:   "CapitalizationClass",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baZ"}),
			inName: "baz",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if out := hasClass(tc.inNode, tc.inName); out != tc.out {
				t.Errorf("hasClass(%+v, %q) = %t, want %t", tc.inNode, tc.inName, out, tc.out)
			}
		})
	}
}
// TODO: test hasClassStyle
// TestStyleValue checks styleValue's extraction of a named property from a
// node's inline style attribute, including case normalization and
// malformed declarations.
func TestStyleValue(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		inName string
		out    string
	}{
		{
			name:   "NoName",
			inNode: makePNode(),
		},
		{
			name:   "NoStyle",
			inNode: makePNode(),
			inName: "foobar",
		},
		{
			name:   "One",
			inNode: nodeWithStyle("position: absolute"),
			inName: "position",
			out:    "absolute",
		},
		{
			name:   "CapitalizationKeyStyle",
			inNode: nodeWithStyle("Position: relative"),
			inName: "position",
			out:    "relative",
		},
		{
			name:   "CapitalizationValueStyle",
			inNode: nodeWithStyle("color: #0000FF"),
			inName: "color",
			out:    "#0000ff",
		},
		{
			name:   "CapitalizationKeyInput",
			inNode: nodeWithStyle("position: relative"),
			inName: "Position",
			out:    "relative",
		},
		{
			name:   "Multiple",
			inNode: nodeWithStyle("position: absolute; color: #ff00ff; font-weight: 300"),
			inName: "color",
			out:    "#ff00ff",
		},
		{
			name:   "NotFound",
			inNode: nodeWithStyle("position: absolute; color: #FF00FF; font-weight: 300"),
			inName: "margin-left",
		},
		{
			name:   "NoKVPair",
			inNode: nodeWithStyle("margin-left"),
			inName: "margin-left",
		},
		{
			// TODO should this be the behavior?
			name:   "BadSyntax",
			inNode: nodeWithStyle("margin-left: font-weight: #00ff00"),
			inName: "margin-left",
			out:    "font-weight: #00ff00",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if out := styleValue(tc.inNode, tc.inName); out != tc.out {
				t.Errorf("styleValue(%+v, %q) = %q, want %q", tc.inNode, tc.inName, out, tc.out)
			}
		})
	}
}
// TestStyleFloatValue checks styleFloatValue's numeric parsing of a style
// property (units stripped), including malformed declarations and values.
func TestStyleFloatValue(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		inName string
		out    float32
	}{
		{
			name:   "NoName",
			inNode: makePNode(),
		},
		{
			name:   "NoStyle",
			inNode: makePNode(),
			inName: "foobar",
		},
		{
			name:   "Simple",
			inNode: nodeWithStyle("margin-top: 3.14em"),
			inName: "margin-top",
			out:    3.14,
		},
		{
			name:   "NoDecimalPlaces",
			inNode: nodeWithStyle("margin-left: 2in"),
			inName: "margin-left",
			out:    2,
		},
		{
			name:   "DecimalZeroes",
			inNode: nodeWithStyle("margin-right: 1.0px"),
			inName: "margin-right",
			out:    1,
		},
		{
			name:   "NoUnit",
			inNode: nodeWithStyle("margin-bottom: 4"),
			inName: "margin-bottom",
			out:    4,
		},
		{
			name:   "Multiple",
			inNode: nodeWithStyle("padding-top: 1.2; padding-left: 3.4; padding-right: 5.6"),
			inName: "padding-left",
			out:    3.4,
		},
		{
			name:   "NotFound",
			inNode: nodeWithStyle("border-top: 7.8; border-left: 0.9"),
			inName: "border-right",
		},
		{
			name:   "NoKVPair",
			inNode: nodeWithStyle("margin-left"),
			inName: "margin-left",
		},
		{
			name:   "BadSyntax",
			inNode: nodeWithStyle("margin-left: margin-top: 1.234em"),
			inName: "margin-left",
			out:    -1,
		},
		{
			// TODO should this be the behavior?
			name:   "BadSyntaxMiddle",
			inNode: nodeWithStyle("margin-left: margin-top: 1.234em"),
			inName: "margin-top",
		},
		{
			// TODO should this be the behavior?
			name:   "BadValue",
			inNode: nodeWithStyle("margin-left: 7jv9ue4if4.21"),
			inName: "margin-left",
			out:    7,
		},
		{
			name:   "CapitalizationKeyStyle",
			inNode: nodeWithStyle("Margin-Left: 2.3px"),
			inName: "margin-left",
			out:    2.3,
		},
		{
			name:   "CapitalizationKeyInput",
			inNode: nodeWithStyle("margin-left: 4.5px"),
			inName: "Margin-Left",
			out:    4.5,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if out := styleFloatValue(tc.inNode, tc.inName); out != tc.out {
				t.Errorf("styleFloatValue(%+v, %q) = %f, want %f", tc.inNode, tc.inName, out, tc.out)
			}
		})
	}
}
Add tests for gdoc parsing's parseStyle.
package gdoc
import (
"testing"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// nodeWithStyle returns a <p> node whose style attribute is s.
func nodeWithStyle(s string) *html.Node {
	return nodeWithAttrs(map[string]string{"style": s})
}
// nodeWithAttrs returns a <p> node carrying the given attributes.
// Attribute order follows map iteration order and is therefore unspecified.
func nodeWithAttrs(attrs map[string]string) *html.Node {
	node := makePNode()
	for key, val := range attrs {
		node.Attr = append(node.Attr, html.Attribute{Key: key, Val: val})
	}
	return node
}
// makeStyleNode returns a <style> element whose sole child is a text node
// containing s, i.e. <style>s</style>.
func makeStyleNode(s string) *html.Node {
	node := &html.Node{
		Type:     html.ElementNode,
		DataAtom: atom.Style,
		Data:     "style",
	}
	node.AppendChild(makeTextNode(s))
	return node
}
// TestParseStyle checks parseStyle's extraction of class-selector rules
// from a <style> element: class rules are kept, non-class selectors and
// at-rules are skipped, keys/values are lower-cased, and invalid CSS
// yields an error (ok == false).
func TestParseStyle(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		out    cssStyle
		ok     bool
	}{
		{
			name: "Simple",
			inNode: makeStyleNode(`.foo {
margin-top: 1em;
}`),
			out: cssStyle(map[string]map[string]string{
				".foo": map[string]string{
					"margin-top": "1em",
				},
			}),
			ok: true,
		},
		{
			name: "MultipleClasses",
			inNode: makeStyleNode(`.foo {
margin-top: 1em;
margin-left: 2em;
}
.bar {
padding-top: 3em;
padding-left: 4em;
}
`),
			out: cssStyle(map[string]map[string]string{
				".foo": map[string]string{
					"margin-top":  "1em",
					"margin-left": "2em",
				},
				".bar": map[string]string{
					"padding-top":  "3em",
					"padding-left": "4em",
				},
			}),
			ok: true,
		},
		{
			// The #bar id-selector rule is expected to be dropped.
			name: "MultipleTypes",
			inNode: makeStyleNode(`.foo {
margin-top: 1em;
margin-left: 2em;
}
#bar {
padding-top: 3em;
padding-left: 4em;
}
.baz {
color: #ff0000;
}
`),
			out: cssStyle(map[string]map[string]string{
				".foo": map[string]string{
					"margin-top":  "1em",
					"margin-left": "2em",
				},
				".baz": map[string]string{
					"color": "#ff0000",
				},
			}),
			ok: true,
		},
		{
			name:   "PushedRandomKeys",
			inNode: makeStyleNode("0<F3>jffffffff[9,uc"),
			out:    make(cssStyle),
			ok:     true,
		},
		{
			name:   "AtRuleSimple",
			inNode: makeStyleNode("@charset \"ascii\";"),
			out:    make(cssStyle),
			ok:     true,
		},
		{
			name:   "InvalidCSS",
			inNode: makeStyleNode("@media something(max-width: 1)"),
		},
		{
			name: "AtRuleBlock",
			inNode: makeStyleNode(`@media something(max-width: 1) {
foo
bar
baz
}`),
			out: make(cssStyle),
			ok:  true,
		},
		{
			name: "Capitalization",
			inNode: makeStyleNode(`.foo {
color: #00FF00;
MARGIN-TOP: 3px;
margin-left: 3PX;
}`),
			out: cssStyle(map[string]map[string]string{
				".foo": map[string]string{
					"color":       "#00ff00",
					"margin-top":  "3px",
					"margin-left": "3px",
				},
			}),
			ok: true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			out, err := parseStyle(tc.inNode)
			if err != nil && tc.ok {
				t.Errorf("parseStyle(%+v) = %+v, want %+v", tc.inNode, err, tc.out)
				return
			}
			if err == nil && !tc.ok {
				t.Errorf("parseStyle(%+v) = %+v, want err", tc.inNode, out)
				return
			}
			if tc.ok {
				if diff := cmp.Diff(tc.out, out); diff != "" {
					t.Errorf("parseStyle(%+v) got diff (-want +got):\n%s", tc.inNode, diff)
					return
				}
			}
		})
	}
}
// TestClassList checks that classList returns a node's class names as a
// sorted slice, including its behavior when the attribute is absent.
func TestClassList(t *testing.T) {
	tests := []struct {
		name   string
		inNode *html.Node
		out    []string
	}{
		{
			name:   "Simple",
			inNode: nodeWithAttrs(map[string]string{"class": "foo"}),
			out:    []string{"foo"},
		},
		{
			name:   "MultipleClassesPresorted",
			inNode: nodeWithAttrs(map[string]string{"class": "bar baz foo"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			name:   "MultipleClassesUnsorted",
			inNode: nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			name:   "OtherAttrs",
			inNode: nodeWithAttrs(map[string]string{"style": "margin-left: 2em", "class": "bar baz foo", "data-something": "a value"}),
			out:    []string{"bar", "baz", "foo"},
		},
		{
			// TODO should this just return nil?
			name:   "NoAttrs",
			inNode: makePNode(),
			out:    []string{""},
		},
		{
			// TODO should this just return nil?
			// TODO should capitalization be handled?
			name:   "CapitalizationKey",
			inNode: nodeWithAttrs(map[string]string{"Class": "bar baz foo"}),
			out:    []string{""},
		},
		{
			// TODO should this just return nil?
			name:   "NoClass",
			inNode: nodeWithAttrs(map[string]string{"data-whatever": "lol"}),
			out:    []string{""},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			if diff := cmp.Diff(tc.out, classList(tc.inNode)); diff != "" {
				t.Errorf("classList(%+v) got diff (-want +got):\n%s", tc.inNode, diff)
			}
		})
	}
}
// TestHasClass exercises hasClass. Per the table below, matching is
// case-sensitive on both the class attribute value and the queried name.
func TestHasClass(t *testing.T) {
	cases := []struct {
		label string
		node  *html.Node
		class string
		want  bool
	}{
		{
			label: "Simple",
			node:  nodeWithAttrs(map[string]string{"class": "foo"}),
			class: "foo",
			want:  true,
		},
		{
			label: "Multiple",
			node:  nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			class: "bar",
			want:  true,
		},
		{
			label: "NotFound",
			node:  nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			class: "qux",
		},
		{
			label: "NoClasses",
			node:  makePNode(),
			class: "foo",
		},
		{
			label: "CapitalizationInput",
			node:  nodeWithAttrs(map[string]string{"class": "foo bar baz"}),
			class: "Foo",
		},
		{
			label: "CapitalizationClass",
			node:  nodeWithAttrs(map[string]string{"class": "foo bar baZ"}),
			class: "baz",
		},
	}
	for _, tt := range cases {
		t.Run(tt.label, func(t *testing.T) {
			got := hasClass(tt.node, tt.class)
			if got != tt.want {
				t.Errorf("hasClass(%+v, %q) = %t, want %t", tt.node, tt.class, got, tt.want)
			}
		})
	}
}
// TODO: test hasClassStyle
// TestStyleValue exercises styleValue. Per the table below: property names
// match case-insensitively, returned values are lowercased, and a missing
// property or a bare property with no value yields "".
func TestStyleValue(t *testing.T) {
	cases := []struct {
		label string
		node  *html.Node
		prop  string
		want  string
	}{
		{
			label: "NoName",
			node:  makePNode(),
		},
		{
			label: "NoStyle",
			node:  makePNode(),
			prop:  "foobar",
		},
		{
			label: "One",
			node:  nodeWithStyle("position: absolute"),
			prop:  "position",
			want:  "absolute",
		},
		{
			label: "CapitalizationKeyStyle",
			node:  nodeWithStyle("Position: relative"),
			prop:  "position",
			want:  "relative",
		},
		{
			label: "CapitalizationValueStyle",
			node:  nodeWithStyle("color: #0000FF"),
			prop:  "color",
			want:  "#0000ff",
		},
		{
			label: "CapitalizationKeyInput",
			node:  nodeWithStyle("position: relative"),
			prop:  "Position",
			want:  "relative",
		},
		{
			label: "Multiple",
			node:  nodeWithStyle("position: absolute; color: #ff00ff; font-weight: 300"),
			prop:  "color",
			want:  "#ff00ff",
		},
		{
			label: "NotFound",
			node:  nodeWithStyle("position: absolute; color: #FF00FF; font-weight: 300"),
			prop:  "margin-left",
		},
		{
			label: "NoKVPair",
			node:  nodeWithStyle("margin-left"),
			prop:  "margin-left",
		},
		{
			// TODO should this be the behavior?
			label: "BadSyntax",
			node:  nodeWithStyle("margin-left: font-weight: #00ff00"),
			prop:  "margin-left",
			want:  "font-weight: #00ff00",
		},
	}
	for _, tt := range cases {
		t.Run(tt.label, func(t *testing.T) {
			got := styleValue(tt.node, tt.prop)
			if got != tt.want {
				t.Errorf("styleValue(%+v, %q) = %q, want %q", tt.node, tt.prop, got, tt.want)
			}
		})
	}
}
// TestStyleFloatValue exercises styleFloatValue, which extracts the leading
// numeric portion of a style value (trailing units stripped). Per the table
// below: a missing property yields 0 and a value with no leading number
// yields -1.
func TestStyleFloatValue(t *testing.T) {
	cases := []struct {
		label string
		node  *html.Node
		prop  string
		want  float32
	}{
		{
			label: "NoName",
			node:  makePNode(),
		},
		{
			label: "NoStyle",
			node:  makePNode(),
			prop:  "foobar",
		},
		{
			label: "Simple",
			node:  nodeWithStyle("margin-top: 3.14em"),
			prop:  "margin-top",
			want:  3.14,
		},
		{
			label: "NoDecimalPlaces",
			node:  nodeWithStyle("margin-left: 2in"),
			prop:  "margin-left",
			want:  2,
		},
		{
			label: "DecimalZeroes",
			node:  nodeWithStyle("margin-right: 1.0px"),
			prop:  "margin-right",
			want:  1,
		},
		{
			label: "NoUnit",
			node:  nodeWithStyle("margin-bottom: 4"),
			prop:  "margin-bottom",
			want:  4,
		},
		{
			label: "Multiple",
			node:  nodeWithStyle("padding-top: 1.2; padding-left: 3.4; padding-right: 5.6"),
			prop:  "padding-left",
			want:  3.4,
		},
		{
			label: "NotFound",
			node:  nodeWithStyle("border-top: 7.8; border-left: 0.9"),
			prop:  "border-right",
		},
		{
			label: "NoKVPair",
			node:  nodeWithStyle("margin-left"),
			prop:  "margin-left",
		},
		{
			label: "BadSyntax",
			node:  nodeWithStyle("margin-left: margin-top: 1.234em"),
			prop:  "margin-left",
			want:  -1,
		},
		{
			// TODO should this be the behavior?
			label: "BadSyntaxMiddle",
			node:  nodeWithStyle("margin-left: margin-top: 1.234em"),
			prop:  "margin-top",
		},
		{
			// TODO should this be the behavior?
			label: "BadValue",
			node:  nodeWithStyle("margin-left: 7jv9ue4if4.21"),
			prop:  "margin-left",
			want:  7,
		},
		{
			label: "CapitalizationKeyStyle",
			node:  nodeWithStyle("Margin-Left: 2.3px"),
			prop:  "margin-left",
			want:  2.3,
		},
		{
			label: "CapitalizationKeyInput",
			node:  nodeWithStyle("margin-left: 4.5px"),
			prop:  "Margin-Left",
			want:  4.5,
		},
	}
	for _, tt := range cases {
		t.Run(tt.label, func(t *testing.T) {
			got := styleFloatValue(tt.node, tt.prop)
			if got != tt.want {
				t.Errorf("styleFloatValue(%+v, %q) = %f, want %f", tt.node, tt.prop, got, tt.want)
			}
		})
	}
}
|
package fetch
import (
"bytes"
"fmt"
"github.com/disintegration/imaging"
"github.com/rwcarlsen/goexif/exif"
"image"
"image/jpeg"
"image/png"
"io"
)
// needsRotation inspects the EXIF metadata of src and reports whether the
// image must be rotated to appear upright, along with the clockwise angle
// (90, 180 or 270 degrees) to rotate by.
//
// Errors (no EXIF data, missing orientation tag) are logged and treated as
// "no rotation needed". NOTE(review): this consumes bytes from src; callers
// must not expect to re-read the same reader afterwards.
func needsRotation(src io.Reader) (bool, int) {
	metadata, err := exif.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// Bug fix: read the orientation tag from the decoded metadata; the
	// previous code referenced an undefined variable `x`.
	orientation, err := metadata.Get(exif.Orientation)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// EXIF orientation values: 6 = rotate 90 CW, 3 = rotate 180,
	// 8 = rotate 270 CW. All other values need no rotation here.
	switch orientation.String() {
	case "6":
		return true, 90
	case "3":
		return true, 180
	case "8":
		return true, 270
	}
	return false, 0
}
// Resize decodes the image from src, scales it to c.Width preserving the
// aspect ratio, applies any EXIF-based rotation, and re-encodes it in its
// original format (jpeg or png). Returns a reader over the encoded bytes.
func Resize(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// Scale height by the same factor applied to the width so the aspect
	// ratio is preserved.
	factor := float64(c.Width) / float64(img.Bounds().Size().X)
	height := int(float64(img.Bounds().Size().Y) * factor)
	img = imaging.Resize(img, c.Width, height, imaging.Linear)
	// NOTE(review): src was already consumed by image.Decode above, so
	// needsRotation likely sees no EXIF data here — TODO confirm and buffer
	// the input if rotation must be honored.
	if rotate, angle := needsRotation(src); rotate {
		switch angle {
		case 90:
			img = imaging.Rotate90(img)
		case 180:
			img = imaging.Rotate180(img)
		case 270:
			img = imaging.Rotate270(img)
		}
	}
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
// CenterCrop decodes the image from src, crops it to the largest centered
// square, and re-encodes it in its original format (jpeg or png). Returns a
// reader over the encoded bytes. c is currently unused but kept for
// interface consistency with Resize.
func CenterCrop(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// The square's side is the smaller of the two dimensions.
	side := img.Bounds().Size().X
	if h := img.Bounds().Size().Y; h < side {
		side = h
	}
	img = imaging.CropCenter(img, side, side)
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
Rotate before calculating crop/resize
package fetch
import (
"bytes"
"fmt"
"github.com/disintegration/imaging"
"github.com/rwcarlsen/goexif/exif"
"image"
"image/jpeg"
"image/png"
"io"
)
// needsRotation inspects the EXIF metadata of src and reports whether the
// image must be rotated to appear upright, along with the clockwise angle
// (90, 180 or 270 degrees) to rotate by.
//
// Errors (no EXIF data, missing orientation tag) are logged and treated as
// "no rotation needed". NOTE(review): this consumes bytes from src; callers
// must not expect to re-read the same reader afterwards.
func needsRotation(src io.Reader) (bool, int) {
	metadata, err := exif.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// Bug fix: read the orientation tag from the decoded metadata; the
	// previous code referenced an undefined variable `x`.
	orientation, err := metadata.Get(exif.Orientation)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// EXIF orientation values: 6 = rotate 90 CW, 3 = rotate 180,
	// 8 = rotate 270 CW. All other values need no rotation here.
	switch orientation.String() {
	case "6":
		return true, 90
	case "3":
		return true, 180
	case "8":
		return true, 270
	}
	return false, 0
}
// Resize decodes the image from src, scales it to c.Width preserving the
// aspect ratio, applies any EXIF-based rotation, and re-encodes it in its
// original format (jpeg or png). Returns a reader over the encoded bytes.
func Resize(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// Scale height by the same factor applied to the width so the aspect
	// ratio is preserved.
	factor := float64(c.Width) / float64(img.Bounds().Size().X)
	height := int(float64(img.Bounds().Size().Y) * factor)
	img = imaging.Resize(img, c.Width, height, imaging.Linear)
	// NOTE(review): src was already consumed by image.Decode above, so
	// needsRotation likely sees no EXIF data here — TODO confirm and buffer
	// the input if rotation must be honored.
	if rotate, angle := needsRotation(src); rotate {
		switch angle {
		case 90:
			img = imaging.Rotate90(img)
		case 180:
			img = imaging.Rotate180(img)
		case 270:
			img = imaging.Rotate270(img)
		}
	}
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
// CenterCrop decodes the image from src, applies any EXIF-based rotation,
// crops to the largest centered square, and re-encodes it in its original
// format (jpeg or png). Returns a reader over the encoded bytes. c is
// currently unused but kept for interface consistency with Resize.
func CenterCrop(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	// Rotate before computing crop dimensions so the crop is taken from the
	// upright image.
	// NOTE(review): src was already consumed by image.Decode above, so
	// needsRotation likely sees no EXIF data here — TODO confirm and buffer
	// the input if rotation must be honored.
	if rotate, angle := needsRotation(src); rotate {
		switch angle {
		case 90:
			img = imaging.Rotate90(img)
		case 180:
			img = imaging.Rotate180(img)
		case 270:
			img = imaging.Rotate270(img)
		}
	}
	buf := new(bytes.Buffer)
	// The square's side is the smaller of the two dimensions.
	side := img.Bounds().Size().X
	if h := img.Bounds().Size().Y; h < side {
		side = h
	}
	img = imaging.CropCenter(img, side, side)
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
|
package fetch
import (
"bytes"
"fmt"
"github.com/disintegration/imaging"
"image"
"image/jpeg"
"image/png"
"io"
)
// Resize decodes the image from src, scales it to c.Width preserving the
// aspect ratio, and re-encodes it in its original format (jpeg or png).
// Returns a reader over the encoded bytes.
func Resize(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// Scale height by the same factor applied to the width so the aspect
	// ratio is preserved.
	factor := float64(c.Width) / float64(img.Bounds().Size().X)
	height := int(float64(img.Bounds().Size().Y) * factor)
	img = imaging.Resize(img, c.Width, height, imaging.Linear)
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
// CenterCrop decodes the image from src, crops it to the largest centered
// square, and re-encodes it in its original format (jpeg or png). Returns a
// reader over the encoded bytes. c is currently unused but kept for
// interface consistency with Resize.
func CenterCrop(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// The square's side is the smaller of the two dimensions.
	side := img.Bounds().Size().X
	if h := img.Bounds().Size().Y; h < side {
		side = h
	}
	img = imaging.CropCenter(img, side, side)
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
Add check for rotation, and rotate on resize
Here we don’t need to change anything on upload; we just rotate if needed
when resizing and cache the rotated results.
package fetch
import (
"bytes"
"fmt"
"github.com/disintegration/imaging"
"github.com/rwcarlsen/goexif/exif"
"image"
"image/jpeg"
"image/png"
"io"
)
// needsRotation inspects the EXIF metadata of src and reports whether the
// image must be rotated to appear upright, along with the clockwise angle
// (90, 180 or 270 degrees) to rotate by.
//
// Errors (no EXIF data, missing orientation tag) are logged and treated as
// "no rotation needed". NOTE(review): this consumes bytes from src; callers
// must not expect to re-read the same reader afterwards.
func needsRotation(src io.Reader) (bool, int) {
	metadata, err := exif.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// Bug fix: read the orientation tag from the decoded metadata; the
	// previous code referenced an undefined variable `x`.
	orientation, err := metadata.Get(exif.Orientation)
	if err != nil {
		fmt.Println(err.Error())
		return false, 0
	}
	// EXIF orientation values: 6 = rotate 90 CW, 3 = rotate 180,
	// 8 = rotate 270 CW. All other values need no rotation here.
	switch orientation.String() {
	case "6":
		return true, 90
	case "3":
		return true, 180
	case "8":
		return true, 270
	}
	return false, 0
}
// Resize decodes the image from src, scales it to c.Width preserving the
// aspect ratio, applies any EXIF-based rotation, and re-encodes it in its
// original format (jpeg or png). Returns a reader over the encoded bytes.
func Resize(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// Scale height by the same factor applied to the width so the aspect
	// ratio is preserved.
	factor := float64(c.Width) / float64(img.Bounds().Size().X)
	height := int(float64(img.Bounds().Size().Y) * factor)
	img = imaging.Resize(img, c.Width, height, imaging.Linear)
	// NOTE(review): src was already consumed by image.Decode above, so
	// needsRotation likely sees no EXIF data here — TODO confirm and buffer
	// the input if rotation must be honored.
	if rotate, angle := needsRotation(src); rotate {
		switch angle {
		case 90:
			img = imaging.Rotate90(img)
		case 180:
			img = imaging.Rotate180(img)
		case 270:
			img = imaging.Rotate270(img)
		}
	}
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
// CenterCrop decodes the image from src, crops it to the largest centered
// square, and re-encodes it in its original format (jpeg or png). Returns a
// reader over the encoded bytes. c is currently unused but kept for
// interface consistency with Resize.
func CenterCrop(src io.Reader, c *CacheContext) (io.Reader, error) {
	img, format, err := image.Decode(src)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	buf := new(bytes.Buffer)
	// The square's side is the smaller of the two dimensions.
	side := img.Bounds().Size().X
	if h := img.Bounds().Size().Y; h < side {
		side = h
	}
	img = imaging.CropCenter(img, side, side)
	switch format {
	case "jpeg":
		err = jpeg.Encode(buf, img, nil) // bug fix: error was discarded
	case "png":
		err = png.Encode(buf, img)
	}
	return buf, err
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuse
import (
"fmt"
"os"
"time"
bazilfuse "bazil.org/fuse"
"golang.org/x/net/context"
)
// An interface that must be implemented by file systems to be mounted with
// FUSE. See also the comments on request and response structs.
//
// Not all methods need to have interesting implementations. Embed a field of
// type fuseutil.NotImplementedFileSystem to inherit defaults that return
// ENOSYS to the kernel.
//
// Must be safe for concurrent access via all methods.
type FileSystem interface {
	// This method is called once when mounting the file system. It must succeed
	// in order for the mount to succeed.
	Init(
		ctx context.Context,
		req *InitRequest) (*InitResponse, error)

	///////////////////////////////////
	// Inodes
	///////////////////////////////////

	// Look up a child by name within a parent directory. The kernel calls this
	// when resolving user paths to dentry structs, which are then cached.
	LookUpInode(
		ctx context.Context,
		req *LookUpInodeRequest) (*LookUpInodeResponse, error)

	// Refresh the attributes for an inode whose ID was previously returned by
	// LookUpInode. The kernel calls this when the FUSE VFS layer's cache of
	// inode attributes is stale. This is controlled by the AttributesExpiration
	// field of responses to LookUp, etc.
	GetInodeAttributes(
		ctx context.Context,
		req *GetInodeAttributesRequest) (*GetInodeAttributesResponse, error)

	// Forget an inode ID previously issued (e.g. by LookUpInode or MkDir). The
	// kernel calls this when removing an inode from its internal caches.
	ForgetInode(
		ctx context.Context,
		req *ForgetInodeRequest) (*ForgetInodeResponse, error)

	///////////////////////////////////
	// Inode creation
	///////////////////////////////////

	// Create a directory inode as a child of an existing directory inode. The
	// kernel sends this in response to a mkdir(2) call.
	//
	// The kernel appears to verify the name doesn't already exist (mkdir calls
	// mkdirat calls user_path_create calls filename_create, which verifies:
	// http://goo.gl/FZpLu5). But volatile file systems and paranoid non-volatile
	// file systems should check for the reasons described below on CreateFile.
	MkDir(
		ctx context.Context,
		req *MkDirRequest) (*MkDirResponse, error)

	// Create a file inode and open it.
	//
	// The kernel calls this method when the user asks to open a file with the
	// O_CREAT flag and the kernel has observed that the file doesn't exist. (See
	// for example lookup_open, http://goo.gl/PlqE9d).
	//
	// However it's impossible to tell for sure that all kernels make this check
	// in all cases and the official fuse documentation is less than encouraging
	// ("If the file does not exist, first create it with the specified mode, and
	// then open it"). Therefore file systems would be smart to be paranoid and
	// check themselves, returning EEXIST when the file already exists. This of
	// course particularly applies to file systems that are volatile from the
	// kernel's point of view.
	CreateFile(
		ctx context.Context,
		req *CreateFileRequest) (*CreateFileResponse, error)

	///////////////////////////////////
	// Inode destruction
	///////////////////////////////////

	// Unlink a directory from its parent. Because directories cannot have a link
	// count above one, this means the directory inode should be deleted as well
	// once the kernel calls ForgetInode.
	//
	// The file system is responsible for checking that the directory is empty.
	//
	// Sample implementation in ext2: ext2_rmdir (http://goo.gl/B9QmFf)
	//
	// TODO(jacobsa): Add tests for the assertion about directory link counts
	// above (on a real file system and on memfs).
	RmDir(
		ctx context.Context,
		req *RmDirRequest) (*RmDirResponse, error)

	///////////////////////////////////
	// Directory handles
	///////////////////////////////////

	// Open a directory inode. The kernel calls this method when setting up a
	// struct file for a particular inode with type directory, usually in
	// response to an open(2) call from a user-space process.
	OpenDir(
		ctx context.Context,
		req *OpenDirRequest) (*OpenDirResponse, error)

	// Read entries from a directory previously opened with OpenDir.
	ReadDir(
		ctx context.Context,
		req *ReadDirRequest) (*ReadDirResponse, error)

	// Release a previously-minted directory handle. The kernel calls this when
	// there are no more references to an open directory: all file descriptors
	// are closed and all memory mappings are unmapped.
	//
	// The kernel guarantees that the handle ID will not be used in further calls
	// to the file system (unless it is reissued by the file system).
	ReleaseDirHandle(
		ctx context.Context,
		req *ReleaseDirHandleRequest) (*ReleaseDirHandleResponse, error)

	///////////////////////////////////
	// File handles
	///////////////////////////////////

	// Open a file inode. The kernel calls this method when setting up a struct
	// file for a particular inode with type file, usually in response to an
	// open(2) call from a user-space process.
	OpenFile(
		ctx context.Context,
		req *OpenFileRequest) (*OpenFileResponse, error)

	// Read data from a file previously opened with OpenFile.
	ReadFile(
		ctx context.Context,
		req *ReadFileRequest) (*ReadFileResponse, error)

	// Release a previously-minted file handle. The kernel calls this when there
	// are no more references to an open file: all file descriptors are closed
	// and all memory mappings are unmapped.
	//
	// The kernel guarantees that the handle ID will not be used in further calls
	// to the file system (unless it is reissued by the file system).
	ReleaseFileHandle(
		ctx context.Context,
		req *ReleaseFileHandleRequest) (*ReleaseFileHandleResponse, error)
}
////////////////////////////////////////////////////////////////////////
// Simple types
////////////////////////////////////////////////////////////////////////
// A 64-bit number used to uniquely identify a file or directory in the file
// system. File systems may mint inode IDs with any value except for
// RootInodeID.
//
// This corresponds to struct inode::i_no in the VFS layer.
// (Cf. http://goo.gl/tvYyQt)
type InodeID uint64
// A distinguished inode ID that identifies the root of the file system, e.g.
// in a request to OpenDir or LookUpInode. Unlike all other inode IDs, which
// are minted by the file system, the FUSE VFS layer may send a request for
// this ID without the file system ever having referenced it in a previous
// response.
const RootInodeID = 1
// init verifies at startup that RootInodeID agrees with bazilfuse.RootID.
//
// The check is performed at runtime rather than defining the constant in
// terms of bazilfuse.RootID for two reasons:
//
//  1. Users can more clearly see that the root ID is low and can therefore
//     be used as e.g. an array index, with space reserved up to the root.
//
//  2. The constant can be untyped and can therefore more easily be used as
//     an array index.
func init() {
	if RootInodeID != bazilfuse.RootID {
		panic(
			fmt.Sprintf(
				"Oops, RootInodeID is wrong: %v vs. %v",
				RootInodeID,
				bazilfuse.RootID))
	}
}
// Attributes for a file or directory inode. Corresponds to struct inode (cf.
// http://goo.gl/tvYyQt).
type InodeAttributes struct {
	// Size of the file — presumably in bytes; TODO confirm against consumers.
	Size uint64
	// Permission and type bits for the inode.
	Mode os.FileMode

	// Time information. See `man 2 stat` for full details.
	Atime  time.Time // Time of last access
	Mtime  time.Time // Time of last modification
	Ctime  time.Time // Time of last modification to inode
	Crtime time.Time // Time of creation (OS X only)

	// Ownership information
	Uid uint32
	Gid uint32
}
// A generation number for an inode. Irrelevant for file systems that won't be
// exported over NFS. For those that will and that reuse inode IDs when they
// become free, the generation number must change when an ID is reused.
//
// This corresponds to struct inode::i_generation in the VFS layer.
// (Cf. http://goo.gl/tvYyQt)
//
// Some related reading:
//
// http://fuse.sourceforge.net/doxygen/structfuse__entry__param.html
// http://stackoverflow.com/q/11071996/1505451
// http://goo.gl/CqvwyX
// http://julipedia.meroh.net/2005/09/nfs-file-handles.html
// http://goo.gl/wvo3MB
//
type GenerationNumber uint64
// An opaque 64-bit number used to identify a particular open handle to a file
// or directory.
//
// This corresponds to fuse_file_info::fh.
type HandleID uint64
// An offset into an open directory handle. This is opaque to FUSE, and can be
// used for whatever purpose the file system desires. See notes on
// ReadDirRequest.Offset for details.
type DirOffset uint64
// A header that is included with every request.
type RequestHeader struct {
// Credentials information for the process making the request.
Uid uint32
Gid uint32
}
// Information about a child inode within its parent directory. Shared by the
// responses for LookUpInode, MkDir, CreateFile, etc. Consumed by the kernel in
// order to set up a dcache entry.
type ChildInodeEntry struct {
	// The ID of the child inode. The file system must ensure that the returned
	// inode ID remains valid until a later call to ForgetInode.
	Child InodeID

	// A generation number for this incarnation of the inode with the given ID.
	// See comments on type GenerationNumber for more.
	Generation GenerationNumber

	// Current attributes for the child inode.
	//
	// When creating a new inode, the file system is responsible for initializing
	// and recording (where supported) attributes like time information,
	// ownership information, etc.
	//
	// Ownership information in particular must be set to something reasonable or
	// by default root will own everything and unprivileged users won't be able
	// to do anything useful. In traditional file systems in the kernel, the
	// function inode_init_owner (http://goo.gl/5qavg8) contains the
	// standards-compliant logic for this.
	Attributes InodeAttributes

	// The FUSE VFS layer in the kernel maintains a cache of file attributes,
	// used whenever up to date information about size, mode, etc. is needed.
	//
	// For example, this is the abridged call chain for fstat(2):
	//
	//  * (http://goo.gl/tKBH1p) fstat calls vfs_fstat.
	//  * (http://goo.gl/3HeITq) vfs_fstat eventually calls vfs_getattr_nosec.
	//  * (http://goo.gl/DccFQr) vfs_getattr_nosec calls i_op->getattr.
	//  * (http://goo.gl/dpKkst) fuse_getattr calls fuse_update_attributes.
	//  * (http://goo.gl/yNlqPw) fuse_update_attributes uses the values in the
	//    struct inode if allowed, otherwise calling out to the user-space code.
	//
	// In addition to obvious cases like fstat, this is also used in more subtle
	// cases like updating size information before seeking (http://goo.gl/2nnMFa)
	// or reading (http://goo.gl/FQSWs8).
	//
	// Most 'real' file systems do not set inode_operations::getattr, and
	// therefore vfs_getattr_nosec calls generic_fillattr which simply grabs the
	// information from the inode struct. This makes sense because these file
	// systems cannot spontaneously change; all modifications go through the
	// kernel which can update the inode struct as appropriate.
	//
	// In contrast, a FUSE file system may have spontaneous changes, so it calls
	// out to user space to fetch attributes. However this is expensive, so the
	// FUSE layer in the kernel caches the attributes if requested.
	//
	// This field controls when the attributes returned in this response and
	// stashed in the struct inode should be re-queried. Leave at the zero value
	// to disable caching.
	//
	// More reading:
	//     http://stackoverflow.com/q/21540315/1505451
	AttributesExpiration time.Time

	// The time until which the kernel may maintain an entry for this name to
	// inode mapping in its dentry cache. After this time, it will revalidate the
	// dentry.
	//
	// As in the discussion of attribute caching above, unlike real file systems,
	// FUSE file systems may spontaneously change their name -> inode mapping.
	// Therefore the FUSE VFS layer uses dentry_operations::d_revalidate
	// (http://goo.gl/dVea0h) to intercept lookups and revalidate by calling the
	// user-space LookUpInode method. However the latter may be slow, so it
	// caches the entries until the time defined by this field.
	//
	// Example code walk:
	//
	//     * (http://goo.gl/M2G3tO) lookup_dcache calls d_revalidate if enabled.
	//     * (http://goo.gl/ef0Elu) fuse_dentry_revalidate just uses the dentry's
	//       inode if fuse_dentry_time(entry) hasn't passed. Otherwise it sends a
	//       lookup request.
	//
	// Leave at the zero value to disable caching.
	EntryExpiration time.Time
}
////////////////////////////////////////////////////////////////////////
// Requests and responses
////////////////////////////////////////////////////////////////////////
type InitRequest struct {
Header RequestHeader
}
type InitResponse struct {
}
type LookUpInodeRequest struct {
Header RequestHeader
// The ID of the directory inode to which the child belongs.
Parent InodeID
// The name of the child of interest, relative to the parent. For example, in
// this directory structure:
//
// foo/
// bar/
// baz
//
// the file system may receive a request to look up the child named "bar" for
// the parent foo/.
Name string
}
type LookUpInodeResponse struct {
Entry ChildInodeEntry
}
type GetInodeAttributesRequest struct {
Header RequestHeader
// The inode of interest.
Inode InodeID
}
type GetInodeAttributesResponse struct {
// Attributes for the inode, and the time at which they should expire. See
// notes on LookUpInodeResponse.AttributesExpiration for more.
Attributes InodeAttributes
AttributesExpiration time.Time
}
type ForgetInodeRequest struct {
Header RequestHeader
// The inode to be forgotten. The kernel guarantees that the node ID will not
// be used in further calls to the file system (unless it is reissued by the
// file system).
ID InodeID
}
type ForgetInodeResponse struct {
}
type MkDirRequest struct {
Header RequestHeader
// The ID of parent directory inode within which to create the child.
Parent InodeID
// The name of the child to create, and the mode with which to create it.
Name string
Mode os.FileMode
}
type MkDirResponse struct {
// Information about the inode that was created.
Entry ChildInodeEntry
}
type CreateFileRequest struct {
Header RequestHeader
// The ID of parent directory inode within which to create the child file.
Parent InodeID
// The name of the child to create, and the mode with which to create it.
Name string
Mode os.FileMode
// Flags for the open operation.
Flags bazilfuse.OpenFlags
}
type CreateFileResponse struct {
// Information about the inode that was created.
Entry ChildInodeEntry
// An opaque ID that will be echoed in follow-up calls for this file using
// the same struct file in the kernel. In practice this usually means
// follow-up calls using the file descriptor returned by open(2).
//
// The handle may be supplied to the following methods:
//
// * ReadFile
// * ReleaseFileHandle
//
// The file system must ensure this ID remains valid until a later call to
// ReleaseFileHandle.
Handle HandleID
}
type RmDirRequest struct {
Header RequestHeader
// The ID of parent directory inode, and the name of the directory being
// removed within it.
Parent InodeID
Name string
}
type RmDirResponse struct {
}
type OpenDirRequest struct {
Header RequestHeader
// The ID of the inode to be opened.
Inode InodeID
// Mode and options flags.
Flags bazilfuse.OpenFlags
}
type OpenDirResponse struct {
// An opaque ID that will be echoed in follow-up calls for this directory
// using the same struct file in the kernel. In practice this usually means
// follow-up calls using the file descriptor returned by open(2).
//
// The handle may be supplied to the following methods:
//
// * ReadDir
// * ReleaseDirHandle
//
// The file system must ensure this ID remains valid until a later call to
// ReleaseDirHandle.
Handle HandleID
}
type ReadDirRequest struct {
Header RequestHeader
// The directory inode that we are reading, and the handle previously
// returned by OpenDir when opening that inode.
Inode InodeID
Handle HandleID
// The offset within the directory at which to read.
//
// Warning: this field is not necessarily a count of bytes. Its legal values
// are defined by the results returned in ReadDirResponse. See the notes
// below and the notes on that struct.
//
// In the Linux kernel this ultimately comes from file::f_pos, which starts
// at zero and is set by llseek and by the final consumed result returned by
// each call to ReadDir:
//
// * (http://goo.gl/2nWJPL) iterate_dir, which is called by getdents(2) and
// readdir(2), sets dir_context::pos to file::f_pos before calling
// f_op->iterate, and then does the opposite assignment afterward.
//
// * (http://goo.gl/rTQVSL) fuse_readdir, which implements iterate for fuse
// directories, passes dir_context::pos as the offset to fuse_read_fill,
// which passes it on to user-space. fuse_readdir later calls
// parse_dirfile with the same context.
//
// * (http://goo.gl/vU5ukv) For each returned result (except perhaps the
// last, which may be truncated by the page boundary), parse_dirfile
// updates dir_context::pos with fuse_dirent::off.
//
// It is affected by the Posix directory stream interfaces in the following
// manner:
//
// * (http://goo.gl/fQhbyn, http://goo.gl/ns1kDF) opendir initially causes
// filepos to be set to zero.
//
// * (http://goo.gl/ezNKyR, http://goo.gl/xOmDv0) readdir allows the user
// to iterate through the directory one entry at a time. As each entry is
// consumed, its d_off field is stored in __dirstream::filepos.
//
// * (http://goo.gl/WEOXG8, http://goo.gl/rjSXl3) telldir allows the user
// to obtain the d_off field from the most recently returned entry.
//
// * (http://goo.gl/WG3nDZ, http://goo.gl/Lp0U6W) seekdir allows the user
// to seek backward to an offset previously returned by telldir. It
// stores the new offset in filepos, and calls llseek to update the
// kernel's struct file.
//
// * (http://goo.gl/gONQhz, http://goo.gl/VlrQkc) rewinddir allows the user
// to go back to the beginning of the directory, obtaining a fresh view.
// It updates filepos and calls llseek to update the kernel's struct
// file.
//
// Unfortunately, FUSE offers no way to intercept seeks
// (http://goo.gl/H6gEXa), so there is no way to cause seekdir or rewinddir
// to fail. Additionally, there is no way to distinguish an explicit
// rewinddir followed by readdir from the initial readdir, or a rewinddir
// from a seekdir to the value returned by telldir just after opendir.
//
// Luckily, Posix is vague about what the user will see if they seek
// backwards, and requires the user not to seek to an old offset after a
// rewind. The only requirement on freshness is that rewinddir results in
// something that looks like a newly-opened directory. So FUSE file systems
// may e.g. cache an entire fresh listing for each ReadDir with a zero
// offset, and return array offsets into that cached listing.
Offset DirOffset
// The maximum number of bytes to return in ReadDirResponse.Data. A smaller
// number is acceptable.
Size int
}
// ReadDirResponse is the response to a ReadDir request.
type ReadDirResponse struct {
	// A buffer consisting of a sequence of FUSE directory entries in the format
	// generated by fuse_add_direntry (http://goo.gl/qCcHCV), which is consumed
	// by parse_dirfile (http://goo.gl/2WUmD2). Use fuseutil.AppendDirent to
	// generate this data.
	//
	// The buffer must not exceed the length specified in ReadDirRequest.Size. It
	// is okay for the final entry to be truncated; parse_dirfile copes with this
	// by ignoring the partial record.
	//
	// Each entry returned exposes a directory offset to the user that may later
	// show up in ReadDirRequest.Offset. See notes on that field for more
	// information.
	//
	// An empty buffer indicates the end of the directory has been reached.
	Data []byte
}

// ReleaseDirHandleRequest is a request to release a directory handle
// previously minted by the file system (see OpenDirResponse.Handle).
type ReleaseDirHandleRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}

// ReleaseDirHandleResponse is the (empty) response to a ReleaseDirHandle
// request.
type ReleaseDirHandleResponse struct {
}

// OpenFileRequest is a request to open a file inode.
type OpenFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of the inode to be opened.
	Inode InodeID

	// Mode and options flags.
	Flags bazilfuse.OpenFlags
}

// OpenFileResponse is the response to an OpenFile request.
type OpenFileResponse struct {
	// An opaque ID that will be echoed in follow-up calls for this file using
	// the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied to the following methods:
	//
	// * ReadFile
	// * ReleaseFileHandle
	//
	// The file system must ensure this ID remains valid until a later call to
	// ReleaseFileHandle.
	Handle HandleID
}

// ReadFileRequest is a request to read a range of bytes from an open file.
type ReadFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The file inode that we are reading, and the handle previously returned by
	// OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The range of the file to read.
	//
	// The FUSE documentation requires that exactly the number of bytes be
	// returned, except in the case of EOF or error (http://goo.gl/ZgfBkF). This
	// appears to be because it uses file mmapping machinery
	// (http://goo.gl/SGxnaN) to read a page at a time. It appears to understand
	// where EOF is by checking the inode size (http://goo.gl/0BkqKD), returned
	// by a previous call to LookUpInode, GetInodeAttributes, etc.
	Offset int64
	Size   int
}

// ReadFileResponse is the response to a ReadFile request.
type ReadFileResponse struct {
	// The data read.
	Data []byte
}

// ReleaseFileHandleRequest is a request to release a file handle previously
// minted by the file system (see OpenFileResponse.Handle).
type ReleaseFileHandleRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}

// ReleaseFileHandleResponse is the (empty) response to a ReleaseFileHandle
// request.
type ReleaseFileHandleResponse struct {
}
Added a WriteFile method.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuse
import (
"fmt"
"os"
"time"
bazilfuse "bazil.org/fuse"
"golang.org/x/net/context"
)
// FileSystem is an interface that must be implemented by file systems to be
// mounted with FUSE. See also the comments on request and response structs.
//
// Not all methods need to have interesting implementations. Embed a field of
// type fuseutil.NotImplementedFileSystem to inherit defaults that return
// ENOSYS to the kernel.
//
// Must be safe for concurrent access via all methods.
type FileSystem interface {
	// This method is called once when mounting the file system. It must succeed
	// in order for the mount to succeed.
	Init(
		ctx context.Context,
		req *InitRequest) (*InitResponse, error)

	///////////////////////////////////
	// Inodes
	///////////////////////////////////

	// Look up a child by name within a parent directory. The kernel calls this
	// when resolving user paths to dentry structs, which are then cached.
	LookUpInode(
		ctx context.Context,
		req *LookUpInodeRequest) (*LookUpInodeResponse, error)

	// Refresh the attributes for an inode whose ID was previously returned by
	// LookUpInode. The kernel calls this when the FUSE VFS layer's cache of
	// inode attributes is stale. This is controlled by the AttributesExpiration
	// field of responses to LookUp, etc.
	GetInodeAttributes(
		ctx context.Context,
		req *GetInodeAttributesRequest) (*GetInodeAttributesResponse, error)

	// Forget an inode ID previously issued (e.g. by LookUpInode or MkDir). The
	// kernel calls this when removing an inode from its internal caches.
	ForgetInode(
		ctx context.Context,
		req *ForgetInodeRequest) (*ForgetInodeResponse, error)

	///////////////////////////////////
	// Inode creation
	///////////////////////////////////

	// Create a directory inode as a child of an existing directory inode. The
	// kernel sends this in response to a mkdir(2) call.
	//
	// The kernel appears to verify the name doesn't already exist (mkdir calls
	// mkdirat calls user_path_create calls filename_create, which verifies:
	// http://goo.gl/FZpLu5). But volatile file systems and paranoid non-volatile
	// file systems should check for the reasons described below on CreateFile.
	MkDir(
		ctx context.Context,
		req *MkDirRequest) (*MkDirResponse, error)

	// Create a file inode and open it.
	//
	// The kernel calls this method when the user asks to open a file with the
	// O_CREAT flag and the kernel has observed that the file doesn't exist. (See
	// for example lookup_open, http://goo.gl/PlqE9d).
	//
	// However it's impossible to tell for sure that all kernels make this check
	// in all cases and the official fuse documentation is less than encouraging
	// (" the file does not exist, first create it with the specified mode, and
	// then open it"). Therefore file systems would be smart to be paranoid and
	// check themselves, returning EEXIST when the file already exists. This of
	// course particularly applies to file systems that are volatile from the
	// kernel's point of view.
	CreateFile(
		ctx context.Context,
		req *CreateFileRequest) (*CreateFileResponse, error)

	///////////////////////////////////
	// Inode destruction
	///////////////////////////////////

	// Unlink a directory from its parent. Because directories cannot have a link
	// count above one, this means the directory inode should be deleted as well
	// once the kernel calls ForgetInode.
	//
	// The file system is responsible for checking that the directory is empty.
	//
	// Sample implementation in ext2: ext2_rmdir (http://goo.gl/B9QmFf)
	//
	// TODO(jacobsa): Add tests for the assertion about directory link counts
	// above (on a real file system and on memfs).
	RmDir(
		ctx context.Context,
		req *RmDirRequest) (*RmDirResponse, error)

	///////////////////////////////////
	// Directory handles
	///////////////////////////////////

	// Open a directory inode. The kernel calls this method when setting up a
	// struct file for a particular inode with type directory, usually in
	// response to an open(2) call from a user-space process.
	OpenDir(
		ctx context.Context,
		req *OpenDirRequest) (*OpenDirResponse, error)

	// Read entries from a directory previously opened with OpenDir.
	ReadDir(
		ctx context.Context,
		req *ReadDirRequest) (*ReadDirResponse, error)

	// Release a previously-minted directory handle. The kernel calls this when
	// there are no more references to an open directory: all file descriptors
	// are closed and all memory mappings are unmapped.
	//
	// The kernel guarantees that the handle ID will not be used in further calls
	// to the file system (unless it is reissued by the file system).
	ReleaseDirHandle(
		ctx context.Context,
		req *ReleaseDirHandleRequest) (*ReleaseDirHandleResponse, error)

	///////////////////////////////////
	// File handles
	///////////////////////////////////

	// Open a file inode. The kernel calls this method when setting up a struct
	// file for a particular inode with type file, usually in response to an
	// open(2) call from a user-space process.
	OpenFile(
		ctx context.Context,
		req *OpenFileRequest) (*OpenFileResponse, error)

	// Read data from a file previously opened with CreateFile or OpenFile.
	ReadFile(
		ctx context.Context,
		req *ReadFileRequest) (*ReadFileResponse, error)

	// Write data to a file previously opened with CreateFile or OpenFile.
	WriteFile(
		ctx context.Context,
		req *WriteFileRequest) (*WriteFileResponse, error)

	// Release a previously-minted file handle. The kernel calls this when there
	// are no more references to an open file: all file descriptors are closed
	// and all memory mappings are unmapped.
	//
	// The kernel guarantees that the handle ID will not be used in further calls
	// to the file system (unless it is reissued by the file system).
	ReleaseFileHandle(
		ctx context.Context,
		req *ReleaseFileHandleRequest) (*ReleaseFileHandleResponse, error)
}
////////////////////////////////////////////////////////////////////////
// Simple types
////////////////////////////////////////////////////////////////////////

// InodeID is a 64-bit number used to uniquely identify a file or directory in
// the file system. File systems may mint inode IDs with any value except for
// RootInodeID.
//
// This corresponds to struct inode::i_no in the VFS layer.
// (Cf. http://goo.gl/tvYyQt)
type InodeID uint64

// RootInodeID is a distinguished inode ID that identifies the root of the
// file system, e.g. in a request to OpenDir or LookUpInode. Unlike all other
// inode IDs, which are minted by the file system, the FUSE VFS layer may send
// a request for this ID without the file system ever having referenced it in
// a previous response.
const RootInodeID = 1
// init verifies at start-up that RootInodeID agrees with bazilfuse.RootID.
// The check happens at runtime rather than defining the constant in terms of
// bazilfuse.RootID for two reasons:
//
//  1. Users can more clearly see that the root ID is low and can therefore be
//     used as e.g. an array index, with space reserved up to the root.
//
//  2. The constant can be untyped and can therefore more easily be used as an
//     array index.
func init() {
	if RootInodeID == bazilfuse.RootID {
		return
	}
	msg := fmt.Sprintf(
		"Oops, RootInodeID is wrong: %v vs. %v",
		RootInodeID,
		bazilfuse.RootID)
	panic(msg)
}
// InodeAttributes contains attributes for a file or directory inode.
// Corresponds to struct inode (cf. http://goo.gl/tvYyQt).
type InodeAttributes struct {
	Size uint64
	Mode os.FileMode

	// Time information. See `man 2 stat` for full details.
	Atime  time.Time // Time of last access
	Mtime  time.Time // Time of last modification
	Ctime  time.Time // Time of last modification to inode
	Crtime time.Time // Time of creation (OS X only)

	// Ownership information
	Uid uint32
	Gid uint32
}

// GenerationNumber is a generation number for an inode. Irrelevant for file
// systems that won't be exported over NFS. For those that will and that reuse
// inode IDs when they become free, the generation number must change when an
// ID is reused.
//
// This corresponds to struct inode::i_generation in the VFS layer.
// (Cf. http://goo.gl/tvYyQt)
//
// Some related reading:
//
//     http://fuse.sourceforge.net/doxygen/structfuse__entry__param.html
//     http://stackoverflow.com/q/11071996/1505451
//     http://goo.gl/CqvwyX
//     http://julipedia.meroh.net/2005/09/nfs-file-handles.html
//     http://goo.gl/wvo3MB
//
type GenerationNumber uint64

// HandleID is an opaque 64-bit number used to identify a particular open
// handle to a file or directory.
//
// This corresponds to fuse_file_info::fh.
type HandleID uint64

// DirOffset is an offset into an open directory handle. This is opaque to
// FUSE, and can be used for whatever purpose the file system desires. See
// notes on ReadDirRequest.Offset for details.
type DirOffset uint64

// RequestHeader is a header that is included with every request.
type RequestHeader struct {
	// Credentials information for the process making the request.
	Uid uint32
	Gid uint32
}
// ChildInodeEntry contains information about a child inode within its parent
// directory. Shared by the responses for LookUpInode, MkDir, CreateFile, etc.
// Consumed by the kernel in order to set up a dcache entry.
type ChildInodeEntry struct {
	// The ID of the child inode. The file system must ensure that the returned
	// inode ID remains valid until a later call to ForgetInode.
	Child InodeID

	// A generation number for this incarnation of the inode with the given ID.
	// See comments on type GenerationNumber for more.
	Generation GenerationNumber

	// Current attributes for the child inode.
	//
	// When creating a new inode, the file system is responsible for initializing
	// and recording (where supported) attributes like time information,
	// ownership information, etc.
	//
	// Ownership information in particular must be set to something reasonable or
	// by default root will own everything and unprivileged users won't be able
	// to do anything useful. In traditional file systems in the kernel, the
	// function inode_init_owner (http://goo.gl/5qavg8) contains the
	// standards-compliant logic for this.
	Attributes InodeAttributes

	// The FUSE VFS layer in the kernel maintains a cache of file attributes,
	// used whenever up to date information about size, mode, etc. is needed.
	//
	// For example, this is the abridged call chain for fstat(2):
	//
	// * (http://goo.gl/tKBH1p) fstat calls vfs_fstat.
	// * (http://goo.gl/3HeITq) vfs_fstat eventually calls vfs_getattr_nosec.
	// * (http://goo.gl/DccFQr) vfs_getattr_nosec calls i_op->getattr.
	// * (http://goo.gl/dpKkst) fuse_getattr calls fuse_update_attributes.
	// * (http://goo.gl/yNlqPw) fuse_update_attributes uses the values in the
	//   struct inode if allowed, otherwise calling out to the user-space code.
	//
	// In addition to obvious cases like fstat, this is also used in more subtle
	// cases like updating size information before seeking (http://goo.gl/2nnMFa)
	// or reading (http://goo.gl/FQSWs8).
	//
	// Most 'real' file systems do not set inode_operations::getattr, and
	// therefore vfs_getattr_nosec calls generic_fillattr which simply grabs the
	// information from the inode struct. This makes sense because these file
	// systems cannot spontaneously change; all modifications go through the
	// kernel which can update the inode struct as appropriate.
	//
	// In contrast, a FUSE file system may have spontaneous changes, so it calls
	// out to user space to fetch attributes. However this is expensive, so the
	// FUSE layer in the kernel caches the attributes if requested.
	//
	// This field controls when the attributes returned in this response and
	// stashed in the struct inode should be re-queried. Leave at the zero value
	// to disable caching.
	//
	// More reading:
	//     http://stackoverflow.com/q/21540315/1505451
	AttributesExpiration time.Time

	// The time until which the kernel may maintain an entry for this name to
	// inode mapping in its dentry cache. After this time, it will revalidate the
	// dentry.
	//
	// As in the discussion of attribute caching above, unlike real file systems,
	// FUSE file systems may spontaneously change their name -> inode mapping.
	// Therefore the FUSE VFS layer uses dentry_operations::d_revalidate
	// (http://goo.gl/dVea0h) to intercept lookups and revalidate by calling the
	// user-space LookUpInode method. However the latter may be slow, so it
	// caches the entries until the time defined by this field.
	//
	// Example code walk:
	//
	// * (http://goo.gl/M2G3tO) lookup_dcache calls d_revalidate if enabled.
	// * (http://goo.gl/ef0Elu) fuse_dentry_revalidate just uses the dentry's
	//   inode if fuse_dentry_time(entry) hasn't passed. Otherwise it sends a
	//   lookup request.
	//
	// Leave at the zero value to disable caching.
	EntryExpiration time.Time
}
////////////////////////////////////////////////////////////////////////
// Requests and responses
////////////////////////////////////////////////////////////////////////

// InitRequest is the request sent once at mount time. See FileSystem.Init.
type InitRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader
}

// InitResponse is the (empty) response to an InitRequest.
type InitResponse struct {
}

// LookUpInodeRequest is a request to look up a child inode by name within a
// parent directory.
type LookUpInodeRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of the directory inode to which the child belongs.
	Parent InodeID

	// The name of the child of interest, relative to the parent. For example, in
	// this directory structure:
	//
	//     foo/
	//         bar/
	//             baz
	//
	// the file system may receive a request to look up the child named "bar" for
	// the parent foo/.
	Name string
}

// LookUpInodeResponse is the response to a LookUpInodeRequest.
type LookUpInodeResponse struct {
	// Information about the child inode. See notes on ChildInodeEntry.
	Entry ChildInodeEntry
}

// GetInodeAttributesRequest is a request for the current attributes of an
// inode.
type GetInodeAttributesRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The inode of interest.
	Inode InodeID
}

// GetInodeAttributesResponse is the response to a GetInodeAttributesRequest.
type GetInodeAttributesResponse struct {
	// Attributes for the inode, and the time at which they should expire. See
	// notes on ChildInodeEntry.AttributesExpiration for more.
	Attributes           InodeAttributes
	AttributesExpiration time.Time
}

// ForgetInodeRequest tells the file system that the kernel has removed an
// inode from its internal caches.
type ForgetInodeRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The inode to be forgotten. The kernel guarantees that the node ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	ID InodeID
}

// ForgetInodeResponse is the (empty) response to a ForgetInodeRequest.
type ForgetInodeResponse struct {
}

// MkDirRequest is a request to create a directory inode. See FileSystem.MkDir.
type MkDirRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of parent directory inode within which to create the child.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode
}

// MkDirResponse is the response to a MkDirRequest.
type MkDirResponse struct {
	// Information about the inode that was created.
	Entry ChildInodeEntry
}

// CreateFileRequest is a request to create and open a file inode. See
// FileSystem.CreateFile.
type CreateFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of parent directory inode within which to create the child file.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode

	// Flags for the open operation.
	Flags bazilfuse.OpenFlags
}

// CreateFileResponse is the response to a CreateFileRequest.
type CreateFileResponse struct {
	// Information about the inode that was created.
	Entry ChildInodeEntry

	// An opaque ID that will be echoed in follow-up calls for this file using
	// the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied to the following methods:
	//
	// * ReadFile
	// * WriteFile
	// * ReleaseFileHandle
	//
	// The file system must ensure this ID remains valid until a later call to
	// ReleaseFileHandle.
	Handle HandleID
}

// RmDirRequest is a request to remove a directory. See FileSystem.RmDir.
type RmDirRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of parent directory inode, and the name of the directory being
	// removed within it.
	Parent InodeID
	Name   string
}

// RmDirResponse is the (empty) response to an RmDirRequest.
type RmDirResponse struct {
}

// OpenDirRequest is a request to open a directory inode. See
// FileSystem.OpenDir.
type OpenDirRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of the inode to be opened.
	Inode InodeID

	// Mode and options flags.
	Flags bazilfuse.OpenFlags
}

// OpenDirResponse is the response to an OpenDirRequest.
type OpenDirResponse struct {
	// An opaque ID that will be echoed in follow-up calls for this directory
	// using the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied to the following methods:
	//
	// * ReadDir
	// * ReleaseDirHandle
	//
	// The file system must ensure this ID remains valid until a later call to
	// ReleaseDirHandle.
	Handle HandleID
}
// ReadDirRequest is a request to read entries from a directory previously
// opened with OpenDir.
type ReadDirRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The directory inode that we are reading, and the handle previously
	// returned by OpenDir when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The offset within the directory at which to read.
	//
	// Warning: this field is not necessarily a count of bytes. Its legal values
	// are defined by the results returned in ReadDirResponse. See the notes
	// below and the notes on that struct.
	//
	// In the Linux kernel this ultimately comes from file::f_pos, which starts
	// at zero and is set by llseek and by the final consumed result returned by
	// each call to ReadDir:
	//
	// * (http://goo.gl/2nWJPL) iterate_dir, which is called by getdents(2) and
	//   readdir(2), sets dir_context::pos to file::f_pos before calling
	//   f_op->iterate, and then does the opposite assignment afterward.
	//
	// * (http://goo.gl/rTQVSL) fuse_readdir, which implements iterate for fuse
	//   directories, passes dir_context::pos as the offset to fuse_read_fill,
	//   which passes it on to user-space. fuse_readdir later calls
	//   parse_dirfile with the same context.
	//
	// * (http://goo.gl/vU5ukv) For each returned result (except perhaps the
	//   last, which may be truncated by the page boundary), parse_dirfile
	//   updates dir_context::pos with fuse_dirent::off.
	//
	// It is affected by the Posix directory stream interfaces in the following
	// manner:
	//
	// * (http://goo.gl/fQhbyn, http://goo.gl/ns1kDF) opendir initially causes
	//   filepos to be set to zero.
	//
	// * (http://goo.gl/ezNKyR, http://goo.gl/xOmDv0) readdir allows the user
	//   to iterate through the directory one entry at a time. As each entry is
	//   consumed, its d_off field is stored in __dirstream::filepos.
	//
	// * (http://goo.gl/WEOXG8, http://goo.gl/rjSXl3) telldir allows the user
	//   to obtain the d_off field from the most recently returned entry.
	//
	// * (http://goo.gl/WG3nDZ, http://goo.gl/Lp0U6W) seekdir allows the user
	//   to seek backward to an offset previously returned by telldir. It
	//   stores the new offset in filepos, and calls llseek to update the
	//   kernel's struct file.
	//
	// * (http://goo.gl/gONQhz, http://goo.gl/VlrQkc) rewinddir allows the user
	//   to go back to the beginning of the directory, obtaining a fresh view.
	//   It updates filepos and calls llseek to update the kernel's struct
	//   file.
	//
	// Unfortunately, FUSE offers no way to intercept seeks
	// (http://goo.gl/H6gEXa), so there is no way to cause seekdir or rewinddir
	// to fail. Additionally, there is no way to distinguish an explicit
	// rewinddir followed by readdir from the initial readdir, or a rewinddir
	// from a seekdir to the value returned by telldir just after opendir.
	//
	// Luckily, Posix is vague about what the user will see if they seek
	// backwards, and requires the user not to seek to an old offset after a
	// rewind. The only requirement on freshness is that rewinddir results in
	// something that looks like a newly-opened directory. So FUSE file systems
	// may e.g. cache an entire fresh listing for each ReadDir with a zero
	// offset, and return array offsets into that cached listing.
	Offset DirOffset

	// The maximum number of bytes to return in ReadDirResponse.Data. A smaller
	// number is acceptable.
	Size int
}
// ReadDirResponse is the response to a ReadDirRequest.
type ReadDirResponse struct {
	// A buffer consisting of a sequence of FUSE directory entries in the format
	// generated by fuse_add_direntry (http://goo.gl/qCcHCV), which is consumed
	// by parse_dirfile (http://goo.gl/2WUmD2). Use fuseutil.AppendDirent to
	// generate this data.
	//
	// The buffer must not exceed the length specified in ReadDirRequest.Size. It
	// is okay for the final entry to be truncated; parse_dirfile copes with this
	// by ignoring the partial record.
	//
	// Each entry returned exposes a directory offset to the user that may later
	// show up in ReadDirRequest.Offset. See notes on that field for more
	// information.
	//
	// An empty buffer indicates the end of the directory has been reached.
	Data []byte
}

// ReleaseDirHandleRequest is a request to release a directory handle
// previously minted by the file system (see OpenDirResponse.Handle).
type ReleaseDirHandleRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}

// ReleaseDirHandleResponse is the (empty) response to a ReleaseDirHandle
// request.
type ReleaseDirHandleResponse struct {
}

// OpenFileRequest is a request to open a file inode.
type OpenFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The ID of the inode to be opened.
	Inode InodeID

	// Mode and options flags.
	Flags bazilfuse.OpenFlags
}

// OpenFileResponse is the response to an OpenFileRequest.
type OpenFileResponse struct {
	// An opaque ID that will be echoed in follow-up calls for this file using
	// the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied to the following methods:
	//
	// * ReadFile
	// * WriteFile
	// * ReleaseFileHandle
	//
	// The file system must ensure this ID remains valid until a later call to
	// ReleaseFileHandle.
	Handle HandleID
}

// ReadFileRequest is a request to read a range of bytes from an open file.
type ReadFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The file inode that we are reading, and the handle previously returned by
	// CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The range of the file to read.
	//
	// The FUSE documentation requires that exactly the number of bytes be
	// returned, except in the case of EOF or error (http://goo.gl/ZgfBkF). This
	// appears to be because it uses file mmapping machinery
	// (http://goo.gl/SGxnaN) to read a page at a time. It appears to understand
	// where EOF is by checking the inode size (http://goo.gl/0BkqKD), returned
	// by a previous call to LookUpInode, GetInodeAttributes, etc.
	Offset int64
	Size   int
}

// ReadFileResponse is the response to a ReadFileRequest.
type ReadFileResponse struct {
	// The data read.
	Data []byte
}

// WriteFileRequest is a request to write data to a file previously opened
// with CreateFile or OpenFile.
type WriteFileRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The file inode that we are modifying, and the handle previously returned
	// by CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The data to write, and the offset at which to write it.
	//
	// The FUSE documentation requires that exactly the number of bytes supplied
	// be written, except on error (http://goo.gl/KUpwwn). This appears to be
	// because it uses file mmapping machinery (http://goo.gl/SGxnaN) to write a
	// page at a time.
	//
	// TODO(jacobsa): Figure out what the posix semantics are for extending the
	// file, and document them here.
	Data   []byte
	Offset int64
}

// WriteFileResponse is the (empty) response to a WriteFileRequest.
type WriteFileResponse struct {
}

// ReleaseFileHandleRequest is a request to release a file handle previously
// minted by the file system (see OpenFileResponse.Handle).
type ReleaseFileHandleRequest struct {
	// Credentials information for the process making the request.
	Header RequestHeader

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}

// ReleaseFileHandleResponse is the (empty) response to a ReleaseFileHandle
// request.
type ReleaseFileHandleResponse struct {
}
|
package verbose
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
)
// FileHandler writes log messages to a file or to a directory of per-level
// files, depending on the mode determined in NewFileHandler.
type FileHandler struct {
	min       LogLevel   // minimum level this handler responds to (see Handles)
	max       LogLevel   // maximum level this handler responds to (see Handles)
	path      string     // log file path, or directory path when separate is true
	separate  bool       // true: one file per level+logger; false: single file
	formatter Formatter  // renders an *Entry into bytes for writing
	m         sync.Mutex // serializes file open/write/close in WriteLog
}
// NewFileHandler takes the path and returns a FileHandler. If the path exists,
// file or directory mode will be determined by what path is. If it doesn't
// exist, the mode will be file if path has an extension, otherwise it will be
// directory. In file mode, all log messages are written to a single file.
// In directory mode, each level is written to its own file.
func NewFileHandler(path string) (*FileHandler, error) {
	f := &FileHandler{
		min:       LogLevelDebug,
		max:       LogLevelFatal,
		path:      path,
		formatter: &LineFormatter{},
		m:         sync.Mutex{},
	}

	// Determine if the path is a file or directory.
	// We cannot assume the path exists yet.
	stat, err := os.Stat(path)
	if err == nil { // Easiest, path exists
		f.separate = stat.IsDir()
		return f, nil
	}
	if !os.IsNotExist(err) {
		// Bug fix: a permission error (or other Stat failure) was previously
		// swallowed and the handler returned as if everything was fine.
		return nil, err
	}

	// Typically an extension means it's a file
	if filepath.Ext(path) == "" {
		// Attempt to create the directory
		if err := os.MkdirAll(path, 0755); err != nil {
			return nil, err
		}
		f.separate = true
		return f, nil
	}

	// File mode: make sure the parent directory exists before creating the
	// file, so a path like "logs/app.log" works on a fresh install.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, err
	}
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	file.Close()
	f.separate = false
	return f, nil
}
// SetLevel collapses the handler's range to the single level l, making the
// handler respond only to that level.
func (f *FileHandler) SetLevel(l LogLevel) {
	f.max = l
	f.min = l
}
// SetMinLevel sets the lowest log level the handler will handle. A value
// above the current maximum is ignored.
func (f *FileHandler) SetMinLevel(l LogLevel) {
	if l <= f.max {
		f.min = l
	}
}
// SetMaxLevel sets the highest log level the handler will handle. A value
// below the current minimum is ignored.
func (f *FileHandler) SetMaxLevel(l LogLevel) {
	if l >= f.min {
		f.max = l
	}
}
// SetFormatter replaces the Formatter used to render log entries.
func (f *FileHandler) SetFormatter(formatter Formatter) {
	f.formatter = formatter
}
// Handles reports whether log level l falls within the handler's
// [min, max] range.
func (f *FileHandler) Handles(l LogLevel) bool {
	return f.min <= l && l <= f.max
}
// WriteLog writes the formatted entry to the appropriate log file. In file
// mode everything goes to f.path; in directory mode each level/logger pair
// gets its own "<level>-<logger>.log" file inside f.path. Errors are printed
// to stdout and otherwise ignored.
func (f *FileHandler) WriteLog(e *Entry) {
	var logfile string
	if !f.separate {
		logfile = f.path
	} else {
		logfile = fmt.Sprintf("%s-%s.log", strings.ToLower(e.Level.String()), e.Logger.Name())
		logfile = path.Join(f.path, logfile)
	}

	f.m.Lock()
	defer f.m.Unlock()

	file, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening log file: %v\n", err)
		// Bug fix: previously execution fell through and attempted to write to
		// (and close) a nil *os.File.
		return
	}
	defer file.Close()

	if _, err = file.Write(f.formatter.FormatByte(e)); err != nil {
		fmt.Printf("Error writing to log file: %v\n", err)
	}
}
// Close satisfies the interface, NOOP: WriteLog opens and closes the log file
// on every call, so no resources are held between writes.
func (f *FileHandler) Close() {}
FileHandler: create directory path for log file if needed
package verbose
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
)
// FileHandler writes log messages to a file or to a directory of per-level
// files, depending on the mode determined in NewFileHandler.
type FileHandler struct {
	min       LogLevel   // minimum level this handler responds to (see Handles)
	max       LogLevel   // maximum level this handler responds to (see Handles)
	path      string     // log file path, or directory path when separate is true
	separate  bool       // true: one file per level+logger; false: single file
	formatter Formatter  // renders an *Entry into bytes for writing
	m         sync.Mutex // serializes file open/write/close in WriteLog
}
// NewFileHandler takes the path and returns a FileHandler. If the path exists,
// file or directory mode will be determined by what path is. If it doesn't
// exist, the mode will be file if path has an extension, otherwise it will be
// directory. In file mode, all log messages are written to a single file.
// In directory mode, each level is written to its own file.
func NewFileHandler(path string) (*FileHandler, error) {
	abs, err := filepath.Abs(path)
	if err != nil {
		// Bug fix: the Abs error was previously discarded with a blank
		// identifier, which could silently leave an empty path in place.
		return nil, err
	}
	path = abs

	f := &FileHandler{
		min:       LogLevelDebug,
		max:       LogLevelFatal,
		path:      path,
		formatter: &LineFormatter{},
		m:         sync.Mutex{},
	}

	// Determine if the path is a file or directory.
	// We cannot assume the path exists yet.
	stat, err := os.Stat(path)
	if err == nil { // Easiest, path exists
		f.separate = stat.IsDir()
		return f, nil
	}
	if !os.IsNotExist(err) {
		return nil, err
	}

	// Typically an extension means it's a file
	if filepath.Ext(path) == "" {
		// Attempt to create the directory
		if err := os.MkdirAll(path, 0755); err != nil {
			return nil, err
		}
		f.separate = true
		return f, nil
	}

	// Attempt to create the directory path, then the file itself
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, err
	}
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	file.Close()
	f.separate = false
	return f, nil
}
// SetLevel collapses the handler's range to the single level l, making the
// handler respond only to that level.
func (f *FileHandler) SetLevel(l LogLevel) {
	f.max = l
	f.min = l
}
// SetMinLevel sets the lowest log level the handler will handle. A value
// above the current maximum is ignored.
func (f *FileHandler) SetMinLevel(l LogLevel) {
	if l <= f.max {
		f.min = l
	}
}
// SetMaxLevel sets the highest log level the handler will handle. A value
// below the current minimum is ignored.
func (f *FileHandler) SetMaxLevel(l LogLevel) {
	if l >= f.min {
		f.max = l
	}
}
// SetFormatter replaces the Formatter used to render log entries.
func (f *FileHandler) SetFormatter(formatter Formatter) {
	f.formatter = formatter
}
// Handles reports whether log level l falls within the handler's
// [min, max] range.
func (f *FileHandler) Handles(l LogLevel) bool {
	return f.min <= l && l <= f.max
}
// WriteLog writes the formatted entry to the appropriate log file. In file
// mode everything goes to f.path; in directory mode each level/logger pair
// gets its own "<level>-<logger>.log" file inside f.path. Errors are printed
// to stdout and otherwise ignored.
func (f *FileHandler) WriteLog(e *Entry) {
	var logfile string
	if !f.separate {
		logfile = f.path
	} else {
		logfile = fmt.Sprintf("%s-%s.log", strings.ToLower(e.Level.String()), e.Logger.Name())
		logfile = path.Join(f.path, logfile)
	}

	f.m.Lock()
	defer f.m.Unlock()

	file, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening log file: %v\n", err)
		// Bug fix: previously execution fell through and attempted to write to
		// (and close) a nil *os.File.
		return
	}
	defer file.Close()

	if _, err = file.Write(f.formatter.FormatByte(e)); err != nil {
		fmt.Printf("Error writing to log file: %v\n", err)
	}
}
// Close satisfies the interface, NOOP: WriteLog opens and closes the log file
// on every call, so no resources are held between writes.
func (f *FileHandler) Close() {}
|
package verbose
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
)
// FileHandler writes log messages to a file or to a directory of per-level
// files, depending on the mode determined in NewFileHandler.
type FileHandler struct {
	min       LogLevel   // minimum level this handler responds to (see Handles)
	max       LogLevel   // maximum level this handler responds to (see Handles)
	path      string     // log file path, or directory path when separate is true
	separate  bool       // true: one file per level+logger; false: single file
	formatter Formatter  // renders an *Entry into bytes for writing
	m         sync.Mutex // serializes file open/write/close in WriteLog
}
// NewFileHandler takes the path and returns a FileHandler. If the path exists,
// file or directory mode will be determined by what path is. If it doesn't
// exist, the mode will be file if path has an extension, otherwise it will be
// directory. In file mode, all log messages are written to a single file.
// In directory mode, each level is written to its own file.
func NewFileHandler(path string) (*FileHandler, error) {
	f := &FileHandler{
		min:       LogLevelDebug,
		max:       LogLevelFatal,
		path:      path,
		formatter: &LineFormatter{},
		m:         sync.Mutex{},
	}

	// Determine if the path is a file or directory.
	// We cannot assume the path exists yet.
	stat, err := os.Stat(path)
	if err == nil { // Easiest, path exists
		f.separate = stat.IsDir()
		return f, nil
	}
	if !os.IsNotExist(err) {
		// Bug fix: a permission error (or other Stat failure) was previously
		// swallowed and the handler returned as if everything was fine.
		return nil, err
	}

	// Typically an extension means it's a file
	if filepath.Ext(path) == "" {
		// Attempt to create the directory
		if err := os.MkdirAll(path, 0755); err != nil {
			return nil, err
		}
		f.separate = true
		return f, nil
	}

	// File mode: make sure the parent directory exists before creating the
	// file, so a path like "logs/app.log" works on a fresh install.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, err
	}
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	file.Close()
	f.separate = false
	return f, nil
}
// SetLevel collapses the handler's range to the single level l, making the
// handler respond only to that level.
func (f *FileHandler) SetLevel(l LogLevel) {
	f.max = l
	f.min = l
}
// SetMinLevel sets the lowest log level the handler will handle. A value
// above the current maximum is ignored.
func (f *FileHandler) SetMinLevel(l LogLevel) {
	if l <= f.max {
		f.min = l
	}
}
// SetMaxLevel sets the highest log level the handler will handle. A value
// below the current minimum is ignored.
func (f *FileHandler) SetMaxLevel(l LogLevel) {
	if l >= f.min {
		f.max = l
	}
}
// SetFormatter gives FileHandler a formatter for log messages.
// NOTE(review): unlike WriteLog, this does not take f.m, so set the
// formatter before logging starts — confirm callers do so.
func (f *FileHandler) SetFormatter(fo Formatter) {
	f.formatter = fo
}
// Handles reports whether level l falls inside the handler's
// configured [min, max] range.
func (f *FileHandler) Handles(l LogLevel) bool {
	if l < f.min || l > f.max {
		return false
	}
	return true
}
// WriteLog will write the log message to a file. In single-file mode the
// configured path is used directly; in directory mode the file name is
// derived from the entry's level and logger name ("<level>-<logger>.log").
// Open/write failures are reported to stdout; the entry is dropped.
func (f *FileHandler) WriteLog(e *Entry) {
	var logfile string
	if !f.separate {
		logfile = f.path
	} else {
		logfile = fmt.Sprintf("%s-%s.log", strings.ToLower(e.Level.String()), e.Logger.Name())
		logfile = path.Join(f.path, logfile)
	}

	// Serialize concurrent writers to the same file.
	f.m.Lock()
	defer f.m.Unlock()

	file, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening log file: %v\n", err)
		// Fix: file is nil here; writing to or closing it would panic.
		return
	}
	defer file.Close()

	if _, err = file.Write(f.formatter.FormatByte(e)); err != nil {
		fmt.Printf("Error writing to log file: %v\n", err)
	}
}
// Close satisfies the interface, NOOP — WriteLog opens and closes the
// log file on every call, so there is no handle to release here.
func (f *FileHandler) Close() {}
FileHandler: create directory path for log file if needed
package verbose
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
)
// FileHandler writes log messages to a file to a directory
type FileHandler struct {
min LogLevel
max LogLevel
path string
separate bool
formatter Formatter
m sync.Mutex
}
// NewFileHandler takes the path and returns a FileHandler. If the path exists,
// file or directory mode will be determined by what path is. If it doesn't exist,
// the mode will be file if path has an extension, otherwise it will be directory.
// In file mode, all log messages are written to a single file.
// In directory mode, each level is written to its own file.
func NewFileHandler(path string) (*FileHandler, error) {
	// Fix: the Abs error was previously discarded. It only fails when the
	// working directory cannot be determined, but silently falling back to
	// an empty/relative path would misplace every log file.
	abs, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	path = abs

	f := &FileHandler{
		min:       LogLevelDebug,
		max:       LogLevelFatal,
		path:      path,
		formatter: &LineFormatter{},
		m:         sync.Mutex{},
	}

	// Determine if the path is a file or directory.
	// We cannot assume the path exists yet.
	stat, err := os.Stat(path)
	if err == nil { // Easiest, path exists
		f.separate = stat.IsDir()
		return f, nil
	}
	if !os.IsNotExist(err) {
		return nil, err
	}

	// Typically an extension means it's a file.
	ext := filepath.Ext(path)
	if ext == "" {
		// Attempt to create the directory.
		if err := os.MkdirAll(path, 0755); err != nil {
			return nil, err
		}
		f.separate = true
	} else {
		// Attempt to create the directory path leading to the file.
		dir := filepath.Dir(path)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return nil, err
		}
		// Attempt to create the file.
		file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
		if err != nil {
			return nil, err
		}
		file.Close()
		f.separate = false
	}
	return f, nil
}
// SetLevel collapses the handler's level range to the single level l,
// so the handler responds to l and nothing else.
func (f *FileHandler) SetLevel(l LogLevel) {
	f.max = l
	f.min = l
}
// SetMinLevel sets the lower bound of the handled level range.
// A value above the current maximum is ignored, keeping the range valid.
func (f *FileHandler) SetMinLevel(l LogLevel) {
	if l <= f.max {
		f.min = l
	}
}
// SetMaxLevel sets the upper bound of the handled level range.
// A value below the current minimum is ignored, keeping the range valid.
func (f *FileHandler) SetMaxLevel(l LogLevel) {
	if l >= f.min {
		f.max = l
	}
}
// SetFormatter gives FileHandler a formatter for log messages.
// NOTE(review): unlike WriteLog, this does not take f.m, so set the
// formatter before logging starts — confirm callers do so.
func (f *FileHandler) SetFormatter(fo Formatter) {
	f.formatter = fo
}
// Handles reports whether level l falls inside the handler's
// configured [min, max] range.
func (f *FileHandler) Handles(l LogLevel) bool {
	if l < f.min || l > f.max {
		return false
	}
	return true
}
// WriteLog will write the log message to a file. In single-file mode the
// configured path is used directly; in directory mode the file name is
// derived from the entry's level and logger name ("<level>-<logger>.log").
// Open/write failures are reported to stdout; the entry is dropped.
func (f *FileHandler) WriteLog(e *Entry) {
	var logfile string
	if !f.separate {
		logfile = f.path
	} else {
		logfile = fmt.Sprintf("%s-%s.log", strings.ToLower(e.Level.String()), e.Logger.Name())
		logfile = path.Join(f.path, logfile)
	}

	// Serialize concurrent writers to the same file.
	f.m.Lock()
	defer f.m.Unlock()

	file, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		fmt.Printf("Error opening log file: %v\n", err)
		// Fix: file is nil here; writing to or closing it would panic.
		return
	}
	defer file.Close()

	if _, err = file.Write(f.formatter.FormatByte(e)); err != nil {
		fmt.Printf("Error writing to log file: %v\n", err)
	}
}
// Close satisfies the interface, NOOP — WriteLog opens and closes the
// log file on every call, so there is no handle to release here.
func (f *FileHandler) Close() {}
|
package filemanager
import (
"crypto/rand"
"errors"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"reflect"
"regexp"
"strings"
"time"
"golang.org/x/crypto/bcrypt"
"github.com/GeertJohan/go.rice"
"github.com/hacdias/fileutils"
"github.com/mholt/caddy"
"github.com/robfig/cron"
)
const (
// Version is the current File Manager version.
Version = "1.4.0"
ListViewMode = "list"
MosaicViewMode = "mosaic"
)
var (
ErrExist = errors.New("the resource already exists")
ErrNotExist = errors.New("the resource does not exist")
ErrEmptyRequest = errors.New("request body is empty")
ErrEmptyPassword = errors.New("password is empty")
ErrEmptyUsername = errors.New("username is empty")
ErrEmptyScope = errors.New("scope is empty")
ErrWrongDataType = errors.New("wrong data type")
ErrInvalidUpdateField = errors.New("invalid field to update")
ErrInvalidOption = errors.New("invalid option")
)
// FileManager is a file manager instance. It should be creating using the
// 'New' function and not directly.
type FileManager struct {
// Cron job to manage schedulings.
Cron *cron.Cron
// The key used to sign the JWT tokens.
Key []byte
// The static assets.
Assets *rice.Box
// The Store is used to manage users, shareable links and
// other stuff that is saved on the database.
Store *Store
// PrefixURL is a part of the URL that is already trimmed from the request URL before it
// arrives to our handlers. It may be useful when using File Manager as a middleware
// such as in caddy-filemanager plugin. It is only useful in certain situations.
PrefixURL string
// BaseURL is the path where the GUI will be accessible. It musn't end with
// a trailing slash and mustn't contain PrefixURL, if set. It shouldn't be
// edited directly. Use SetBaseURL.
BaseURL string
// NoAuth disables the authentication. When the authentication is disabled,
// there will only exist one user, called "admin".
NoAuth bool
// ReCaptcha Site key and secret.
ReCaptchaKey string
ReCaptchaSecret string
// StaticGen is the static websit generator handler.
StaticGen StaticGen
// The Default User needed to build the New User page.
DefaultUser *User
// A map of events to a slice of commands.
Commands map[string][]string
// Global stylesheet.
CSS string
// NewFS should build a new file system for a given path.
NewFS FSBuilder
}
var commandEvents = []string{
"before_save",
"after_save",
"before_publish",
"after_publish",
"before_copy",
"after_copy",
"before_rename",
"after_rename",
"before_upload",
"after_upload",
"before_delete",
"after_delete",
}
// Command is a command function.
type Command func(r *http.Request, m *FileManager, u *User) error
// FSBuilder is the File System Builder.
type FSBuilder func(scope string) FileSystem
// Setup loads the configuration from the database and configures
// the Assets and the Cron job. It must always be run after
// creating a File Manager object.
func (m *FileManager) Setup() error {
// Creates a new File Manager instance with the Users
// map and Assets box.
m.Assets = rice.MustFindBox("./assets/dist")
m.Cron = cron.New()
// Tries to get the encryption key from the database.
// If it doesn't exist, create a new one of 256 bits.
err := m.Store.Config.Get("key", &m.Key)
if err != nil && err == ErrNotExist {
var bytes []byte
bytes, err = GenerateRandomBytes(64)
if err != nil {
return err
}
m.Key = bytes
err = m.Store.Config.Save("key", m.Key)
}
if err != nil {
return err
}
// Get the global CSS.
err = m.Store.Config.Get("css", &m.CSS)
if err != nil && err == ErrNotExist {
err = m.Store.Config.Save("css", "")
}
if err != nil {
return err
}
// Tries to get the event commands from the database.
// If they don't exist, initialize them.
err = m.Store.Config.Get("commands", &m.Commands)
if err == nil {
// Add hypothetically new command handlers.
for _, command := range commandEvents {
if _, ok := m.Commands[command]; ok {
continue
}
m.Commands[command] = []string{}
}
}
if err != nil && err == ErrNotExist {
m.Commands = map[string][]string{}
// Initialize the command handlers.
for _, command := range commandEvents {
m.Commands[command] = []string{}
}
err = m.Store.Config.Save("commands", m.Commands)
}
if err != nil {
return err
}
// Tries to fetch the users from the database.
users, err := m.Store.Users.Gets(m.NewFS)
if err != nil && err != ErrNotExist {
return err
}
// If there are no users in the database, it creates a new one
// based on 'base' User that must be provided by the function caller.
if len(users) == 0 {
u := *m.DefaultUser
u.Username = "admin"
// Hashes the password.
u.Password, err = HashPassword("admin")
if err != nil {
return err
}
// The first user must be an administrator.
u.Admin = true
u.AllowCommands = true
u.AllowNew = true
u.AllowEdit = true
u.AllowPublish = true
// Saves the user to the database.
if err := m.Store.Users.Save(&u); err != nil {
return err
}
}
// TODO: remove this after 1.5
for _, user := range users {
if user.ViewMode != ListViewMode && user.ViewMode != MosaicViewMode {
user.ViewMode = ListViewMode
m.Store.Users.Update(user, "ViewMode")
}
}
m.DefaultUser.Username = ""
m.DefaultUser.Password = ""
m.Cron.AddFunc("@hourly", m.ShareCleaner)
m.Cron.Start()
return nil
}
// RootURL returns the actual URL where the File Manager interface can be
// accessed: the already-trimmed prefix followed by the GUI base path.
func (m FileManager) RootURL() string {
	root := m.PrefixURL + m.BaseURL
	return root
}
// SetPrefixURL updates the prefixURL of a File Manager object.
// The value is normalized to start with a single "/" and to drop one
// trailing "/" (so "/" itself normalizes to the empty string).
func (m *FileManager) SetPrefixURL(url string) {
	u := strings.TrimPrefix(url, "/")
	u = strings.TrimSuffix(u, "/")
	m.PrefixURL = strings.TrimSuffix("/"+u, "/")
}
// SetBaseURL updates the baseURL of a File Manager object.
// The value is normalized to start with a single "/" and to drop one
// trailing "/" (so "/" itself normalizes to the empty string).
func (m *FileManager) SetBaseURL(url string) {
	u := strings.TrimPrefix(url, "/")
	u = strings.TrimSuffix(u, "/")
	m.BaseURL = strings.TrimSuffix("/"+u, "/")
}
// Attach attaches a static generator to the current File Manager.
// s must be a pointer value so its settings can be decoded into it from
// the configuration store; a non-pointer is rejected up front.
func (m *FileManager) Attach(s StaticGen) error {
	if reflect.TypeOf(s).Kind() != reflect.Ptr {
		return errors.New("data should be a pointer to interface, not interface")
	}
	// Let the generator initialize itself before it is installed.
	err := s.Setup()
	if err != nil {
		return err
	}
	m.StaticGen = s
	// Load persisted settings for this generator; if none exist yet,
	// persist its current (default) settings instead.
	err = m.Store.Config.Get("staticgen_"+s.Name(), s)
	if err == ErrNotExist {
		return m.Store.Config.Save("staticgen_"+s.Name(), s)
	}
	return err
}
// ShareCleaner removes sharing links that are no longer active.
// This function is set to run periodically (registered with the Cron
// job in Setup). Errors are logged and the sweep continues; nothing is
// returned to the scheduler.
func (m FileManager) ShareCleaner() {
	// Get all links.
	links, err := m.Store.Share.Gets()
	if err != nil {
		log.Print(err)
		return
	}
	// Find the expired ones and delete them; a failed delete is logged
	// but does not stop the remaining links from being checked.
	for i := range links {
		if links[i].Expires && links[i].ExpireDate.Before(time.Now()) {
			err = m.Store.Share.Delete(links[i].Hash)
			if err != nil {
				log.Print(err)
			}
		}
	}
}
// Runner runs the commands for a certain event type.
// Each configured command is executed with FILE, ROOT, TRIGGER, USERNAME
// (and optionally DESTINATION) added to the environment. A trailing "&"
// token runs the command in the background; otherwise Runner blocks until
// the command finishes. The first command that fails aborts the run.
func (m FileManager) Runner(event string, path string, destination string, user *User) error {
	commands := []string{}
	// Get the commands from the File Manager instance itself.
	if val, ok := m.Commands[event]; ok {
		commands = append(commands, val...)
	}
	// Execute the commands.
	for _, command := range commands {
		args := strings.Split(command, " ")
		nonblock := false
		if len(args) > 1 && args[len(args)-1] == "&" {
			// Run command in background; non-blocking
			nonblock = true
			args = args[:len(args)-1]
		}
		// Re-split with caddy's parser so quoted arguments are honored.
		command, args, err := caddy.SplitCommandAndArgs(strings.Join(args, " "))
		if err != nil {
			return err
		}
		cmd := exec.Command(command, args...)
		// Expose event context to the command through environment variables.
		cmd.Env = append(os.Environ(), fmt.Sprintf("FILE=%s", path))
		cmd.Env = append(cmd.Env, fmt.Sprintf("ROOT=%s", string(user.Scope)))
		cmd.Env = append(cmd.Env, fmt.Sprintf("TRIGGER=%s", event))
		cmd.Env = append(cmd.Env, fmt.Sprintf("USERNAME=%s", user.Username))
		if destination != "" {
			cmd.Env = append(cmd.Env, fmt.Sprintf("DESTINATION=%s", destination))
		}
		// Inherit the server process's standard streams.
		cmd.Stdin = os.Stdin
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if nonblock {
			log.Printf("[INFO] Nonblocking Command:\"%s %s\"", command, strings.Join(args, " "))
			if err := cmd.Start(); err != nil {
				return err
			}
			continue
		}
		log.Printf("[INFO] Blocking Command:\"%s %s\"", command, strings.Join(args, " "))
		if err := cmd.Run(); err != nil {
			return err
		}
	}
	return nil
}
// DefaultUser is used on New, when no 'base' user is provided.
var DefaultUser = User{
AllowCommands: true,
AllowEdit: true,
AllowNew: true,
AllowPublish: true,
LockPassword: false,
Commands: []string{},
Rules: []*Rule{},
CSS: "",
Admin: true,
Locale: "",
Scope: ".",
FileSystem: fileutils.Dir("."),
ViewMode: "mosaic",
}
// User contains the configuration for each user.
type User struct {
// ID is the required primary key with auto increment0
ID int `storm:"id,increment"`
// Username is the user username used to login.
Username string `json:"username" storm:"index,unique"`
// The hashed password. This never reaches the front-end because it's temporarily
// emptied during JSON marshall.
Password string `json:"password"`
// Tells if this user is an admin.
Admin bool `json:"admin"`
// Scope is the path the user has access to.
Scope string `json:"filesystem"`
// FileSystem is the virtual file system the user has access.
FileSystem FileSystem `json:"-"`
// Rules is an array of access and deny rules.
Rules []*Rule `json:"rules"`
// Custom styles for this user.
CSS string `json:"css"`
// Locale is the language of the user.
Locale string `json:"locale"`
// Prevents the user to change its password.
LockPassword bool `json:"lockPassword"`
// These indicate if the user can perform certain actions.
AllowNew bool `json:"allowNew"` // Create files and folders
AllowEdit bool `json:"allowEdit"` // Edit/rename files
AllowCommands bool `json:"allowCommands"` // Execute commands
AllowPublish bool `json:"allowPublish"` // Publish content (to use with static gen)
// Commands is the list of commands the user can execute.
Commands []string `json:"commands"`
// User view mode for files and folders.
ViewMode string `json:"viewMode"`
}
// Allowed checks if the user has permission to access a directory/file.
// Rules are evaluated from last to first, so later rules take precedence;
// with no matching rule, access is granted.
func (u User) Allowed(url string) bool {
	for i := len(u.Rules) - 1; i >= 0; i-- {
		rule := u.Rules[i]
		if rule.Regex {
			if rule.Regexp.MatchString(url) {
				return rule.Allow
			}
		} else if strings.HasPrefix(url, rule.Path) {
			return rule.Allow
		}
	}
	return true
}
// Rule is a dissalow/allow rule.
type Rule struct {
// Regex indicates if this rule uses Regular Expressions or not.
Regex bool `json:"regex"`
// Allow indicates if this is an allow rule. Set 'false' to be a disallow rule.
Allow bool `json:"allow"`
// Path is the corresponding URL path for this rule.
Path string `json:"path"`
// Regexp is the regular expression. Only use this when 'Regex' was set to true.
Regexp *Regexp `json:"regexp"`
}
// Regexp is a regular expression wrapper around native regexp.
type Regexp struct {
Raw string `json:"raw"`
regexp *regexp.Regexp
}
// MatchString checks if this string matches the regular expression.
// The native regexp is compiled lazily from Raw on first use and cached.
// NOTE(review): the lazy initialization is unsynchronized, and
// regexp.MustCompile panics if Raw is invalid — confirm rules are
// validated and not matched concurrently before first compile.
func (r *Regexp) MatchString(s string) bool {
	if r.regexp == nil {
		r.regexp = regexp.MustCompile(r.Raw)
	}
	return r.regexp.MatchString(s)
}
// ShareLink is the information needed to build a shareable link.
type ShareLink struct {
Hash string `json:"hash" storm:"id,index"`
Path string `json:"path" storm:"index"`
Expires bool `json:"expires"`
ExpireDate time.Time `json:"expireDate"`
}
// Store is a collection of the stores needed to get
// and save information.
type Store struct {
Users UsersStore
Config ConfigStore
Share ShareStore
}
// UsersStore is the interface to manage users.
type UsersStore interface {
Get(id int, builder FSBuilder) (*User, error)
GetByUsername(username string, builder FSBuilder) (*User, error)
Gets(builder FSBuilder) ([]*User, error)
Save(u *User) error
Update(u *User, fields ...string) error
Delete(id int) error
}
// ConfigStore is the interface to manage configuration.
type ConfigStore interface {
Get(name string, to interface{}) error
Save(name string, from interface{}) error
}
// ShareStore is the interface to manage share links.
type ShareStore interface {
Get(hash string) (*ShareLink, error)
GetPermanent(path string) (*ShareLink, error)
GetByPath(path string) ([]*ShareLink, error)
Gets() ([]*ShareLink, error)
Save(s *ShareLink) error
Delete(hash string) error
}
// StaticGen is a static website generator.
type StaticGen interface {
SettingsPath() string
Name() string
Setup() error
Hook(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
Preview(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
Publish(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
}
// FileSystem is the interface to work with the file system.
type FileSystem interface {
Mkdir(name string, perm os.FileMode) error
OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
RemoveAll(name string) error
Rename(oldName, newName string) error
Stat(name string) (os.FileInfo, error)
Copy(src, dst string) error
}
// Context contains the needed information to make handlers work.
type Context struct {
*FileManager
User *User
File *File
// On API handlers, Router is the APi handler we want.
Router string
}
// HashPassword generates a bcrypt hash (at the default cost) for the
// given plain-text password.
func HashPassword(password string) (string, error) {
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hashed), err
}
// CheckPasswordHash reports whether the plain-text password matches the
// stored bcrypt hash.
func CheckPasswordHash(password, hash string) bool {
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) == nil
}
// GenerateRandomBytes returns securely generated random bytes.
// It will return an fm.Error if the system's secure random
// number generator fails to function correctly, in which
// case the caller should not continue.
func GenerateRandomBytes(n int) ([]byte, error) {
b := make([]byte, n)
_, err := rand.Read(b)
// Note that err == nil only if we read len(b) bytes.
if err != nil {
return nil, err
}
return b, nil
}
[ci skip] auto: setting untracked version
Former-commit-id: ea04ce9843fafcc33961d3aacd5f31b5f93d1a08 [formerly d97fa13252b9201ea17bb9aa188b7f93b03d3f6a] [formerly c647d2080414bccf957ffb713cf029219780f606 [formerly 2b9cf83febe9974c533589d59eb591613119a7ad]]
Former-commit-id: 82f4a48d20d439257a70e49726bc26ddbd259c58 [formerly 8d3787cabd875c3ee84044051e5f97fbf1922585]
Former-commit-id: 45a6d0ca105b6b9591928d69db81c7c7777b62df
package filemanager
import (
"crypto/rand"
"errors"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"reflect"
"regexp"
"strings"
"time"
"golang.org/x/crypto/bcrypt"
"github.com/GeertJohan/go.rice"
"github.com/hacdias/fileutils"
"github.com/mholt/caddy"
"github.com/robfig/cron"
)
const (
// Version is the current File Manager version.
Version = "(untracked)"
ListViewMode = "list"
MosaicViewMode = "mosaic"
)
var (
ErrExist = errors.New("the resource already exists")
ErrNotExist = errors.New("the resource does not exist")
ErrEmptyRequest = errors.New("request body is empty")
ErrEmptyPassword = errors.New("password is empty")
ErrEmptyUsername = errors.New("username is empty")
ErrEmptyScope = errors.New("scope is empty")
ErrWrongDataType = errors.New("wrong data type")
ErrInvalidUpdateField = errors.New("invalid field to update")
ErrInvalidOption = errors.New("invalid option")
)
// FileManager is a file manager instance. It should be creating using the
// 'New' function and not directly.
type FileManager struct {
// Cron job to manage schedulings.
Cron *cron.Cron
// The key used to sign the JWT tokens.
Key []byte
// The static assets.
Assets *rice.Box
// The Store is used to manage users, shareable links and
// other stuff that is saved on the database.
Store *Store
// PrefixURL is a part of the URL that is already trimmed from the request URL before it
// arrives to our handlers. It may be useful when using File Manager as a middleware
// such as in caddy-filemanager plugin. It is only useful in certain situations.
PrefixURL string
// BaseURL is the path where the GUI will be accessible. It musn't end with
// a trailing slash and mustn't contain PrefixURL, if set. It shouldn't be
// edited directly. Use SetBaseURL.
BaseURL string
// NoAuth disables the authentication. When the authentication is disabled,
// there will only exist one user, called "admin".
NoAuth bool
// ReCaptcha Site key and secret.
ReCaptchaKey string
ReCaptchaSecret string
// StaticGen is the static websit generator handler.
StaticGen StaticGen
// The Default User needed to build the New User page.
DefaultUser *User
// A map of events to a slice of commands.
Commands map[string][]string
// Global stylesheet.
CSS string
// NewFS should build a new file system for a given path.
NewFS FSBuilder
}
var commandEvents = []string{
"before_save",
"after_save",
"before_publish",
"after_publish",
"before_copy",
"after_copy",
"before_rename",
"after_rename",
"before_upload",
"after_upload",
"before_delete",
"after_delete",
}
// Command is a command function.
type Command func(r *http.Request, m *FileManager, u *User) error
// FSBuilder is the File System Builder.
type FSBuilder func(scope string) FileSystem
// Setup loads the configuration from the database and configures
// the Assets and the Cron job. It must always be run after
// creating a File Manager object.
func (m *FileManager) Setup() error {
// Creates a new File Manager instance with the Users
// map and Assets box.
m.Assets = rice.MustFindBox("./assets/dist")
m.Cron = cron.New()
// Tries to get the encryption key from the database.
// If it doesn't exist, create a new one of 256 bits.
err := m.Store.Config.Get("key", &m.Key)
if err != nil && err == ErrNotExist {
var bytes []byte
bytes, err = GenerateRandomBytes(64)
if err != nil {
return err
}
m.Key = bytes
err = m.Store.Config.Save("key", m.Key)
}
if err != nil {
return err
}
// Get the global CSS.
err = m.Store.Config.Get("css", &m.CSS)
if err != nil && err == ErrNotExist {
err = m.Store.Config.Save("css", "")
}
if err != nil {
return err
}
// Tries to get the event commands from the database.
// If they don't exist, initialize them.
err = m.Store.Config.Get("commands", &m.Commands)
if err == nil {
// Add hypothetically new command handlers.
for _, command := range commandEvents {
if _, ok := m.Commands[command]; ok {
continue
}
m.Commands[command] = []string{}
}
}
if err != nil && err == ErrNotExist {
m.Commands = map[string][]string{}
// Initialize the command handlers.
for _, command := range commandEvents {
m.Commands[command] = []string{}
}
err = m.Store.Config.Save("commands", m.Commands)
}
if err != nil {
return err
}
// Tries to fetch the users from the database.
users, err := m.Store.Users.Gets(m.NewFS)
if err != nil && err != ErrNotExist {
return err
}
// If there are no users in the database, it creates a new one
// based on 'base' User that must be provided by the function caller.
if len(users) == 0 {
u := *m.DefaultUser
u.Username = "admin"
// Hashes the password.
u.Password, err = HashPassword("admin")
if err != nil {
return err
}
// The first user must be an administrator.
u.Admin = true
u.AllowCommands = true
u.AllowNew = true
u.AllowEdit = true
u.AllowPublish = true
// Saves the user to the database.
if err := m.Store.Users.Save(&u); err != nil {
return err
}
}
// TODO: remove this after 1.5
for _, user := range users {
if user.ViewMode != ListViewMode && user.ViewMode != MosaicViewMode {
user.ViewMode = ListViewMode
m.Store.Users.Update(user, "ViewMode")
}
}
m.DefaultUser.Username = ""
m.DefaultUser.Password = ""
m.Cron.AddFunc("@hourly", m.ShareCleaner)
m.Cron.Start()
return nil
}
// RootURL returns the actual URL where the File Manager interface can be
// accessed: the already-trimmed prefix followed by the GUI base path.
func (m FileManager) RootURL() string {
	root := m.PrefixURL + m.BaseURL
	return root
}
// SetPrefixURL updates the prefixURL of a File Manager object.
// The value is normalized to start with a single "/" and to drop one
// trailing "/" (so "/" itself normalizes to the empty string).
func (m *FileManager) SetPrefixURL(url string) {
	u := strings.TrimPrefix(url, "/")
	u = strings.TrimSuffix(u, "/")
	m.PrefixURL = strings.TrimSuffix("/"+u, "/")
}
// SetBaseURL updates the baseURL of a File Manager object.
// The value is normalized to start with a single "/" and to drop one
// trailing "/" (so "/" itself normalizes to the empty string).
func (m *FileManager) SetBaseURL(url string) {
	u := strings.TrimPrefix(url, "/")
	u = strings.TrimSuffix(u, "/")
	m.BaseURL = strings.TrimSuffix("/"+u, "/")
}
// Attach attaches a static generator to the current File Manager.
func (m *FileManager) Attach(s StaticGen) error {
if reflect.TypeOf(s).Kind() != reflect.Ptr {
return errors.New("data should be a pointer to interface, not interface")
}
err := s.Setup()
if err != nil {
return err
}
m.StaticGen = s
err = m.Store.Config.Get("staticgen_"+s.Name(), s)
if err == ErrNotExist {
return m.Store.Config.Save("staticgen_"+s.Name(), s)
}
return err
}
// ShareCleaner removes sharing links that are no longer active.
// This function is set to run periodically.
func (m FileManager) ShareCleaner() {
// Get all links.
links, err := m.Store.Share.Gets()
if err != nil {
log.Print(err)
return
}
// Find the expired ones.
for i := range links {
if links[i].Expires && links[i].ExpireDate.Before(time.Now()) {
err = m.Store.Share.Delete(links[i].Hash)
if err != nil {
log.Print(err)
}
}
}
}
// Runner runs the commands for a certain event type.
func (m FileManager) Runner(event string, path string, destination string, user *User) error {
commands := []string{}
// Get the commands from the File Manager instance itself.
if val, ok := m.Commands[event]; ok {
commands = append(commands, val...)
}
// Execute the commands.
for _, command := range commands {
args := strings.Split(command, " ")
nonblock := false
if len(args) > 1 && args[len(args)-1] == "&" {
// Run command in background; non-blocking
nonblock = true
args = args[:len(args)-1]
}
command, args, err := caddy.SplitCommandAndArgs(strings.Join(args, " "))
if err != nil {
return err
}
cmd := exec.Command(command, args...)
cmd.Env = append(os.Environ(), fmt.Sprintf("FILE=%s", path))
cmd.Env = append(cmd.Env, fmt.Sprintf("ROOT=%s", string(user.Scope)))
cmd.Env = append(cmd.Env, fmt.Sprintf("TRIGGER=%s", event))
cmd.Env = append(cmd.Env, fmt.Sprintf("USERNAME=%s", user.Username))
if destination != "" {
cmd.Env = append(cmd.Env, fmt.Sprintf("DESTINATION=%s", destination))
}
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if nonblock {
log.Printf("[INFO] Nonblocking Command:\"%s %s\"", command, strings.Join(args, " "))
if err := cmd.Start(); err != nil {
return err
}
continue
}
log.Printf("[INFO] Blocking Command:\"%s %s\"", command, strings.Join(args, " "))
if err := cmd.Run(); err != nil {
return err
}
}
return nil
}
// DefaultUser is used on New, when no 'base' user is provided.
var DefaultUser = User{
AllowCommands: true,
AllowEdit: true,
AllowNew: true,
AllowPublish: true,
LockPassword: false,
Commands: []string{},
Rules: []*Rule{},
CSS: "",
Admin: true,
Locale: "",
Scope: ".",
FileSystem: fileutils.Dir("."),
ViewMode: "mosaic",
}
// User contains the configuration for each user.
type User struct {
// ID is the required primary key with auto increment0
ID int `storm:"id,increment"`
// Username is the user username used to login.
Username string `json:"username" storm:"index,unique"`
// The hashed password. This never reaches the front-end because it's temporarily
// emptied during JSON marshall.
Password string `json:"password"`
// Tells if this user is an admin.
Admin bool `json:"admin"`
// Scope is the path the user has access to.
Scope string `json:"filesystem"`
// FileSystem is the virtual file system the user has access.
FileSystem FileSystem `json:"-"`
// Rules is an array of access and deny rules.
Rules []*Rule `json:"rules"`
// Custom styles for this user.
CSS string `json:"css"`
// Locale is the language of the user.
Locale string `json:"locale"`
// Prevents the user to change its password.
LockPassword bool `json:"lockPassword"`
// These indicate if the user can perform certain actions.
AllowNew bool `json:"allowNew"` // Create files and folders
AllowEdit bool `json:"allowEdit"` // Edit/rename files
AllowCommands bool `json:"allowCommands"` // Execute commands
AllowPublish bool `json:"allowPublish"` // Publish content (to use with static gen)
// Commands is the list of commands the user can execute.
Commands []string `json:"commands"`
// User view mode for files and folders.
ViewMode string `json:"viewMode"`
}
// Allowed checks if the user has permission to access a directory/file.
// Rules are evaluated from last to first, so later rules take precedence;
// with no matching rule, access is granted.
func (u User) Allowed(url string) bool {
	for i := len(u.Rules) - 1; i >= 0; i-- {
		rule := u.Rules[i]
		if rule.Regex {
			if rule.Regexp.MatchString(url) {
				return rule.Allow
			}
		} else if strings.HasPrefix(url, rule.Path) {
			return rule.Allow
		}
	}
	return true
}
// Rule is a dissalow/allow rule.
type Rule struct {
// Regex indicates if this rule uses Regular Expressions or not.
Regex bool `json:"regex"`
// Allow indicates if this is an allow rule. Set 'false' to be a disallow rule.
Allow bool `json:"allow"`
// Path is the corresponding URL path for this rule.
Path string `json:"path"`
// Regexp is the regular expression. Only use this when 'Regex' was set to true.
Regexp *Regexp `json:"regexp"`
}
// Regexp is a regular expression wrapper around native regexp.
type Regexp struct {
Raw string `json:"raw"`
regexp *regexp.Regexp
}
// MatchString checks if this string matches the regular expression.
// The native regexp is compiled lazily from Raw on first use and cached.
// NOTE(review): the lazy initialization is unsynchronized, and
// regexp.MustCompile panics if Raw is invalid — confirm rules are
// validated and not matched concurrently before first compile.
func (r *Regexp) MatchString(s string) bool {
	if r.regexp == nil {
		r.regexp = regexp.MustCompile(r.Raw)
	}
	return r.regexp.MatchString(s)
}
// ShareLink is the information needed to build a shareable link.
type ShareLink struct {
Hash string `json:"hash" storm:"id,index"`
Path string `json:"path" storm:"index"`
Expires bool `json:"expires"`
ExpireDate time.Time `json:"expireDate"`
}
// Store is a collection of the stores needed to get
// and save information.
type Store struct {
Users UsersStore
Config ConfigStore
Share ShareStore
}
// UsersStore is the interface to manage users.
type UsersStore interface {
Get(id int, builder FSBuilder) (*User, error)
GetByUsername(username string, builder FSBuilder) (*User, error)
Gets(builder FSBuilder) ([]*User, error)
Save(u *User) error
Update(u *User, fields ...string) error
Delete(id int) error
}
// ConfigStore is the interface to manage configuration.
type ConfigStore interface {
Get(name string, to interface{}) error
Save(name string, from interface{}) error
}
// ShareStore is the interface to manage share links.
type ShareStore interface {
Get(hash string) (*ShareLink, error)
GetPermanent(path string) (*ShareLink, error)
GetByPath(path string) ([]*ShareLink, error)
Gets() ([]*ShareLink, error)
Save(s *ShareLink) error
Delete(hash string) error
}
// StaticGen is a static website generator.
type StaticGen interface {
SettingsPath() string
Name() string
Setup() error
Hook(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
Preview(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
Publish(c *Context, w http.ResponseWriter, r *http.Request) (int, error)
}
// FileSystem is the interface to work with the file system.
type FileSystem interface {
Mkdir(name string, perm os.FileMode) error
OpenFile(name string, flag int, perm os.FileMode) (*os.File, error)
RemoveAll(name string) error
Rename(oldName, newName string) error
Stat(name string) (os.FileInfo, error)
Copy(src, dst string) error
}
// Context contains the needed information to make handlers work.
type Context struct {
*FileManager
User *User
File *File
// On API handlers, Router is the APi handler we want.
Router string
}
// HashPassword generates a bcrypt hash (at the default cost) for the
// given plain-text password.
func HashPassword(password string) (string, error) {
	hashed, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hashed), err
}
// CheckPasswordHash reports whether the plain-text password matches the
// stored bcrypt hash.
func CheckPasswordHash(password, hash string) bool {
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) == nil
}
// GenerateRandomBytes returns n securely generated random bytes.
// It will return an error if the system's secure random number
// generator fails to function correctly, in which case the caller
// should not continue.
func GenerateRandomBytes(n int) ([]byte, error) {
	buf := make([]byte, n)
	// rand.Read returns a nil error only when len(buf) bytes were read.
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
|
package peco
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFuzzyFilter tests a fuzzy filter against various inputs
func TestFuzzyFilter(t *testing.T) {
	testValues := []struct {
		input    string
		query    string
		selected bool // whether the filter is expected to match
	}{
		{"this is a test to test the fuzzy Filter", "tf", true},  // normal selection
		{"this is a test to test the fuzzy Filter", "wp", false}, // incorrect selection
		{"THIS IS A TEST TO TEST THE FUZZY FILTER", "tu", true},  // case insensitivity
		{"this is a Test to test the fuzzy filter", "Tu", true},  // case sensitivity
		{"this is a Test to test the fUzzy filter", "TU", true},  // case sensitivity
		{"this is a test to test the fuzzy filter", "Tu", false}, // case sensitivity
		{"this is a test to Test the fuzzy filter", "TU", false}, // case sensitivity
		{"日本語は難しいです", "難", true},      // kanji
		{"あ、日本語は難しいですよ", "あい", true}, // hiragana
		{"パソコンは遅いですネ", "ソネ", true},    // katakana
		{"🚴🏻 abcd efgh", "🚴🏻e", true}, // unicode
	}
	filter := NewFuzzyFilter()
	for i, v := range testValues {
		t.Run(fmt.Sprintf(`"%s" against "%s", expect "%t"`, v.input, v.query, v.selected), func(t *testing.T) {
			filter.SetQuery(v.query)
			l := NewRawLine(uint64(i), v.input, false)
			res, err := filter.filter(l)
			if !v.selected {
				// a non-match is reported as an error with a nil result
				if !assert.Error(t, err, "filter should fail") {
					return
				}
				if !assert.Nil(t, res, "return value should be nil") {
					return
				}
				return
			}
			if !assert.NoError(t, err, "filtering failed") {
				return
			}
			if !assert.NotNil(t, res, "return value should NOT be nil") {
				return
			}
			t.Logf("%#v", res.Indices())
		})
	}
}
tweak comments
package peco
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestFuzzyFilter tests a fuzzy filter against various inputs
func TestFuzzyFilter(t *testing.T) {
	testValues := []struct {
		input    string
		query    string
		selected bool // whether the filter is expected to match
	}{
		{"this is a test to test the fuzzy Filter", "tf", true},  // normal selection
		{"this is a test to test the fuzzy Filter", "wp", false}, // incorrect selection
		{"THIS IS A TEST TO TEST THE FUZZY FILTER", "tu", true},  // case insensitivity
		{"this is a Test to test the fuzzy filter", "Tu", true},  // case sensitivity
		{"this is a Test to test the fUzzy filter", "TU", true},  // case sensitivity
		{"this is a test to test the fuzzy filter", "Tu", false}, // case sensitivity
		{"this is a test to Test the fuzzy filter", "TU", false}, // case sensitivity
		{"日本語は難しいです", "難", true},      // kanji
		{"あ、日本語は難しいですよ", "あい", true}, // hiragana
		{"パソコンは遅いですネ", "ソネ", true},    // katakana
		{"🚴🏻 abcd efgh", "🚴🏻e", true}, // unicode
	}
	filter := NewFuzzyFilter()
	for i, v := range testValues {
		t.Run(fmt.Sprintf(`"%s" against "%s", expect "%t"`, v.input, v.query, v.selected), func(t *testing.T) {
			filter.SetQuery(v.query)
			l := NewRawLine(uint64(i), v.input, false)
			res, err := filter.filter(l)
			if !v.selected {
				// a non-match is reported as an error with a nil result
				if !assert.Error(t, err, "filter should fail") {
					return
				}
				if !assert.Nil(t, res, "return value should be nil") {
					return
				}
				return
			}
			if !assert.NoError(t, err, "filtering failed") {
				return
			}
			if !assert.NotNil(t, res, "return value should NOT be nil") {
				return
			}
			t.Logf("%#v", res.Indices())
		})
	}
}
|
package eval
import (
"bytes"
"encoding/json"
"monkey/ast"
"text/template"
)
const (
	// TEMPLATE_OBJ is the ObjectType tag for template objects.
	TEMPLATE_OBJ = "TEMPLATE_OBJ"
	// template_name is the global name the module is registered under.
	template_name = "template"
)
// TemplateObj wraps a Go text/template.Template as an interpreter object.
type TemplateObj struct {
	Template *template.Template
}
// NewTemplateObj creates the template module object and registers it
// in the global scope under the name "template".
func NewTemplateObj() Object {
	ret := &TemplateObj{}
	SetGlobalObj(template_name, ret)
	return ret
}
// Inspect returns the printable name of the template module.
func (t *TemplateObj) Inspect() string { return template_name }
// Type returns the object type tag for template objects.
func (t *TemplateObj) Type() ObjectType { return TEMPLATE_OBJ }
// CallMethod dispatches a call of the named method on the template
// module to its implementation; it panics with NOMETHODERROR for an
// unknown method name.
func (t *TemplateObj) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
	switch method {
	case "new":
		return t.New(line, args...)
	case "parse":
		return t.Parse(line, args...)
	case "parseFiles":
		return t.ParseFiles(line, args...)
	case "parseGlob":
		return t.ParseGlob(line, args...)
	case "clone":
		return t.Clone(line, args...)
	case "definedTemplates":
		return t.DefinedTemplates(line, args...)
	case "delims":
		return t.Delims(line, args...)
	case "execute":
		return t.Execute(line, args...)
	case "executeTemplate":
		return t.ExecuteTemplate(line, args...)
	case "funcs":
		// funcs needs the scope to evaluate user-supplied functions
		return t.Funcs(line, scope, args...)
	case "lookup":
		return t.Lookup(line, args...)
	case "name":
		return t.Name(line, args...)
	case "option":
		return t.Option(line, args...)
	case "templates":
		return t.Templates(line, args...)
	case "htmlEscapeString":
		return t.HTMLEscapeString(line, args...)
	case "jsEscapeString":
		return t.JSEscapeString(line, args...)
	}
	panic(NewError(line, NOMETHODERROR, method, t.Type()))
}
// New creates a fresh template object. With no argument the template
// is named "main"; an optional single *String argument supplies the name.
func (t *TemplateObj) New(line string, args ...Object) Object {
	n := len(args)
	if n > 1 {
		panic(NewError(line, ARGUMENTERROR, "0|1", n))
	}
	name := "main"
	if n == 1 {
		s, ok := args[0].(*String)
		if !ok {
			panic(NewError(line, PARAMTYPEERROR, "first", "new", "*String", args[0].Type()))
		}
		name = s.String
	}
	return &TemplateObj{Template: template.New(name)}
}
// Parse parses the *String in args[0] as template text into the
// wrapped template. It returns the receiver on success, or a Nil
// object carrying the error message on parse failure.
func (t *TemplateObj) Parse(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parse", "*String", args[0].Type()))
	}
	if t.Template == nil {
		return NewNil("Before calling parse(), you should first call 'new|parseFiles|parseGlob' function")
	}
	var err error = nil
	t.Template, err = t.Template.Parse(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return t
}
// ParseFiles creates a new template object parsed from the file named
// by the *String in args[0]; returns a Nil object on error.
func (t *TemplateObj) ParseFiles(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parseFiles", "*String", args[0].Type()))
	}
	temp, err := template.ParseFiles(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// ParseGlob creates a new template object from the files matching the
// glob pattern in args[0]; returns a Nil object on error.
func (t *TemplateObj) ParseGlob(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parseGlob", "*String", args[0].Type()))
	}
	temp, err := template.ParseGlob(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// Clone returns a duplicate of the wrapped template; takes no
// arguments. Returns a Nil object on error.
func (t *TemplateObj) Clone(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling clone(), you should first call 'new|parseFiles|parseGlob' function")
	}
	temp, err := t.Template.Clone()
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// DefinedTemplates returns a *String describing the templates defined
// on the wrapped template; takes no arguments.
func (t *TemplateObj) DefinedTemplates(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling definedTemplates(), you should first call 'new|parseFiles|parseGlob' function")
	}
	s := t.Template.DefinedTemplates()
	return NewString(s)
}
// Delims sets the action delimiters to the two *String arguments
// (left, right) and returns the receiver for chaining.
func (t *TemplateObj) Delims(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling delims(), you should first call 'new|parseFiles|parseGlob' function")
	}
	leftStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "delims", "*String", args[0].Type()))
	}
	rightStrObj, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "delims", "*String", args[1].Type()))
	}
	t.Template.Delims(leftStrObj.String, rightStrObj.String)
	return t
}
// Execute applies the wrapped template to the data object in args[0]
// and returns the rendered output as a *String. The data object is
// marshalled to JSON and unmarshalled into a plain Go value so
// text/template can traverse it. Returns a Nil object on any error.
func (t *TemplateObj) Execute(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling execute(), you should first call 'new|parseFiles|parseGlob' function")
	}
	var data []byte
	var err error
	objType := args[0].Type()
	// Convert the supported object kinds to their JSON representation.
	switch objType {
	case HASH_OBJ:
		data, err = args[0].(*Hash).MarshalJSON()
	case ARRAY_OBJ:
		data, err = args[0].(*Array).MarshalJSON()
	case INTEGER_OBJ:
		data, err = args[0].(*Integer).MarshalJSON()
	case FLOAT_OBJ:
		data, err = args[0].(*Float).MarshalJSON()
	case BOOLEAN_OBJ:
		data, err = args[0].(*Boolean).MarshalJSON()
	case NIL_OBJ:
		data, err = args[0].(*Nil).MarshalJSON()
	case STRING_OBJ:
		data, err = args[0].(*String).MarshalJSON()
	case TIME_OBJ:
		data, err = args[0].(*TimeObj).MarshalJSON()
	default:
		// Fixed: the data object is the first (and only) argument of
		// execute(), so report "first" (was incorrectly "second").
		panic(NewError(line, PARAMTYPEERROR, "first", "execute", "*Integer|*Float|*String|*Boolean|*Nil|*TimeObj|*Array|*Hash", objType))
	}
	if err != nil {
		return NewNil(err.Error())
	}
	var obj interface{}
	if err = json.Unmarshal(data, &obj); err != nil {
		return NewNil(err.Error())
	}
	var out bytes.Buffer
	if err = t.Template.Execute(&out, obj); err != nil {
		return NewNil(err.Error())
	}
	return NewString(out.String())
}
// Note: ExecuteTemplate is similar to Execute; a common private helper
// could be extracted.
//
// ExecuteTemplate applies the sub-template named by the *String in
// args[0] to the data object in args[1] and returns the rendered
// output as a *String. The data object is marshalled to JSON and
// unmarshalled into a plain Go value so text/template can traverse it.
func (t *TemplateObj) ExecuteTemplate(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling executeTemplate(), you should first call 'new|parseFiles|parseGlob' function")
	}
	nameStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "executeTemplate", "*String", args[0].Type()))
	}
	var data []byte
	var err error
	objType := args[1].Type()
	// Convert the supported object kinds to their JSON representation.
	switch objType {
	case HASH_OBJ:
		data, err = args[1].(*Hash).MarshalJSON()
	case ARRAY_OBJ:
		data, err = args[1].(*Array).MarshalJSON()
	case INTEGER_OBJ:
		data, err = args[1].(*Integer).MarshalJSON()
	case FLOAT_OBJ:
		data, err = args[1].(*Float).MarshalJSON()
	case BOOLEAN_OBJ:
		data, err = args[1].(*Boolean).MarshalJSON()
	case NIL_OBJ:
		data, err = args[1].(*Nil).MarshalJSON()
	case STRING_OBJ:
		data, err = args[1].(*String).MarshalJSON()
	case TIME_OBJ:
		data, err = args[1].(*TimeObj).MarshalJSON()
	default:
		// Fixed: report the actual method name "executeTemplate"
		// (the message previously said "execute").
		panic(NewError(line, PARAMTYPEERROR, "second", "executeTemplate", "*Integer|*Float|*String|*Boolean|*Nil|*TimeObj|*Array|*Hash", objType))
	}
	if err != nil {
		return NewNil(err.Error())
	}
	var obj interface{}
	if err = json.Unmarshal(data, &obj); err != nil {
		return NewNil(err.Error())
	}
	var out bytes.Buffer
	if err = t.Template.ExecuteTemplate(&out, nameStrObj.String, obj); err != nil {
		return NewNil(err.Error())
	}
	return NewString(out.String())
}
// Funcs registers the *Hash in args[0] as the template's function map.
// Hash keys must be STRING and values FUNCTION; each interpreter
// function is wrapped in a Go closure that converts arguments and the
// return value between Go and interpreter representations.
func (t *TemplateObj) Funcs(line string, scope *Scope, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	hashObj, ok := args[0].(*Hash)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "funcs", "*Hash", args[0].Type()))
	}
	if t.Template == nil {
		return NewNil("Before calling funcs(), you should first call 'new|parseFiles|parseGlob' function")
	}
	funcMaps := template.FuncMap{}
	for _, pair := range hashObj.Pairs {
		if pair.Key.Type() != STRING_OBJ {
			panic(NewError(line, GENERICERROR, "Hash's key type should be 'STRING', got '" + pair.Key.Type() + "'"))
		}
		if pair.Value.Type() != FUNCTION_OBJ {
			panic(NewError(line, GENERICERROR, "Hash's value type should be 'FUNCTION', got '" + pair.Value.Type() + "'"))
		}
		// key is declared per-iteration, so each closure captures its
		// own copy of the function name.
		key := pair.Key.(*String).String
		funcMaps[key] = func(args ...interface{}) interface{} {
			// Re-look up the function by name at call time.
			var innerFn *Function
			for _, innerPair := range hashObj.Pairs {
				if key == innerPair.Key.(*String).String {
					innerFn = innerPair.Value.(*Function)
					break
				}
			}
			s := NewScope(scope)
			//put all the arguments into scope for later 'Eval'
			for idx, arg := range args {
				o, _ := unmarshalJsonObject(arg) //convert go object to monkey object
				s.Set(innerFn.Literal.Parameters[idx].(*ast.Identifier).Value, o)
			}
			ret := Eval(innerFn.Literal.Body, s)
			if obj, ok := ret.(*ReturnValue); ok {
				ret = obj.Value
			}
			return object2RawValue(ret) //convert monkey object back to go object
		}
	}
	t.Template.Funcs(funcMaps)
	return t
}
// Lookup returns the associated template with the name given by the
// *String in args[0], or NIL if there is no such template.
func (t *TemplateObj) Lookup(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling lookup(), you should first call 'new|parseFiles|parseGlob' function")
	}
	nameStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "lookup", "*String", args[0].Type()))
	}
	temp := t.Template.Lookup(nameStrObj.String)
	if temp == nil {
		return NIL
	}
	return &TemplateObj{Template: temp}
}
// Name returns the name of the wrapped template as a *String; takes
// no arguments.
func (t *TemplateObj) Name(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling name(), you should first call 'new|parseFiles|parseGlob' function")
	}
	str := t.Template.Name()
	return NewString(str)
}
// Option sets option strings on the wrapped template; every argument
// must be a *String. Returns the receiver for chaining.
func (t *TemplateObj) Option(line string, args ...Object) Object {
	if t.Template == nil {
		return NewNil("Before calling option(), you should first call 'new|parseFiles|parseGlob' function")
	}
	strArr := []string{}
	for _, v := range args {
		strObj, ok := v.(*String)
		if !ok {
			// Fixed: previously an unchecked type assertion crashed the
			// interpreter on non-string arguments; raise a structured
			// error instead, like the sibling methods do.
			panic(NewError(line, GENERICERROR, "option()'s arguments should all be 'STRING', got '"+v.Type()+"'"))
		}
		strArr = append(strArr, strObj.String)
	}
	t.Template.Option(strArr...)
	return t
}
// Templates returns an Array of TemplateObj wrappers for every
// template associated with the wrapped template; takes no arguments.
func (t *TemplateObj) Templates(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling templates(), you should first call 'new|parseFiles|parseGlob' function")
	}
	retArr := &Array{}
	templateArr := t.Template.Templates()
	for _, v := range templateArr {
		retArr.Members = append(retArr.Members, &TemplateObj{Template: v})
	}
	return retArr
}
// HTMLEscapeString returns the HTML-escaped form of its single
// *String argument.
func (t *TemplateObj) HTMLEscapeString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	s, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "htmlEscapeString", "*String", args[0].Type()))
	}
	return NewString(template.HTMLEscapeString(s.String))
}
// JSEscapeString returns the JavaScript-escaped form of its single
// *String argument.
func (t *TemplateObj) JSEscapeString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	s, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "jsEscapeString", "*String", args[0].Type()))
	}
	return NewString(template.JSEscapeString(s.String))
}
Add 'htmlEscaper','jsEscaper' and 'urlQueryEscaper' to template module
package eval
import (
"bytes"
"encoding/json"
"monkey/ast"
"text/template"
)
const (
	// TEMPLATE_OBJ is the ObjectType tag for template objects.
	TEMPLATE_OBJ = "TEMPLATE_OBJ"
	// template_name is the global name the module is registered under.
	template_name = "template"
)
// TemplateObj wraps a Go text/template.Template as an interpreter object.
type TemplateObj struct {
	Template *template.Template
}
// NewTemplateObj creates the template module object and registers it
// in the global scope under the name "template".
func NewTemplateObj() Object {
	ret := &TemplateObj{}
	SetGlobalObj(template_name, ret)
	return ret
}
// Inspect returns the printable name of the template module.
func (t *TemplateObj) Inspect() string { return template_name }
// Type returns the object type tag for template objects.
func (t *TemplateObj) Type() ObjectType { return TEMPLATE_OBJ }
// CallMethod dispatches a call of the named method on the template
// module to its implementation; it panics with NOMETHODERROR for an
// unknown method name.
func (t *TemplateObj) CallMethod(line string, scope *Scope, method string, args ...Object) Object {
	switch method {
	case "new":
		return t.New(line, args...)
	case "parse":
		return t.Parse(line, args...)
	case "parseFiles":
		return t.ParseFiles(line, args...)
	case "parseGlob":
		return t.ParseGlob(line, args...)
	case "clone":
		return t.Clone(line, args...)
	case "definedTemplates":
		return t.DefinedTemplates(line, args...)
	case "delims":
		return t.Delims(line, args...)
	case "execute":
		return t.Execute(line, args...)
	case "executeTemplate":
		return t.ExecuteTemplate(line, args...)
	case "funcs":
		// funcs needs the scope to evaluate user-supplied functions
		return t.Funcs(line, scope, args...)
	case "lookup":
		return t.Lookup(line, args...)
	case "name":
		return t.Name(line, args...)
	case "option":
		return t.Option(line, args...)
	case "templates":
		return t.Templates(line, args...)
	case "htmlEscape":
		return t.HTMLEscape(line, args...)
	case "htmlEscaper":
		return t.HTMLEscaper(line, args...)
	case "htmlEscapeString":
		return t.HTMLEscapeString(line, args...)
	case "jsEscapeString":
		return t.JSEscapeString(line, args...)
	case "jsEscape":
		return t.JSEscape(line, args...)
	case "jsEscaper":
		return t.JSEscaper(line, args...)
	case "urlQueryEscaper":
		return t.URLQueryEscaper(line, args...)
	}
	panic(NewError(line, NOMETHODERROR, method, t.Type()))
}
// New creates a fresh template object. With no argument the template
// is named "main"; an optional single *String argument supplies the name.
func (t *TemplateObj) New(line string, args ...Object) Object {
	argLen := len(args)
	if argLen != 0 && argLen != 1 {
		panic(NewError(line, ARGUMENTERROR, "0|1", argLen))
	}
	var name string = "main"
	if argLen == 1 {
		strObj, ok := args[0].(*String)
		if !ok {
			panic(NewError(line, PARAMTYPEERROR, "first", "new", "*String", args[0].Type()))
		}
		name = strObj.String
	}
	return &TemplateObj{Template: template.New(name)}
}
// Parse parses the *String in args[0] as template text into the
// wrapped template. It returns the receiver on success, or a Nil
// object carrying the error message on parse failure.
func (t *TemplateObj) Parse(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parse", "*String", args[0].Type()))
	}
	if t.Template == nil {
		return NewNil("Before calling parse(), you should first call 'new|parseFiles|parseGlob' function")
	}
	var err error = nil
	t.Template, err = t.Template.Parse(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return t
}
// ParseFiles creates a new template object parsed from the file named
// by the *String in args[0]; returns a Nil object on error.
func (t *TemplateObj) ParseFiles(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parseFiles", "*String", args[0].Type()))
	}
	temp, err := template.ParseFiles(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// ParseGlob creates a new template object from the files matching the
// glob pattern in args[0]; returns a Nil object on error.
func (t *TemplateObj) ParseGlob(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "parseGlob", "*String", args[0].Type()))
	}
	temp, err := template.ParseGlob(strObj.String)
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// Clone returns a duplicate of the wrapped template; takes no
// arguments. Returns a Nil object on error.
func (t *TemplateObj) Clone(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling clone(), you should first call 'new|parseFiles|parseGlob' function")
	}
	temp, err := t.Template.Clone()
	if err != nil {
		return NewNil(err.Error())
	}
	return &TemplateObj{Template: temp}
}
// DefinedTemplates returns a *String describing the templates defined
// on the wrapped template; takes no arguments.
func (t *TemplateObj) DefinedTemplates(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling definedTemplates(), you should first call 'new|parseFiles|parseGlob' function")
	}
	s := t.Template.DefinedTemplates()
	return NewString(s)
}
// Delims sets the action delimiters to the two *String arguments
// (left, right) and returns the receiver for chaining.
func (t *TemplateObj) Delims(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling delims(), you should first call 'new|parseFiles|parseGlob' function")
	}
	leftStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "delims", "*String", args[0].Type()))
	}
	rightStrObj, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "delims", "*String", args[1].Type()))
	}
	t.Template.Delims(leftStrObj.String, rightStrObj.String)
	return t
}
// Execute applies the wrapped template to the data object in args[0]
// and returns the rendered output as a *String. The data object is
// marshalled to JSON and unmarshalled into a plain Go value so
// text/template can traverse it. Returns a Nil object on any error.
func (t *TemplateObj) Execute(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling execute(), you should first call 'new|parseFiles|parseGlob' function")
	}
	var data []byte
	var err error
	objType := args[0].Type()
	// Convert the supported object kinds to their JSON representation.
	switch objType {
	case HASH_OBJ:
		data, err = args[0].(*Hash).MarshalJSON()
	case ARRAY_OBJ:
		data, err = args[0].(*Array).MarshalJSON()
	case INTEGER_OBJ:
		data, err = args[0].(*Integer).MarshalJSON()
	case FLOAT_OBJ:
		data, err = args[0].(*Float).MarshalJSON()
	case BOOLEAN_OBJ:
		data, err = args[0].(*Boolean).MarshalJSON()
	case NIL_OBJ:
		data, err = args[0].(*Nil).MarshalJSON()
	case STRING_OBJ:
		data, err = args[0].(*String).MarshalJSON()
	case TIME_OBJ:
		data, err = args[0].(*TimeObj).MarshalJSON()
	default:
		// Fixed: the data object is the first (and only) argument of
		// execute(), so report "first" (was incorrectly "second").
		panic(NewError(line, PARAMTYPEERROR, "first", "execute", "*Integer|*Float|*String|*Boolean|*Nil|*TimeObj|*Array|*Hash", objType))
	}
	if err != nil {
		return NewNil(err.Error())
	}
	var obj interface{}
	if err = json.Unmarshal(data, &obj); err != nil {
		return NewNil(err.Error())
	}
	var out bytes.Buffer
	if err = t.Template.Execute(&out, obj); err != nil {
		return NewNil(err.Error())
	}
	return NewString(out.String())
}
// Note: ExecuteTemplate is similar to Execute; a common private helper
// could be extracted.
//
// ExecuteTemplate applies the sub-template named by the *String in
// args[0] to the data object in args[1] and returns the rendered
// output as a *String. The data object is marshalled to JSON and
// unmarshalled into a plain Go value so text/template can traverse it.
func (t *TemplateObj) ExecuteTemplate(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling executeTemplate(), you should first call 'new|parseFiles|parseGlob' function")
	}
	nameStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "executeTemplate", "*String", args[0].Type()))
	}
	var data []byte
	var err error
	objType := args[1].Type()
	// Convert the supported object kinds to their JSON representation.
	switch objType {
	case HASH_OBJ:
		data, err = args[1].(*Hash).MarshalJSON()
	case ARRAY_OBJ:
		data, err = args[1].(*Array).MarshalJSON()
	case INTEGER_OBJ:
		data, err = args[1].(*Integer).MarshalJSON()
	case FLOAT_OBJ:
		data, err = args[1].(*Float).MarshalJSON()
	case BOOLEAN_OBJ:
		data, err = args[1].(*Boolean).MarshalJSON()
	case NIL_OBJ:
		data, err = args[1].(*Nil).MarshalJSON()
	case STRING_OBJ:
		data, err = args[1].(*String).MarshalJSON()
	case TIME_OBJ:
		data, err = args[1].(*TimeObj).MarshalJSON()
	default:
		// Fixed: report the actual method name "executeTemplate"
		// (the message previously said "execute").
		panic(NewError(line, PARAMTYPEERROR, "second", "executeTemplate", "*Integer|*Float|*String|*Boolean|*Nil|*TimeObj|*Array|*Hash", objType))
	}
	if err != nil {
		return NewNil(err.Error())
	}
	var obj interface{}
	if err = json.Unmarshal(data, &obj); err != nil {
		return NewNil(err.Error())
	}
	var out bytes.Buffer
	if err = t.Template.ExecuteTemplate(&out, nameStrObj.String, obj); err != nil {
		return NewNil(err.Error())
	}
	return NewString(out.String())
}
// Funcs registers the *Hash in args[0] as the template's function map.
// Hash keys must be STRING and values FUNCTION; each interpreter
// function is wrapped in a Go closure that converts arguments and the
// return value between Go and interpreter representations.
func (t *TemplateObj) Funcs(line string, scope *Scope, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	hashObj, ok := args[0].(*Hash)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "funcs", "*Hash", args[0].Type()))
	}
	if t.Template == nil {
		return NewNil("Before calling funcs(), you should first call 'new|parseFiles|parseGlob' function")
	}
	funcMaps := template.FuncMap{}
	for _, pair := range hashObj.Pairs {
		if pair.Key.Type() != STRING_OBJ {
			panic(NewError(line, GENERICERROR, "Hash's key type should be 'STRING', got '" + pair.Key.Type() + "'"))
		}
		if pair.Value.Type() != FUNCTION_OBJ {
			panic(NewError(line, GENERICERROR, "Hash's value type should be 'FUNCTION', got '" + pair.Value.Type() + "'"))
		}
		// key is declared per-iteration, so each closure captures its
		// own copy of the function name.
		key := pair.Key.(*String).String
		funcMaps[key] = func(args ...interface{}) interface{} {
			// Re-look up the function by name at call time.
			var innerFn *Function
			for _, innerPair := range hashObj.Pairs {
				if key == innerPair.Key.(*String).String {
					innerFn = innerPair.Value.(*Function)
					break
				}
			}
			s := NewScope(scope)
			//put all the arguments into scope for later 'Eval'
			for idx, arg := range args {
				o, _ := unmarshalJsonObject(arg) //convert go object to monkey object
				s.Set(innerFn.Literal.Parameters[idx].(*ast.Identifier).Value, o)
			}
			ret := Eval(innerFn.Literal.Body, s)
			if obj, ok := ret.(*ReturnValue); ok {
				ret = obj.Value
			}
			return object2RawValue(ret) //convert monkey object back to go object
		}
	}
	t.Template.Funcs(funcMaps)
	return t
}
// Lookup returns the associated template with the name given by the
// *String in args[0], or NIL if there is no such template.
func (t *TemplateObj) Lookup(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling lookup(), you should first call 'new|parseFiles|parseGlob' function")
	}
	nameStrObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "lookup", "*String", args[0].Type()))
	}
	temp := t.Template.Lookup(nameStrObj.String)
	if temp == nil {
		return NIL
	}
	return &TemplateObj{Template: temp}
}
// Name returns the name of the wrapped template as a *String; takes
// no arguments.
func (t *TemplateObj) Name(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling name(), you should first call 'new|parseFiles|parseGlob' function")
	}
	str := t.Template.Name()
	return NewString(str)
}
// Option sets option strings on the wrapped template; every argument
// must be a *String. Returns the receiver for chaining.
func (t *TemplateObj) Option(line string, args ...Object) Object {
	if t.Template == nil {
		return NewNil("Before calling option(), you should first call 'new|parseFiles|parseGlob' function")
	}
	strArr := []string{}
	for _, v := range args {
		strObj, ok := v.(*String)
		if !ok {
			// Fixed: previously an unchecked type assertion crashed the
			// interpreter on non-string arguments; raise a structured
			// error instead, like the sibling methods do.
			panic(NewError(line, GENERICERROR, "option()'s arguments should all be 'STRING', got '"+v.Type()+"'"))
		}
		strArr = append(strArr, strObj.String)
	}
	t.Template.Option(strArr...)
	return t
}
// Templates returns an Array of TemplateObj wrappers for every
// template associated with the wrapped template; takes no arguments.
func (t *TemplateObj) Templates(line string, args ...Object) Object {
	if len(args) != 0 {
		panic(NewError(line, ARGUMENTERROR, "0", len(args)))
	}
	if t.Template == nil {
		return NewNil("Before calling templates(), you should first call 'new|parseFiles|parseGlob' function")
	}
	retArr := &Array{}
	templateArr := t.Template.Templates()
	for _, v := range templateArr {
		retArr.Members = append(retArr.Members, &TemplateObj{Template: v})
	}
	return retArr
}
// HTMLEscapeString returns the HTML-escaped form of its single
// *String argument.
func (t *TemplateObj) HTMLEscapeString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "htmlEscapeString", "*String", args[0].Type()))
	}
	ret := template.HTMLEscapeString(strObj.String)
	return NewString(ret)
}
// HTMLEscape writes the HTML-escaped form of the *String in args[1]
// to the Writable in args[0]; returns NIL.
func (t *TemplateObj) HTMLEscape(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	writerObj, ok := args[0].(Writable)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "htmlEscape", "Writable", args[0].Type()))
	}
	strObj, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "htmlEscape", "*String", args[1].Type()))
	}
	b := []byte(strObj.String)
	template.HTMLEscape(writerObj.IOWriter(), b)
	return NIL
}
// HTMLEscaper returns the HTML-escaped text form of its arguments as
// a *String; with no arguments it returns "".
func (t *TemplateObj) HTMLEscaper(line string, args ...Object) Object {
	if len(args) == 0 {
		return NewString("")
	}
	// convert each interpreter object to its raw Go value
	arrIntf := make([]interface{}, len(args))
	for i, v := range args {
		arrIntf[i] = object2RawValue(v)
	}
	ret := template.HTMLEscaper(arrIntf...)
	return NewString(ret)
}
// JSEscapeString returns the JavaScript-escaped form of its single
// *String argument.
func (t *TemplateObj) JSEscapeString(line string, args ...Object) Object {
	if len(args) != 1 {
		panic(NewError(line, ARGUMENTERROR, "1", len(args)))
	}
	strObj, ok := args[0].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "jsEscapeString", "*String", args[0].Type()))
	}
	ret := template.JSEscapeString(strObj.String)
	return NewString(ret)
}
// JSEscape writes the JavaScript-escaped form of the *String in
// args[1] to the Writable in args[0]; returns NIL.
func (t *TemplateObj) JSEscape(line string, args ...Object) Object {
	if len(args) != 2 {
		panic(NewError(line, ARGUMENTERROR, "2", len(args)))
	}
	writerObj, ok := args[0].(Writable)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "first", "jsEscape", "Writable", args[0].Type()))
	}
	strObj, ok := args[1].(*String)
	if !ok {
		panic(NewError(line, PARAMTYPEERROR, "second", "jsEscape", "*String", args[1].Type()))
	}
	b := []byte(strObj.String)
	template.JSEscape(writerObj.IOWriter(), b)
	return NIL
}
// JSEscaper returns the JavaScript-escaped text form of its arguments
// as a *String; with no arguments it returns "".
func (t *TemplateObj) JSEscaper(line string, args ...Object) Object {
	if len(args) == 0 {
		return NewString("")
	}
	// convert each interpreter object to its raw Go value
	arrIntf := make([]interface{}, len(args))
	for i, v := range args {
		arrIntf[i] = object2RawValue(v)
	}
	ret := template.JSEscaper(arrIntf...)
	return NewString(ret)
}
// URLQueryEscaper returns its arguments escaped for embedding in a
// URL query, as a *String; with no arguments it returns "".
func (t *TemplateObj) URLQueryEscaper(line string, args ...Object) Object {
	if len(args) == 0 {
		return NewString("")
	}
	// convert each interpreter object to its raw Go value
	arrIntf := make([]interface{}, len(args))
	for i, v := range args {
		arrIntf[i] = object2RawValue(v)
	}
	ret := template.URLQueryEscaper(arrIntf...)
	return NewString(ret)
}
|
package main
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
. "launchpad.net/gocheck"
)
// HTTPSuite exercises the admin HTTP API against live test backend
// servers.
type HTTPSuite struct {
	// servers holds the backend test servers started for each test.
	servers []*testServer
	// httpSvr is the API server shared by all tests in the suite.
	httpSvr *httptest.Server
}

var _ = Suite(&HTTPSuite{})
// SetUpSuite registers the HTTP handlers and starts the API test
// server used by every test in the suite.
func (s *HTTPSuite) SetUpSuite(c *C) {
	addHandlers()
	s.httpSvr = httptest.NewServer(nil)
}
// TearDownSuite shuts down the shared API test server.
func (s *HTTPSuite) TearDownSuite(c *C) {
	s.httpSvr.Close()
}
// SetUpTest starts four local backend servers on fixed ports before
// each test.
func (s *HTTPSuite) SetUpTest(c *C) {
	// start 4 possible backend servers
	ports := []string{"9001", "9002", "9003", "9004"}
	for _, p := range ports {
		server, err := NewTestServer("127.0.0.1:"+p, c)
		if err != nil {
			c.Fatal(err)
		}
		s.servers = append(s.servers, server)
	}
}
// shutdown our backend servers
// TearDownTest also removes every service from the registry so each
// test starts from a clean state.
func (s *HTTPSuite) TearDownTest(c *C) {
	for _, s := range s.servers {
		s.Stop()
	}
	for _, svc := range Registry.svcs {
		Registry.RemoveService(svc.Name)
	}
}
// These don't yet *really* test anything other than code coverage
// TestAddService creates a service via PUT and checks the response
// body matches the registry's state.
func (s *HTTPSuite) TestAddService(c *C) {
	svcDef := bytes.NewReader([]byte(`{"address": "127.0.0.1:9000"}`))
	req, _ := http.NewRequest("PUT", s.httpSvr.URL+"/testService", svcDef)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		c.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	c.Assert(Registry.String(), DeepEquals, string(body))
}
// TestAddBackend PUTs a service, then a backend under it, and checks the
// backend-creation response matches the registry's rendered state.
func (s *HTTPSuite) TestAddBackend(c *C) {
	svcDef := bytes.NewReader([]byte(`{"address": "127.0.0.1:9000"}`))
	req, _ := http.NewRequest("PUT", s.httpSvr.URL+"/testService", svcDef)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		c.Fatal(err)
	}
	// The original discarded this response entirely, leaking the body
	// and its underlying connection.
	resp.Body.Close()
	backendDef := bytes.NewReader([]byte(`{"address": "127.0.0.1:9001"}`))
	req, _ = http.NewRequest("PUT", s.httpSvr.URL+"/testService/testBackend", backendDef)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		c.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	c.Assert(Registry.String(), DeepEquals, string(body))
}
Remove dead file from the migration
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// apiserver is the main api server and master for the cluster.
// it is responsible for serving the cluster management API.
package main
import (
"flag"
"fmt"
"net"
"net/http"
"strconv"
"strings"
"time"
"code.google.com/p/goprotobuf/proto"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/binding"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
kscheduler "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
kendpoint "github.com/GoogleCloudPlatform/kubernetes/pkg/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
goetcd "github.com/coreos/go-etcd/etcd"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
kmscheduler "github.com/mesosphere/kubernetes-mesos/scheduler"
kmendpoint "github.com/mesosphere/kubernetes-mesos/service"
)
// Command-line configuration for the kubernetes-mesos apiserver.
var (
	port         = flag.Uint("port", 8888, "The port to listen on. Default 8888.")
	address      = flag.String("address", "127.0.0.1", "The address on the local server to listen to. Default 127.0.0.1")
	apiPrefix    = flag.String("api_prefix", "/api/v1beta1", "The prefix for API requests on the server. Default '/api/v1beta1'")
	mesosMaster  = flag.String("mesos_master", "localhost:5050", "Location of leading Mesos master")
	executorPath = flag.String("executor_path", "", "Location of the kubernetes executor executable")
	proxyPath    = flag.String("proxy_path", "", "Location of the kubernetes proxy executable")
	minionPort   = flag.Uint("minion_port", 10250, "The port at which kubelet will be listening on the minions.")
	useHostPortEndpoints = flag.Bool("host_port_endpoints", true, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
	// List-valued flags, registered in init() via flag.Var.
	etcdServerList, machineList util.StringList
)
// Ports and periods for the master's background loops and the artifact
// file server.
const (
	artifactPort     = 9000             // HTTP port serving the executor/proxy binaries to Mesos
	cachePeriod      = 10 * time.Second // pod cache refresh interval
	syncPeriod       = 30 * time.Second // service endpoint sync interval
	httpReadTimeout  = 10 * time.Second
	httpWriteTimeout = 10 * time.Second
)
// init registers the list-valued flags that flag.Uint/String cannot express.
func init() {
	flag.Var(&etcdServerList, "etcd_servers", "Servers for the etcd (http://ip:port), comma separated")
	flag.Var(&machineList, "machines", "List of machines to schedule onto, comma separated.")
}
// kubernetesMaster bundles the registries, REST storage map, API client
// and the Mesos-backed scheduler that make up the master.
type kubernetesMaster struct {
	podRegistry        pod.Registry
	controllerRegistry controller.Registry
	serviceRegistry    service.Registry
	minionRegistry     minion.Registry
	bindingRegistry    binding.Registry
	storage            map[string]apiserver.RESTStorage // REST resources served under the API prefix
	client             *client.Client
	scheduler          *kmscheduler.KubernetesScheduler
}
// Copied from cmd/apiserver.go
// main starts the kubernetes-mesos master: it serves the executor and
// proxy binaries as HTTP artifacts for Mesos to fetch, registers the
// framework with the Mesos master, and then runs the Kubernetes API
// server (which never returns except on fatal error).
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	if len(machineList) == 0 {
		log.Fatal("No machines specified!")
	}
	// len() is never negative; "== 0" replaces the original "<= 0".
	if len(etcdServerList) == 0 {
		// Fixed typo in the original message ("severs" -> "servers").
		log.Fatal("No etcd servers specified!")
	}
	// Serve a single file over HTTP and return the URI Mesos should
	// fetch it from.
	serveExecutorArtifact := func(path string) string {
		serveFile := func(pattern string, filename string) {
			http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
				http.ServeFile(w, r, filename)
			})
		}
		// Create base path (http://foobar:5000/<base>)
		pathSplit := strings.Split(path, "/")
		var base string
		if len(pathSplit) > 0 {
			base = pathSplit[len(pathSplit)-1]
		} else {
			base = path
		}
		serveFile("/"+base, path)
		hostURI := fmt.Sprintf("http://%s:%d/%s", *address, artifactPort, base)
		log.V(2).Infof("Hosting artifact '%s' at '%s'", path, hostURI)
		return hostURI
	}
	executorURI := serveExecutorArtifact(*executorPath)
	proxyURI := serveExecutorArtifact(*proxyPath)
	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, artifactPort), nil)
	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}
	client, err := client.New("http://"+net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		log.Fatal(err)
	}
	executorCommand := "./kubernetes-executor -v=2"
	// Always true given the fatal check above; kept for safety.
	if len(etcdServerList) > 0 {
		etcdServerArguments := strings.Join(etcdServerList, ",")
		executorCommand = "./kubernetes-executor -v=2 -hostname_override=0.0.0.0 -etcd_servers=" + etcdServerArguments
	}
	// Create mesos scheduler driver.
	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("KubeleteExecutorID")},
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris: []*mesos.CommandInfo_URI{
				{Value: &executorURI},
				{Value: &proxyURI},
			},
		},
		Name:   proto.String("Kubelet Executor"),
		Source: proto.String("kubernetes"),
	}
	etcdClient := goetcd.NewClient(etcdServerList)
	helper := tools.EtcdHelper{
		etcdClient,
		runtime.DefaultCodec,
		runtime.DefaultResourceVersioner,
	}
	serviceRegistry := etcd.NewRegistry(etcdClient)
	mesosPodScheduler := kmscheduler.New(executor, kmscheduler.FCFSScheduleFunc, client, helper, serviceRegistry)
	driver := &mesos.MesosSchedulerDriver{
		Master: *mesosMaster,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("KubernetesScheduler"),
			User: proto.String("root"),
		},
		Scheduler: mesosPodScheduler,
	}
	mesosPodScheduler.Driver = driver
	driver.Init()
	defer driver.Destroy()
	go driver.Start()
	log.V(2).Info("Serving executor artifacts...")
	// TODO(nnielsen): Using default pod info getter until
	// MesosPodInfoGetter supports network containers.
	// podInfoGetter := MesosPodInfoGetter.New(mesosPodScheduler)
	m := newKubernetesMaster(mesosPodScheduler, &master.Config{
		Client:        client,
		Cloud:         mesosPodScheduler,
		Minions:       machineList,
		PodInfoGetter: podInfoGetter,
		EtcdServers:   etcdServerList,
	}, etcdClient, serviceRegistry)
	log.Fatal(m.run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix, helper.Codec))
}
// newKubernetesMaster wires the registries, API client and scheduler into
// a kubernetesMaster and initializes its REST storage map.
func newKubernetesMaster(scheduler *kmscheduler.KubernetesScheduler, c *master.Config, etcdClient tools.EtcdClient, sr service.Registry) *kubernetesMaster {
	m := &kubernetesMaster{
		podRegistry:        scheduler,
		controllerRegistry: etcd.NewRegistry(etcdClient),
		serviceRegistry:    sr,
		minionRegistry:     minion.NewRegistry(c.Minions), // TODO(adam): Mimic minionRegistryMaker(c)?
		bindingRegistry:    etcd.NewRegistry(etcdClient),
		client:             c.Client,
		scheduler:          scheduler,
	}
	m.init(scheduler, c.Cloud, c.PodInfoGetter)
	return m
}
// init builds the REST storage map served by the apiserver and starts the
// background pod-cache refresh loop.
// NOTE(review): the scheduler parameter is not referenced in this body —
// m.podRegistry was already set by the caller; confirm it is intentional.
func (m *kubernetesMaster) init(scheduler kscheduler.Scheduler, cloud cloudprovider.Interface, podInfoGetter client.PodInfoGetter) {
	podCache := master.NewPodCache(podInfoGetter, m.podRegistry)
	// Refresh cached container info for all pods every cachePeriod.
	go util.Forever(func() { podCache.UpdateAllContainers() }, cachePeriod)
	m.storage = map[string]apiserver.RESTStorage{
		"pods": pod.NewREST(&pod.RESTConfig{
			CloudProvider: cloud,
			PodCache:      podCache,
			PodInfoGetter: podInfoGetter,
			Registry:      m.podRegistry,
		}),
		"replicationControllers": controller.NewREST(m.controllerRegistry, m.podRegistry),
		"services":               service.NewREST(m.serviceRegistry, cloud, m.minionRegistry),
		"minions":                minion.NewREST(m.minionRegistry),
		// TODO: should appear only in scheduler API group.
		"bindings": binding.NewREST(m.bindingRegistry),
	}
}
// Run begins serving the Kubernetes API. It never returns.
// It starts the endpoint-sync loop and the scheduler plugin, then blocks
// in ListenAndServe; the returned error is whatever the HTTP server fails with.
func (m *kubernetesMaster) run(myAddress, apiPrefix string, codec runtime.Codec) error {
	endpoints := m.createEndpointController()
	// Re-sync service endpoints every syncPeriod.
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, syncPeriod)
	plugin.New(m.scheduler.NewPluginConfig()).Run()
	s := &http.Server{
		Addr:           myAddress,
		Handler:        apiserver.Handle(m.storage, codec, apiPrefix),
		ReadTimeout:    httpReadTimeout,
		WriteTimeout:   httpWriteTimeout,
		MaxHeaderBytes: 1 << 20, // 1 MiB header cap
	}
	return s.ListenAndServe()
}
// createEndpointController picks the endpoint controller implementation
// based on the -host_port_endpoints flag: the mesos-specific one when set,
// the stock kubernetes controller otherwise.
func (m *kubernetesMaster) createEndpointController() kmendpoint.EndpointController {
	if *useHostPortEndpoints {
		return kmendpoint.NewEndpointController(m.serviceRegistry, m.client)
	}
	return kendpoint.NewEndpointController(m.serviceRegistry, m.client)
}
log which endpoint controller we use
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// apiserver is the main api server and master for the cluster.
// it is responsible for serving the cluster management API.
package main
import (
"flag"
"fmt"
"net"
"net/http"
"strconv"
"strings"
"time"
"code.google.com/p/goprotobuf/proto"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/binding"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
kscheduler "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
kendpoint "github.com/GoogleCloudPlatform/kubernetes/pkg/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
goetcd "github.com/coreos/go-etcd/etcd"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
kmscheduler "github.com/mesosphere/kubernetes-mesos/scheduler"
kmendpoint "github.com/mesosphere/kubernetes-mesos/service"
)
// Command-line configuration for the kubernetes-mesos apiserver.
var (
	port         = flag.Uint("port", 8888, "The port to listen on. Default 8888.")
	address      = flag.String("address", "127.0.0.1", "The address on the local server to listen to. Default 127.0.0.1")
	apiPrefix    = flag.String("api_prefix", "/api/v1beta1", "The prefix for API requests on the server. Default '/api/v1beta1'")
	mesosMaster  = flag.String("mesos_master", "localhost:5050", "Location of leading Mesos master")
	executorPath = flag.String("executor_path", "", "Location of the kubernetes executor executable")
	proxyPath    = flag.String("proxy_path", "", "Location of the kubernetes proxy executable")
	minionPort   = flag.Uint("minion_port", 10250, "The port at which kubelet will be listening on the minions.")
	useHostPortEndpoints = flag.Bool("host_port_endpoints", true, "Map service endpoints to hostIP:hostPort instead of podIP:containerPort. Default true.")
	// List-valued flags, registered in init() via flag.Var.
	etcdServerList, machineList util.StringList
)
// Ports and periods for the master's background loops and the artifact
// file server.
const (
	artifactPort     = 9000             // HTTP port serving the executor/proxy binaries to Mesos
	cachePeriod      = 10 * time.Second // pod cache refresh interval
	syncPeriod       = 30 * time.Second // service endpoint sync interval
	httpReadTimeout  = 10 * time.Second
	httpWriteTimeout = 10 * time.Second
)
// init registers the list-valued flags that flag.Uint/String cannot express.
func init() {
	flag.Var(&etcdServerList, "etcd_servers", "Servers for the etcd (http://ip:port), comma separated")
	flag.Var(&machineList, "machines", "List of machines to schedule onto, comma separated.")
}
// kubernetesMaster bundles the registries, REST storage map, API client
// and the Mesos-backed scheduler that make up the master.
type kubernetesMaster struct {
	podRegistry        pod.Registry
	controllerRegistry controller.Registry
	serviceRegistry    service.Registry
	minionRegistry     minion.Registry
	bindingRegistry    binding.Registry
	storage            map[string]apiserver.RESTStorage // REST resources served under the API prefix
	client             *client.Client
	scheduler          *kmscheduler.KubernetesScheduler
}
// Copied from cmd/apiserver.go
// main starts the kubernetes-mesos master: it serves the executor and
// proxy binaries as HTTP artifacts for Mesos to fetch, registers the
// framework with the Mesos master, and then runs the Kubernetes API
// server (which never returns except on fatal error).
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	if len(machineList) == 0 {
		log.Fatal("No machines specified!")
	}
	// len() is never negative; "== 0" replaces the original "<= 0".
	if len(etcdServerList) == 0 {
		// Fixed typo in the original message ("severs" -> "servers").
		log.Fatal("No etcd servers specified!")
	}
	// Serve a single file over HTTP and return the URI Mesos should
	// fetch it from.
	serveExecutorArtifact := func(path string) string {
		serveFile := func(pattern string, filename string) {
			http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
				http.ServeFile(w, r, filename)
			})
		}
		// Create base path (http://foobar:5000/<base>)
		pathSplit := strings.Split(path, "/")
		var base string
		if len(pathSplit) > 0 {
			base = pathSplit[len(pathSplit)-1]
		} else {
			base = path
		}
		serveFile("/"+base, path)
		hostURI := fmt.Sprintf("http://%s:%d/%s", *address, artifactPort, base)
		log.V(2).Infof("Hosting artifact '%s' at '%s'", path, hostURI)
		return hostURI
	}
	executorURI := serveExecutorArtifact(*executorPath)
	proxyURI := serveExecutorArtifact(*proxyPath)
	go http.ListenAndServe(fmt.Sprintf("%s:%d", *address, artifactPort), nil)
	podInfoGetter := &client.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   *minionPort,
	}
	client, err := client.New("http://"+net.JoinHostPort(*address, strconv.Itoa(int(*port))), nil)
	if err != nil {
		log.Fatal(err)
	}
	executorCommand := "./kubernetes-executor -v=2"
	// Always true given the fatal check above; kept for safety.
	if len(etcdServerList) > 0 {
		etcdServerArguments := strings.Join(etcdServerList, ",")
		executorCommand = "./kubernetes-executor -v=2 -hostname_override=0.0.0.0 -etcd_servers=" + etcdServerArguments
	}
	// Create mesos scheduler driver.
	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("KubeleteExecutorID")},
		Command: &mesos.CommandInfo{
			Value: proto.String(executorCommand),
			Uris: []*mesos.CommandInfo_URI{
				{Value: &executorURI},
				{Value: &proxyURI},
			},
		},
		Name:   proto.String("Kubelet Executor"),
		Source: proto.String("kubernetes"),
	}
	etcdClient := goetcd.NewClient(etcdServerList)
	helper := tools.EtcdHelper{
		etcdClient,
		runtime.DefaultCodec,
		runtime.DefaultResourceVersioner,
	}
	serviceRegistry := etcd.NewRegistry(etcdClient)
	mesosPodScheduler := kmscheduler.New(executor, kmscheduler.FCFSScheduleFunc, client, helper, serviceRegistry)
	driver := &mesos.MesosSchedulerDriver{
		Master: *mesosMaster,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("KubernetesScheduler"),
			User: proto.String("root"),
		},
		Scheduler: mesosPodScheduler,
	}
	mesosPodScheduler.Driver = driver
	driver.Init()
	defer driver.Destroy()
	go driver.Start()
	log.V(2).Info("Serving executor artifacts...")
	// TODO(nnielsen): Using default pod info getter until
	// MesosPodInfoGetter supports network containers.
	// podInfoGetter := MesosPodInfoGetter.New(mesosPodScheduler)
	m := newKubernetesMaster(mesosPodScheduler, &master.Config{
		Client:        client,
		Cloud:         mesosPodScheduler,
		Minions:       machineList,
		PodInfoGetter: podInfoGetter,
		EtcdServers:   etcdServerList,
	}, etcdClient, serviceRegistry)
	log.Fatal(m.run(net.JoinHostPort(*address, strconv.Itoa(int(*port))), *apiPrefix, helper.Codec))
}
// newKubernetesMaster wires the registries, API client and scheduler into
// a kubernetesMaster and initializes its REST storage map.
func newKubernetesMaster(scheduler *kmscheduler.KubernetesScheduler, c *master.Config, etcdClient tools.EtcdClient, sr service.Registry) *kubernetesMaster {
	m := &kubernetesMaster{
		podRegistry:        scheduler,
		controllerRegistry: etcd.NewRegistry(etcdClient),
		serviceRegistry:    sr,
		minionRegistry:     minion.NewRegistry(c.Minions), // TODO(adam): Mimic minionRegistryMaker(c)?
		bindingRegistry:    etcd.NewRegistry(etcdClient),
		client:             c.Client,
		scheduler:          scheduler,
	}
	m.init(scheduler, c.Cloud, c.PodInfoGetter)
	return m
}
// init builds the REST storage map served by the apiserver and starts the
// background pod-cache refresh loop.
// NOTE(review): the scheduler parameter is not referenced in this body —
// m.podRegistry was already set by the caller; confirm it is intentional.
func (m *kubernetesMaster) init(scheduler kscheduler.Scheduler, cloud cloudprovider.Interface, podInfoGetter client.PodInfoGetter) {
	podCache := master.NewPodCache(podInfoGetter, m.podRegistry)
	// Refresh cached container info for all pods every cachePeriod.
	go util.Forever(func() { podCache.UpdateAllContainers() }, cachePeriod)
	m.storage = map[string]apiserver.RESTStorage{
		"pods": pod.NewREST(&pod.RESTConfig{
			CloudProvider: cloud,
			PodCache:      podCache,
			PodInfoGetter: podInfoGetter,
			Registry:      m.podRegistry,
		}),
		"replicationControllers": controller.NewREST(m.controllerRegistry, m.podRegistry),
		"services":               service.NewREST(m.serviceRegistry, cloud, m.minionRegistry),
		"minions":                minion.NewREST(m.minionRegistry),
		// TODO: should appear only in scheduler API group.
		"bindings": binding.NewREST(m.bindingRegistry),
	}
}
// Run begins serving the Kubernetes API. It never returns.
// It starts the endpoint-sync loop and the scheduler plugin, then blocks
// in ListenAndServe; the returned error is whatever the HTTP server fails with.
func (m *kubernetesMaster) run(myAddress, apiPrefix string, codec runtime.Codec) error {
	endpoints := m.createEndpointController()
	// Re-sync service endpoints every syncPeriod.
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, syncPeriod)
	plugin.New(m.scheduler.NewPluginConfig()).Run()
	s := &http.Server{
		Addr:           myAddress,
		Handler:        apiserver.Handle(m.storage, codec, apiPrefix),
		ReadTimeout:    httpReadTimeout,
		WriteTimeout:   httpWriteTimeout,
		MaxHeaderBytes: 1 << 20, // 1 MiB header cap
	}
	return s.ListenAndServe()
}
// createEndpointController picks the endpoint controller implementation
// based on the -host_port_endpoints flag: the mesos-specific one when set,
// the stock kubernetes controller otherwise. Each choice is logged.
func (m *kubernetesMaster) createEndpointController() kmendpoint.EndpointController {
	if *useHostPortEndpoints {
		log.V(2).Infof("Creating hostIP:hostPort endpoint controller")
		return kmendpoint.NewEndpointController(m.serviceRegistry, m.client)
	}
	log.V(2).Infof("Creating podIP:containerPort endpoint controller")
	return kendpoint.NewEndpointController(m.serviceRegistry, m.client)
}
|
package flame
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/256dpi/oauth2"
"github.com/256dpi/oauth2/spec"
"github.com/stretchr/testify/assert"
"gopkg.in/mgo.v2/bson"
)
// TestIntegration runs the full oauth2 spec test suite against an
// authenticator with password, client-credentials and implicit grants
// enabled, using two client applications and one resource owner as fixtures.
func TestIntegration(t *testing.T) {
	tester.Clean()
	var testPassword = "foo"
	var allowedScope = oauth2.ParseScope("foo bar")
	var requiredScope = oauth2.ParseScope("foo")
	// Policy under test: all optional grants switched on; the grant
	// strategy rejects scopes outside allowedScope or missing requiredScope.
	p := DefaultPolicy("")
	p.PasswordGrant = true
	p.ClientCredentialsGrant = true
	p.ImplicitGrant = true
	p.GrantStrategy = func(scope oauth2.Scope, _ Client, _ ResourceOwner) (oauth2.Scope, error) {
		if !allowedScope.Includes(scope) {
			return nil, ErrInvalidScope
		}
		if !scope.Includes(requiredScope) {
			return nil, ErrInvalidScope
		}
		return scope, nil
	}
	authenticator := NewAuthenticator(tester.Store, p)
	// Any internal authenticator error fails the test.
	authenticator.Reporter = func(err error) {
		t.Error(err)
	}
	// Fixtures: two client applications and one resource owner.
	app1 := tester.Save(&Application{
		Name:        "Application 1",
		Key:         "app1",
		SecretHash:  mustHash(testPassword),
		RedirectURL: "http://example.com/callback1",
	}).(*Application)
	app2 := tester.Save(&Application{
		Name:        "Application 2",
		Key:         "app2",
		SecretHash:  mustHash(testPassword),
		RedirectURL: "http://example.com/callback2",
	}).(*Application)
	user := tester.Save(&User{
		Name:         "User",
		Email:        "user@example.com",
		PasswordHash: mustHash(testPassword),
	}).(*User)
	// Spec configuration: supported flows, credentials and scopes.
	config := spec.Default(newHandler(authenticator, true))
	config.PasswordGrantSupport = true
	config.ClientCredentialsGrantSupport = true
	config.ImplicitGrantSupport = true
	config.RefreshTokenGrantSupport = true
	config.PrimaryClientID = app1.Key
	config.PrimaryClientSecret = testPassword
	config.SecondaryClientID = app2.Key
	config.SecondaryClientSecret = testPassword
	config.ResourceOwnerUsername = user.Email
	config.ResourceOwnerPassword = testPassword
	config.InvalidScope = "baz"
	config.ValidScope = "foo bar"
	config.ExceedingScope = "foo bar baz"
	config.ExpectedExpiresIn = int(authenticator.policy.AccessTokenLifespan / time.Second)
	// Access tokens: one already expired, one valid but without scopes.
	expiredToken := tester.Save(&AccessToken{
		ExpiresAt: time.Now().Add(-authenticator.policy.AccessTokenLifespan),
		Scope:     []string{"foo"},
		Client:    app1.ID(),
	}).(*AccessToken)
	insufficientToken := tester.Save(&AccessToken{
		ExpiresAt: time.Now().Add(authenticator.policy.AccessTokenLifespan),
		Scope:     []string{},
		Client:    app1.ID(),
	}).(*AccessToken)
	config.UnknownToken = mustGenerateAccessToken(p, bson.NewObjectId(), time.Now())
	config.ExpiredToken = mustGenerateAccessToken(p, expiredToken.ID(), expiredToken.ExpiresAt)
	config.InsufficientToken = mustGenerateAccessToken(p, insufficientToken.ID(), insufficientToken.ExpiresAt)
	config.PrimaryRedirectURI = "http://example.com/callback1"
	config.SecondaryRedirectURI = "http://example.com/callback2"
	// Refresh tokens: one valid, one expired.
	validRefreshToken := tester.Save(&RefreshToken{
		ExpiresAt: time.Now().Add(authenticator.policy.RefreshTokenLifespan),
		Scope:     []string{"foo", "bar"},
		Client:    app1.ID(),
	}).(*RefreshToken)
	expiredRefreshToken := tester.Save(&RefreshToken{
		ExpiresAt: time.Now().Add(-authenticator.policy.RefreshTokenLifespan),
		Scope:     []string{"foo", "bar"},
		Client:    app1.ID(),
	}).(*RefreshToken)
	config.UnknownRefreshToken = mustGenerateRefreshToken(p, bson.NewObjectId(), time.Now())
	config.ValidRefreshToken = mustGenerateRefreshToken(p, validRefreshToken.ID(), validRefreshToken.ExpiresAt)
	config.ExpiredRefreshToken = mustGenerateRefreshToken(p, expiredRefreshToken.ID(), expiredRefreshToken.ExpiresAt)
	config.AuthorizationParams = map[string]string{
		"username": user.Email,
		"password": testPassword,
	}
	spec.Run(t, config)
}
// TestPublicAccess verifies that with auth not enforced the protected
// endpoint is reachable without credentials.
func TestPublicAccess(t *testing.T) {
	tester.Clean()
	authenticator := NewAuthenticator(tester.Store, DefaultPolicy(""))
	tester.Handler = newHandler(authenticator, false)
	tester.Request("GET", "api/protected", "", func(r *httptest.ResponseRecorder, rq *http.Request) {
		assert.Equal(t, "OK", r.Body.String())
	})
}
// mustGenerateAccessToken builds a signed token for id expiring at
// expiresAt, panicking on generator errors (test helper).
func mustGenerateAccessToken(p *Policy, id bson.ObjectId, expiresAt time.Time) string {
	token, err := p.GenerateToken(id, time.Now(), expiresAt, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	return token
}
// mustGenerateRefreshToken builds a signed token for id expiring at
// expiresAt, panicking on generator errors (test helper).
func mustGenerateRefreshToken(p *Policy, id bson.ObjectId, expiresAt time.Time) string {
	token, err := p.GenerateToken(id, time.Now(), expiresAt, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	return token
}
added context key test
package flame
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/256dpi/oauth2"
"github.com/256dpi/oauth2/spec"
"github.com/stretchr/testify/assert"
"gopkg.in/mgo.v2/bson"
)
// TestIntegration runs the full oauth2 spec test suite against an
// authenticator with password, client-credentials and implicit grants
// enabled, using two client applications and one resource owner as fixtures.
func TestIntegration(t *testing.T) {
	tester.Clean()
	var testPassword = "foo"
	var allowedScope = oauth2.ParseScope("foo bar")
	var requiredScope = oauth2.ParseScope("foo")
	// Policy under test: all optional grants switched on; the grant
	// strategy rejects scopes outside allowedScope or missing requiredScope.
	p := DefaultPolicy("")
	p.PasswordGrant = true
	p.ClientCredentialsGrant = true
	p.ImplicitGrant = true
	p.GrantStrategy = func(scope oauth2.Scope, _ Client, _ ResourceOwner) (oauth2.Scope, error) {
		if !allowedScope.Includes(scope) {
			return nil, ErrInvalidScope
		}
		if !scope.Includes(requiredScope) {
			return nil, ErrInvalidScope
		}
		return scope, nil
	}
	authenticator := NewAuthenticator(tester.Store, p)
	// Any internal authenticator error fails the test.
	authenticator.Reporter = func(err error) {
		t.Error(err)
	}
	// Fixtures: two client applications and one resource owner.
	app1 := tester.Save(&Application{
		Name:        "Application 1",
		Key:         "app1",
		SecretHash:  mustHash(testPassword),
		RedirectURL: "http://example.com/callback1",
	}).(*Application)
	app2 := tester.Save(&Application{
		Name:        "Application 2",
		Key:         "app2",
		SecretHash:  mustHash(testPassword),
		RedirectURL: "http://example.com/callback2",
	}).(*Application)
	user := tester.Save(&User{
		Name:         "User",
		Email:        "user@example.com",
		PasswordHash: mustHash(testPassword),
	}).(*User)
	// Spec configuration: supported flows, credentials and scopes.
	config := spec.Default(newHandler(authenticator, true))
	config.PasswordGrantSupport = true
	config.ClientCredentialsGrantSupport = true
	config.ImplicitGrantSupport = true
	config.RefreshTokenGrantSupport = true
	config.PrimaryClientID = app1.Key
	config.PrimaryClientSecret = testPassword
	config.SecondaryClientID = app2.Key
	config.SecondaryClientSecret = testPassword
	config.ResourceOwnerUsername = user.Email
	config.ResourceOwnerPassword = testPassword
	config.InvalidScope = "baz"
	config.ValidScope = "foo bar"
	config.ExceedingScope = "foo bar baz"
	config.ExpectedExpiresIn = int(authenticator.policy.AccessTokenLifespan / time.Second)
	// Access tokens: one already expired, one valid but without scopes.
	expiredToken := tester.Save(&AccessToken{
		ExpiresAt: time.Now().Add(-authenticator.policy.AccessTokenLifespan),
		Scope:     []string{"foo"},
		Client:    app1.ID(),
	}).(*AccessToken)
	insufficientToken := tester.Save(&AccessToken{
		ExpiresAt: time.Now().Add(authenticator.policy.AccessTokenLifespan),
		Scope:     []string{},
		Client:    app1.ID(),
	}).(*AccessToken)
	config.UnknownToken = mustGenerateAccessToken(p, bson.NewObjectId(), time.Now())
	config.ExpiredToken = mustGenerateAccessToken(p, expiredToken.ID(), expiredToken.ExpiresAt)
	config.InsufficientToken = mustGenerateAccessToken(p, insufficientToken.ID(), insufficientToken.ExpiresAt)
	config.PrimaryRedirectURI = "http://example.com/callback1"
	config.SecondaryRedirectURI = "http://example.com/callback2"
	// Refresh tokens: one valid, one expired.
	validRefreshToken := tester.Save(&RefreshToken{
		ExpiresAt: time.Now().Add(authenticator.policy.RefreshTokenLifespan),
		Scope:     []string{"foo", "bar"},
		Client:    app1.ID(),
	}).(*RefreshToken)
	expiredRefreshToken := tester.Save(&RefreshToken{
		ExpiresAt: time.Now().Add(-authenticator.policy.RefreshTokenLifespan),
		Scope:     []string{"foo", "bar"},
		Client:    app1.ID(),
	}).(*RefreshToken)
	config.UnknownRefreshToken = mustGenerateRefreshToken(p, bson.NewObjectId(), time.Now())
	config.ValidRefreshToken = mustGenerateRefreshToken(p, validRefreshToken.ID(), validRefreshToken.ExpiresAt)
	config.ExpiredRefreshToken = mustGenerateRefreshToken(p, expiredRefreshToken.ID(), expiredRefreshToken.ExpiresAt)
	config.AuthorizationParams = map[string]string{
		"username": user.Email,
		"password": testPassword,
	}
	spec.Run(t, config)
}
// TestPublicAccess verifies that with auth not enforced the protected
// endpoint is reachable without credentials.
func TestPublicAccess(t *testing.T) {
	tester.Clean()
	authenticator := NewAuthenticator(tester.Store, DefaultPolicy(""))
	tester.Handler = newHandler(authenticator, false)
	tester.Request("GET", "api/protected", "", func(r *httptest.ResponseRecorder, rq *http.Request) {
		assert.Equal(t, "OK", r.Body.String())
	})
}
// TestContextKeys verifies that the Authorizer middleware stores the
// access token, client and resource owner in the request context under
// their respective context keys.
func TestContextKeys(t *testing.T) {
	tester.Clean()
	authenticator := NewAuthenticator(tester.Store, DefaultPolicy(""))
	tester.Handler = newHandler(authenticator, false)
	// Fixtures: an application, a user, and an access token tying them together.
	application := tester.Save(&Application{
		Key: "application",
	}).(*Application).ID()
	user := tester.Save(&User{
		Name:  "User",
		Email: "email@example.com",
	}).(*User).ID()
	accessToken := tester.Save(&AccessToken{
		ExpiresAt:     time.Now().Add(authenticator.policy.AccessTokenLifespan),
		Client:        application,
		ResourceOwner: &user,
	}).(*AccessToken).ID()
	token := mustGenerateAccessToken(authenticator.policy, accessToken, time.Now().Add(time.Hour))
	auth := authenticator.Authorizer("", true, true, true)
	// The handler asserts the context values set by the authorizer.
	tester.Handler.(*http.ServeMux).Handle("/api/info", auth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, accessToken, r.Context().Value(AccessTokenContextKey).(*AccessToken).ID())
		assert.Equal(t, application, r.Context().Value(ClientContextKey).(*Application).ID())
		assert.Equal(t, user, r.Context().Value(ResourceOwnerContextKey).(*User).ID())
	})))
	tester.Header["Authorization"] = "Bearer " + token
	tester.Request("GET", "api/info", "", func(r *httptest.ResponseRecorder, rq *http.Request) {
		assert.Equal(t, http.StatusOK, r.Code, tester.DebugRequest(rq, r))
	})
}
// mustGenerateAccessToken builds a signed token for id expiring at
// expiresAt, panicking on generator errors (test helper).
func mustGenerateAccessToken(p *Policy, id bson.ObjectId, expiresAt time.Time) string {
	token, err := p.GenerateToken(id, time.Now(), expiresAt, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	return token
}
// mustGenerateRefreshToken builds a signed token for id expiring at
// expiresAt, panicking on generator errors (test helper).
func mustGenerateRefreshToken(p *Policy, id bson.ObjectId, expiresAt time.Time) string {
	token, err := p.GenerateToken(id, time.Now(), expiresAt, nil, nil, nil)
	if err != nil {
		panic(err)
	}
	return token
}
|
/*
*
* Copyright IBM Corp. All Rights Reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
*
*/
package gossip
import (
"fmt"
"io/ioutil"
"os"
"syscall"
docker "github.com/fsouza/go-dockerclient"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
)
// Ginkgo suite for gossip-based state transfer: peers cut off from the
// orderer must sync blocks from other peers via gossip.
var _ = Describe("Gossip Test", func() {
	var (
		testDir   string
		client    *docker.Client
		network   *nwo.Network
		chaincode nwo.Chaincode
		process   ifrit.Process
	)
	BeforeEach(func() {
		var err error
		testDir, err = ioutil.TempDir("", "e2e")
		Expect(err).NotTo(HaveOccurred())
		client, err = docker.NewClientFromEnv()
		Expect(err).NotTo(HaveOccurred())
		chaincode = nwo.Chaincode{
			Name:    "mycc",
			Version: "0.0",
			Path:    "github.com/hyperledger/fabric/integration/chaincode/simple/cmd",
			Ctor:    `{"Args":["init","a","100","b","200"]}`,
			Policy:  `OR ('Org1MSP.member','Org2MSP.member')`,
		}
	})
	AfterEach(func() {
		if process != nil {
			process.Signal(syscall.SIGTERM)
			Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
		}
		if network != nil {
			network.Cleanup()
		}
		os.RemoveAll(testDir)
	})
	// PDescribe: this container is pending (skipped) as written.
	PDescribe("State transfer test", func() {
		var (
			ordererProcess ifrit.Process
			peerProcesses  = map[string]ifrit.Process{}
			peerRunners    = map[string]*ginkgomon.Runner{}
		)
		BeforeEach(func() {
			network = nwo.New(nwo.BasicSolo(), testDir, client, StartPort(), components)
			network.GenerateConfigTree()
			network.Bootstrap()
		})
		AfterEach(func() {
			if ordererProcess != nil {
				ordererProcess.Signal(syscall.SIGTERM)
				Eventually(ordererProcess.Wait(), network.EventuallyTimeout).Should(Receive())
			}
			for _, process := range peerProcesses {
				process.Signal(syscall.SIGTERM)
				Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
			}
		})
		It("solo network with 2 orgs, 2 peers each, should sync from the peer if no orderer available", func() {
			orderer := network.Orderer("orderer")
			ordererRunner := network.OrdererRunner(orderer)
			ordererProcess = ifrit.Invoke(ordererRunner)
			peer0Org1, peer1Org1 := network.Peer("Org1", "peer0"), network.Peer("Org1", "peer1")
			peer0Org2, peer1Org2 := network.Peer("Org2", "peer0"), network.Peer("Org2", "peer1")
			for _, peer := range []*nwo.Peer{peer0Org1, peer1Org1, peer0Org2, peer1Org2} {
				runner := network.PeerRunner(peer)
				peerProcesses[peer.ID()] = ifrit.Invoke(runner)
				peerRunners[peer.ID()] = runner
			}
			channelName := "testchannel"
			network.CreateChannel(channelName, orderer, peer0Org1)
			network.JoinChannel(channelName, orderer, peer0Org1, peer1Org1, peer0Org2, peer1Org2)
			nwo.DeployChaincodeLegacy(network, channelName, orderer, chaincode, peer0Org1)
			network.UpdateChannelAnchors(orderer, channelName)
			// All peers must have at least the genesis + config blocks.
			for _, peer := range []*nwo.Peer{peer0Org1, peer1Org1, peer0Org2, peer1Org2} {
				Eventually(func() int {
					return nwo.GetLedgerHeight(network, peer, channelName)
				}, network.EventuallyTimeout).Should(BeNumerically(">=", 2))
			}
			By("stop peers except peer0Org1 to make sure they cannot get blocks from orderer")
			for id, proc := range peerProcesses {
				if id == peer0Org1.ID() {
					continue
				}
				proc.Signal(syscall.SIGTERM)
				Eventually(proc.Wait(), network.EventuallyTimeout).Should(Receive())
				delete(peerProcesses, id)
			}
			By("create transactions")
			runTransactions(network, orderer, peer0Org1, "mycc", channelName)
			peer0LedgerHeight := nwo.GetLedgerHeight(network, peer0Org1, channelName)
			By("turning down ordering service")
			ordererProcess.Signal(syscall.SIGTERM)
			Eventually(ordererProcess.Wait(), network.EventuallyTimeout).Should(Receive())
			ordererProcess = nil
			By("restart the three peers that were stopped")
			peerList := []*nwo.Peer{peer1Org1, peer0Org2, peer1Org2}
			peersRestart(network, orderer, peerList, peerProcesses, peerRunners)
			// With no orderer, the restarted peers can only catch up via gossip.
			By("Make sure peers are synced up")
			assertPeersLedgerHeight(network, orderer, peerList, peer0LedgerHeight, channelName)
			By("start the orderer")
			orderer = network.Orderer("orderer")
			ordererRunner = network.OrdererRunner(orderer)
			ordererProcess = ifrit.Invoke(ordererRunner)
			By("install chaincode")
			nwo.InstallChaincodeLegacy(network, chaincode, peer1Org1)
			By("stop leader, peer0Org1, to make sure it cannot get blocks from orderer")
			id := peer0Org1.ID()
			proc := peerProcesses[id]
			proc.Signal(syscall.SIGTERM)
			Eventually(proc.Wait(), network.EventuallyTimeout).Should(Receive())
			expectedMsg := "Stopped being a leader"
			Eventually(peerRunners[id].Err(), network.EventuallyTimeout).Should(gbytes.Say(expectedMsg))
			delete(peerProcesses, id)
			By("create transactions")
			runTransactions(network, orderer, peer1Org1, "mycc", channelName)
			peer1LedgerHeight := nwo.GetLedgerHeight(network, peer1Org1, channelName)
			By("turning down ordering service")
			ordererProcess.Signal(syscall.SIGTERM)
			Eventually(ordererProcess.Wait(), network.EventuallyTimeout).Should(Receive())
			ordererProcess = nil
			By("restart peer0Org1")
			peerList = []*nwo.Peer{peer0Org1}
			peersRestart(network, orderer, peerList, peerProcesses, peerRunners)
			By("Make sure peer0Org1 is synced up")
			assertPeersLedgerHeight(network, orderer, peerList, peer1LedgerHeight, channelName)
		})
	})
})
// runTransactions invokes the given chaincode 10 times through the given peer,
// targeting the given orderer, and asserts that every invoke exits 0 with a
// status:200 result. Used to advance the channel's ledger height in tests.
func runTransactions(n *nwo.Network, orderer *nwo.Orderer, peer *nwo.Peer, chaincodeName string, channelID string) {
	for i := 0; i < 10; i++ {
		sess, err := n.PeerUserSession(peer, "User1", commands.ChaincodeInvoke{
			ChannelID: channelID,
			Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
			Name:      chaincodeName,
			Ctor:      `{"Args":["invoke","a","b","10"]}`,
			PeerAddresses: []string{
				n.PeerAddress(peer, nwo.ListenPort),
			},
			WaitForEvent: true, // block until the transaction event is received
		})
		Expect(err).NotTo(HaveOccurred())
		Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
		Expect(sess.Err).To(gbytes.Say("Chaincode invoke successful. result: status:200"))
	}
}
// peersRestart relaunches every peer in peerList and records the new process
// and runner in the supplied maps, keyed by peer ID. The environment lowers
// the gossip state-check interval to 200ms so state transfer kicks in quickly
// and raises gossip.state logging to debug for diagnosis.
// NOTE(review): the orderer parameter is unused here — presumably kept for
// signature symmetry with the sibling helpers; confirm before removing.
func peersRestart(n *nwo.Network, orderer *nwo.Orderer, peerList []*nwo.Peer, peerProc map[string]ifrit.Process, peerRun map[string]*ginkgomon.Runner) {
	for _, peer := range peerList {
		runner := n.PeerRunner(peer, fmt.Sprint("CORE_PEER_GOSSIP_STATE_CHECKINTERVAL=200ms"),
			fmt.Sprint("FABRIC_LOGGING_SPEC=info:gossip.state=debug"),
		)
		peerProc[peer.ID()] = ifrit.Invoke(runner)
		peerRun[peer.ID()] = runner
	}
}
// assertPeersLedgerHeight blocks until every peer in peerList reports a ledger
// height equal to expectedVal on the given channel, failing the spec if any
// peer has not caught up within the network's Eventually timeout.
func assertPeersLedgerHeight(n *nwo.Network, orderer *nwo.Orderer, peerList []*nwo.Peer, expectedVal int, channelID string) {
	for _, p := range peerList {
		peer := p // capture per-iteration value for the closure
		height := func() int {
			return nwo.GetLedgerHeight(n, peer, channelID)
		}
		Eventually(height, n.EventuallyTimeout).Should(Equal(expectedVal))
	}
}
[FAB-15982] gossip IT test with no leader election
This CR is to add gossip tests:
[FAB-15982] a leader peer goes down then comes back
should catch up all blocks with no leader election
[FAB-15984] a non-leader peer, that joins an active
channel, should have all blocks either with or
without leader election
Signed-off-by: Dongming <8a44bf95374aee2f65c5ee249f73073c9eb07cf5@ibm.com>
Change-Id: Ieee47d7dd582255012c3bba26d8f9fa9d60a4cea
/*
*
* Copyright IBM Corp. All Rights Reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
*
*/
package gossip
import (
"fmt"
"io/ioutil"
"os"
"syscall"
docker "github.com/fsouza/go-dockerclient"
"github.com/hyperledger/fabric/integration/nwo"
"github.com/hyperledger/fabric/integration/nwo/commands"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
)
// Gossip state-transfer integration suite. The outer container wires up the
// shared fixtures (temp dir, docker client, chaincode definition); the inner
// PDescribe (pending — currently skipped by ginkgo) exercises block delivery
// between peers via gossip state transfer while the orderer is unavailable.
var _ = Describe("Gossip Test", func() {
	var (
		testDir   string // scratch directory for network artifacts
		client    *docker.Client
		network   *nwo.Network
		chaincode nwo.Chaincode
		process   ifrit.Process
	)

	BeforeEach(func() {
		var err error
		testDir, err = ioutil.TempDir("", "e2e")
		Expect(err).NotTo(HaveOccurred())

		client, err = docker.NewClientFromEnv()
		Expect(err).NotTo(HaveOccurred())

		chaincode = nwo.Chaincode{
			Name:    "mycc",
			Version: "0.0",
			Path:    "github.com/hyperledger/fabric/integration/chaincode/simple/cmd",
			Ctor:    `{"Args":["init","a","100","b","200"]}`,
			Policy:  `OR ('Org1MSP.member','Org2MSP.member')`,
		}
	})

	AfterEach(func() {
		// Tear down in reverse order of construction: process, network, files.
		if process != nil {
			process.Signal(syscall.SIGTERM)
			Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
		}
		if network != nil {
			network.Cleanup()
		}
		os.RemoveAll(testDir)
	})

	PDescribe("Gossip state transfer test", func() {
		var (
			ordererProcess ifrit.Process
			peerProcesses  = map[string]ifrit.Process{}
			peerRunners    = map[string]*ginkgomon.Runner{}
		)

		BeforeEach(func() {
			network = nwo.New(nwo.BasicSolo(), testDir, client, StartPort(), components)
			network.GenerateConfigTree()
			// Modify peer config:
			//   Org1: leader election enabled (peer1 bootstraps against peer0)
			//   Org2: no leader election; peer1 is the static leader, peer0 a follower
			for _, peer := range network.Peers {
				if peer.Organization == "Org1" {
					core := network.ReadPeerConfig(peer)
					if peer.Name == "peer1" {
						// NOTE(review): hard-coded gossip endpoint — presumably within
						// the StartPort() range; confirm it matches the allocation.
						core.Peer.Gossip.Bootstrap = "127.0.0.1:21004"
						network.WritePeerConfig(peer, core)
					}
				}
				if peer.Organization == "Org2" {
					core := network.ReadPeerConfig(peer)
					core.Peer.Gossip.UseLeaderElection = false
					if peer.Name == "peer1" {
						core.Peer.Gossip.OrgLeader = true
					} else {
						core.Peer.Gossip.OrgLeader = false
					}
					network.WritePeerConfig(peer, core)
				}
			}
			network.Bootstrap()
		})

		AfterEach(func() {
			if ordererProcess != nil {
				ordererProcess.Signal(syscall.SIGTERM)
				Eventually(ordererProcess.Wait(), network.EventuallyTimeout).Should(Receive())
			}
			for _, process := range peerProcesses {
				process.Signal(syscall.SIGTERM)
				Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
			}
		})

		It("syncs blocks from the peer if no orderer is available, using solo network with 2 orgs, 2 peers each", func() {
			orderer := network.Orderer("orderer")
			ordererRunner := network.OrdererRunner(orderer)
			ordererProcess = ifrit.Invoke(ordererRunner)

			peer0Org1, peer1Org1 := network.Peer("Org1", "peer0"), network.Peer("Org1", "peer1")
			peer0Org2, peer1Org2 := network.Peer("Org2", "peer0"), network.Peer("Org2", "peer1")

			By("bring up all four peers")
			peersToBringUp := []*nwo.Peer{peer0Org1, peer1Org1, peer0Org2, peer1Org2}
			startPeers(network, peersToBringUp, peerProcesses, peerRunners, false)

			channelName := "testchannel"
			network.CreateChannel(channelName, orderer, peer0Org1)
			By("join all peers to channel")
			network.JoinChannel(channelName, orderer, peer0Org1, peer1Org1, peer0Org2, peer1Org2)
			network.UpdateChannelAnchors(orderer, channelName)

			// base peer will be used for chaincode interactions
			basePeerForTransactions := peer0Org1
			nwo.DeployChaincodeLegacy(network, channelName, orderer, chaincode, basePeerForTransactions)

			By("STATE TRANSFER TEST 1: newly joined peers should receive blocks from the peers that are already up")
			// Note, a better test would be to bring orderer down before joining the two peers.
			// However, network.JoinChannel() requires orderer to be up so that genesis block can be fetched from orderer before joining peers.
			// Therefore, for now we've joined all four peers and stop the two peers that should be synced up.
			peersToStop := []*nwo.Peer{peer1Org1, peer1Org2}
			stopPeers(network, peersToStop, peerProcesses)
			peersToSyncUp := []*nwo.Peer{peer1Org1, peer1Org2}
			sendTransactionsAndSyncUpPeers(network, orderer, basePeerForTransactions, peersToSyncUp, channelName, &ordererProcess, ordererRunner, peerProcesses, peerRunners)

			By("STATE TRANSFER TEST 2: restarted peers should receive blocks from the peers that are already up")
			basePeerForTransactions = peer1Org1
			nwo.InstallChaincodeLegacy(network, chaincode, basePeerForTransactions)

			By("stop peer0Org1 (currently elected leader in Org1) and peer1Org2 (static leader in Org2)")
			peersToStop = []*nwo.Peer{peer0Org1, peer1Org2}
			stopPeers(network, peersToStop, peerProcesses)
			peersToSyncUp = []*nwo.Peer{peer0Org1, peer1Org2}
			// Note that with the static leader in Org2 down, the static follower peer0Org2 will also get blocks via state transfer
			// This effectively tests leader election as well, since the newly elected leader in Org1 (peer1Org1) will be the only peer
			// that receives blocks from orderer and will therefore serve as the provider of blocks to all other peers.
			sendTransactionsAndSyncUpPeers(network, orderer, basePeerForTransactions, peersToSyncUp, channelName, &ordererProcess, ordererRunner, peerProcesses, peerRunners)
		})
	})
})
// runTransactions invokes the given chaincode 5 times through the given peer,
// targeting the given orderer, and asserts that every invoke exits 0 with a
// status:200 result. Used to advance the channel's ledger height in tests.
func runTransactions(n *nwo.Network, orderer *nwo.Orderer, peer *nwo.Peer, chaincodeName string, channelID string) {
	for i := 0; i < 5; i++ {
		sess, err := n.PeerUserSession(peer, "User1", commands.ChaincodeInvoke{
			ChannelID: channelID,
			Orderer:   n.OrdererAddress(orderer, nwo.ListenPort),
			Name:      chaincodeName,
			Ctor:      `{"Args":["invoke","a","b","10"]}`,
			PeerAddresses: []string{
				n.PeerAddress(peer, nwo.ListenPort),
			},
			WaitForEvent: true, // block until the transaction event is received
		})
		Expect(err).NotTo(HaveOccurred())
		Eventually(sess, n.EventuallyTimeout).Should(gexec.Exit(0))
		Expect(sess.Err).To(gbytes.Say("Chaincode invoke successful. result: status:200"))
	}
}
// startPeers launches each peer in peersToStart and records its process and
// runner in the supplied maps, keyed by peer ID. All peers run with
// gossip.state debug logging; when forceStateTransfer is true the gossip
// state-check interval is also lowered so state transfer triggers quickly.
func startPeers(network *nwo.Network, peersToStart []*nwo.Peer, peerProc map[string]ifrit.Process, peerRun map[string]*ginkgomon.Runner, forceStateTransfer bool) {
	env := []string{fmt.Sprint("FABRIC_LOGGING_SPEC=info:gossip.state=debug")}

	// Setting CORE_PEER_GOSSIP_STATE_CHECKINTERVAL to 200ms (from default of 10s) will ensure that state transfer happens quickly,
	// before blocks are gossipped through normal mechanisms
	if forceStateTransfer {
		env = append(env, fmt.Sprint("CORE_PEER_GOSSIP_STATE_CHECKINTERVAL=200ms"))
	}

	for _, peer := range peersToStart {
		runner := network.PeerRunner(peer, env...)
		peerProc[peer.ID()] = ifrit.Invoke(runner)
		peerRun[peer.ID()] = runner
	}
}
// stopPeers sends SIGTERM to each listed peer's process, waits for it to
// exit within the network's Eventually timeout, and removes the entry from
// the process map so the peer can be restarted later.
func stopPeers(network *nwo.Network, peersToStop []*nwo.Peer, peerProcesses map[string]ifrit.Process) {
	for _, p := range peersToStop {
		key := p.ID()
		process := peerProcesses[key]
		process.Signal(syscall.SIGTERM)
		Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive())
		delete(peerProcesses, key)
	}
}
// assertPeersLedgerHeight blocks until every peer in peersToSyncUp reports a
// ledger height equal to expectedVal on the given channel, failing the spec
// if any peer has not caught up within the network's Eventually timeout.
func assertPeersLedgerHeight(n *nwo.Network, orderer *nwo.Orderer, peersToSyncUp []*nwo.Peer, expectedVal int, channelID string) {
	for _, p := range peersToSyncUp {
		peer := p // capture per-iteration value for the closure
		height := func() int {
			return nwo.GetLedgerHeight(n, peer, channelID)
		}
		Eventually(height, n.EventuallyTimeout).Should(Equal(expectedVal))
	}
}
// sendTransactionsAndSyncUpPeers sends transactions through basePeer, stops the
// ordering service, then starts the peers in peersToSyncUp and verifies they
// reach basePeer's ledger height — with the orderer down, the blocks can only
// have arrived via gossip state transfer. Finally the orderer is restarted.
// NOTE(review): the local reassignments of orderer and ordererRunner at the end
// do not escape this function; only *ordererProcess is visible to the caller —
// confirm callers do not expect the new runner back.
func sendTransactionsAndSyncUpPeers(network *nwo.Network, orderer *nwo.Orderer, basePeer *nwo.Peer, peersToSyncUp []*nwo.Peer, channelName string,
	ordererProcess *ifrit.Process, ordererRunner *ginkgomon.Runner,
	peerProcesses map[string]ifrit.Process, peerRunners map[string]*ginkgomon.Runner) {
	By("create transactions")
	runTransactions(network, orderer, basePeer, "mycc", channelName)
	basePeerLedgerHeight := nwo.GetLedgerHeight(network, basePeer, channelName)

	By("stop orderer")
	(*ordererProcess).Signal(syscall.SIGTERM)
	Eventually((*ordererProcess).Wait(), network.EventuallyTimeout).Should(Receive())
	*ordererProcess = nil

	By("start the peers contained in the peersToSyncUp list")
	startPeers(network, peersToSyncUp, peerProcesses, peerRunners, true)

	By("ensure the peers are synced up")
	assertPeersLedgerHeight(network, orderer, peersToSyncUp, basePeerLedgerHeight, channelName)

	By("restart orderer")
	orderer = network.Orderer("orderer")
	ordererRunner = network.OrdererRunner(orderer)
	*ordererProcess = ifrit.Invoke(ordererRunner)
}
|
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"strings"
"testing"
"time"
"go.etcd.io/etcd/v3/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/v3/etcdserver/etcdserverpb"
"go.etcd.io/etcd/v3/pkg/testutil"
)
// TestMoveLeader exercises leadership transfer via the server's own
// TransferLeadership API; TestMoveLeaderService drives the same scenario
// through the gRPC Maintenance.MoveLeader endpoint instead.
func TestMoveLeader(t *testing.T)        { testMoveLeader(t, true) }
func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }

// testMoveLeader transfers leadership in a 3-member cluster (auto == true:
// server API picks the transferee; auto == false: MoveLeader RPC with an
// explicit target) and asserts both followers observe the same new leader,
// distinct from the old one — and equal to the requested target for the RPC.
func testMoveLeader(t *testing.T, auto bool) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLeadIdx := clus.WaitLeader(t)
	oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())

	// ensure followers go through leader transition during leadership transfer
	idc := make(chan uint64)
	for i := range clus.Members {
		if oldLeadIdx != i {
			go func(m *member) {
				idc <- checkLeaderTransition(m, oldLeadID)
			}(clus.Members[i])
		}
	}

	// target the member immediately after the old leader (mod cluster size)
	target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID())
	if auto {
		err := clus.Members[oldLeadIdx].s.TransferLeadership()
		if err != nil {
			t.Fatal(err)
		}
	} else {
		mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance
		_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
		if err != nil {
			t.Fatal(err)
		}
	}

	// wait until leader transitions have happened
	var newLeadIDs [2]uint64
	for i := range newLeadIDs {
		select {
		case newLeadIDs[i] = <-idc:
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for leader transition")
		}
	}

	// remaining members must agree on the same leader
	if newLeadIDs[0] != newLeadIDs[1] {
		t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1])
	}

	// new leader must be different than the old leader
	if oldLeadID == newLeadIDs[0] {
		t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0])
	}

	// if move-leader were used, new leader must match transferee
	if !auto {
		if newLeadIDs[0] != target {
			t.Fatalf("expected new leader %d != target %d", newLeadIDs[0], target)
		}
	}
}
// TestMoveLeaderError ensures that a MoveLeader request sent to a non-leader
// member fails with ErrGRPCNotLeader.
func TestMoveLeaderError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLeadIdx := clus.WaitLeader(t)
	followerIdx := (oldLeadIdx + 1) % 3 // any member that is not the leader
	target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID())

	mvc := toGRPC(clus.Client(followerIdx)).Maintenance
	_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
	if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader)
	}
}
// TestMoveLeaderToLearnerError ensures that leader transfer to a learner
// member fails with a "bad leader transferee" error.
func TestMoveLeaderToLearnerError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// we have to add and launch learner member after initial cluster was created, because
	// bootstrapping a cluster with learner member is not supported.
	clus.AddAndLaunchLearnerMember(t)

	learners, err := clus.GetLearnerMembers()
	if err != nil {
		t.Fatalf("failed to get the learner members in cluster: %v", err)
	}
	if len(learners) != 1 {
		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
	}

	learnerID := learners[0].ID
	leaderIdx := clus.WaitLeader(t)
	cli := clus.Client(leaderIdx)
	_, err = cli.MoveLeader(context.Background(), uint64(learnerID))
	if err == nil {
		t.Fatalf("expecting leader transfer to learner to fail, got no error")
	}
	expectedErrKeywords := "bad leader transferee"
	if !strings.Contains(err.Error(), expectedErrKeywords) {
		t.Errorf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error())
	}
}
// TestTransferLeadershipWithLearner ensures TransferLeadership does not time
// out when a learner is the only other member — the leader must skip the
// learner rather than pick it as transferee.
func TestTransferLeadershipWithLearner(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	clus.AddAndLaunchLearnerMember(t)

	learners, err := clus.GetLearnerMembers()
	if err != nil {
		t.Fatalf("failed to get the learner members in cluster: %v", err)
	}
	if len(learners) != 1 {
		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
	}

	leaderIdx := clus.WaitLeader(t)
	errCh := make(chan error, 1)
	go func() {
		// note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil.
		// Leadership transfer is skipped in cluster with 1 voting member.
		errCh <- clus.Members[leaderIdx].s.TransferLeadership()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			t.Errorf("got error during leadership transfer: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Error("timed out waiting for leader transition")
	}
}
integration: remove unnecessary type conversion
Fixes go 'unconvert' test.
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"strings"
"testing"
"time"
"go.etcd.io/etcd/v3/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/v3/etcdserver/etcdserverpb"
"go.etcd.io/etcd/v3/pkg/testutil"
)
// TestMoveLeader exercises leadership transfer via the server's own
// TransferLeadership API; TestMoveLeaderService drives the same scenario
// through the gRPC Maintenance.MoveLeader endpoint instead.
func TestMoveLeader(t *testing.T)        { testMoveLeader(t, true) }
func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }

// testMoveLeader transfers leadership in a 3-member cluster (auto == true:
// server API picks the transferee; auto == false: MoveLeader RPC with an
// explicit target) and asserts both followers observe the same new leader,
// distinct from the old one — and equal to the requested target for the RPC.
func testMoveLeader(t *testing.T, auto bool) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLeadIdx := clus.WaitLeader(t)
	oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())

	// ensure followers go through leader transition during leadership transfer
	idc := make(chan uint64)
	for i := range clus.Members {
		if oldLeadIdx != i {
			go func(m *member) {
				idc <- checkLeaderTransition(m, oldLeadID)
			}(clus.Members[i])
		}
	}

	// target the member immediately after the old leader (mod cluster size)
	target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID())
	if auto {
		err := clus.Members[oldLeadIdx].s.TransferLeadership()
		if err != nil {
			t.Fatal(err)
		}
	} else {
		mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance
		_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
		if err != nil {
			t.Fatal(err)
		}
	}

	// wait until leader transitions have happened
	var newLeadIDs [2]uint64
	for i := range newLeadIDs {
		select {
		case newLeadIDs[i] = <-idc:
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for leader transition")
		}
	}

	// remaining members must agree on the same leader
	if newLeadIDs[0] != newLeadIDs[1] {
		t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1])
	}

	// new leader must be different than the old leader
	if oldLeadID == newLeadIDs[0] {
		t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0])
	}

	// if move-leader were used, new leader must match transferee
	if !auto {
		if newLeadIDs[0] != target {
			t.Fatalf("expected new leader %d != target %d", newLeadIDs[0], target)
		}
	}
}
// TestMoveLeaderError ensures that a MoveLeader request sent to a non-leader
// member fails with ErrGRPCNotLeader.
func TestMoveLeaderError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLeadIdx := clus.WaitLeader(t)
	followerIdx := (oldLeadIdx + 1) % 3 // any member that is not the leader
	target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID())

	mvc := toGRPC(clus.Client(followerIdx)).Maintenance
	_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
	if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader)
	}
}
// TestMoveLeaderToLearnerError ensures that leader transfer to a learner
// member fails with a "bad leader transferee" error.
func TestMoveLeaderToLearnerError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// we have to add and launch learner member after initial cluster was created, because
	// bootstrapping a cluster with learner member is not supported.
	clus.AddAndLaunchLearnerMember(t)

	learners, err := clus.GetLearnerMembers()
	if err != nil {
		t.Fatalf("failed to get the learner members in cluster: %v", err)
	}
	if len(learners) != 1 {
		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
	}

	learnerID := learners[0].ID
	leaderIdx := clus.WaitLeader(t)
	cli := clus.Client(leaderIdx)
	_, err = cli.MoveLeader(context.Background(), learnerID)
	if err == nil {
		t.Fatalf("expecting leader transfer to learner to fail, got no error")
	}
	expectedErrKeywords := "bad leader transferee"
	if !strings.Contains(err.Error(), expectedErrKeywords) {
		t.Errorf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error())
	}
}
// TestTransferLeadershipWithLearner ensures TransferLeadership does not time
// out when a learner is the only other member — the leader must skip the
// learner rather than pick it as transferee.
func TestTransferLeadershipWithLearner(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	clus.AddAndLaunchLearnerMember(t)

	learners, err := clus.GetLearnerMembers()
	if err != nil {
		t.Fatalf("failed to get the learner members in cluster: %v", err)
	}
	if len(learners) != 1 {
		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
	}

	leaderIdx := clus.WaitLeader(t)
	errCh := make(chan error, 1)
	go func() {
		// note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil.
		// Leadership transfer is skipped in cluster with 1 voting member.
		errCh <- clus.Members[leaderIdx].s.TransferLeadership()
	}()
	select {
	case err := <-errCh:
		if err != nil {
			t.Errorf("got error during leadership transfer: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Error("timed out waiting for leader transition")
	}
}
|
package main
import (
"database/sql"
"encoding/json"
"flag"
_ "github.com/lib/pq"
"io/ioutil"
"log"
"log/syslog"
"os"
"strings"
)
// Config is the top-level shape of the fits-loader JSON config file.
type Config struct {
	DataBase DataBase
}

// DataBase holds PostgreSQL connection settings and connection-pool limits.
type DataBase struct {
	Host, User, Password, SSLMode string
	MaxOpenConns, MaxIdleConns    int
}
var (
	// config is populated at package init; initConfig also parses the flags
	// below, so flag values are available before main() runs.
	config = initConfig()
	db     *sql.DB

	// flag-bound options (registered in initConfig)
	dataDir     string
	slog        bool
	configFile  string
	dryRun      bool
	deleteFirst bool
)
// initConfig registers and parses the command-line flags, optionally switches
// logging to syslog, and loads the JSON config file. It is invoked from a
// package-level var initializer, so flag.Parse() runs before main(); a missing
// or unparsable config file is fatal.
func initConfig() Config {
	flag.StringVar(&dataDir, "data-dir", "", "path to directory of observation and source files.")
	flag.StringVar(&configFile, "config-file", "fits-loader.json", "optional file to load the config from.")
	flag.BoolVar(&slog, "syslog", false, "output log messages to syslog instead of stdout.")
	flag.BoolVar(&deleteFirst, "delete-first", false, "sync the FITS DB data with the information in each observation file.")
	flag.BoolVar(&dryRun, "dry-run", false, "data is parsed and validated but not loaded to the DB. A DB connection is needed for validation.")
	flag.Parse()

	if slog {
		logwriter, err := syslog.New(syslog.LOG_NOTICE, "fits-loader")
		if err == nil {
			log.Println("** logging to syslog **")
			log.SetOutput(logwriter)
		} else {
			// Non-fatal: fall back to stdout logging.
			log.Println("problem switching to syslog. Contiuning.")
			log.Println(err)
		}
	}

	f, err := ioutil.ReadFile(configFile)
	if err != nil {
		log.Printf("ERROR - problem loading %s - can't find any config.", configFile)
		log.Fatal(err)
	}

	var c Config
	err = json.Unmarshal(f, &c)
	if err != nil {
		log.Println("Problem parsing config file.")
		log.Fatal(err)
	}

	return c
}
// main scans dataDir for non-empty .csv observation files (each must have a
// matching .json source file), validates each pair, and — unless -dry-run is
// set — loads site and observation data into the FITS DB. Any error is fatal.
func main() {
	if dataDir == "" {
		log.Fatal("please specify the data directory")
	}

	if err := config.initDB(); err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	log.Printf("searching for observation and source data in %s", dataDir)
	files, err := ioutil.ReadDir(dataDir)
	if err != nil {
		log.Fatal(err)
	}

	var proc []data
	for _, f := range files {
		// Only non-empty CSV files are treated as observation files.
		if !f.IsDir() && strings.HasSuffix(f.Name(), `.csv`) && f.Size() > 0 {
			meta := f.Name()
			meta = strings.TrimSuffix(meta, `.csv`) + `.json`
			if _, err := os.Stat(dataDir + "/" + meta); os.IsNotExist(err) {
				log.Fatalf("found no json source file for %s", f.Name())
			}
			proc = append(proc, data{
				sourceFile:      dataDir + "/" + meta,
				observationFile: dataDir + "/" + f.Name(),
			})
		}
	}

	log.Printf("found %d observation files to process", len(proc))
	for _, d := range proc {
		log.Printf("reading and validating %s", d.observationFile)
		if err := d.parseAndValidate(); err != nil {
			log.Fatal(err)
		}

		if !dryRun {
			log.Printf("saving site information from %s", d.sourceFile)
			if err := d.saveSite(); err != nil {
				log.Fatal(err)
			}

			log.Printf("saving observations from %s", d.observationFile)
			if !deleteFirst {
				if err := d.updateOrAdd(); err != nil {
					log.Fatal(err)
				}
			} else {
				// -delete-first: remove existing rows before re-inserting.
				if err := d.deleteThenSave(); err != nil {
					log.Fatal(err)
				}
			}
		}
	}
}
// initDB starts the DB connection pool. Defer a db.Close() after calling this.
// It pings the DB and prepares the data/source statements, so a working DB
// connection is required even for -dry-run validation.
func (c *Config) initDB() (err error) {
	// NOTE(review): connect_timeout appears twice in the DSN (1, then 30);
	// in libpq-style connection strings the later keyword wins — confirm
	// which value was intended and drop the other.
	db, err = sql.Open("postgres", "connect_timeout=1 user="+c.DataBase.User+
		" password="+c.DataBase.Password+
		" host="+c.DataBase.Host+
		" connect_timeout=30"+
		" dbname=fits"+
		" sslmode="+c.DataBase.SSLMode)
	if err != nil {
		return err
	}

	db.SetMaxIdleConns(c.DataBase.MaxIdleConns)
	db.SetMaxOpenConns(c.DataBase.MaxOpenConns)

	// sql.Open does not actually connect; Ping verifies reachability.
	if err := db.Ping(); err != nil {
		return err
	}

	if err := initData(); err != nil {
		return err
	}

	if err := initSource(); err != nil {
		return err
	}

	return err
}
Version 0.1
package main
import (
"database/sql"
"encoding/json"
"flag"
"fmt"
_ "github.com/lib/pq"
"io/ioutil"
"log"
"log/syslog"
"os"
"strings"
)
// Config is the top-level shape of the fits-loader JSON config file.
type Config struct {
	DataBase DataBase
}

// DataBase holds PostgreSQL connection settings and connection-pool limits.
type DataBase struct {
	Host, User, Password, SSLMode string
	MaxOpenConns, MaxIdleConns    int
}

// vers is the fits-loader version reported by the -version flag.
const vers = "0.1"
var (
	// config is populated at package init; initConfig also parses the flags
	// below (and may os.Exit for -version) before main() runs.
	config = initConfig()
	db     *sql.DB

	// flag-bound options (registered in initConfig)
	dataDir    string
	configFile string

	dryRun, deleteFirst, slog, version bool
)
// initConfig registers and parses the command-line flags, handles -version,
// optionally switches logging to syslog, and loads the JSON config file.
// It is invoked from a package-level var initializer, so flag.Parse() runs
// before main(); a missing or unparsable config file is fatal.
func initConfig() Config {
	flag.StringVar(&dataDir, "data-dir", "", "path to directory of observation and source files.")
	flag.StringVar(&configFile, "config-file", "fits-loader.json", "optional file to load the config from.")
	flag.BoolVar(&slog, "syslog", false, "output log messages to syslog instead of stdout.")
	flag.BoolVar(&deleteFirst, "delete-first", false, "sync the FITS DB data with the information in each observation file.")
	flag.BoolVar(&dryRun, "dry-run", false, "data is parsed and validated but not loaded to the DB. A DB connection is needed for validation.")
	flag.BoolVar(&version, "version", false, "prints the version and exits.")
	flag.Parse()

	if version {
		fmt.Printf("fits-loader version %s\n", vers)
		// NOTE(review): exits with status 1 after printing the version;
		// exit status 0 is conventional for -version — confirm intent.
		os.Exit(1)
	}

	if slog {
		logwriter, err := syslog.New(syslog.LOG_NOTICE, "fits-loader")
		if err == nil {
			log.Println("** logging to syslog **")
			log.SetOutput(logwriter)
		} else {
			// Non-fatal: fall back to stdout logging.
			log.Println("problem switching to syslog. Contiuning.")
			log.Println(err)
		}
	}

	f, err := ioutil.ReadFile(configFile)
	if err != nil {
		log.Printf("ERROR - problem loading %s - can't find any config.", configFile)
		log.Fatal(err)
	}

	var c Config
	err = json.Unmarshal(f, &c)
	if err != nil {
		log.Println("Problem parsing config file.")
		log.Fatal(err)
	}

	return c
}
// main scans dataDir for non-empty .csv observation files (each must have a
// matching .json source file), validates each pair, and — unless -dry-run is
// set — loads site and observation data into the FITS DB. Any error is fatal.
func main() {
	if dataDir == "" {
		log.Fatal("please specify the data directory")
	}

	if err := config.initDB(); err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	log.Printf("searching for observation and source data in %s", dataDir)
	files, err := ioutil.ReadDir(dataDir)
	if err != nil {
		log.Fatal(err)
	}

	var proc []data
	for _, f := range files {
		// Only non-empty CSV files are treated as observation files.
		if !f.IsDir() && strings.HasSuffix(f.Name(), `.csv`) && f.Size() > 0 {
			meta := f.Name()
			meta = strings.TrimSuffix(meta, `.csv`) + `.json`
			if _, err := os.Stat(dataDir + "/" + meta); os.IsNotExist(err) {
				log.Fatalf("found no json source file for %s", f.Name())
			}
			proc = append(proc, data{
				sourceFile:      dataDir + "/" + meta,
				observationFile: dataDir + "/" + f.Name(),
			})
		}
	}

	log.Printf("found %d observation files to process", len(proc))
	for _, d := range proc {
		log.Printf("reading and validating %s", d.observationFile)
		if err := d.parseAndValidate(); err != nil {
			log.Fatal(err)
		}

		if !dryRun {
			log.Printf("saving site information from %s", d.sourceFile)
			if err := d.saveSite(); err != nil {
				log.Fatal(err)
			}

			log.Printf("saving observations from %s", d.observationFile)
			if !deleteFirst {
				if err := d.updateOrAdd(); err != nil {
					log.Fatal(err)
				}
			} else {
				// -delete-first: remove existing rows before re-inserting.
				if err := d.deleteThenSave(); err != nil {
					log.Fatal(err)
				}
			}
		}
	}
}
// initDB starts the DB connection pool. Defer a db.Close() after calling this.
// It pings the DB and prepares the data/source statements, so a working DB
// connection is required even for -dry-run validation.
func (c *Config) initDB() (err error) {
	// NOTE(review): connect_timeout appears twice in the DSN (1, then 30);
	// in libpq-style connection strings the later keyword wins — confirm
	// which value was intended and drop the other.
	db, err = sql.Open("postgres", "connect_timeout=1 user="+c.DataBase.User+
		" password="+c.DataBase.Password+
		" host="+c.DataBase.Host+
		" connect_timeout=30"+
		" dbname=fits"+
		" sslmode="+c.DataBase.SSLMode)
	if err != nil {
		return err
	}

	db.SetMaxIdleConns(c.DataBase.MaxIdleConns)
	db.SetMaxOpenConns(c.DataBase.MaxOpenConns)

	// sql.Open does not actually connect; Ping verifies reachability.
	if err := db.Ping(); err != nil {
		return err
	}

	if err := initData(); err != nil {
		return err
	}

	if err := initSource(); err != nil {
		return err
	}

	return err
}
|
package main
import (
"strconv"
"strings"
"time"
"github.com/dghubble/go-twitter/twitter"
"github.com/dghubble/oauth1"
)
var (
	// twitterBot is the process-wide bot instance (constructed elsewhere).
	twitterBot *TwitterBot
)

// TwitterBot bundles an authenticated twitter client with the bot's own
// account ID, the directory used for downloaded media, and the map of
// followed screen names to their numeric user-ID strings.
type TwitterBot struct {
	ID      string
	ImgPath string
	Client  *twitter.Client
	Follows map[string]string
}
// NewTwitterBot builds a TwitterBot from OAuth1 credentials in cfg and
// installs the hard-coded follow list (screen name -> numeric user-ID string)
// used by the streaming handlers to route tweets.
func NewTwitterBot(cfg *TwitterConfig) *TwitterBot {
	config := oauth1.NewConfig(cfg.ConsumerKey, cfg.ConsumerSecret)
	token := oauth1.NewToken(cfg.AccessToken, cfg.AccessSecret)
	// OAuth1 http.Client will automatically authorize Requests.
	httpClient := config.Client(oauth1.NoContext, token)
	client := twitter.NewClient(httpClient)
	bot := &TwitterBot{
		ID:      cfg.SelfID,
		ImgPath: cfg.ImgPath,
		Client:  client,
		Follows: map[string]string{
			"KanColle_STAFF": "294025417",
			"komatan":        "96604067",
			"maesanpicture":  "2381595966",
			"Strangestone":   "93332575",
			// "kazuharukina":   "28787294",
			// "sinoalice_jp":   "818752826025181184",
			"imascg_stage": "3220191374",
		},
	}
	return bot
}
// hasHashTags reports whether any hashtag entity's text is exactly s.
func hasHashTags(s string, tags []twitter.HashtagEntity) bool {
	for i := range tags {
		if tags[i].Text == s {
			return true
		}
	}
	return false
}
// getMedias returns the media entities attached to a tweet, preferring the
// extended-tweet payload (and its extended entities) when present, and
// falling back to the tweet's own extended or plain entities otherwise.
func getMedias(tweet *twitter.Tweet) []twitter.MediaEntity {
	if ext := tweet.ExtendedTweet; ext != nil {
		if ext.ExtendedEntities != nil {
			return ext.ExtendedEntities.Media
		}
		return ext.Entities.Media
	}
	if tweet.ExtendedEntities != nil {
		return tweet.ExtendedEntities.Media
	}
	return tweet.Entities.Media
}
// sendPics forwards every photo media entity to the QQ group asynchronously;
// non-photo media types are logged and skipped.
func sendPics(medias []twitter.MediaEntity) {
	for _, media := range medias {
		switch media.Type {
		case "photo":
			go qqBot.SendPics(qqBot.SendGroupMsg, media.MediaURLHttps)
		default:
			logger.Notice("media type ignored:", media.Type)
		}
	}
}

// logAllTrack logs every streamed message at debug level (diagnostic hook).
func logAllTrack(msg interface{}) {
	logger.Debug(msg)
}
// getTweetTime renders the tweet's creation time in the given IANA time zone.
// If the timestamp cannot be parsed or the zone cannot be loaded, it falls
// back to the raw CreatedAt string from the API.
func getTweetTime(zone string, tweet *twitter.Tweet) string {
	created, err := tweet.CreatedAtTime()
	if err != nil {
		return tweet.CreatedAt
	}
	loc, err := time.LoadLocation(zone)
	if err != nil {
		return tweet.CreatedAt
	}
	return created.In(loc).String()
}
// checkSendKancolle relays a KanColle staff tweet to the QQ group unless a
// cooperating bot has already sent it: it waits 5s, then probes a redis key
// derived from the tweet's creation time to detect the other bot.
// NOTE(review): Expire only reports whether the key already exists — this code
// never SETs the key itself; presumably the other bot writes it. Confirm the
// key is actually created somewhere, otherwise the dedupe never triggers.
func checkSendKancolle(tweet *twitter.Tweet, msg string) {
	// sleep 5s to wait for other bot
	time.Sleep(5 * time.Second)
	ct, err := tweet.CreatedAtTime()
	if err != nil {
		logger.Error(err)
		return
	}
	// Key is unique per tweet (unix seconds of creation time).
	key := "kancolle_" + strconv.FormatInt(ct.Unix(), 10)
	exists, err := redisClient.Expire(key, 5*time.Second).Result()
	if err != nil {
		logger.Error(err)
		return
	}
	if exists {
		logger.Notice("other bot has sent")
		return
	}
	t := getTweetTime("Asia/Tokyo", tweet)
	qqBot.SendGroupMsg(tweet.User.Name + "\n" + t + "\n\n" + msg)
}
// trackTweet dispatches a streamed tweet from one of the followed accounts:
// retweets are ignored, truncated tweets are expanded from ExtendedTweet,
// then per-account rules decide what gets relayed to the QQ group.
func (t *TwitterBot) trackTweet(tweet *twitter.Tweet) {
	if tweet.RetweetedStatus != nil {
		// logger.Debugf("ignore retweet (%s):{%s}", tweet.User.Name, tweet.Text)
		return
	}
	msg := tweet.Text
	medias := getMedias(tweet)
	if tweet.Truncated {
		// Prefer the full, untruncated text when the API provides it.
		if tweet.ExtendedTweet != nil {
			msg = tweet.ExtendedTweet.FullText
		}
		// logger.Debugf("no ExtendedTweet: %+v", tweet)
	}
	// Quoted form keeps log lines single-line even for multi-line tweets.
	flattenedText := strconv.Quote(msg)
	switch tweet.User.IDStr {
	case t.Follows["KanColle_STAFF"]:
		// Pics immediately; text goes through the duplicate-send guard.
		logger.Infof("(%s):{%s} %d medias", tweet.User.Name, flattenedText, len(medias))
		sendPics(medias)
		go checkSendKancolle(tweet, msg)
	case t.Follows["imascg_stage"]:
		// Always relay with a Tokyo-local timestamp header.
		logger.Infof("(%s):{%s} %d medias", tweet.User.Name, flattenedText, len(medias))
		t := getTweetTime("Asia/Tokyo", tweet)
		qqBot.SendGroupMsg(tweet.User.Name + "\n" + t + "\n\n" + msg)
		sendPics(medias)
	case t.Follows["komatan"]:
		// Media-only account: skip text-only tweets.
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		sendPics(medias)
	case t.Follows["maesanpicture"]:
		// Only relay the daily "毎日五月雨" hashtag posts, and only with media.
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		if hasHashTags("毎日五月雨", tweet.Entities.Hashtags) {
			qqBot.SendGroupMsg(msg)
			sendPics(medias)
		}
	case t.Follows["Strangestone"]:
		// Only relay the "月曜日のたわわ" series, and only with media.
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		if strings.HasPrefix(msg, "月曜日のたわわ") {
			qqBot.SendGroupMsg(msg)
			sendPics(medias)
		}
	default:
		// logger.Debugf("(%s):{%s}", tweet.User.Name, flattenedText)
	}
}
// selfProceedMedias downloads (action == 1) or removes (action == -1) each
// photo/video media entity; downloads are forwarded to the bot's own Telegram
// chat. Other media types are logged and skipped.
func (t *TwitterBot) selfProceedMedias(medias []twitter.MediaEntity, action int) {
	for _, media := range medias {
		switch media.Type {
		case "photo":
			switch action {
			case 1:
				file, err := downloadFile(media.MediaURLHttps, t.ImgPath)
				if err != nil {
					// download failure: skip this media, keep processing the rest
					continue
				}
				telegramBot.sendPhoto(telegramBot.SelfChatID, file)
			case -1:
				removeFile(media.MediaURLHttps, t.ImgPath)
			}
		case "video":
			// Pick the first "video/mp4" variant found scanning from the END
			// of the variants list.
			// NOTE(review): which bitrate this selects depends on the API's
			// variant ordering — confirm the intended quality choice.
			var url string
			vs := media.VideoInfo.Variants
			vsLen := len(vs)
			for i := range vs {
				if vs[vsLen-i-1].ContentType == "video/mp4" {
					url = vs[vsLen-i-1].URL
					break
				}
			}
			switch action {
			case 1:
				file, err := downloadFile(url, t.ImgPath)
				if err != nil {
					continue
				}
				telegramBot.sendVideo(telegramBot.SelfChatID, file)
			case -1:
				removeFile(url, t.ImgPath)
			}
		default:
			logger.Notice("media type ignored:", media.Type)
		}
	}
}
// selfEvent reacts to stream events issued by the bot's own account:
// favoriting a tweet downloads its media, unfavoriting removes them.
// Events from other accounts are only logged at debug level.
func (t *TwitterBot) selfEvent(event *twitter.Event) {
	if event.Source.IDStr != t.ID {
		logger.Debugf("%s: (%s)", event.Event, event.Source.Name)
		return
	}
	target := event.TargetObject
	switch event.Event {
	case "favorite":
		list := getMedias(target)
		logger.Infof("favorite: (%s):{%s} %d medias", target.User.Name, strconv.Quote(target.Text), len(list))
		go t.selfProceedMedias(list, 1)
	case "unfavorite":
		list := getMedias(target)
		logger.Debugf("unfavorite: (%s):{%s} %d medias", target.User.Name, strconv.Quote(target.Text), len(list))
		go t.selfProceedMedias(list, -1)
	default:
		logger.Debug(event.Event)
	}
}
// selfTweet forwards the bot's own tweets tagged with the configured QQ
// group hashtag; for quote-tweets the quoted status' text and media are
// used instead of the wrapper tweet's.
func (t *TwitterBot) selfTweet(tweet *twitter.Tweet) {
	group := qqBot.Config.GroupName
	if group == "" || !hasHashTags(group, tweet.Entities.Hashtags) {
		return
	}
	source := tweet
	if tweet.QuotedStatus != nil {
		source = tweet.QuotedStatus
	}
	logger.Infof("(%s):{%s}", group, strconv.Quote(source.Text))
	sendPics(getMedias(source))
}
// Track ...
//
// Track opens a Twitter streaming filter over all followed account IDs and
// dispatches incoming tweets to trackTweet, reconnecting forever with a
// linearly growing backoff on stream errors.
func (t *TwitterBot) Track() {
	follows := []string{}
	for _, value := range t.Follows {
		follows = append(follows, value)
	}
	for i := 1; ; i++ {
		demux := twitter.NewSwitchDemux()
		demux.Tweet = t.trackTweet
		filterParams := &twitter.StreamFilterParams{
			Follow: follows,
		}
		stream, err := t.Client.Streams.Filter(filterParams)
		if err != nil {
			logger.Error(err)
			time.Sleep(time.Duration(i) * time.Second)
			// stream is nil on error; retry instead of dereferencing it
			continue
		}
		demux.HandleChan(stream.Messages)
	}
}
// Self ...
//
// Self listens on the authenticated user's own stream, dispatching events
// to selfEvent and tweets to selfTweet, reconnecting forever with a
// linearly growing backoff on stream errors.
func (t *TwitterBot) Self() {
	for i := 1; ; i++ {
		demux := twitter.NewSwitchDemux()
		demux.Event = t.selfEvent
		demux.Tweet = t.selfTweet
		userParams := &twitter.StreamUserParams{
			With: t.ID,
		}
		stream, err := t.Client.Streams.User(userParams)
		if err != nil {
			logger.Error(err)
			time.Sleep(time.Duration(i) * time.Second)
			// stream is nil on error; retry instead of dereferencing it
			continue
		}
		demux.HandleChan(stream.Messages)
	}
}
Fix handling of the animated_gif media type.
package main
import (
"strconv"
"strings"
"time"
"github.com/dghubble/go-twitter/twitter"
"github.com/dghubble/oauth1"
)
var (
	// twitterBot is the package-level bot instance; its assignment is not
	// visible in this chunk.
	twitterBot *TwitterBot
)

// TwitterBot ...
//
// TwitterBot bundles the streaming API client with the bot's own identity
// and the set of accounts it follows.
type TwitterBot struct {
	ID      string // the bot's own user ID, used to filter self-stream events
	ImgPath string // local directory for downloaded media files
	Client  *twitter.Client
	Follows map[string]string // screen name -> numeric user ID string
}
// NewTwitterBot ...
//
// NewTwitterBot builds an OAuth1-authenticated Twitter client from cfg and
// returns a bot wired with a hard-coded map of followed accounts
// (screen name -> numeric user ID string, as matched against IDStr).
func NewTwitterBot(cfg *TwitterConfig) *TwitterBot {
	config := oauth1.NewConfig(cfg.ConsumerKey, cfg.ConsumerSecret)
	token := oauth1.NewToken(cfg.AccessToken, cfg.AccessSecret)
	httpClient := config.Client(oauth1.NoContext, token)
	client := twitter.NewClient(httpClient)
	bot := &TwitterBot{
		ID:      cfg.SelfID,
		ImgPath: cfg.ImgPath,
		Client:  client,
		Follows: map[string]string{
			"KanColle_STAFF": "294025417",
			"komatan":        "96604067",
			"maesanpicture":  "2381595966",
			"Strangestone":   "93332575",
			// "kazuharukina": "28787294",
			// "sinoalice_jp": "818752826025181184",
			"imascg_stage": "3220191374",
		},
	}
	return bot
}
// hasHashTags reports whether s appears verbatim among the tweet's
// hashtag entities.
func hasHashTags(s string, tags []twitter.HashtagEntity) bool {
	for i := range tags {
		if tags[i].Text == s {
			return true
		}
	}
	return false
}
// getMedias returns the richest media list available on a tweet: the
// extended tweet's extended entities first, then its plain entities, then
// the top-level extended entities, and finally the top-level entities.
func getMedias(tweet *twitter.Tweet) []twitter.MediaEntity {
	if ext := tweet.ExtendedTweet; ext != nil {
		if ext.ExtendedEntities != nil {
			return ext.ExtendedEntities.Media
		}
		return ext.Entities.Media
	}
	if ee := tweet.ExtendedEntities; ee != nil {
		return ee.Media
	}
	return tweet.Entities.Media
}
// sendPics forwards every photo entity to the QQ group asynchronously;
// all other media types are only logged.
func sendPics(medias []twitter.MediaEntity) {
	for i := range medias {
		media := medias[i]
		if media.Type == "photo" {
			go qqBot.SendPics(qqBot.SendGroupMsg, media.MediaURLHttps)
			continue
		}
		logger.Notice("media type ignored:", media.Type)
	}
}
// logAllTrack dumps any stream message at debug level (diagnostic hook).
func logAllTrack(msg interface{}) { logger.Debug(msg) }
// getTweetTime renders a tweet's creation time in the given IANA time
// zone; on any parse or zone-lookup failure it falls back to the raw
// CreatedAt string from the API.
func getTweetTime(zone string, tweet *twitter.Tweet) string {
	created, err := tweet.CreatedAtTime()
	if err != nil {
		return tweet.CreatedAt
	}
	loc, err := time.LoadLocation(zone)
	if err != nil {
		return tweet.CreatedAt
	}
	return created.In(loc).String()
}
// checkSendKancolle forwards an official KanColle staff tweet to the QQ
// group unless another bot already announced it. Deduplication uses a
// Redis key derived from the tweet's creation timestamp: EXPIRE returns
// true only when the key already exists.
// NOTE(review): this function never creates the key itself — presumably
// the peer bot does; confirm against the other bot's source.
func checkSendKancolle(tweet *twitter.Tweet, msg string) {
	// sleep 5s to wait for other bot
	time.Sleep(5 * time.Second)
	ct, err := tweet.CreatedAtTime()
	if err != nil {
		logger.Error(err)
		return
	}
	// key is unique per tweet (creation second granularity)
	key := "kancolle_" + strconv.FormatInt(ct.Unix(), 10)
	// EXPIRE doubles as an existence check: true => key present => already sent
	exists, err := redisClient.Expire(key, 5*time.Second).Result()
	if err != nil {
		logger.Error(err)
		return
	}
	if exists {
		logger.Notice("other bot has sent")
		return
	}
	t := getTweetTime("Asia/Tokyo", tweet)
	qqBot.SendGroupMsg(tweet.User.Name + "\n" + t + "\n\n" + msg)
}
// trackTweet dispatches a streamed tweet from a followed account.
// Retweets are ignored; truncated tweets are replaced by their extended
// full text. KanColle_STAFF and imascg_stage tweets are relayed with text;
// the art accounts are relayed only when they carry media (and, for some,
// only when a specific hashtag or text prefix matches).
func (t *TwitterBot) trackTweet(tweet *twitter.Tweet) {
	if tweet.RetweetedStatus != nil {
		// logger.Debugf("ignore retweet (%s):{%s}", tweet.User.Name, tweet.Text)
		return
	}
	msg := tweet.Text
	medias := getMedias(tweet)
	if tweet.Truncated {
		if tweet.ExtendedTweet != nil {
			msg = tweet.ExtendedTweet.FullText
		}
		// logger.Debugf("no ExtendedTweet: %+v", tweet)
	}
	// quoted form keeps the log entry on a single line
	flattenedText := strconv.Quote(msg)
	switch tweet.User.IDStr {
	case t.Follows["KanColle_STAFF"]:
		logger.Infof("(%s):{%s} %d medias", tweet.User.Name, flattenedText, len(medias))
		sendPics(medias)
		// text relay is deduplicated against other bots via Redis
		go checkSendKancolle(tweet, msg)
	case t.Follows["imascg_stage"]:
		logger.Infof("(%s):{%s} %d medias", tweet.User.Name, flattenedText, len(medias))
		// shadows the receiver inside this case only
		t := getTweetTime("Asia/Tokyo", tweet)
		qqBot.SendGroupMsg(tweet.User.Name + "\n" + t + "\n\n" + msg)
		sendPics(medias)
	case t.Follows["komatan"]:
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		sendPics(medias)
	case t.Follows["maesanpicture"]:
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		// forward only the daily "毎日五月雨" series
		if hasHashTags("毎日五月雨", tweet.Entities.Hashtags) {
			qqBot.SendGroupMsg(msg)
			sendPics(medias)
		}
	case t.Follows["Strangestone"]:
		if len(medias) == 0 {
			return
		}
		logger.Infof("(%s):{%s}", tweet.User.Name, flattenedText)
		// forward only the "月曜日のたわわ" series
		if strings.HasPrefix(msg, "月曜日のたわわ") {
			qqBot.SendGroupMsg(msg)
			sendPics(medias)
		}
	default:
		// logger.Debugf("(%s):{%s}", tweet.User.Name, flattenedText)
	}
}
// selfProceedMedias downloads (action == 1) or deletes (action == -1) the
// files behind the given media entities, forwarding downloads to the bot
// owner's Telegram chat. Photos go out via sendPhoto, videos and animated
// GIFs (delivered as mp4 variants) via sendVideo.
func (t *TwitterBot) selfProceedMedias(medias []twitter.MediaEntity, action int) {
	for _, media := range medias {
		var url string
		isPhoto := false
		switch media.Type {
		case "photo":
			url = media.MediaURLHttps
			isPhoto = true
		case "video", "animated_gif":
			// scan variants back to front and take the last mp4 one
			vs := media.VideoInfo.Variants
			vsLen := len(vs)
			for i := range vs {
				if vs[vsLen-i-1].ContentType == "video/mp4" {
					url = vs[vsLen-i-1].URL
					break
				}
			}
			if url == "" {
				// no mp4 variant: nothing sensible to download or remove
				logger.Notice("no video/mp4 variant found for:", media.Type)
				continue
			}
		default:
			logger.Notice("media type ignored:", media.Type)
			continue
		}
		switch action {
		case 1:
			file, err := downloadFile(url, t.ImgPath)
			if err != nil {
				continue
			}
			// photos must be sent as photos, not videos (regression in the
			// animated_gif refactor)
			if isPhoto {
				telegramBot.sendPhoto(telegramBot.SelfChatID, file)
			} else {
				telegramBot.sendVideo(telegramBot.SelfChatID, file)
			}
		case -1:
			removeFile(url, t.ImgPath)
		}
	}
}
// selfEvent reacts to stream events issued by the bot's own account:
// favoriting a tweet downloads its media (action 1), unfavoriting removes
// them (action -1). Events from other accounts are only logged.
func (t *TwitterBot) selfEvent(event *twitter.Event) {
	if event.Source.IDStr != t.ID {
		logger.Debugf("%s: (%s)", event.Event, event.Source.Name)
		return
	}
	switch event.Event {
	case "favorite":
		medias := getMedias(event.TargetObject)
		logger.Infof("favorite: (%s):{%s} %d medias", event.TargetObject.User.Name, strconv.Quote(event.TargetObject.Text), len(medias))
		go t.selfProceedMedias(medias, 1)
	case "unfavorite":
		medias := getMedias(event.TargetObject)
		logger.Debugf("unfavorite: (%s):{%s} %d medias", event.TargetObject.User.Name, strconv.Quote(event.TargetObject.Text), len(medias))
		go t.selfProceedMedias(medias, -1)
	default:
		logger.Debug(event.Event)
	}
}
// selfTweet forwards the bot's own tweets tagged with the configured QQ
// group hashtag; for quote-tweets the quoted status' text and media are
// forwarded instead of the wrapper tweet's.
func (t *TwitterBot) selfTweet(tweet *twitter.Tweet) {
	if qqBot.Config.GroupName != "" {
		if hasHashTags(qqBot.Config.GroupName, tweet.Entities.Hashtags) {
			if tweet.QuotedStatus != nil {
				logger.Infof("(%s):{%s}", qqBot.Config.GroupName, strconv.Quote(tweet.QuotedStatus.Text))
				sendPics(getMedias(tweet.QuotedStatus))
			} else {
				logger.Infof("(%s):{%s}", qqBot.Config.GroupName, strconv.Quote(tweet.Text))
				sendPics(getMedias(tweet))
			}
		}
	}
}
// Track ...
//
// Track opens a Twitter streaming filter over all followed account IDs and
// dispatches incoming tweets to trackTweet, reconnecting forever with a
// linearly growing backoff on stream errors.
func (t *TwitterBot) Track() {
	follows := []string{}
	for _, value := range t.Follows {
		follows = append(follows, value)
	}
	for i := 1; ; i++ {
		demux := twitter.NewSwitchDemux()
		demux.Tweet = t.trackTweet
		filterParams := &twitter.StreamFilterParams{
			Follow: follows,
		}
		stream, err := t.Client.Streams.Filter(filterParams)
		if err != nil {
			logger.Error(err)
			time.Sleep(time.Duration(i) * time.Second)
			// stream is nil on error; retry instead of dereferencing it
			continue
		}
		demux.HandleChan(stream.Messages)
	}
}
// Self ...
//
// Self listens on the authenticated user's own stream, dispatching events
// to selfEvent and tweets to selfTweet, reconnecting forever with a
// linearly growing backoff on stream errors.
func (t *TwitterBot) Self() {
	for i := 1; ; i++ {
		demux := twitter.NewSwitchDemux()
		demux.Event = t.selfEvent
		demux.Tweet = t.selfTweet
		userParams := &twitter.StreamUserParams{
			With: t.ID,
		}
		stream, err := t.Client.Streams.User(userParams)
		if err != nil {
			logger.Error(err)
			time.Sleep(time.Duration(i) * time.Second)
			// stream is nil on error; retry instead of dereferencing it
			continue
		}
		demux.HandleChan(stream.Messages)
	}
}
|
package moul
import (
"os"
"github.com/ChimeraCoder/anaconda"
"github.com/patrickmn/go-cache"
)
// init registers the "twitter-last-tweets" action with the package's
// action registry.
func init() {
	RegisterAction("twitter-last-tweets", GetTwitterLastTweetsAction)
}
// GetTwitterLastTweetsAction serves the latest tweets, reading the shared
// cache first and populating it on a miss. args is unused.
func GetTwitterLastTweetsAction(args []string) (interface{}, error) {
	if cached, ok := moulCache.Get("twitter-last-tweets"); ok {
		return cached, nil
	}
	fresh, err := GetTwitterLastTweets()
	if err != nil {
		return nil, err
	}
	moulCache.Set("twitter-last-tweets", fresh, cache.DefaultExpiration)
	return fresh, nil
}
// initTwitterAPI builds an anaconda client from the four TWITTER_*
// environment variables. Missing variables are not validated here;
// requests will fail later instead.
func initTwitterAPI() *anaconda.TwitterApi {
	anaconda.SetConsumerKey(os.Getenv("TWITTER_CONSUMER_KEY"))
	anaconda.SetConsumerSecret(os.Getenv("TWITTER_CONSUMER_SECRET"))
	api := anaconda.NewTwitterApi(os.Getenv("TWITTER_ACCESS_TOKEN"), os.Getenv("TWITTER_ACCESS_TOKEN_SECRET"))
	return api
}
// GetTwitterLastTweets fetches the authenticated user's timeline with no
// extra query parameters.
func GetTwitterLastTweets() (interface{}, error) {
	api := initTwitterAPI()
	return api.GetUserTimeline(map[string][]string{})
}
Add a Twitter followers-list action.
package moul
import (
"os"
"github.com/ChimeraCoder/anaconda"
"github.com/patrickmn/go-cache"
)
// init registers the twitter actions with the package's action registry.
func init() {
	RegisterAction("twitter-last-tweets", GetTwitterLastTweetsAction)
	RegisterAction("twitter-followers-list", GetTwitterFollowersListAction)
}
// GetTwitterLastTweetsAction serves the latest tweets, reading the shared
// cache first and populating it on a miss. args is unused.
func GetTwitterLastTweetsAction(args []string) (interface{}, error) {
	if tweets, found := moulCache.Get("twitter-last-tweets"); found {
		return tweets, nil
	}
	tweets, err := GetTwitterLastTweets()
	if err != nil {
		return nil, err
	}
	moulCache.Set("twitter-last-tweets", tweets, cache.DefaultExpiration)
	return tweets, nil
}
// GetTwitterFollowersListAction serves the followers list through the
// shared cache; on a miss it queries Twitter and stores the result.
// args is unused.
func GetTwitterFollowersListAction(args []string) (interface{}, error) {
	if cached, ok := moulCache.Get("twitter-followers"); ok {
		return cached, nil
	}
	fresh, err := GetTwitterFollowersList()
	if err != nil {
		return nil, err
	}
	moulCache.Set("twitter-followers", fresh, cache.DefaultExpiration)
	return fresh, nil
}
// initTwitterAPI builds an anaconda client from the four TWITTER_*
// environment variables. Missing variables are not validated here;
// requests will fail later instead.
func initTwitterAPI() *anaconda.TwitterApi {
	anaconda.SetConsumerKey(os.Getenv("TWITTER_CONSUMER_KEY"))
	anaconda.SetConsumerSecret(os.Getenv("TWITTER_CONSUMER_SECRET"))
	api := anaconda.NewTwitterApi(os.Getenv("TWITTER_ACCESS_TOKEN"), os.Getenv("TWITTER_ACCESS_TOKEN_SECRET"))
	return api
}
// GetTwitterFollowersList fetches the authenticated user's followers with
// default parameters.
func GetTwitterFollowersList() (interface{}, error) {
	api := initTwitterAPI()
	return api.GetFollowersList(nil)
}

// GetTwitterLastTweets fetches the authenticated user's timeline with
// default parameters.
func GetTwitterLastTweets() (interface{}, error) {
	api := initTwitterAPI()
	return api.GetUserTimeline(nil)
}
|
// The txn package implements support for multi-document transactions.
//
// For details check the following blog post:
//
// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
//
package txn
import (
"encoding/binary"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"gopkg.in/mgo.v2-unstable"
"gopkg.in/mgo.v2-unstable/bson"
crand "crypto/rand"
mrand "math/rand"
)
// state tracks a transaction's lifecycle; persisted in the "s" field of
// the transaction document.
type state int

const (
	tpreparing state = 1 // One or more documents not prepared
	tprepared  state = 2 // Prepared but not yet ready to run
	taborting  state = 3 // Assertions failed, cleaning up
	tapplying  state = 4 // Changes are in progress
	taborted   state = 5 // Pre-conditions failed, nothing done
	tapplied   state = 6 // All changes applied
)
// String renders the state for logs and error messages; it panics on any
// value outside the defined range.
func (s state) String() string {
	names := map[state]string{
		tpreparing: "preparing",
		tprepared:  "prepared",
		taborting:  "aborting",
		tapplying:  "applying",
		taborted:   "aborted",
		tapplied:   "applied",
	}
	if name, ok := names[s]; ok {
		return name
	}
	panic(fmt.Errorf("unknown state: %d", s))
}
// rand is the package-wide PRNG used for nonce generation; guarded by randmu.
var rand *mrand.Rand
var randmu sync.Mutex

// init seeds the package PRNG from crypto/rand so nonces differ across
// processes; panics if no entropy source is available.
func init() {
	var seed int64
	err := binary.Read(crand.Reader, binary.BigEndian, &seed)
	if err != nil {
		panic(err)
	}
	rand = mrand.New(mrand.NewSource(seed))
}
// transaction is the persisted representation of one multi-document
// transaction in the txns collection.
type transaction struct {
	Id     bson.ObjectId `bson:"_id"`
	State  state         `bson:"s"`           // current lifecycle state
	Info   interface{}   `bson:"i,omitempty"` // caller-provided metadata
	Ops    []Op          `bson:"o"`           // operations to apply
	Nonce  string        `bson:"n,omitempty"` // per-attempt nonce, part of the token
	Revnos []int64       `bson:"r,omitempty"` // document revnos observed/applied
	// docKeysCached memoizes docKeys(); not persisted.
	docKeysCached docKeys
}
// String renders the transaction id, including the nonce when one exists.
func (t *transaction) String() string {
	if t.Nonce == "" {
		return t.Id.Hex()
	}
	return string(t.token())
}

// done reports whether the transaction reached a terminal state.
func (t *transaction) done() bool {
	return t.State == tapplied || t.State == taborted
}

// token returns the transaction's token; panics if no nonce is set yet.
func (t *transaction) token() token {
	if t.Nonce == "" {
		panic("transaction has no nonce")
	}
	return tokenFor(t)
}
// docKeys returns the sorted, de-duplicated set of document keys touched
// by the transaction's ops; the result is memoized on the transaction.
func (t *transaction) docKeys() docKeys {
	if t.docKeysCached != nil {
		return t.docKeysCached
	}
	dkeys := make(docKeys, 0, len(t.Ops))
NextOp:
	for _, op := range t.Ops {
		dkey := op.docKey()
		for i := range dkeys {
			if dkey == dkeys[i] {
				continue NextOp // already recorded
			}
		}
		dkeys = append(dkeys, dkey)
	}
	sort.Sort(dkeys)
	t.docKeysCached = dkeys
	return dkeys
}
// tokenFor returns a unique transaction token composed of t's id and a
// nonce. An existing nonce on t is reused; otherwise a fresh one is
// generated.
func tokenFor(t *transaction) token {
	n := t.Nonce
	if n == "" {
		n = newNonce()
	}
	return token(t.Id.Hex() + "_" + n)
}
// newNonce returns 8 lowercase hex characters encoding a random uint32
// from the shared PRNG, lowest nibble first.
func newNonce() string {
	randmu.Lock()
	v := rand.Uint32()
	randmu.Unlock()
	var buf [8]byte
	for i := 0; i < 8; i++ {
		buf[i] = "0123456789abcdef"[v&0xf]
		v >>= 4
	}
	return string(buf[:])
}
// token identifies one attempt at a transaction, formatted as
// "<24-hex-char object id>_<nonce>".
type token string

func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) } // id part
func (tt token) nonce() string     { return string(tt[25:]) }                   // skips the "_"
// Op represents an operation to a single document that may be
// applied as part of a transaction with other operations.
type Op struct {
	// C and Id identify the collection and document this operation
	// refers to. Id is matched against the "_id" document field.
	C  string      `bson:"c"`
	Id interface{} `bson:"d"`

	// Assert optionally holds a query document that is used to
	// test the operation document at the time the transaction is
	// going to be applied. The assertions for all operations in
	// a transaction are tested before any changes take place,
	// and the transaction is entirely aborted if any of them
	// fails. This is also the only way to prevent a transaction
	// from being applied (the transaction continues despite
	// the outcome of Insert, Update, and Remove).
	Assert interface{} `bson:"a,omitempty"`

	// The Insert, Update and Remove fields describe the mutation
	// intended by the operation. At most one of them may be set
	// per operation. If none are set, Assert must be set and the
	// operation becomes a read-only test.
	//
	// Insert holds the document to be inserted at the time the
	// transaction is applied. The Id field will be inserted
	// into the document automatically as its _id field. The
	// transaction will continue even if the document already
	// exists. Use Assert with txn.DocMissing if the insertion is
	// required.
	//
	// Update holds the update document to be applied at the time
	// the transaction is applied. The transaction will continue
	// even if a document with Id is missing. Use Assert to
	// test for the document presence or its contents.
	//
	// Remove indicates whether to remove the document with Id.
	// The transaction continues even if the document doesn't yet
	// exist at the time the transaction is applied. Use Assert
	// with txn.DocExists to make sure it will be removed.
	Insert interface{} `bson:"i,omitempty"`
	Update interface{} `bson:"u,omitempty"`
	Remove bool        `bson:"r,omitempty"`
}
// isChange reports whether the operation mutates its document (as opposed
// to a pure assertion).
func (op *Op) isChange() bool {
	return op.Update != nil || op.Insert != nil || op.Remove
}

// docKey returns the (collection, id) pair identifying op's target document.
func (op *Op) docKey() docKey {
	return docKey{op.C, op.Id}
}

// name returns a short human-readable label for the op's kind; mutations
// take precedence over the assert label.
func (op *Op) name() string {
	switch {
	case op.Update != nil:
		return "update"
	case op.Insert != nil:
		return "insert"
	case op.Remove:
		return "remove"
	case op.Assert != nil:
		return "assert"
	}
	return "none"
}

const (
	// DocExists and DocMissing may be used on an operation's
	// Assert value to assert that the document with the given
	// Id exists or does not exist, respectively.
	DocExists  = "d+"
	DocMissing = "d-"
)
// A Runner applies operations as part of a transaction onto any number
// of collections within a database. See the Run method for details.
type Runner struct {
	tc *mgo.Collection // txns
	sc *mgo.Collection // stash
	lc *mgo.Collection // log; nil until enabled via ChangeLog
}

// NewRunner returns a new transaction runner that uses tc to hold its
// transactions.
//
// Multiple transaction collections may exist in a single database, but
// all collections that are touched by operations in a given transaction
// collection must be handled exclusively by it.
//
// A second collection with the same name of tc but suffixed by ".stash"
// will be used for implementing the transactional behavior of insert
// and remove operations.
func NewRunner(tc *mgo.Collection) *Runner {
	// change logging (lc) starts disabled
	return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil}
}

// ErrAborted is returned by Run and Resume when the transaction's
// assertions failed and it was aborted with no changes applied.
var ErrAborted = fmt.Errorf("transaction aborted")
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it, when the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied in the order provided or
// all aborted, as long as the affected documents are only modified
// through transactions. If documents are simultaneously modified
// by transactions and out of transactions the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations can't be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Run Resume with the obtained transaction id
// to confirm whether the transaction was applied or not.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
	const efmt = "error in transaction op %d: %s"
	// Validate every op before touching the database: target must be set,
	// and each op carries at most one mutation, or else an Assert.
	for i := range ops {
		op := &ops[i]
		if op.C == "" || op.Id == nil {
			return fmt.Errorf(efmt, i, "C or Id missing")
		}
		changes := 0
		if op.Insert != nil {
			changes++
		}
		if op.Update != nil {
			changes++
		}
		if op.Remove {
			changes++
		}
		if changes > 1 {
			return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
		}
		if changes == 0 && op.Assert == nil {
			return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
		}
	}
	if id == "" {
		id = bson.NewObjectId()
	}

	// Insert transaction sooner rather than later, to stay on the safer side.
	t := transaction{
		Id:    id,
		Ops:   ops,
		State: tpreparing,
		Info:  info,
	}
	if err = r.tc.Insert(&t); err != nil {
		return err
	}
	// flush (defined elsewhere in the package) drives the transaction
	// forward; afterwards it must be in a terminal state.
	if err = flush(r, &t); err != nil {
		return err
	}
	if t.State == taborted {
		return ErrAborted
	} else if t.State != tapplied {
		panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
	}
	return nil
}
// ResumeAll resumes all pending transactions. All ErrAborted errors
// from individual transactions are ignored.
func (r *Runner) ResumeAll() (err error) {
	debugf("Resuming all unfinished transactions")
	// select every transaction still in a non-terminal state
	iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
	var t transaction
	for iter.Next(&t) {
		if t.State == tapplied || t.State == taborted {
			// may have completed between the query and this read
			continue
		}
		debugf("Resuming %s from %q", t.Id, t.State)
		if err := flush(r, &t); err != nil {
			return err
		}
		if !t.done() {
			panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
		}
	}
	return nil
}
// Resume resumes the transaction with id. It returns mgo.ErrNotFound
// if the transaction is not found. Otherwise, it has the same semantics
// of the Run method after the transaction is inserted.
func (r *Runner) Resume(id bson.ObjectId) (err error) {
	t, err := r.load(id)
	if err != nil {
		return err
	}
	if !t.done() {
		debugf("Resuming %s from %q", t, t.State)
		if err := flush(r, t); err != nil {
			return err
		}
	}
	// Same terminal-state contract as Run.
	if t.State == taborted {
		return ErrAborted
	} else if t.State != tapplied {
		panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
	}
	return nil
}
// ChangeLog enables logging of changes to the given collection
// every time a transaction that modifies content is done being
// applied.
//
// Saved documents are in the format:
//
//     {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
//
// The document revision is the value of the txn-revno field after
// the change has been applied. Negative values indicate the document
// was not present in the collection. Revisions will not change when
// updates or removes are applied to missing documents or inserts are
// attempted when the document isn't present.
func (r *Runner) ChangeLog(logc *mgo.Collection) {
	// nil (the default) disables change logging; the field is read by the
	// apply machinery elsewhere in the package.
	r.lc = logc
}
// PurgeMissing removes from collections any state that refers to transaction
// documents that for whatever reason have been lost from the system (removed
// by accident or lost in a hard crash, for example).
//
// This method should very rarely be needed, if at all, and should never be
// used during the normal operation of an application. Its purpose is to put
// a system that has seen unavoidable corruption back in a working state.
func (r *Runner) PurgeMissing(collections ...string) error {
	type M map[string]interface{}
	type S []interface{}

	// Emit one (doc id, txn token) pair per queued transaction reference.
	pipeline := []M{
		{"$project": M{"_id": 1, "txn-queue": 1}},
		{"$unwind": "$txn-queue"},
		{"$sort": M{"_id": 1, "txn-queue": 1}},
		//{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}},
	}

	type TRef struct {
		DocId interface{} "_id"
		TxnId string      "txn-queue"
	}

	found := make(map[bson.ObjectId]bool)
	colls := make(map[string]bool)

	sort.Strings(collections)
	for _, collection := range collections {
		c := r.tc.Database.C(collection)
		iter := c.Pipe(pipeline).Iter()
		var tref TRef
		for iter.Next(&tref) {
			// tokens are "<24-hex id>_<nonce>"; the id prefix identifies the txn
			txnId := bson.ObjectIdHex(tref.TxnId[:24])
			if found[txnId] {
				continue
			}
			if r.tc.FindId(txnId).One(nil) == nil {
				found[txnId] = true
				continue
			}
			logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId)
			err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
			if err != nil {
				return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
			}
		}
		// Surface aggregation/iteration failures (e.g. pipeline result size
		// limits) instead of silently treating them as a clean end of data.
		if err := iter.Close(); err != nil {
			return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err)
		}
		colls[collection] = true
	}

	type StashTRef struct {
		Id    docKey "_id"
		TxnId string "txn-queue"
	}

	// Same purge over the stash collection, keyed by (collection, doc id).
	iter := r.sc.Pipe(pipeline).Iter()
	var stref StashTRef
	for iter.Next(&stref) {
		txnId := bson.ObjectIdHex(stref.TxnId[:24])
		if found[txnId] {
			continue
		}
		if r.tc.FindId(txnId).One(nil) == nil {
			found[txnId] = true
			continue
		}
		logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId)
		err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
		if err != nil {
			return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
		}
	}
	if err := iter.Close(); err != nil {
		return fmt.Errorf("transaction stash iteration error: %v", err)
	}

	return nil
}
// load fetches the transaction document with the given id from the txns
// collection, translating mgo.ErrNotFound into a descriptive error.
func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
	t := &transaction{}
	switch err := r.tc.FindId(id).One(t); err {
	case nil:
		return t, nil
	case mgo.ErrNotFound:
		return nil, fmt.Errorf("cannot find transaction %s", id)
	default:
		return nil, err
	}
}
// typeNature partitions document id types into comparable families so ids
// of different Go types still get a total order.
type typeNature int

const (
	// The order of these values matters. Transactions
	// from applications using different ordering will
	// be incompatible with each other.
	_ typeNature = iota
	natureString
	natureInt
	natureFloat
	natureBool
	natureStruct
)

// valueNature normalizes v to a canonical representation (string, int64,
// float64, bool, or the struct value itself) plus its nature; it panics
// on any type unsupported as a txn document id.
func valueNature(v interface{}) (value interface{}, nature typeNature) {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.String:
		return rv.String(), natureString
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return rv.Int(), natureInt
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// unsigned ints are folded into the signed int family
		return int64(rv.Uint()), natureInt
	case reflect.Float32, reflect.Float64:
		return rv.Float(), natureFloat
	case reflect.Bool:
		return rv.Bool(), natureBool
	case reflect.Struct:
		return v, natureStruct
	}
	panic("document id type unsupported by txn: " + rv.Kind().String())
}
// docKey uniquely identifies a document as (collection name, document id).
type docKey struct {
	C  string
	Id interface{}
}

// docKeys implements sort.Interface, ordering by collection name first and
// then by document id via valuecmp.
type docKeys []docKey

func (ks docKeys) Len() int      { return len(ks) }
func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
func (ks docKeys) Less(i, j int) bool {
	a, b := ks[i], ks[j]
	if a.C != b.C {
		return a.C < b.C
	}
	return valuecmp(a.Id, b.Id) == -1
}
// valuecmp totally orders two document ids: first by nature (see
// typeNature), then by value within the same nature. Returns -1, 0, or 1.
func valuecmp(a, b interface{}) int {
	av, an := valueNature(a)
	bv, bn := valueNature(b)
	// different natures: the nature ordering decides
	if an < bn {
		return -1
	}
	if an > bn {
		return 1
	}

	if av == bv {
		return 0
	}
	var less bool
	switch an {
	case natureString:
		less = av.(string) < bv.(string)
	case natureInt:
		less = av.(int64) < bv.(int64)
	case natureFloat:
		less = av.(float64) < bv.(float64)
	case natureBool:
		// false sorts before true
		less = !av.(bool) && bv.(bool)
	case natureStruct:
		less = structcmp(av, bv) == -1
	default:
		panic("unreachable")
	}
	if less {
		return -1
	}
	return 1
}
// structcmp totally orders two struct values by walking their exported
// fields in declaration order: field values are compared first (via
// valuecmp), then the bson-aware field names. When exactly one struct
// runs out of fields, the shorter one sorts first.
func structcmp(a, b interface{}) int {
	av := reflect.ValueOf(a)
	bv := reflect.ValueOf(b)

	var ai, bi = 0, 0
	var an, bn = av.NumField(), bv.NumField()
	var avi, bvi interface{}
	var af, bf reflect.StructField
	for {
		// advance to a's next exported field, if any
		for ai < an {
			af = av.Type().Field(ai)
			if isExported(af.Name) {
				avi = av.Field(ai).Interface()
				ai++
				break
			}
			ai++
		}
		// advance to b's next exported field, if any
		for bi < bn {
			bf = bv.Type().Field(bi)
			if isExported(bf.Name) {
				bvi = bv.Field(bi).Interface()
				bi++
				break
			}
			bi++
		}
		if n := valuecmp(avi, bvi); n != 0 {
			return n
		}
		nameA := getFieldName(af)
		nameB := getFieldName(bf)
		if nameA < nameB {
			return -1
		}
		if nameA > nameB {
			return 1
		}
		if ai == an && bi == bn {
			return 0
		}
		if ai == an || bi == bn {
			// exactly one struct is exhausted; it sorts first.
			// (was "ai == bn", which compared a's index against b's
			// field count — wrong whenever the field counts differ)
			if ai == an {
				return -1
			}
			return 1
		}
	}
	panic("unreachable")
}
// isExported reports whether a struct field name is exported, i.e. starts
// with an ASCII uppercase letter (the only fields reachable through
// reflect's Interface()).
func isExported(name string) bool {
	return name[0] >= 'A' && name[0] <= 'Z'
}
func getFieldName(f reflect.StructField) string {
name := f.Tag.Get("bson")
if i := strings.Index(name, ","); i >= 0 {
name = name[:i]
}
if name == "" {
name = strings.ToLower(f.Name)
}
return name
}
Check for iteration errors in PurgeMissing
Not checking for iterator errors was hiding problems with pipeline
result size limits being exceeded when PurgeMissing was being used to
recover a production database.
// The txn package implements support for multi-document transactions.
//
// For details check the following blog post:
//
// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
//
package txn
import (
"encoding/binary"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"gopkg.in/mgo.v2-unstable"
"gopkg.in/mgo.v2-unstable/bson"
crand "crypto/rand"
mrand "math/rand"
)
type state int
const (
tpreparing state = 1 // One or more documents not prepared
tprepared state = 2 // Prepared but not yet ready to run
taborting state = 3 // Assertions failed, cleaning up
tapplying state = 4 // Changes are in progress
taborted state = 5 // Pre-conditions failed, nothing done
tapplied state = 6 // All changes applied
)
// String renders the state for logs and errors; panics on undefined values.
func (s state) String() string {
	switch s {
	case tpreparing:
		return "preparing"
	case tprepared:
		return "prepared"
	case taborting:
		return "aborting"
	case tapplying:
		return "applying"
	case taborted:
		return "aborted"
	case tapplied:
		return "applied"
	}
	panic(fmt.Errorf("unknown state: %d", s))
}
// rand is the package-wide PRNG used for nonce generation; guarded by randmu.
var rand *mrand.Rand
var randmu sync.Mutex

// init seeds the package PRNG from crypto/rand; panics without entropy.
func init() {
	var seed int64
	err := binary.Read(crand.Reader, binary.BigEndian, &seed)
	if err != nil {
		panic(err)
	}
	rand = mrand.New(mrand.NewSource(seed))
}
// transaction is the persisted representation of one multi-document
// transaction in the txns collection.
type transaction struct {
	Id     bson.ObjectId `bson:"_id"`
	State  state         `bson:"s"`           // current lifecycle state
	Info   interface{}   `bson:"i,omitempty"` // caller-provided metadata
	Ops    []Op          `bson:"o"`           // operations to apply
	Nonce  string        `bson:"n,omitempty"` // per-attempt nonce, part of the token
	Revnos []int64       `bson:"r,omitempty"` // document revnos observed/applied
	// docKeysCached memoizes docKeys(); not persisted.
	docKeysCached docKeys
}
// String renders the transaction id, including the nonce when one exists.
func (t *transaction) String() string {
	if t.Nonce == "" {
		return t.Id.Hex()
	}
	return string(t.token())
}

// done reports whether the transaction reached a terminal state.
func (t *transaction) done() bool {
	return t.State == tapplied || t.State == taborted
}

// token returns the transaction's token; panics if no nonce is set yet.
func (t *transaction) token() token {
	if t.Nonce == "" {
		panic("transaction has no nonce")
	}
	return tokenFor(t)
}
// docKeys returns the sorted, de-duplicated set of document keys touched
// by the transaction's ops; the result is memoized on the transaction.
func (t *transaction) docKeys() docKeys {
	if t.docKeysCached != nil {
		return t.docKeysCached
	}
	dkeys := make(docKeys, 0, len(t.Ops))
NextOp:
	for _, op := range t.Ops {
		dkey := op.docKey()
		for i := range dkeys {
			if dkey == dkeys[i] {
				continue NextOp // already recorded
			}
		}
		dkeys = append(dkeys, dkey)
	}
	sort.Sort(dkeys)
	t.docKeysCached = dkeys
	return dkeys
}
// tokenFor returns a unique transaction token that
// is composed by t's id and a nonce. If t already has
// a nonce assigned to it, it will be used, otherwise
// a new nonce will be generated.
func tokenFor(t *transaction) token {
	nonce := t.Nonce
	if nonce == "" {
		nonce = newNonce()
	}
	return token(t.Id.Hex() + "_" + nonce)
}

// newNonce returns 8 lowercase hex characters encoding a random uint32
// from the shared PRNG, lowest nibble first.
func newNonce() string {
	randmu.Lock()
	r := rand.Uint32()
	randmu.Unlock()
	n := make([]byte, 8)
	for i := uint(0); i < 8; i++ {
		n[i] = "0123456789abcdef"[(r>>(4*i))&0xf]
	}
	return string(n)
}
// token identifies one attempt at a transaction, formatted as
// "<24-hex-char object id>_<nonce>".
type token string

func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) } // id part
func (tt token) nonce() string     { return string(tt[25:]) }                   // skips the "_"
// Op represents an operation to a single document that may be
// applied as part of a transaction with other operations.
type Op struct {
	// C and Id identify the collection and document this operation
	// refers to. Id is matched against the "_id" document field.
	C  string      `bson:"c"`
	Id interface{} `bson:"d"`

	// Assert optionally holds a query document that is used to
	// test the operation document at the time the transaction is
	// going to be applied. The assertions for all operations in
	// a transaction are tested before any changes take place,
	// and the transaction is entirely aborted if any of them
	// fails. This is also the only way to prevent a transaction
	// from being applied (the transaction continues despite
	// the outcome of Insert, Update, and Remove).
	Assert interface{} `bson:"a,omitempty"`

	// The Insert, Update and Remove fields describe the mutation
	// intended by the operation. At most one of them may be set
	// per operation. If none are set, Assert must be set and the
	// operation becomes a read-only test.
	//
	// Insert holds the document to be inserted at the time the
	// transaction is applied. The Id field will be inserted
	// into the document automatically as its _id field. The
	// transaction will continue even if the document already
	// exists. Use Assert with txn.DocMissing if the insertion is
	// required.
	//
	// Update holds the update document to be applied at the time
	// the transaction is applied. The transaction will continue
	// even if a document with Id is missing. Use Assert to
	// test for the document presence or its contents.
	//
	// Remove indicates whether to remove the document with Id.
	// The transaction continues even if the document doesn't yet
	// exist at the time the transaction is applied. Use Assert
	// with txn.DocExists to make sure it will be removed.
	Insert interface{} `bson:"i,omitempty"`
	Update interface{} `bson:"u,omitempty"`
	Remove bool        `bson:"r,omitempty"`
}
// isChange reports whether the operation mutates its document (as opposed
// to a pure assertion).
func (op *Op) isChange() bool {
	return op.Update != nil || op.Insert != nil || op.Remove
}

// docKey returns the (collection, id) pair identifying op's target document.
func (op *Op) docKey() docKey {
	return docKey{op.C, op.Id}
}

// name returns a short human-readable label for the op's kind; mutations
// take precedence over the assert label.
func (op *Op) name() string {
	switch {
	case op.Update != nil:
		return "update"
	case op.Insert != nil:
		return "insert"
	case op.Remove:
		return "remove"
	case op.Assert != nil:
		return "assert"
	}
	return "none"
}

const (
	// DocExists and DocMissing may be used on an operation's
	// Assert value to assert that the document with the given
	// Id exists or does not exist, respectively.
	DocExists  = "d+"
	DocMissing = "d-"
)
// A Runner applies operations as part of a transaction onto any number
// of collections within a database. See the Run method for details.
type Runner struct {
    tc *mgo.Collection // txns: holds the transaction documents themselves
    sc *mgo.Collection // stash: backs transactional insert/remove behavior
    lc *mgo.Collection // log: optional change log; nil unless ChangeLog was called
}
// NewRunner returns a new transaction runner that uses tc to hold its
// transactions.
//
// Multiple transaction collections may exist in a single database, but
// all collections that are touched by operations in a given transaction
// collection must be handled exclusively by it.
//
// A second collection with the same name of tc but suffixed by ".stash"
// will be used for implementing the transactional behavior of insert
// and remove operations.
func NewRunner(tc *mgo.Collection) *Runner {
    stash := tc.Database.C(tc.Name + ".stash")
    return &Runner{tc: tc, sc: stash, lc: nil}
}
// ErrAborted is returned by Run and Resume when one or more operations
// cannot be applied and the transaction is aborted with no changes made.
var ErrAborted = fmt.Errorf("transaction aborted")
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it, when the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied in the order provided or
// all aborted, as long as the affected documents are only modified
// through transactions. If documents are simultaneously modified
// by transactions and out of transactions the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations can't be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Run Resume with the obtained transaction id
// to confirm whether the transaction was applied or not.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
    const efmt = "error in transaction op %d: %s"
    // Validate every operation before anything touches the database.
    for i := range ops {
        op := &ops[i]
        if op.C == "" || op.Id == nil {
            return fmt.Errorf(efmt, i, "C or Id missing")
        }
        // At most one of Insert/Update/Remove may be set per operation.
        changes := 0
        if op.Insert != nil {
            changes++
        }
        if op.Update != nil {
            changes++
        }
        if op.Remove {
            changes++
        }
        if changes > 1 {
            return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
        }
        if changes == 0 && op.Assert == nil {
            return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
        }
    }
    if id == "" {
        id = bson.NewObjectId()
    }
    // Insert transaction sooner rather than later, to stay on the safer side.
    t := transaction{
        Id:    id,
        Ops:   ops,
        State: tpreparing,
        Info:  info,
    }
    if err = r.tc.Insert(&t); err != nil {
        return err
    }
    // Drive the freshly inserted transaction to a terminal state.
    if err = flush(r, &t); err != nil {
        return err
    }
    if t.State == taborted {
        return ErrAborted
    } else if t.State != tapplied {
        // flush must leave the transaction either applied or aborted.
        panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
    }
    return nil
}
// ResumeAll resumes all pending transactions. All ErrAborted errors
// from individual transactions are ignored.
//
// Returns an error if resuming any individual transaction fails, or
// if the query iteration over pending transactions itself fails.
func (r *Runner) ResumeAll() (err error) {
    debugf("Resuming all unfinished transactions")
    // Select every transaction still in a non-terminal state.
    iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
    var t transaction
    for iter.Next(&t) {
        if t.State == tapplied || t.State == taborted {
            // Finished concurrently since the query ran; nothing to do.
            continue
        }
        debugf("Resuming %s from %q", t.Id, t.State)
        if err := flush(r, &t); err != nil {
            return err
        }
        if !t.done() {
            panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
        }
    }
    // Surface any error that interrupted the iteration; previously this
    // returned nil unconditionally, silently dropping query failures.
    return iter.Close()
}
// Resume resumes the transaction with id. It returns mgo.ErrNotFound
// if the transaction is not found. Otherwise, it has the same semantics
// of the Run method after the transaction is inserted.
//
// NOTE(review): load currently wraps mgo.ErrNotFound in a fmt.Errorf,
// so callers comparing the returned error against mgo.ErrNotFound will
// not match — confirm which behavior is intended.
func (r *Runner) Resume(id bson.ObjectId) (err error) {
    t, err := r.load(id)
    if err != nil {
        return err
    }
    if !t.done() {
        // Still pending: drive it to either tapplied or taborted.
        debugf("Resuming %s from %q", t, t.State)
        if err := flush(r, t); err != nil {
            return err
        }
    }
    if t.State == taborted {
        return ErrAborted
    } else if t.State != tapplied {
        panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
    }
    return nil
}
// ChangeLog enables logging of changes to the given collection
// every time a transaction that modifies content is done being
// applied.
//
// Saved documents are in the format:
//
//     {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
//
// The document revision is the value of the txn-revno field after
// the change has been applied. Negative values indicate the document
// was not present in the collection. Revisions will not change when
// updates or removes are applied to missing documents or inserts are
// attempted when the document isn't present.
func (r *Runner) ChangeLog(logc *mgo.Collection) {
    r.lc = logc // record the collection that will receive change-log documents
}
// PurgeMissing removes from collections any state that refers to transaction
// documents that for whatever reason have been lost from the system (removed
// by accident or lost in a hard crash, for example).
//
// This method should very rarely be needed, if at all, and should never be
// used during the normal operation of an application. Its purpose is to put
// a system that has seen unavoidable corruption back in a working state.
func (r *Runner) PurgeMissing(collections ...string) error {
    type M map[string]interface{}
    type S []interface{}

    // Emit one {_id, txn-queue} pair per queued transaction token in
    // each inspected document.
    pipeline := []M{
        {"$project": M{"_id": 1, "txn-queue": 1}},
        {"$unwind": "$txn-queue"},
        {"$sort": M{"_id": 1, "txn-queue": 1}},
        //{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}},
    }

    type TRef struct {
        DocId interface{} "_id"
        TxnId string      "txn-queue"
    }

    // found caches transaction ids confirmed to still exist in r.tc.
    found := make(map[bson.ObjectId]bool)
    // colls records which collections were processed; currently written
    // but not read back — presumably kept for future use.
    colls := make(map[string]bool)

    sort.Strings(collections)
    for _, collection := range collections {
        c := r.tc.Database.C(collection)
        iter := c.Pipe(pipeline).Iter()
        var tref TRef
        for iter.Next(&tref) {
            // The first 24 hex characters of the queue token are the
            // transaction's object id (see the "_"-suffixed $regex below).
            txnId := bson.ObjectIdHex(tref.TxnId[:24])
            if found[txnId] {
                continue
            }
            if r.tc.FindId(txnId).One(nil) == nil {
                found[txnId] = true
                continue
            }
            logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId)
            err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
            if err != nil {
                return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
            }
        }
        if err := iter.Close(); err != nil {
            return fmt.Errorf("transaction queue iteration error for collection %s: %v", collection, err)
        }
        colls[collection] = true
    }

    type StashTRef struct {
        Id    docKey "_id"
        TxnId string "txn-queue"
    }

    // Repeat the same purge over the stash collection.
    iter := r.sc.Pipe(pipeline).Iter()
    var stref StashTRef
    for iter.Next(&stref) {
        txnId := bson.ObjectIdHex(stref.TxnId[:24])
        if found[txnId] {
            continue
        }
        if r.tc.FindId(txnId).One(nil) == nil {
            found[txnId] = true
            continue
        }
        logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId)
        err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
        if err != nil {
            return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
        }
    }
    if err := iter.Close(); err != nil {
        return fmt.Errorf("transaction stash iteration error: %v", err)
    }

    return nil
}
// load fetches the transaction document with the given id from the
// transaction collection.
//
// NOTE(review): mgo.ErrNotFound is wrapped into a new error here, so
// callers (e.g. Resume, whose doc comment promises mgo.ErrNotFound)
// cannot compare against the sentinel — confirm whether the wrapping
// is intentional.
func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
    var t transaction
    err := r.tc.FindId(id).One(&t)
    if err == mgo.ErrNotFound {
        return nil, fmt.Errorf("cannot find transaction %s", id)
    } else if err != nil {
        return nil, err
    }
    return &t, nil
}
// typeNature classifies the dynamic type of a document id so that ids
// of different kinds have a stable relative ordering (see valuecmp).
type typeNature int

const (
    // The order of these values matters. Transactions
    // from applications using different ordering will
    // be incompatible with each other.
    _ typeNature = iota
    natureString
    natureInt
    natureFloat
    natureBool
    natureStruct
)
// valueNature normalizes v into a canonical comparable value and reports
// its nature: all signed integer kinds widen to int64, unsigned kinds are
// converted through int64, floats widen to float64; strings, bools, and
// structs pass through. Any other kind panics, as it cannot serve as a
// txn document id.
func valueNature(v interface{}) (value interface{}, nature typeNature) {
    rv := reflect.ValueOf(v)
    switch rv.Kind() {
    case reflect.String:
        return rv.String(), natureString
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return rv.Int(), natureInt
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
        return int64(rv.Uint()), natureInt
    case reflect.Float32, reflect.Float64:
        return rv.Float(), natureFloat
    case reflect.Bool:
        return rv.Bool(), natureBool
    case reflect.Struct:
        return v, natureStruct
    }
    panic("document id type unsupported by txn: " + rv.Kind().String())
}
// docKey uniquely identifies a document: its collection name plus its _id.
type docKey struct {
    C  string
    Id interface{}
}
// docKeys implements sort.Interface, ordering keys first by collection
// name and then by document id via valuecmp.
type docKeys []docKey

func (ks docKeys) Len() int      { return len(ks) }
func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
func (ks docKeys) Less(i, j int) bool {
    a, b := ks[i], ks[j]
    if a.C != b.C {
        return a.C < b.C
    }
    return valuecmp(a.Id, b.Id) == -1
}
// valuecmp orders a relative to b, returning -1, 0, or 1. Values of
// different natures order by nature alone; values of the same nature
// order by their normalized representation (see valueNature).
func valuecmp(a, b interface{}) int {
    av, an := valueNature(a)
    bv, bn := valueNature(b)
    switch {
    case an < bn:
        return -1
    case an > bn:
        return 1
    case av == bv:
        return 0
    }

    // Same nature, different values: decide which is smaller.
    less := false
    switch an {
    case natureString:
        less = av.(string) < bv.(string)
    case natureInt:
        less = av.(int64) < bv.(int64)
    case natureFloat:
        less = av.(float64) < bv.(float64)
    case natureBool:
        less = !av.(bool) && bv.(bool) // false sorts before true
    case natureStruct:
        less = structcmp(av, bv) == -1
    default:
        panic("unreachable")
    }
    if less {
        return -1
    }
    return 1
}
// structcmp orders two struct values by walking their exported fields in
// declaration order, comparing each pair first by value (valuecmp) and
// then by bson-derived field name (getFieldName). The first difference
// decides the ordering; if one struct runs out of exported fields first,
// it sorts before the longer one.
func structcmp(a, b interface{}) int {
    av := reflect.ValueOf(a)
    bv := reflect.ValueOf(b)

    var ai, bi = 0, 0
    var an, bn = av.NumField(), bv.NumField()
    var avi, bvi interface{}
    var af, bf reflect.StructField
    for {
        // Advance each cursor to its next exported field, capturing both
        // the field value and its descriptor.
        for ai < an {
            af = av.Type().Field(ai)
            if isExported(af.Name) {
                avi = av.Field(ai).Interface()
                ai++
                break
            }
            ai++
        }
        for bi < bn {
            bf = bv.Type().Field(bi)
            if isExported(bf.Name) {
                bvi = bv.Field(bi).Interface()
                bi++
                break
            }
            bi++
        }
        if n := valuecmp(avi, bvi); n != 0 {
            return n
        }
        nameA := getFieldName(af)
        nameB := getFieldName(bf)
        if nameA < nameB {
            return -1
        }
        if nameA > nameB {
            return 1
        }
        if ai == an && bi == bn {
            return 0
        }
        if ai == an || bi == bn {
            // Exactly one side is exhausted: the shorter struct sorts
            // first. This previously tested `ai == bn`, comparing a's
            // cursor against b's field count, which is only correct when
            // both structs declare the same number of fields.
            if ai == an {
                return -1
            }
            return 1
        }
    }
}
func isExported(name string) bool {
a := name[0]
return a >= 'A' && a <= 'Z'
}
func getFieldName(f reflect.StructField) string {
name := f.Tag.Get("bson")
if i := strings.Index(name, ","); i >= 0 {
name = name[:i]
}
if name == "" {
name = strings.ToLower(f.Name)
}
return name
}
|
package funcs
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"unicode/utf8"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
homedir "github.com/mitchellh/go-homedir"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
// MakeFileFunc constructs a function that takes a file path and returns the
// contents of that file, either directly as a string (where valid UTF-8 is
// required) or as a string containing base64 bytes.
func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.String),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            filename := args[0].AsString()
            data, err := readFileBytes(baseDir, filename)
            if err != nil {
                return cty.UnknownVal(cty.String), err
            }

            if encBase64 {
                return cty.StringVal(base64.StdEncoding.EncodeToString(data)), nil
            }

            // Raw string mode requires the file contents to be valid UTF-8.
            if !utf8.Valid(data) {
                return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", filename)
            }
            return cty.StringVal(string(data)), nil
        },
    })
}
// MakeTemplateFileFunc constructs a function that takes a file path and
// an arbitrary object of named values and attempts to render the referenced
// file as a template using HCL template syntax.
//
// The template itself may recursively call other functions so a callback
// must be provided to get access to those functions. The template cannot,
// however, access any variables defined in the scope: it is restricted only to
// those variables provided in the second function argument, to ensure that all
// dependencies on other graph nodes can be seen before executing this function.
//
// As a special exception, a referenced template file may not recursively call
// the templatefile function, since that would risk the same file being
// included into itself indefinitely.
func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {
    params := []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
        {
            Name: "vars",
            Type: cty.DynamicPseudoType,
        },
    }

    // loadTmpl reads and parses the template file named fn.
    loadTmpl := func(fn string) (hcl.Expression, error) {
        // We re-use File here to ensure the same filename interpretation
        // as it does, along with its other safety checks.
        tmplVal, err := File(baseDir, cty.StringVal(fn))
        if err != nil {
            return nil, err
        }
        expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            return nil, diags
        }
        return expr, nil
    }

    // renderTmpl evaluates a parsed template against the given vars value.
    renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
        if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
            return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
        }
        ctx := &hcl.EvalContext{
            Variables: varsVal.AsValueMap(),
        }
        // We'll pre-check references in the template here so we can give a
        // more specialized error message than HCL would by default, so it's
        // clearer that this problem is coming from a templatefile call.
        for _, traversal := range expr.Variables() {
            root := traversal.RootName()
            if _, ok := ctx.Variables[root]; !ok {
                return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
            }
        }
        givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
        funcs := make(map[string]function.Function, len(givenFuncs))
        for name, fn := range givenFuncs {
            if name == "templatefile" {
                // We stub this one out to prevent recursive calls.
                funcs[name] = function.New(&function.Spec{
                    Params: params,
                    Type: func(args []cty.Value) (cty.Type, error) {
                        return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
                    },
                })
                continue
            }
            funcs[name] = fn
        }
        ctx.Functions = funcs
        val, diags := expr.Value(ctx)
        if diags.HasErrors() {
            return cty.DynamicVal, diags
        }
        return val, nil
    }

    return function.New(&function.Spec{
        Params: params,
        Type: func(args []cty.Value) (cty.Type, error) {
            if !(args[0].IsKnown() && args[1].IsKnown()) {
                return cty.DynamicPseudoType, nil
            }
            // We'll render our template now to see what result type it produces.
            // A template consisting only of a single interpolation can potentially
            // return any type.
            expr, err := loadTmpl(args[0].AsString())
            if err != nil {
                return cty.DynamicPseudoType, err
            }
            // This is safe even if args[1] contains unknowns because the HCL
            // template renderer itself knows how to short-circuit those.
            val, err := renderTmpl(expr, args[1])
            return val.Type(), err
        },
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            expr, err := loadTmpl(args[0].AsString())
            if err != nil {
                return cty.DynamicVal, err
            }
            return renderTmpl(expr, args[1])
        },
    })
}
// MakeFileExistsFunc constructs a function that takes a path
// and determines whether a file exists at that path
func MakeFileExistsFunc(baseDir string) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.Bool),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            expanded, err := homedir.Expand(args[0].AsString())
            if err != nil {
                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
            }
            if !filepath.IsAbs(expanded) {
                expanded = filepath.Join(baseDir, expanded)
            }
            // Canonicalize for the host OS before touching the filesystem.
            expanded = filepath.Clean(expanded)

            info, err := os.Stat(expanded)
            if err != nil {
                if os.IsNotExist(err) {
                    return cty.False, nil
                }
                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", expanded)
            }
            if !info.Mode().IsRegular() {
                return cty.False, fmt.Errorf("%s is not a regular file, but %q",
                    expanded, info.Mode().String())
            }
            return cty.True, nil
        },
    })
}
// MakeFileSetFunc constructs a function that takes a glob pattern
// and enumerates a file set from that pattern
func MakeFileSetFunc(baseDir string) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
            {
                Name: "pattern",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.Set(cty.String)),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            path := args[0].AsString()
            pattern := args[1].AsString()
            path, err := homedir.Expand(path)
            if err != nil {
                return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to expand ~: %s", err)
            }
            if !filepath.IsAbs(path) {
                path = filepath.Join(baseDir, path)
            }
            // Join the path to the glob pattern, while ensuring the full
            // pattern is canonical for the host OS. The joined path is
            // automatically cleaned during this operation.
            pattern = filepath.Join(path, pattern)
            matches, err := filepath.Glob(pattern)
            if err != nil {
                return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err)
            }
            var matchVals []cty.Value
            for _, match := range matches {
                fi, err := os.Stat(match)
                if err != nil {
                    return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err)
                }
                // Only regular files are included in the resulting set.
                if !fi.Mode().IsRegular() {
                    continue
                }
                // Remove the path and file separator from matches.
                // NOTE(review): TrimPrefix silently no-ops when the prefix
                // does not match exactly (e.g. when path is "." or already
                // ends in a separator), leaving the full joined path in the
                // result; filepath.Rel would be more robust — confirm
                // intended behavior.
                match = strings.TrimPrefix(match, path+string(filepath.Separator))
                // Replace any remaining file separators with forward slash (/)
                // separators for cross-system compatibility.
                match = filepath.ToSlash(match)
                matchVals = append(matchVals, cty.StringVal(match))
            }
            if len(matchVals) == 0 {
                return cty.SetValEmpty(cty.String), nil
            }
            return cty.SetVal(matchVals), nil
        },
    })
}
// BasenameFunc constructs a function that takes a string containing a filesystem path
// and removes all except the last portion from it.
var BasenameFunc = function.New(&function.Spec{
    Params: []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
    },
    Type: function.StaticReturnType(cty.String),
    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
        // Pure string manipulation; the filesystem is not consulted.
        return cty.StringVal(filepath.Base(args[0].AsString())), nil
    },
})
// DirnameFunc constructs a function that takes a string containing a filesystem path
// and removes the last portion from it.
var DirnameFunc = function.New(&function.Spec{
    Params: []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
    },
    Type: function.StaticReturnType(cty.String),
    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
        // Pure string manipulation; the filesystem is not consulted.
        return cty.StringVal(filepath.Dir(args[0].AsString())), nil
    },
})
// AbsPathFunc constructs a function that converts a filesystem path to an absolute path
var AbsPathFunc = function.New(&function.Spec{
    Params: []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
    },
    Type: function.StaticReturnType(cty.String),
    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
        absPath, err := filepath.Abs(args[0].AsString())
        // Result uses forward slashes for cross-system compatibility.
        return cty.StringVal(filepath.ToSlash(absPath)), err
    },
})
// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
var PathExpandFunc = function.New(&function.Spec{
    Params: []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
    },
    Type: function.StaticReturnType(cty.String),
    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
        // Paths without a leading ~ pass through unchanged.
        homePath, err := homedir.Expand(args[0].AsString())
        return cty.StringVal(homePath), err
    },
})
// readFileBytes expands a leading ~, resolves path relative to baseDir
// when it is not absolute, and returns the raw file contents with
// Terraform-user-friendly error messages.
func readFileBytes(baseDir, path string) ([]byte, error) {
    expanded, err := homedir.Expand(path)
    if err != nil {
        return nil, fmt.Errorf("failed to expand ~: %s", err)
    }
    if !filepath.IsAbs(expanded) {
        expanded = filepath.Join(baseDir, expanded)
    }
    // Canonicalize the path for the host OS before reading.
    expanded = filepath.Clean(expanded)

    src, err := ioutil.ReadFile(expanded)
    if err != nil {
        // ReadFile's own errors are not Terraform-user-friendly, so we
        // substitute our own messages.
        if os.IsNotExist(err) {
            return nil, fmt.Errorf("no file exists at %s", expanded)
        }
        return nil, fmt.Errorf("failed to read %s", expanded)
    }
    return src, nil
}
// File reads the contents of the file at the given path.
//
// The file must contain valid UTF-8 bytes, or this function will return an error.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func File(baseDir string, path cty.Value) (cty.Value, error) {
    return MakeFileFunc(baseDir, false).Call([]cty.Value{path})
}
// FileExists determines whether a file exists at the given path.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
    return MakeFileExistsFunc(baseDir).Call([]cty.Value{path})
}
// FileSet enumerates a set of files given a glob pattern
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) {
    return MakeFileSetFunc(baseDir).Call([]cty.Value{path, pattern})
}
// FileBase64 reads the contents of the file at the given path.
//
// The bytes from the file are encoded as base64 before returning.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
    return MakeFileFunc(baseDir, true).Call([]cty.Value{path})
}
// Basename takes a string containing a filesystem path and removes all except the last portion from it.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the path is empty then the result is ".", representing the current working directory.
func Basename(path cty.Value) (cty.Value, error) {
    args := []cty.Value{path}
    return BasenameFunc.Call(args)
}
// Dirname takes a string containing a filesystem path and removes the last portion from it.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the path is empty then the result is ".", representing the current working directory.
func Dirname(path cty.Value) (cty.Value, error) {
    args := []cty.Value{path}
    return DirnameFunc.Call(args)
}
// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
// the current user's home directory path.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the leading segment in the path is not `~` then the given path is returned unmodified.
func Pathexpand(path cty.Value) (cty.Value, error) {
    args := []cty.Value{path}
    return PathExpandFunc.Call(args)
}
lang/funcs: Remove homedir.Expand() and refactor path trimming with filepath.Rel() in fileset() function
Reference: https://github.com/hashicorp/terraform/pull/22621#pullrequestreview-282259385
package funcs
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"unicode/utf8"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
homedir "github.com/mitchellh/go-homedir"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
// MakeFileFunc constructs a function that takes a file path and returns the
// contents of that file, either directly as a string (where valid UTF-8 is
// required) or as a string containing base64 bytes.
func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.String),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            path := args[0].AsString()
            src, err := readFileBytes(baseDir, path)
            if err != nil {
                return cty.UnknownVal(cty.String), err
            }
            switch {
            case encBase64:
                enc := base64.StdEncoding.EncodeToString(src)
                return cty.StringVal(enc), nil
            default:
                // Raw string mode requires the file to be valid UTF-8.
                if !utf8.Valid(src) {
                    return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path)
                }
                return cty.StringVal(string(src)), nil
            }
        },
    })
}
// MakeTemplateFileFunc constructs a function that takes a file path and
// an arbitrary object of named values and attempts to render the referenced
// file as a template using HCL template syntax.
//
// The template itself may recursively call other functions so a callback
// must be provided to get access to those functions. The template cannot,
// however, access any variables defined in the scope: it is restricted only to
// those variables provided in the second function argument, to ensure that all
// dependencies on other graph nodes can be seen before executing this function.
//
// As a special exception, a referenced template file may not recursively call
// the templatefile function, since that would risk the same file being
// included into itself indefinitely.
func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {
    params := []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
        {
            Name: "vars",
            Type: cty.DynamicPseudoType,
        },
    }

    // loadTmpl reads and parses the template file named fn.
    loadTmpl := func(fn string) (hcl.Expression, error) {
        // We re-use File here to ensure the same filename interpretation
        // as it does, along with its other safety checks.
        tmplVal, err := File(baseDir, cty.StringVal(fn))
        if err != nil {
            return nil, err
        }
        expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            return nil, diags
        }
        return expr, nil
    }

    // renderTmpl evaluates a parsed template against the given vars value.
    renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
        if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
            return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
        }
        ctx := &hcl.EvalContext{
            Variables: varsVal.AsValueMap(),
        }
        // We'll pre-check references in the template here so we can give a
        // more specialized error message than HCL would by default, so it's
        // clearer that this problem is coming from a templatefile call.
        for _, traversal := range expr.Variables() {
            root := traversal.RootName()
            if _, ok := ctx.Variables[root]; !ok {
                return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
            }
        }
        givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
        funcs := make(map[string]function.Function, len(givenFuncs))
        for name, fn := range givenFuncs {
            if name == "templatefile" {
                // We stub this one out to prevent recursive calls.
                funcs[name] = function.New(&function.Spec{
                    Params: params,
                    Type: func(args []cty.Value) (cty.Type, error) {
                        return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
                    },
                })
                continue
            }
            funcs[name] = fn
        }
        ctx.Functions = funcs
        val, diags := expr.Value(ctx)
        if diags.HasErrors() {
            return cty.DynamicVal, diags
        }
        return val, nil
    }

    return function.New(&function.Spec{
        Params: params,
        Type: func(args []cty.Value) (cty.Type, error) {
            if !(args[0].IsKnown() && args[1].IsKnown()) {
                return cty.DynamicPseudoType, nil
            }
            // We'll render our template now to see what result type it produces.
            // A template consisting only of a single interpolation can potentially
            // return any type.
            expr, err := loadTmpl(args[0].AsString())
            if err != nil {
                return cty.DynamicPseudoType, err
            }
            // This is safe even if args[1] contains unknowns because the HCL
            // template renderer itself knows how to short-circuit those.
            val, err := renderTmpl(expr, args[1])
            return val.Type(), err
        },
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            expr, err := loadTmpl(args[0].AsString())
            if err != nil {
                return cty.DynamicVal, err
            }
            return renderTmpl(expr, args[1])
        },
    })
}
// MakeFileExistsFunc constructs a function that takes a path
// and determines whether a file exists at that path
func MakeFileExistsFunc(baseDir string) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.Bool),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            path := args[0].AsString()
            path, err := homedir.Expand(path)
            if err != nil {
                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
            }
            if !filepath.IsAbs(path) {
                path = filepath.Join(baseDir, path)
            }
            // Ensure that the path is canonical for the host OS
            path = filepath.Clean(path)
            fi, err := os.Stat(path)
            if err != nil {
                if os.IsNotExist(err) {
                    // A missing file is a normal false result, not an error.
                    return cty.False, nil
                }
                return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
            }
            if fi.Mode().IsRegular() {
                return cty.True, nil
            }
            // Directories, symlinks, etc. exist but are reported as an error.
            return cty.False, fmt.Errorf("%s is not a regular file, but %q",
                path, fi.Mode().String())
        },
    })
}
// MakeFileSetFunc constructs a function that takes a glob pattern
// and enumerates a file set from that pattern
func MakeFileSetFunc(baseDir string) function.Function {
    return function.New(&function.Spec{
        Params: []function.Parameter{
            {
                Name: "path",
                Type: cty.String,
            },
            {
                Name: "pattern",
                Type: cty.String,
            },
        },
        Type: function.StaticReturnType(cty.Set(cty.String)),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            path := args[0].AsString()
            pattern := args[1].AsString()
            if !filepath.IsAbs(path) {
                path = filepath.Join(baseDir, path)
            }
            // Join the path to the glob pattern, while ensuring the full
            // pattern is canonical for the host OS. The joined path is
            // automatically cleaned during this operation.
            pattern = filepath.Join(path, pattern)
            matches, err := filepath.Glob(pattern)
            if err != nil {
                return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err)
            }
            var matchVals []cty.Value
            for _, match := range matches {
                fi, err := os.Stat(match)
                if err != nil {
                    return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err)
                }
                // Only regular files are included in the resulting set.
                if !fi.Mode().IsRegular() {
                    continue
                }
                // Remove the path and file separator from matches.
                match, err = filepath.Rel(path, match)
                if err != nil {
                    return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match (%s): %s", match, err)
                }
                // Replace any remaining file separators with forward slash (/)
                // separators for cross-system compatibility.
                match = filepath.ToSlash(match)
                matchVals = append(matchVals, cty.StringVal(match))
            }
            if len(matchVals) == 0 {
                return cty.SetValEmpty(cty.String), nil
            }
            return cty.SetVal(matchVals), nil
        },
    })
}
// BasenameFunc constructs a function that takes a string containing a filesystem path
// and removes all except the last portion from it.
var BasenameFunc = function.New(&function.Spec{
    Params: []function.Parameter{
        {
            Name: "path",
            Type: cty.String,
        },
    },
    Type: function.StaticReturnType(cty.String),
    Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
        // Pure string manipulation; the filesystem is not consulted.
        return cty.StringVal(filepath.Base(args[0].AsString())), nil
    },
})
// DirnameFunc constructs a function that takes a string containing a filesystem path
// and removes the last portion from it.
var DirnameFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "path",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return cty.StringVal(filepath.Dir(args[0].AsString())), nil
},
})
// AbsPathFunc constructs a function that converts a filesystem path to an absolute path
var AbsPathFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "path",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
absPath, err := filepath.Abs(args[0].AsString())
return cty.StringVal(filepath.ToSlash(absPath)), err
},
})
// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
var PathExpandFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "path",
Type: cty.String,
},
},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
homePath, err := homedir.Expand(args[0].AsString())
return cty.StringVal(homePath), err
},
})
func readFileBytes(baseDir, path string) ([]byte, error) {
path, err := homedir.Expand(path)
if err != nil {
return nil, fmt.Errorf("failed to expand ~: %s", err)
}
if !filepath.IsAbs(path) {
path = filepath.Join(baseDir, path)
}
// Ensure that the path is canonical for the host OS
path = filepath.Clean(path)
src, err := ioutil.ReadFile(path)
if err != nil {
// ReadFile does not return Terraform-user-friendly error
// messages, so we'll provide our own.
if os.IsNotExist(err) {
return nil, fmt.Errorf("no file exists at %s", path)
}
return nil, fmt.Errorf("failed to read %s", path)
}
return src, nil
}
// File reads the contents of the file at the given path.
//
// The file must contain valid UTF-8 bytes, or this function will return an error.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func File(baseDir string, path cty.Value) (cty.Value, error) {
fn := MakeFileFunc(baseDir, false)
return fn.Call([]cty.Value{path})
}
// FileExists determines whether a file exists at the given path.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
fn := MakeFileExistsFunc(baseDir)
return fn.Call([]cty.Value{path})
}
// FileSet enumerates a set of files given a glob pattern
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) {
fn := MakeFileSetFunc(baseDir)
return fn.Call([]cty.Value{path, pattern})
}
// FileBase64 reads the contents of the file at the given path.
//
// The bytes from the file are encoded as base64 before returning.
//
// The underlying function implementation works relative to a particular base
// directory, so this wrapper takes a base directory string and uses it to
// construct the underlying function before calling it.
func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
fn := MakeFileFunc(baseDir, true)
return fn.Call([]cty.Value{path})
}
// Basename takes a string containing a filesystem path and removes all except the last portion from it.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the path is empty then the result is ".", representing the current working directory.
func Basename(path cty.Value) (cty.Value, error) {
return BasenameFunc.Call([]cty.Value{path})
}
// Dirname takes a string containing a filesystem path and removes the last portion from it.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the path is empty then the result is ".", representing the current working directory.
func Dirname(path cty.Value) (cty.Value, error) {
return DirnameFunc.Call([]cty.Value{path})
}
// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
// the current user's home directory path.
//
// The underlying function implementation works only with the path string and does not access the filesystem itself.
// It is therefore unable to take into account filesystem features such as symlinks.
//
// If the leading segment in the path is not `~` then the given path is returned unmodified.
func Pathexpand(path cty.Value) (cty.Value, error) {
return PathExpandFunc.Call([]cty.Value{path})
}
|
package main
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
"github.com/coreos/fleet/machine"
"github.com/coreos/fleet/schema"
)
const (
defaultListUnitFilesFields = "unit,hash,dstate,state,tmachine"
)
var (
listUnitFilesFieldsFlag string
cmdListUnitFiles = &Command{
Name: "list-unit-files",
Summary: "List the units that exist in the cluster.",
Usage: "[--fields]",
Description: `Lists all unit files that exist in the cluster (whether or not they are loaded onto a machine).`,
Run: runListUnitFiles,
}
listUnitFilesFields = map[string]unitToField{
"unit": func(u schema.Unit, full bool) string {
return u.Name
},
"global": func(u schema.Unit, full bool) string {
return strconv.FormatBool(suToGlobal(u))
},
"dstate": func(u schema.Unit, full bool) string {
if u.DesiredState == "" {
return "-"
}
return u.DesiredState
},
"tmachine": func(u schema.Unit, full bool) string {
if suToGlobal(u) || u.MachineID == "" {
return "-"
}
ms := cachedMachineState(u.MachineID)
if ms == nil {
ms = &machine.MachineState{ID: u.MachineID}
}
return machineFullLegend(*ms, full)
},
"state": func(u schema.Unit, full bool) string {
if suToGlobal(u) || u.CurrentState == "" {
return "-"
}
return u.CurrentState
},
"hash": func(u schema.Unit, full bool) string {
uf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)
if !full {
return uf.Hash().Short()
}
return uf.Hash().String()
},
"desc": func(u schema.Unit, full bool) string {
uf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)
d := uf.Description()
if d == "" {
return "-"
}
return d
},
}
)
type unitToField func(u schema.Unit, full bool) string
func init() {
cmdListUnitFiles.Flags.BoolVar(&sharedFlags.Full, "full", false, "Do not ellipsize fields on output")
cmdListUnitFiles.Flags.BoolVar(&sharedFlags.NoLegend, "no-legend", false, "Do not print a legend (column headers)")
cmdListUnitFiles.Flags.StringVar(&listUnitFilesFieldsFlag, "fields", defaultListUnitFilesFields, fmt.Sprintf("Columns to print for each Unit file. Valid fields are %q", strings.Join(unitToFieldKeys(listUnitFilesFields), ",")))
}
func runListUnitFiles(args []string) (exit int) {
if listUnitFilesFieldsFlag == "" {
fmt.Fprintf(os.Stderr, "Must define output format\n")
return 1
}
cols := strings.Split(listUnitFilesFieldsFlag, ",")
for _, s := range cols {
if _, ok := listUnitFilesFields[s]; !ok {
fmt.Fprintf(os.Stderr, "Invalid key in output format: %q\n", s)
return 1
}
}
units, err := cAPI.Units()
if err != nil {
fmt.Fprintf(os.Stderr, "Error retrieving list of units from repository: %v\n", err)
return 1
}
if !sharedFlags.NoLegend {
fmt.Fprintln(out, strings.ToUpper(strings.Join(cols, "\t")))
}
for _, u := range units {
var f []string
for _, c := range cols {
f = append(f, listUnitFilesFields[c](*u, sharedFlags.Full))
}
fmt.Fprintln(out, strings.Join(f, "\t"))
}
out.Flush()
return
}
func unitToFieldKeys(m map[string]unitToField) (keys []string) {
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return
}
fleetctl: rename TMACHINE to TARGET
package main
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
"github.com/coreos/fleet/machine"
"github.com/coreos/fleet/schema"
)
const (
defaultListUnitFilesFields = "unit,hash,dstate,state,target"
)
func mapTargetField(u schema.Unit, full bool) string {
if suToGlobal(u) || u.MachineID == "" {
return "-"
}
ms := cachedMachineState(u.MachineID)
if ms == nil {
ms = &machine.MachineState{ID: u.MachineID}
}
return machineFullLegend(*ms, full)
}
var (
listUnitFilesFieldsFlag string
cmdListUnitFiles = &Command{
Name: "list-unit-files",
Summary: "List the units that exist in the cluster.",
Usage: "[--fields]",
Description: `Lists all unit files that exist in the cluster (whether or not they are loaded onto a machine).`,
Run: runListUnitFiles,
}
listUnitFilesFields = map[string]unitToField{
"unit": func(u schema.Unit, full bool) string {
return u.Name
},
"global": func(u schema.Unit, full bool) string {
return strconv.FormatBool(suToGlobal(u))
},
"dstate": func(u schema.Unit, full bool) string {
if u.DesiredState == "" {
return "-"
}
return u.DesiredState
},
"target": mapTargetField,
"tmachine": mapTargetField,
"state": func(u schema.Unit, full bool) string {
if suToGlobal(u) || u.CurrentState == "" {
return "-"
}
return u.CurrentState
},
"hash": func(u schema.Unit, full bool) string {
uf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)
if !full {
return uf.Hash().Short()
}
return uf.Hash().String()
},
"desc": func(u schema.Unit, full bool) string {
uf := schema.MapSchemaUnitOptionsToUnitFile(u.Options)
d := uf.Description()
if d == "" {
return "-"
}
return d
},
}
)
type unitToField func(u schema.Unit, full bool) string
func init() {
cmdListUnitFiles.Flags.BoolVar(&sharedFlags.Full, "full", false, "Do not ellipsize fields on output")
cmdListUnitFiles.Flags.BoolVar(&sharedFlags.NoLegend, "no-legend", false, "Do not print a legend (column headers)")
cmdListUnitFiles.Flags.StringVar(&listUnitFilesFieldsFlag, "fields", defaultListUnitFilesFields, fmt.Sprintf("Columns to print for each Unit file. Valid fields are %q", strings.Join(unitToFieldKeys(listUnitFilesFields), ",")))
}
func runListUnitFiles(args []string) (exit int) {
if listUnitFilesFieldsFlag == "" {
fmt.Fprintf(os.Stderr, "Must define output format\n")
return 1
}
cols := strings.Split(listUnitFilesFieldsFlag, ",")
for _, s := range cols {
if _, ok := listUnitFilesFields[s]; !ok {
fmt.Fprintf(os.Stderr, "Invalid key in output format: %q\n", s)
return 1
}
if s == "tmachine" {
fmt.Fprintln(os.Stderr, "WARNING: The \"tmachine\" field is deprecated. Use \"target\" instead")
}
}
units, err := cAPI.Units()
if err != nil {
fmt.Fprintf(os.Stderr, "Error retrieving list of units from repository: %v\n", err)
return 1
}
if !sharedFlags.NoLegend {
fmt.Fprintln(out, strings.ToUpper(strings.Join(cols, "\t")))
}
for _, u := range units {
var f []string
for _, c := range cols {
f = append(f, listUnitFilesFields[c](*u, sharedFlags.Full))
}
fmt.Fprintln(out, strings.Join(f, "\t"))
}
out.Flush()
return
}
func unitToFieldKeys(m map[string]unitToField) (keys []string) {
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return
}
|
package namecheap
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
var (
fakeUser = "foo"
fakeKey = "bar"
fakeClientIP = "10.0.0.1"
tlds = map[string]string{
"com.au": "com.au",
"com": "com",
"co.uk": "co.uk",
"uk": "uk",
"edu": "edu",
"co.com": "co.com",
"za.com": "za.com",
}
)
func assertEq(t *testing.T, variable, got, want string) {
if got != want {
t.Errorf("Expected %s to be '%s' but got '%s'", variable, want, got)
}
}
func assertHdr(tc *testcase, t *testing.T, values *url.Values) {
ch, _ := newChallenge(tc.domain, "", tlds)
assertEq(t, "ApiUser", values.Get("ApiUser"), fakeUser)
assertEq(t, "ApiKey", values.Get("ApiKey"), fakeKey)
assertEq(t, "UserName", values.Get("UserName"), fakeUser)
assertEq(t, "ClientIp", values.Get("ClientIp"), fakeClientIP)
assertEq(t, "SLD", values.Get("SLD"), ch.sld)
assertEq(t, "TLD", values.Get("TLD"), ch.tld)
}
func mockServer(tc *testcase, t *testing.T, w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
values := r.URL.Query()
cmd := values.Get("Command")
switch cmd {
case "namecheap.domains.dns.getHosts":
assertHdr(tc, t, &values)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, tc.getHostsResponse)
case "namecheap.domains.getTldList":
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, responseGetTlds)
default:
t.Errorf("Unexpected GET command: %s", cmd)
}
case "POST":
r.ParseForm()
values := r.Form
cmd := values.Get("Command")
switch cmd {
case "namecheap.domains.dns.setHosts":
assertHdr(tc, t, &values)
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, tc.setHostsResponse)
default:
t.Errorf("Unexpected POST command: %s", cmd)
}
default:
t.Errorf("Unexpected http method: %s", r.Method)
}
}
func testGetHosts(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := &DNSProvider{
baseURL: mock.URL,
apiUser: fakeUser,
apiKey: fakeKey,
clientIP: fakeClientIP,
}
ch, _ := newChallenge(tc.domain, "", tlds)
hosts, err := prov.getHosts(ch)
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap getHosts case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
}
}
next1:
for _, h := range hosts {
for _, th := range tc.hosts {
if h == th {
continue next1
}
}
t.Errorf("getHosts case %s unexpected record [%s:%s:%s]",
tc.name, h.Type, h.Name, h.Address)
}
next2:
for _, th := range tc.hosts {
for _, h := range hosts {
if h == th {
continue next2
}
}
t.Errorf("getHosts case %s missing record [%s:%s:%s]",
tc.name, th.Type, th.Name, th.Address)
}
}
func mockDNSProvider(url string) *DNSProvider {
return &DNSProvider{
baseURL: url,
apiUser: fakeUser,
apiKey: fakeKey,
clientIP: fakeClientIP,
}
}
func testSetHosts(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
ch, _ := newChallenge(tc.domain, "", tlds)
hosts, err := prov.getHosts(ch)
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap getHosts case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
}
}
if err != nil {
return
}
err = prov.setHosts(ch, hosts)
if err != nil {
t.Errorf("Namecheap setHosts case %s failed", tc.name)
}
}
func testPresent(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
err := prov.Present(tc.domain, "", "dummyKey")
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap Present case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap Present case %s failed\n%v", tc.name, err)
}
}
}
func testCleanUp(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
err := prov.CleanUp(tc.domain, "", "dummyKey")
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap CleanUp case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap CleanUp case %s failed\n%v", tc.name, err)
}
}
}
func TestNamecheap(t *testing.T) {
for _, tc := range testcases {
testGetHosts(&tc, t)
testSetHosts(&tc, t)
testPresent(&tc, t)
testCleanUp(&tc, t)
}
}
func TestNamecheapDomainSplit(t *testing.T) {
tests := []struct {
domain string
valid bool
tld string
sld string
host string
}{
{"a.b.c.test.co.uk", true, "co.uk", "test", "a.b.c"},
{"test.co.uk", true, "co.uk", "test", ""},
{"test.com", true, "com", "test", ""},
{"test.co.com", true, "co.com", "test", ""},
{"www.test.com.au", true, "com.au", "test", "www"},
{"www.za.com", true, "za.com", "www", ""},
{"", false, "", "", ""},
{"a", false, "", "", ""},
{"com", false, "", "", ""},
{"co.com", false, "", "", ""},
{"co.uk", false, "", "", ""},
{"test.au", false, "", "", ""},
{"za.com", false, "", "", ""},
{"www.za", false, "", "", ""},
{"www.test.au", false, "", "", ""},
{"www.test.unk", false, "", "", ""},
}
for _, test := range tests {
valid := true
ch, err := newChallenge(test.domain, "", tlds)
if err != nil {
valid = false
}
if test.valid && !valid {
t.Errorf("Expected '%s' to split", test.domain)
} else if !test.valid && valid {
t.Errorf("Expected '%s' to produce error", test.domain)
}
if test.valid && valid {
assertEq(t, "domain", ch.domain, test.domain)
assertEq(t, "tld", ch.tld, test.tld)
assertEq(t, "sld", ch.sld, test.sld)
assertEq(t, "host", ch.host, test.host)
}
}
}
type testcase struct {
name string
domain string
hosts []host
errString string
getHostsResponse string
setHostsResponse string
}
var testcases = []testcase{
{
"Test:Success:1",
"test.example.com",
[]host{
{"A", "home", "10.0.0.1", "10", "1799"},
{"A", "www", "10.0.0.2", "10", "1200"},
{"AAAA", "a", "::0", "10", "1799"},
{"CNAME", "*", "example.com.", "10", "1799"},
{"MXE", "example.com", "10.0.0.5", "10", "1800"},
{"URL", "xyz", "https://google.com", "10", "1799"},
},
"",
responseGetHostsSuccess1,
responseSetHostsSuccess1,
},
{
"Test:Success:2",
"example.com",
[]host{
{"A", "@", "10.0.0.2", "10", "1200"},
{"A", "www", "10.0.0.3", "10", "60"},
},
"",
responseGetHostsSuccess2,
responseSetHostsSuccess2,
},
{
"Test:Error:BadApiKey:1",
"test.example.com",
nil,
"Namecheap error: API Key is invalid or API access has not been enabled [1011102]",
responseGetHostsErrorBadApiKey1,
"",
},
}
var responseGetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.getHosts">
<DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
<host HostId="217076" Name="www" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217069" Name="home" Type="A" Address="10.0.0.1" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217071" Name="a" Type="AAAA" Address="::0" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217075" Name="*" Type="CNAME" Address="example.com." MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217073" Name="example.com" Type="MXE" Address="10.0.0.5" MXPref="10" TTL="1800" AssociatedAppTitle="MXE" FriendlyName="MXE1" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217077" Name="xyz" Type="URL" Address="https://google.com" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
</DomainDNSGetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>3.338</ExecutionTime>
</ApiResponse>`
var responseSetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.setHosts">
<DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
<Warnings />
</DomainDNSSetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>2.347</ExecutionTime>
</ApiResponse>`
var responseGetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.getHosts">
<DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
<host HostId="217076" Name="@" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217069" Name="www" Type="A" Address="10.0.0.3" MXPref="10" TTL="60" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
</DomainDNSGetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>3.338</ExecutionTime>
</ApiResponse>`
var responseSetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.setHosts">
<DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
<Warnings />
</DomainDNSSetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>2.347</ExecutionTime>
</ApiResponse>`
var responseGetHostsErrorBadAPIKey1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="ERROR" xmlns="http://api.namecheap.com/xml.response">
<Errors>
<Error Number="1011102">API Key is invalid or API access has not been enabled</Error>
</Errors>
<Warnings />
<RequestedCommand />
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>0</ExecutionTime>
</ApiResponse>`
var responseGetTlds = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.getTldList</RequestedCommand>
<CommandResponse Type="namecheap.domains.getTldList">
<Tlds>
<Tld Name="com" NonRealTime="false" MinRegisterYears="1" MaxRegisterYears="10" MinRenewYears="1" MaxRenewYears="10" RenewalMinDays="0" RenewalMaxDays="4000" ReactivateMaxDays="27" MinTransferYears="1" MaxTransferYears="1" IsApiRegisterable="true" IsApiRenewable="true" IsApiTransferable="true" IsEppRequired="true" IsDisableModContact="false" IsDisableWGAllot="false" IsIncludeInExtendedSearchOnly="false" SequenceNumber="10" Type="GTLD" SubType="" IsSupportsIDN="true" Category="A" SupportsRegistrarLock="true" AddGracePeriodDays="5" WhoisVerification="false" ProviderApiDelete="true" TldState="" SearchGroup="" Registry="">Most recognized top level domain<Categories><TldCategory Name="popular" SequenceNumber="10" /></Categories></Tld>
</Tlds>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>0.004</ExecutionTime>
</ApiResponse>`
Fix namecheap test
package namecheap
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
var (
fakeUser = "foo"
fakeKey = "bar"
fakeClientIP = "10.0.0.1"
tlds = map[string]string{
"com.au": "com.au",
"com": "com",
"co.uk": "co.uk",
"uk": "uk",
"edu": "edu",
"co.com": "co.com",
"za.com": "za.com",
}
)
func assertEq(t *testing.T, variable, got, want string) {
if got != want {
t.Errorf("Expected %s to be '%s' but got '%s'", variable, want, got)
}
}
func assertHdr(tc *testcase, t *testing.T, values *url.Values) {
ch, _ := newChallenge(tc.domain, "", tlds)
assertEq(t, "ApiUser", values.Get("ApiUser"), fakeUser)
assertEq(t, "ApiKey", values.Get("ApiKey"), fakeKey)
assertEq(t, "UserName", values.Get("UserName"), fakeUser)
assertEq(t, "ClientIp", values.Get("ClientIp"), fakeClientIP)
assertEq(t, "SLD", values.Get("SLD"), ch.sld)
assertEq(t, "TLD", values.Get("TLD"), ch.tld)
}
func mockServer(tc *testcase, t *testing.T, w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
values := r.URL.Query()
cmd := values.Get("Command")
switch cmd {
case "namecheap.domains.dns.getHosts":
assertHdr(tc, t, &values)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, tc.getHostsResponse)
case "namecheap.domains.getTldList":
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, responseGetTlds)
default:
t.Errorf("Unexpected GET command: %s", cmd)
}
case "POST":
r.ParseForm()
values := r.Form
cmd := values.Get("Command")
switch cmd {
case "namecheap.domains.dns.setHosts":
assertHdr(tc, t, &values)
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, tc.setHostsResponse)
default:
t.Errorf("Unexpected POST command: %s", cmd)
}
default:
t.Errorf("Unexpected http method: %s", r.Method)
}
}
func testGetHosts(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := &DNSProvider{
baseURL: mock.URL,
apiUser: fakeUser,
apiKey: fakeKey,
clientIP: fakeClientIP,
}
ch, _ := newChallenge(tc.domain, "", tlds)
hosts, err := prov.getHosts(ch)
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap getHosts case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
}
}
next1:
for _, h := range hosts {
for _, th := range tc.hosts {
if h == th {
continue next1
}
}
t.Errorf("getHosts case %s unexpected record [%s:%s:%s]",
tc.name, h.Type, h.Name, h.Address)
}
next2:
for _, th := range tc.hosts {
for _, h := range hosts {
if h == th {
continue next2
}
}
t.Errorf("getHosts case %s missing record [%s:%s:%s]",
tc.name, th.Type, th.Name, th.Address)
}
}
func mockDNSProvider(url string) *DNSProvider {
return &DNSProvider{
baseURL: url,
apiUser: fakeUser,
apiKey: fakeKey,
clientIP: fakeClientIP,
}
}
func testSetHosts(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
ch, _ := newChallenge(tc.domain, "", tlds)
hosts, err := prov.getHosts(ch)
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap getHosts case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap getHosts case %s failed\n%v", tc.name, err)
}
}
if err != nil {
return
}
err = prov.setHosts(ch, hosts)
if err != nil {
t.Errorf("Namecheap setHosts case %s failed", tc.name)
}
}
func testPresent(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
err := prov.Present(tc.domain, "", "dummyKey")
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap Present case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap Present case %s failed\n%v", tc.name, err)
}
}
}
func testCleanUp(tc *testcase, t *testing.T) {
mock := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
mockServer(tc, t, w, r)
}))
defer mock.Close()
prov := mockDNSProvider(mock.URL)
err := prov.CleanUp(tc.domain, "", "dummyKey")
if tc.errString != "" {
if err == nil || err.Error() != tc.errString {
t.Errorf("Namecheap CleanUp case %s expected error", tc.name)
}
} else {
if err != nil {
t.Errorf("Namecheap CleanUp case %s failed\n%v", tc.name, err)
}
}
}
func TestNamecheap(t *testing.T) {
for _, tc := range testcases {
testGetHosts(&tc, t)
testSetHosts(&tc, t)
testPresent(&tc, t)
testCleanUp(&tc, t)
}
}
func TestNamecheapDomainSplit(t *testing.T) {
tests := []struct {
domain string
valid bool
tld string
sld string
host string
}{
{"a.b.c.test.co.uk", true, "co.uk", "test", "a.b.c"},
{"test.co.uk", true, "co.uk", "test", ""},
{"test.com", true, "com", "test", ""},
{"test.co.com", true, "co.com", "test", ""},
{"www.test.com.au", true, "com.au", "test", "www"},
{"www.za.com", true, "za.com", "www", ""},
{"", false, "", "", ""},
{"a", false, "", "", ""},
{"com", false, "", "", ""},
{"co.com", false, "", "", ""},
{"co.uk", false, "", "", ""},
{"test.au", false, "", "", ""},
{"za.com", false, "", "", ""},
{"www.za", false, "", "", ""},
{"www.test.au", false, "", "", ""},
{"www.test.unk", false, "", "", ""},
}
for _, test := range tests {
valid := true
ch, err := newChallenge(test.domain, "", tlds)
if err != nil {
valid = false
}
if test.valid && !valid {
t.Errorf("Expected '%s' to split", test.domain)
} else if !test.valid && valid {
t.Errorf("Expected '%s' to produce error", test.domain)
}
if test.valid && valid {
assertEq(t, "domain", ch.domain, test.domain)
assertEq(t, "tld", ch.tld, test.tld)
assertEq(t, "sld", ch.sld, test.sld)
assertEq(t, "host", ch.host, test.host)
}
}
}
type testcase struct {
name string
domain string
hosts []host
errString string
getHostsResponse string
setHostsResponse string
}
var testcases = []testcase{
{
"Test:Success:1",
"test.example.com",
[]host{
{"A", "home", "10.0.0.1", "10", "1799"},
{"A", "www", "10.0.0.2", "10", "1200"},
{"AAAA", "a", "::0", "10", "1799"},
{"CNAME", "*", "example.com.", "10", "1799"},
{"MXE", "example.com", "10.0.0.5", "10", "1800"},
{"URL", "xyz", "https://google.com", "10", "1799"},
},
"",
responseGetHostsSuccess1,
responseSetHostsSuccess1,
},
{
"Test:Success:2",
"example.com",
[]host{
{"A", "@", "10.0.0.2", "10", "1200"},
{"A", "www", "10.0.0.3", "10", "60"},
},
"",
responseGetHostsSuccess2,
responseSetHostsSuccess2,
},
{
"Test:Error:BadApiKey:1",
"test.example.com",
nil,
"Namecheap error: API Key is invalid or API access has not been enabled [1011102]",
responseGetHostsErrorBadAPIKey1,
"",
},
}
var responseGetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.getHosts">
<DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
<host HostId="217076" Name="www" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217069" Name="home" Type="A" Address="10.0.0.1" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217071" Name="a" Type="AAAA" Address="::0" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217075" Name="*" Type="CNAME" Address="example.com." MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217073" Name="example.com" Type="MXE" Address="10.0.0.5" MXPref="10" TTL="1800" AssociatedAppTitle="MXE" FriendlyName="MXE1" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217077" Name="xyz" Type="URL" Address="https://google.com" MXPref="10" TTL="1799" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
</DomainDNSGetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>3.338</ExecutionTime>
</ApiResponse>`
var responseSetHostsSuccess1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.setHosts">
<DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
<Warnings />
</DomainDNSSetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>2.347</ExecutionTime>
</ApiResponse>`
var responseGetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.getHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.getHosts">
<DomainDNSGetHostsResult Domain="example.com" EmailType="MXE" IsUsingOurDNS="true">
<host HostId="217076" Name="@" Type="A" Address="10.0.0.2" MXPref="10" TTL="1200" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
<host HostId="217069" Name="www" Type="A" Address="10.0.0.3" MXPref="10" TTL="60" AssociatedAppTitle="" FriendlyName="" IsActive="true" IsDDNSEnabled="false" />
</DomainDNSGetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>3.338</ExecutionTime>
</ApiResponse>`
var responseSetHostsSuccess2 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.dns.setHosts</RequestedCommand>
<CommandResponse Type="namecheap.domains.dns.setHosts">
<DomainDNSSetHostsResult Domain="example.com" IsSuccess="true">
<Warnings />
</DomainDNSSetHostsResult>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>2.347</ExecutionTime>
</ApiResponse>`
var responseGetHostsErrorBadAPIKey1 = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="ERROR" xmlns="http://api.namecheap.com/xml.response">
<Errors>
<Error Number="1011102">API Key is invalid or API access has not been enabled</Error>
</Errors>
<Warnings />
<RequestedCommand />
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>0</ExecutionTime>
</ApiResponse>`
var responseGetTlds = `<?xml version="1.0" encoding="utf-8"?>
<ApiResponse Status="OK" xmlns="http://api.namecheap.com/xml.response">
<Errors />
<Warnings />
<RequestedCommand>namecheap.domains.getTldList</RequestedCommand>
<CommandResponse Type="namecheap.domains.getTldList">
<Tlds>
<Tld Name="com" NonRealTime="false" MinRegisterYears="1" MaxRegisterYears="10" MinRenewYears="1" MaxRenewYears="10" RenewalMinDays="0" RenewalMaxDays="4000" ReactivateMaxDays="27" MinTransferYears="1" MaxTransferYears="1" IsApiRegisterable="true" IsApiRenewable="true" IsApiTransferable="true" IsEppRequired="true" IsDisableModContact="false" IsDisableWGAllot="false" IsIncludeInExtendedSearchOnly="false" SequenceNumber="10" Type="GTLD" SubType="" IsSupportsIDN="true" Category="A" SupportsRegistrarLock="true" AddGracePeriodDays="5" WhoisVerification="false" ProviderApiDelete="true" TldState="" SearchGroup="" Registry="">Most recognized top level domain<Categories><TldCategory Name="popular" SequenceNumber="10" /></Categories></Tld>
</Tlds>
</CommandResponse>
<Server>PHX01SBAPI01</Server>
<GMTTimeDifference>--5:00</GMTTimeDifference>
<ExecutionTime>0.004</ExecutionTime>
</ApiResponse>`
|
/*
* general_test.go
*
* Copyright 2012 Raul Mera <rmera{at}chemDOThelsinkiDOTfi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
*
*/
/*This provides some tests for the library, in the form of little functions
* that have practical applications*/
package chem
import "github.com/skelterjohn/go.matrix"
import "fmt"
import "testing"
import "strings"
import "strconv"
//TestChangeAxis reads the PDB 2c9v.pdb from the test directory, collects
//the CA and CB of residue D124, and rotates the whole molecule so the vector
//defined by these 2 atoms is aligned with the Z axis. The new molecule is
//written as 2c9v-aligned.pdb to the test folder.
func TestChangeAxis(Te *testing.T) {
	var mol Molecule
	ats, coords, bfac, err := PdbRead("test/2c9v.pdb", true)
	if err != nil {
		Te.Error(err)
	}
	mol.Atoms = ats
	mol.Coords = coords
	mol.Bfactors = bfac
	// Locate the CA and CB atoms of residue 124 in chain A.
	orient_atoms := [2]int{0, 0}
	for index, atom := range mol.Atoms {
		if atom.Chain == 'A' && atom.Molid == 124 {
			if atom.Name == "CA" {
				orient_atoms[0] = index
			} else if atom.Name == "CB" {
				orient_atoms[1] = index
			}
		}
	}
	//now we center the thing in the beta carbon of D124
	ov2 := mol.Coord(orient_atoms[1], 0)
	err = SubRow(mol.Coords[0], ov2)
	if err != nil { // BUG FIX: this error was previously dropped and overwritten
		Te.Error(err)
	}
	//Now the rotation
	ov1 := mol.Coord(orient_atoms[0], 0) //make sure we have the correct versions
	ov2 = mol.Coord(orient_atoms[1], 0)  //same
	orient := ov2.Copy()
	orient.SubtractDense(ov1)
	rotation, err := GetSwitchZ(mol.Coords[0], orient)
	if err != nil {
		Te.Error(err)
	}
	fmt.Println("rotation: ", rotation)
	mol.Coords[0] = matrix.ParallelProduct(mol.Coords[0], rotation)
	fmt.Println(orient_atoms[1], mol.Atoms[orient_atoms[1]], mol.Atoms[orient_atoms[0]])
	// NOTE(review): PdbWrite's return value (if any) is ignored — confirm signature.
	PdbWrite(&mol, "test/2c9v-aligned.pdb")
}
//TestGeo opens the sample.xyz file in the test directory, and pulls a number of hardcoded atoms
//in the direction of a hardcoded vector. It builds 12 files with the pulled atoms displaced by
//different amounts along the pulling vector.
func TestGeo(Te *testing.T) {
	// Atom indices to displace, and the two atoms defining the pulling direction.
	pulled_atoms:=[7]int{43,41,42,40,85,86,87}
	pulling_vector:=[2]int{40,88}
	var mol Molecule
	a,b,err:=XyzRead("test/sample.xyz")
	if err!=nil{
		Te.Error(err)
	}
	mol.Atoms=a
	mol.Coords=b
	pulled_res,err:=mol.GetCoords(pulled_atoms[:], 0)
	if err!=nil{
		Te.Error(err)
	}
	// vector = unit(coord(atom 88) - coord(atom 40)): the pulling direction.
	at1:=mol.Coord(pulling_vector[0],0)
	vector:=mol.Coord(pulling_vector[1],0)
	vector=vector.Copy() // copy so the subtraction does not alter the molecule's coordinates
	err=vector.SubtractDense(at1)
	if err!=nil{
		Te.Error(err)
	}
	vector=Unitarize(vector)
	// Displace the selected atoms by each scale factor and write one xyz file per factor.
	var scale_factors = [12]float64{-1.0, -2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0}
	for _,scaling:=range(scale_factors){
		vec:=vector.Copy()
		pulled:=pulled_res.Copy()
		vec.Scale(scaling)
		err=AddRow(pulled,vec)
		if err!=nil{
			Te.Error(err)
		}
		mol.SetCoords(pulled_atoms[:], 0, pulled)
		err=mol.Corrupted() // sanity-check the molecule after editing coordinates
		if err!=nil{
			Te.Error(err)
		}
		XyzWrite(&mol, 0, strings.Replace("test/sample_xxxx.xyz","xxxx",strconv.FormatFloat(scaling, 'f', 1, 64),1)) //There might be an easier way of creating the filenames
	}
	//fmt.Println(mol.Atoms,mol.Coords,err,pulled,vector,vector.TwoNorm())
}
git problems
/*
* general_test.go
*
* Copyright 2012 Raul Mera <rmera{at}chemDOThelsinkiDOTfi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
*
*/
/*This provides some tests for the library, in the form of little functions
* that have practical applications*/
package chem
import "github.com/skelterjohn/go.matrix"
import "fmt"
import "testing"
import "strings"
import "strconv"
//TestChangeAxis reads the PDB 2c9v.pdb from the test directory, collects
//the CA and CB of residue D124, and rotates the whole molecule so the vector
//defined by these 2 atoms is aligned with the Z axis. The new molecule is
//written as 2c9v-aligned.pdb to the test folder.
func TestChangeAxis(Te *testing.T) {
	var mol Molecule
	ats, coords, bfac, err := PdbRead("test/2c9v.pdb", true)
	if err != nil {
		Te.Error(err)
	}
	mol.Atoms = ats
	mol.Coords = coords
	mol.Bfactors = bfac
	// Locate the CA and CB atoms of residue 124 in chain A.
	orient_atoms := [2]int{0, 0}
	for index, atom := range mol.Atoms {
		if atom.Chain == 'A' && atom.Molid == 124 {
			if atom.Name == "CA" {
				orient_atoms[0] = index
			} else if atom.Name == "CB" {
				orient_atoms[1] = index
			}
		}
	}
	//now we center the thing in the beta carbon of D124
	ov2 := mol.Coord(orient_atoms[1], 0)
	err = SubRow(mol.Coords[0], ov2)
	if err != nil { // BUG FIX: this error was previously dropped and overwritten
		Te.Error(err)
	}
	//Now the rotation
	ov1 := mol.Coord(orient_atoms[0], 0) //make sure we have the correct versions
	ov2 = mol.Coord(orient_atoms[1], 0)  //same
	orient := ov2.Copy()
	orient.SubtractDense(ov1)
	rotation, err := GetSwitchZ(mol.Coords[0], orient)
	if err != nil {
		Te.Error(err)
	}
	fmt.Println("rotation: ", rotation)
	mol.Coords[0] = matrix.ParallelProduct(mol.Coords[0], rotation)
	fmt.Println(orient_atoms[1], mol.Atoms[orient_atoms[1]], mol.Atoms[orient_atoms[0]])
	// NOTE(review): PdbWrite's return value (if any) is ignored — confirm signature.
	PdbWrite(&mol, "test/2c9v-aligned.pdb")
}
//TestGeo opens the sample.xyz file in the test directory, and pulls a number of hardcoded atoms
//in the direction of a hardcoded vector. It builds 12 files with the pulled atoms displaced by
//different amounts along the pulling vector.
func TestGeo(Te *testing.T) {
	// Atom indices to displace, and the two atoms defining the pulling direction.
	pulled_atoms:=[7]int{43,41,42,40,85,86,87}
	pulling_vector:=[2]int{40,88}
	var mol Molecule
	a,b,err:=XyzRead("test/sample.xyz")
	if err!=nil{
		Te.Error(err)
	}
	mol.Atoms=a
	mol.Coords=b
	pulled_res,err:=mol.GetCoords(pulled_atoms[:], 0)
	if err!=nil{
		Te.Error(err)
	}
	// vector = unit(coord(atom 88) - coord(atom 40)): the pulling direction.
	at1:=mol.Coord(pulling_vector[0],0)
	vector:=mol.Coord(pulling_vector[1],0)
	vector=vector.Copy() // copy so the subtraction does not alter the molecule's coordinates
	err=vector.SubtractDense(at1)
	if err!=nil{
		Te.Error(err)
	}
	vector=Unitarize(vector)
	// Displace the selected atoms by each scale factor and write one xyz file per factor.
	var scale_factors = [12]float64{-1.0, -2.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0}
	for _,scaling:=range(scale_factors){
		vec:=vector.Copy()
		pulled:=pulled_res.Copy()
		vec.Scale(scaling)
		err=AddRow(pulled,vec)
		if err!=nil{
			Te.Error(err)
		}
		mol.SetCoords(pulled_atoms[:], 0, pulled)
		err=mol.Corrupted() // sanity-check the molecule after editing coordinates
		if err!=nil{
			Te.Error(err)
		}
		XyzWrite(&mol, 0, strings.Replace("test/sample_xxxx.xyz","xxxx",strconv.FormatFloat(scaling, 'f', 1, 64),1)) //There might be an easier way of creating the filenames
	}
	//fmt.Println(mol.Atoms,mol.Coords,err,pulled,vector,vector.TwoNorm())
}
|
package model
import "time"
type ApiAiRequest struct {
Id string `json:"id"`
Timestamp time.Time `json:"timestamp"`
Result struct {
Source string `json:"source"`
ResolvedQuery string `json:"resolvedQuery"`
Speech string `json:"speech"`
Action string `json:"action"`
ActionIncomplete bool `json:"actionIncomplete"`
Parameters struct {
Parameters map[string]string
} `json:"parameters"`
Contexts []struct {
Name string `json:"name"`
Parameters struct {
Parameters map[string]string
} `json:"parameters"`
Lifespan int `json:"lifespan"`
} `json:"contexts"`
Metadata struct {
IntentID string `json:"intentId"`
WebhookUsed string `json:"webhookUsed"`
WebhookForSlotFillingUsed string `json:"webhookForSlotFillingUsed"`
IntentName string `json:"intentName"`
} `json:"metadata"`
Fulfillment struct {
Speech string `json:"speech"`
Messages []struct {
Type int `json:"type"`
Speech string `json:"speech"`
} `json:"messages"`
} `json:"fulfillment"`
Score float64 `json:"score"`
} `json:"result"`
Status struct {
Code int `json:"code"`
ErrorType string `json:"errorType"`
} `json:"status"`
SessionID string `json:"sessionId"`
OriginalRequest struct {
Source string `json:"source"`
Data struct {
Inputs []struct {
Arguments []struct {
RawText string `json:"raw_text"`
TextValue string `json:"text_value"`
Name string `json:"name"`
} `json:"arguments"`
Intent string `json:"intent"`
RawInputs []struct {
Query string `json:"query"`
InputType int `json:"input_type"`
} `json:"raw_inputs"`
} `json:"inputs"`
User struct {
UserID string `json:"user_id"`
Profile struct {
DisplayName string `json:"display_name"`
GivenName string `json:"given_name"`
FamilyName string `json:"family_name"`
} `json:"profile"`
AccessToken string `json:"access_token"`
} `json:"user"`
Device struct {
Location struct {
Coordinates struct {
Latitude float64 `json:"latitude"`
Longitude float64 `json:"longitude"`
} `json:"coordinates"`
FormattedAddress string `json:"formatted_address"`
ZipCode string `json:"zip_code"`
City string `json:"city"`
} `json:"location"`
} `json:"device"`
Conversation struct {
ConversationToken string `json:"conversation_token"`
ConversationID string `json:"conversation_id"`
Type int `json:"type"`
} `json:"conversation"`
} `json:"data"`
} `json:"originalRequest"`
}
Fix for parsing parameter
package model
import "time"
// ApiAiRequest models the webhook JSON payload sent by API.AI (Dialogflow v1).
// NOTE(review): field set inferred from the JSON tags below — confirm against
// the API.AI webhook reference.
type ApiAiRequest struct {
	Id        string    `json:"id"`
	Timestamp time.Time `json:"timestamp"`
	Result    struct {
		Source           string `json:"source"`
		ResolvedQuery    string `json:"resolvedQuery"`
		Speech           string `json:"speech"`
		Action           string `json:"action"`
		ActionIncomplete bool   `json:"actionIncomplete"`
		// "parameters" is a flat JSON object, decoded directly into a map.
		Parameters map[string]string `json:"parameters"`
		Contexts   []struct {
			Name string `json:"name"`
			// NOTE(review): this wrapper struct with an untagged inner map
			// likely never decodes anything (same issue Result.Parameters
			// had) — verify with a real payload.
			Parameters struct {
				Parameters map[string]string
			} `json:"parameters"`
			Lifespan int `json:"lifespan"`
		} `json:"contexts"`
		Metadata struct {
			IntentID                  string `json:"intentId"`
			WebhookUsed               string `json:"webhookUsed"`
			WebhookForSlotFillingUsed string `json:"webhookForSlotFillingUsed"`
			IntentName                string `json:"intentName"`
		} `json:"metadata"`
		Fulfillment struct {
			Speech   string `json:"speech"`
			Messages []struct {
				Type   int    `json:"type"`
				Speech string `json:"speech"`
			} `json:"messages"`
		} `json:"fulfillment"`
		Score float64 `json:"score"`
	} `json:"result"`
	Status struct {
		Code      int    `json:"code"`
		ErrorType string `json:"errorType"`
	} `json:"status"`
	SessionID       string `json:"sessionId"`
	OriginalRequest struct {
		Source string `json:"source"`
		Data   struct {
			Inputs []struct {
				Arguments []struct {
					RawText   string `json:"raw_text"`
					TextValue string `json:"text_value"`
					Name      string `json:"name"`
				} `json:"arguments"`
				Intent    string `json:"intent"`
				RawInputs []struct {
					Query     string `json:"query"`
					InputType int    `json:"input_type"`
				} `json:"raw_inputs"`
			} `json:"inputs"`
			User struct {
				UserID  string `json:"user_id"`
				Profile struct {
					DisplayName string `json:"display_name"`
					GivenName   string `json:"given_name"`
					FamilyName  string `json:"family_name"`
				} `json:"profile"`
				AccessToken string `json:"access_token"`
			} `json:"user"`
			Device struct {
				Location struct {
					Coordinates struct {
						Latitude  float64 `json:"latitude"`
						Longitude float64 `json:"longitude"`
					} `json:"coordinates"`
					FormattedAddress string `json:"formatted_address"`
					ZipCode          string `json:"zip_code"`
					City             string `json:"city"`
				} `json:"location"`
			} `json:"device"`
			Conversation struct {
				ConversationToken string `json:"conversation_token"`
				ConversationID    string `json:"conversation_id"`
				Type              int    `json:"type"`
			} `json:"conversation"`
		} `json:"data"`
	} `json:"originalRequest"`
}
|
package beat
import (
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/cfgfile"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/publisher"
)
// Execbeat is the libbeat Beater implementation: it runs one Executor per
// configured command and publishes results through the publisher client.
type Execbeat struct {
	done       chan struct{} // closed by Stop to request shutdown; NOTE(review): never initialized in this revision, so Stop would close a nil channel — confirm
	ExecConfig ConfigSettings
	events     publisher.Client
}
// New creates an Execbeat with its shutdown channel already initialized.
func New() *Execbeat {
	// BUG FIX: done was previously left nil, so Stop would panic with
	// "close of nil channel".
	return &Execbeat{done: make(chan struct{})}
}
// Config reads the beat configuration file into execBeat.ExecConfig.
func (execBeat *Execbeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&execBeat.ExecConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}
	// BUG FIX: logp.Info takes a format string as its first argument (there
	// is no selector parameter, unlike logp.Debug), so the previous call
	// logp.Info("execbeat", "Init execbeat") produced garbled output.
	logp.Info("Init execbeat")
	return nil
}
// Setup stores the publisher client used to emit events.
func (execBeat *Execbeat) Setup(b *beat.Beat) error {
	execBeat.events = b.Events
	return nil
}
// Run starts one Executor goroutine per configured command and then blocks
// until Stop closes the done channel.
func (execBeat *Execbeat) Run(b *beat.Beat) error {
	for i, execConfig := range execBeat.ExecConfig.Execbeat.Execs {
		logp.Debug("execbeat", "Creating poller # %v with command: %v", i, execConfig.Command)
		poller := NewExecutor(execBeat, execConfig)
		go poller.Run()
	}
	// BUG FIX: the previous empty for{} loop busy-spun a CPU core and never
	// returned; block on the shutdown channel instead.
	<-execBeat.done
	return nil
}
// Cleanup is a no-op; Execbeat holds no resources needing teardown.
func (execBeat *Execbeat) Cleanup(b *beat.Beat) error {
	return nil
}
// Stop signals Run to exit by closing the done channel.
// NOTE(review): in this revision done is never initialized anywhere, so this
// close panics — see New.
func (execBeat *Execbeat) Stop() {
	close(execBeat.done)
}
[2] Hanging during shutdown
package beat
import (
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/cfgfile"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/publisher"
)
// Execbeat is the libbeat Beater implementation: it runs one Executor per
// configured command and publishes results through the publisher client.
type Execbeat struct {
	done       chan bool // created in Setup, closed by Stop to request shutdown
	ExecConfig ConfigSettings
	events     publisher.Client
}
// New creates an Execbeat. The shutdown channel is initialized here as well
// as in Setup, so Stop cannot close a nil channel even if it is invoked
// before Setup has run (Setup re-creating the channel is harmless).
func New() *Execbeat {
	return &Execbeat{done: make(chan bool)}
}
// Config reads the beat configuration file into execBeat.ExecConfig.
func (execBeat *Execbeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&execBeat.ExecConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}
	// BUG FIX: logp.Info takes a format string as its first argument (there
	// is no selector parameter, unlike logp.Debug), so the previous call
	// logp.Info("execbeat", "Init execbeat") produced garbled output.
	logp.Info("Init execbeat")
	return nil
}
// Setup stores the publisher client and creates the shutdown channel that
// Run blocks on and Stop closes.
func (execBeat *Execbeat) Setup(b *beat.Beat) error {
	execBeat.events = b.Events
	execBeat.done = make(chan bool)
	return nil
}
// Run starts one Executor goroutine per configured command and then blocks
// until Stop closes the done channel.
func (execBeat *Execbeat) Run(b *beat.Beat) error {
	for i, execConfig := range execBeat.ExecConfig.Execbeat.Execs {
		logp.Debug("execbeat", "Creating poller # %v with command: %v", i, execConfig.Command)
		poller := NewExecutor(execBeat, execConfig)
		go poller.Run()
	}
	// Simplified from a for/select loop with a single case plus an
	// unreachable trailing return: just block until shutdown is requested.
	<-execBeat.done
	return nil
}
// Cleanup is a no-op; Execbeat holds no resources needing teardown.
func (execBeat *Execbeat) Cleanup(b *beat.Beat) error {
	return nil
}
// Stop signals Run to exit by closing the done channel created in Setup.
func (execBeat *Execbeat) Stop() {
	close(execBeat.done)
}
|
package api
import (
"fmt"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/runtime"
"github.com/julienschmidt/httprouter"
"net/http"
)
// getUserOptional looks up the user named in the "Username" request header
// via the external user loader; it returns nil when the header is absent.
func (api *coreAPI) getUserOptional(request *http.Request) *lang.User {
	username := request.Header.Get("Username")
	if len(username) == 0 {
		return nil
	}
	return api.externalData.UserLoader.LoadUserByName(username)
}
// getUserRequired resolves the request user like getUserOptional but panics
// when no user can be resolved.
// NOTE(review): presumably an HTTP-layer recover turns this panic into an
// error response — confirm upstream.
func (api *coreAPI) getUserRequired(request *http.Request) *lang.User {
	user := api.getUserOptional(request)
	if user == nil {
		panic("Unauthorized or user couldn't be loaded")
	}
	return user
}
// AuthSuccessObject contains the runtime.Info for the AuthSuccess type.
var AuthSuccessObject = &runtime.Info{
	Kind:        "auth-success",
	Constructor: func() runtime.Object { return &AuthSuccess{} },
}

// AuthSuccess represents a successful authentication response (an empty
// marker object carrying only its inlined type/kind metadata).
type AuthSuccess struct {
	runtime.TypeKind `yaml:",inline"`
}
// authenticateUser verifies the username/password form values against the
// external user data source and writes either an AuthSuccess object or a
// server error to the response. The httprouter params argument is unused.
func (api *coreAPI) authenticateUser(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
	username := request.PostFormValue("username")
	password := request.PostFormValue("password")
	_, err := api.externalData.UserLoader.Authenticate(username, password)
	if err != nil {
		serverErr := NewServerError(fmt.Sprintf("Authentication error: %s", err))
		api.contentType.WriteOne(writer, request, serverErr)
	} else {
		api.contentType.WriteOne(writer, request, &AuthSuccess{AuthSuccessObject.GetTypeKind()})
	}
}
Add missing godoc
package api
import (
"fmt"
"github.com/Aptomi/aptomi/pkg/lang"
"github.com/Aptomi/aptomi/pkg/runtime"
"github.com/julienschmidt/httprouter"
"net/http"
)
// getUserOptional resolves the caller from the "Username" request header via
// the external user loader. It returns nil when the header is not supplied.
func (api *coreAPI) getUserOptional(request *http.Request) *lang.User {
	if username := request.Header.Get("Username"); username != "" {
		return api.externalData.UserLoader.LoadUserByName(username)
	}
	return nil
}
// getUserRequired resolves the request user like getUserOptional, but panics
// when the user is absent or cannot be loaded.
func (api *coreAPI) getUserRequired(request *http.Request) *lang.User {
	if user := api.getUserOptional(request); user != nil {
		return user
	}
	panic("Unauthorized or user couldn't be loaded")
}
// AuthSuccessObject contains Info for the AuthSuccess type
var AuthSuccessObject = &runtime.Info{
	Kind:        "auth-success",
	Constructor: func() runtime.Object { return &AuthSuccess{} },
}

// AuthSuccess represents successful authentication (an empty marker object
// carrying only its inlined type/kind metadata)
type AuthSuccess struct {
	runtime.TypeKind `yaml:",inline"`
}
// authenticateUser verifies the username/password form values against the
// external user data source and writes either an AuthSuccess object or a
// server error to the response. The httprouter params argument is unused.
func (api *coreAPI) authenticateUser(writer http.ResponseWriter, request *http.Request, params httprouter.Params) {
	username := request.PostFormValue("username")
	password := request.PostFormValue("password")
	_, err := api.externalData.UserLoader.Authenticate(username, password)
	if err != nil {
		serverErr := NewServerError(fmt.Sprintf("Authentication error: %s", err))
		api.contentType.WriteOne(writer, request, serverErr)
	} else {
		api.contentType.WriteOne(writer, request, &AuthSuccess{AuthSuccessObject.GetTypeKind()})
	}
}
|
// Package cmd defines commands for the DC/OS CLI.
package cmd
import (
"path/filepath"
"github.com/dcos/dcos-cli/api"
"github.com/dcos/dcos-cli/pkg/cmd/auth"
"github.com/dcos/dcos-cli/pkg/cmd/cluster"
"github.com/dcos/dcos-cli/pkg/cmd/config"
"github.com/dcos/dcos-cli/pkg/plugin"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// annotationUsageOptions is the cobra Annotations key holding the
// hand-written "Options" section rendered by the usage template.
const annotationUsageOptions string = "usage_options"
// NewDCOSCommand creates the `dcos` command with its `auth`, `config`, and `cluster` subcommands.
func NewDCOSCommand(ctx api.Context) *cobra.Command {
	var verbose int
	cmd := &cobra.Command{
		Use: "dcos",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// BUG FIX: once flags have parsed and a subcommand is about to
			// run, a returned error should not trigger a usage dump. Setting
			// SilenceUsage here (rather than at construction time) keeps
			// usage output for genuine flag/usage errors.
			cmd.SilenceUsage = true
			if verbose == 1 {
				// -v sets the logger level to info.
				ctx.Logger().SetLevel(logrus.InfoLevel)
			} else if verbose > 1 {
				// -vv sets the logger level to debug. This also happens for -vvv
				// and above, in such cases we set the logging level to its maximum.
				ctx.Logger().SetLevel(logrus.DebugLevel)
			}
		},
	}
	cmd.PersistentFlags().CountVarP(&verbose, "", "v", "verbosity (-v or -vv)")
	cmd.AddCommand(
		auth.NewCommand(ctx),
		config.NewCommand(ctx),
		cluster.NewCommand(ctx),
	)
	// If a cluster is attached, we get its plugins.
	if cluster, err := ctx.Cluster(); err == nil {
		pluginManager := ctx.PluginManager(cluster.SubcommandsDir())
		for _, p := range pluginManager.Plugins() {
			for _, e := range p.Executables {
				for _, c := range e.Commands {
					executable := filepath.Join(p.BinDir, e.Filename)
					cmd.AddCommand(pluginCommand(executable, pluginManager, c))
				}
			}
		}
	}
	// This follows the CLI design guidelines for help formatting.
	cmd.SetUsageTemplate(`Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
  {{.CommandPath}} [command]{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{.Name}}
      {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Options:{{if ne (index .Annotations "` + annotationUsageOptions + `") ""}}{{index .Annotations "` + annotationUsageOptions + `"}}{{else}}
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`)
	cmd.Annotations = map[string]string{
		annotationUsageOptions: `
  --version
      Print version information
  -v, -vv
      Output verbosity (verbose or very verbose)
  -h, --help
      Show usage help`,
	}
	return cmd
}
// pluginCommand wraps an external plugin subcommand into a cobra command
// that forwards all raw arguments to the plugin executable.
func pluginCommand(executable string, pluginManager *plugin.Manager, c *plugin.Command) *cobra.Command {
	return &cobra.Command{
		Use:                c.Name,
		Short:              c.Description,
		DisableFlagParsing: true,
		SilenceErrors:      true, // Silences error message if command returns an exit code.
		SilenceUsage:       true, // Silences usage information from the wrapper CLI on error.
		RunE: func(cmd *cobra.Command, args []string) error {
			// Prepend the arguments with the command's name so that the
			// executed command knows which subcommand to execute (e.g.
			// `dcos marathon app` would send `<binary> app` without this).
			argsWithRoot := append([]string{c.Name}, args...)
			return pluginManager.Invoke(executable, argsWithRoot)
		},
	}
}
Disable usage output on errors
This causes command help messages to appear everytime an error is triggered.
// Package cmd defines commands for the DC/OS CLI.
package cmd
import (
"path/filepath"
"github.com/dcos/dcos-cli/api"
"github.com/dcos/dcos-cli/pkg/cmd/auth"
"github.com/dcos/dcos-cli/pkg/cmd/cluster"
"github.com/dcos/dcos-cli/pkg/cmd/config"
"github.com/dcos/dcos-cli/pkg/plugin"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// annotationUsageOptions is the cobra Annotations key holding the
// hand-written "Options" section rendered by the usage template.
const annotationUsageOptions string = "usage_options"
// NewDCOSCommand creates the `dcos` command with its `auth`, `config`, and `cluster` subcommands.
func NewDCOSCommand(ctx api.Context) *cobra.Command {
	var verbose int
	cmd := &cobra.Command{
		Use: "dcos",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Once flags have parsed and a subcommand is about to run, a
			// returned error should not trigger a usage dump; setting this
			// here keeps usage output for genuine flag/usage errors.
			cmd.SilenceUsage = true
			if verbose == 1 {
				// -v sets the logger level to info.
				ctx.Logger().SetLevel(logrus.InfoLevel)
			} else if verbose > 1 {
				// -vv sets the logger level to debug. This also happens for -vvv
				// and above, in such cases we set the logging level to its maximum.
				ctx.Logger().SetLevel(logrus.DebugLevel)
			}
		},
	}
	cmd.PersistentFlags().CountVarP(&verbose, "", "v", "verbosity (-v or -vv)")
	cmd.AddCommand(
		auth.NewCommand(ctx),
		config.NewCommand(ctx),
		cluster.NewCommand(ctx),
	)
	// If a cluster is attached, we get its plugins.
	if cluster, err := ctx.Cluster(); err == nil {
		pluginManager := ctx.PluginManager(cluster.SubcommandsDir())
		for _, p := range pluginManager.Plugins() {
			for _, e := range p.Executables {
				for _, c := range e.Commands {
					executable := filepath.Join(p.BinDir, e.Filename)
					cmd.AddCommand(pluginCommand(executable, pluginManager, c))
				}
			}
		}
	}
	// This follows the CLI design guidelines for help formatting.
	cmd.SetUsageTemplate(`Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
  {{.CommandPath}} [command]{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{.Name}}
      {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Options:{{if ne (index .Annotations "` + annotationUsageOptions + `") ""}}{{index .Annotations "` + annotationUsageOptions + `"}}{{else}}
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`)
	cmd.Annotations = map[string]string{
		annotationUsageOptions: `
  --version
      Print version information
  -v, -vv
      Output verbosity (verbose or very verbose)
  -h, --help
      Show usage help`,
	}
	return cmd
}
// pluginCommand wraps an external plugin subcommand into a cobra command
// that forwards all raw arguments to the plugin executable.
func pluginCommand(executable string, pluginManager *plugin.Manager, c *plugin.Command) *cobra.Command {
	return &cobra.Command{
		Use:                c.Name,
		Short:              c.Description,
		DisableFlagParsing: true,
		SilenceErrors:      true, // Silences error message if command returns an exit code.
		SilenceUsage:       true, // Silences usage information from the wrapper CLI on error.
		RunE: func(cmd *cobra.Command, args []string) error {
			// Prepend the arguments with the command's name so that the
			// executed command knows which subcommand to execute (e.g.
			// `dcos marathon app` would send `<binary> app` without this).
			argsWithRoot := append([]string{c.Name}, args...)
			return pluginManager.Invoke(executable, argsWithRoot)
		},
	}
}
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"fmt"
"os"
"path"
"strings"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
"github.com/lunny/xorm"
"github.com/gogits/gogs/modules/base"
)
var (
	orm    *xorm.Engine  // global xorm engine shared by all model code
	tables []interface{} // every model struct synced to the DB schema (see init)

	// HasEngine reports whether a database engine has been initialized.
	HasEngine bool

	// DbCfg holds the [database] section of the configuration file.
	DbCfg struct {
		Type, Host, Name, User, Pwd, Path, SslMode string
	}

	EnableSQLite3 bool // true when the sqlite3 driver is compiled in
	UseSQLite3    bool // true at runtime when DB_TYPE is sqlite3
)
// init registers every model struct so the engine can sync the DB schema.
func init() {
	tables = append(tables, new(User), new(PublicKey), new(Repository), new(Watch),
		new(Action), new(Access), new(Issue), new(Comment), new(Oauth2), new(Follow),
		new(Mirror), new(Release))
}
// LoadModelsConfig reads the [database] section of the configuration into
// DbCfg and flags sqlite3 usage.
func LoadModelsConfig() {
	DbCfg.Type = base.Cfg.MustValue("database", "DB_TYPE")
	if DbCfg.Type == "sqlite3" {
		UseSQLite3 = true
	}
	DbCfg.Host = base.Cfg.MustValue("database", "HOST")
	DbCfg.Name = base.Cfg.MustValue("database", "NAME")
	DbCfg.User = base.Cfg.MustValue("database", "USER")
	DbCfg.Pwd = base.Cfg.MustValue("database", "PASSWD")
	DbCfg.SslMode = base.Cfg.MustValue("database", "SSL_MODE")
	// PATH has a default so sqlite3 works out of the box.
	DbCfg.Path = base.Cfg.MustValue("database", "PATH", "data/gogs.db")
}
// NewTestEngine connects to the configured database with a throw-away engine
// and syncs the model tables, verifying the configuration is usable.
//
// NOTE(review): x is passed by value, so the engine created here never
// reaches the caller; the parameter is effectively write-only. Kept as-is to
// preserve the public signature.
func NewTestEngine(x *xorm.Engine) (err error) {
	switch DbCfg.Type {
	case "mysql":
		x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
			DbCfg.User, DbCfg.Pwd, DbCfg.Host, DbCfg.Name))
	case "postgres":
		// HOST may be "host" or "host:port"; default to localhost:5432.
		var host, port = "127.0.0.1", "5432"
		fields := strings.Split(DbCfg.Host, ":")
		if len(fields) > 0 {
			host = fields[0]
		}
		if len(fields) > 1 {
			port = fields[1]
		}
		cnnstr := fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s",
			DbCfg.User, DbCfg.Pwd, host, port, DbCfg.Name, DbCfg.SslMode)
		x, err = xorm.NewEngine("postgres", cnnstr)
	case "sqlite3":
		if !EnableSQLite3 {
			return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
		}
		os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
		x, err = xorm.NewEngine("sqlite3", DbCfg.Path)
	default:
		return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
	}
	if err != nil {
		// BUG FIX: typo "conntect" -> "connect" in the error message.
		return fmt.Errorf("models.init(fail to connect database): %v", err)
	}
	return x.Sync(tables...)
}
// SetEngine creates the global orm engine from DbCfg and redirects xorm's
// log output to log/xorm.log next to the executable.
func SetEngine() (err error) {
	switch DbCfg.Type {
	case "mysql":
		orm, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
			DbCfg.User, DbCfg.Pwd, DbCfg.Host, DbCfg.Name))
	case "postgres":
		// HOST may be "host" or "host:port"; default to localhost:5432.
		var host, port = "127.0.0.1", "5432"
		fields := strings.Split(DbCfg.Host, ":")
		if len(fields) > 0 {
			host = fields[0]
		}
		if len(fields) > 1 {
			port = fields[1]
		}
		orm, err = xorm.NewEngine("postgres", fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s",
			DbCfg.User, DbCfg.Pwd, host, port, DbCfg.Name, DbCfg.SslMode))
	case "sqlite3":
		os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
		orm, err = xorm.NewEngine("sqlite3", DbCfg.Path)
	default:
		return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
	}
	if err != nil {
		// BUG FIX: typo "conntect" -> "connect" in the error message.
		return fmt.Errorf("models.init(fail to connect database): %v", err)
	}
	// WARNING: for the serv command the output MUST NOT go to os.Stdout,
	// so write the xorm log to a file instead.
	execDir, _ := base.ExecDir()
	logPath := execDir + "/log/xorm.log"
	os.MkdirAll(path.Dir(logPath), os.ModePerm)
	f, err := os.Create(logPath)
	if err != nil {
		return fmt.Errorf("models.init(fail to create xorm.log): %v", err)
	}
	orm.Logger = f
	orm.ShowSQL = true
	orm.ShowDebug = true
	orm.ShowErr = true
	return nil
}
// NewEngine initializes the global engine and syncs the model tables.
func NewEngine() (err error) {
	if err = SetEngine(); err != nil {
		return err
	}
	if err = orm.Sync(tables...); err != nil {
		// BUG FIX: dropped the trailing "\n"; Go error strings should not
		// end with a newline.
		return fmt.Errorf("sync database struct error: %v", err)
	}
	return nil
}
// Statistic aggregates row counts of the major model tables (see
// GetStatistic).
type Statistic struct {
	Counter struct {
		User, PublicKey, Repo,
		Watch, Action, Access,
		Issue, Comment,
		Mirror, Oauth, Release int64
	}
}
// GetStatistic counts the rows of every major model table. Count errors are
// deliberately ignored: a failed count leaves that counter at zero.
func GetStatistic() (stats Statistic) {
	stats.Counter.User, _ = orm.Count(new(User))
	stats.Counter.PublicKey, _ = orm.Count(new(PublicKey))
	stats.Counter.Repo, _ = orm.Count(new(Repository))
	stats.Counter.Watch, _ = orm.Count(new(Watch))
	stats.Counter.Action, _ = orm.Count(new(Action))
	stats.Counter.Access, _ = orm.Count(new(Access))
	stats.Counter.Issue, _ = orm.Count(new(Issue))
	stats.Counter.Comment, _ = orm.Count(new(Comment))
	stats.Counter.Mirror, _ = orm.Count(new(Mirror))
	stats.Counter.Oauth, _ = orm.Count(new(Oauth2))
	stats.Counter.Release, _ = orm.Count(new(Release))
	return
}
Bug fix on build tag
// Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"fmt"
"os"
"path"
"strings"
_ "github.com/go-sql-driver/mysql"
_ "github.com/gogits/cache"
_ "github.com/lib/pq"
"github.com/lunny/xorm"
"github.com/gogits/gogs/modules/base"
)
var (
	orm    *xorm.Engine  // global xorm engine shared by all model code
	tables []interface{} // every model struct synced to the DB schema (see init)

	// HasEngine reports whether a database engine has been initialized.
	HasEngine bool

	// DbCfg holds the [database] section of the configuration file.
	DbCfg struct {
		Type, Host, Name, User, Pwd, Path, SslMode string
	}

	EnableSQLite3 bool // true when the sqlite3 driver is compiled in
	UseSQLite3    bool // true at runtime when DB_TYPE is sqlite3
)
// init registers every model struct so the engine can sync the DB schema.
func init() {
	tables = append(tables, new(User), new(PublicKey), new(Repository), new(Watch),
		new(Action), new(Access), new(Issue), new(Comment), new(Oauth2), new(Follow),
		new(Mirror), new(Release))
}
// LoadModelsConfig reads the [database] section of the app config into
// DbCfg, and flags SQLite usage when DB_TYPE is "sqlite3".
func LoadModelsConfig() {
DbCfg.Type = base.Cfg.MustValue("database", "DB_TYPE")
if DbCfg.Type == "sqlite3" {
UseSQLite3 = true
}
DbCfg.Host = base.Cfg.MustValue("database", "HOST")
DbCfg.Name = base.Cfg.MustValue("database", "NAME")
DbCfg.User = base.Cfg.MustValue("database", "USER")
DbCfg.Pwd = base.Cfg.MustValue("database", "PASSWD")
DbCfg.SslMode = base.Cfg.MustValue("database", "SSL_MODE")
// Path has a default so sqlite works without explicit configuration.
DbCfg.Path = base.Cfg.MustValue("database", "PATH", "data/gogs.db")
}
// NewTestEngine creates a throw-away xorm engine for the configured
// database type and syncs the model tables against it.
//
// NOTE(review): x is passed by value, so the engine created here is NOT
// visible to the caller after return — callers only learn whether
// connecting and syncing succeeded. Confirm this is intended before
// relying on the passed-in engine being initialized.
func NewTestEngine(x *xorm.Engine) (err error) {
	switch DbCfg.Type {
	case "mysql":
		x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
			DbCfg.User, DbCfg.Pwd, DbCfg.Host, DbCfg.Name))
	case "postgres":
		// HOST may be "host" or "host:port"; default to localhost:5432.
		var host, port = "127.0.0.1", "5432"
		fields := strings.Split(DbCfg.Host, ":")
		if len(fields) > 0 {
			host = fields[0]
		}
		if len(fields) > 1 {
			port = fields[1]
		}
		cnnstr := fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s",
			DbCfg.User, DbCfg.Pwd, host, port, DbCfg.Name, DbCfg.SslMode)
		x, err = xorm.NewEngine("postgres", cnnstr)
	case "sqlite3":
		// sqlite support is optional and gated by a build tag.
		if !EnableSQLite3 {
			return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
		}
		os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
		x, err = xorm.NewEngine("sqlite3", DbCfg.Path)
	default:
		return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
	}
	if err != nil {
		// Fixed typo in error message: "conntect" -> "connect".
		return fmt.Errorf("models.init(fail to connect database): %v", err)
	}
	return x.Sync(tables...)
}
// SetEngine creates the global xorm engine from DbCfg and routes the
// ORM's SQL/debug/error logging to <exec-dir>/log/xorm.log.
// Supported DB_TYPE values: mysql, postgres, sqlite3.
func SetEngine() (err error) {
	switch DbCfg.Type {
	case "mysql":
		orm, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
			DbCfg.User, DbCfg.Pwd, DbCfg.Host, DbCfg.Name))
	case "postgres":
		// HOST may be "host" or "host:port"; default to localhost:5432.
		var host, port = "127.0.0.1", "5432"
		fields := strings.Split(DbCfg.Host, ":")
		if len(fields) > 0 {
			host = fields[0]
		}
		if len(fields) > 1 {
			port = fields[1]
		}
		orm, err = xorm.NewEngine("postgres", fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s",
			DbCfg.User, DbCfg.Pwd, host, port, DbCfg.Name, DbCfg.SslMode))
	case "sqlite3":
		os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
		orm, err = xorm.NewEngine("sqlite3", DbCfg.Path)
	default:
		return fmt.Errorf("Unknown database type: %s", DbCfg.Type)
	}
	if err != nil {
		// Fixed typo in error message: "conntect" -> "connect".
		return fmt.Errorf("models.init(fail to connect database): %v", err)
	}
	// WARNING: for the serv command, output MUST NOT go to os.Stdout,
	// so log to a file instead.
	execDir, _ := base.ExecDir()
	logPath := execDir + "/log/xorm.log"
	os.MkdirAll(path.Dir(logPath), os.ModePerm)
	f, err := os.Create(logPath)
	if err != nil {
		return fmt.Errorf("models.init(fail to create xorm.log): %v", err)
	}
	orm.Logger = f
	orm.ShowSQL = true
	orm.ShowDebug = true
	orm.ShowErr = true
	return nil
}
// NewEngine initializes the global ORM engine via SetEngine and then
// synchronizes the database schema with the registered model tables.
func NewEngine() (err error) {
	if err = SetEngine(); err == nil {
		if err = orm.Sync(tables...); err != nil {
			err = fmt.Errorf("sync database struct error: %v\n", err)
		}
	}
	return err
}
// Statistic holds aggregate row counts for the major model tables,
// as gathered by GetStatistic.
type Statistic struct {
// Counter groups one int64 row count per tracked table.
Counter struct {
User, PublicKey, Repo,
Watch, Action, Access,
Issue, Comment,
Mirror, Oauth, Release int64
}
}
// GetStatistic counts the rows of each major model table.
// Count errors are deliberately ignored; a failed count reads as 0.
func GetStatistic() (stats Statistic) {
stats.Counter.User, _ = orm.Count(new(User))
stats.Counter.PublicKey, _ = orm.Count(new(PublicKey))
stats.Counter.Repo, _ = orm.Count(new(Repository))
stats.Counter.Watch, _ = orm.Count(new(Watch))
stats.Counter.Action, _ = orm.Count(new(Action))
stats.Counter.Access, _ = orm.Count(new(Access))
stats.Counter.Issue, _ = orm.Count(new(Issue))
stats.Counter.Comment, _ = orm.Count(new(Comment))
stats.Counter.Mirror, _ = orm.Count(new(Mirror))
stats.Counter.Oauth, _ = orm.Count(new(Oauth2))
stats.Counter.Release, _ = orm.Count(new(Release))
return
}
|
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package models implements the models for an abstraction over the
// multiple data stores used by a BitTorrent tracker.
package models
import (
"errors"
"net/http"
"path"
"strconv"
"time"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/models/query"
)
var (
// ErrMalformedRequest is returned when an http.Request does no have the
// required parameters to create a model.
ErrMalformedRequest = errors.New("malformed request")
)
// Peer is a participant in a swarm.
type Peer struct {
	ID           string `json:"id"`
	UserID       uint64 `json:"user_id"`
	TorrentID    uint64 `json:"torrent_id"`
	IP           string `json:"ip"`
	Port         uint64 `json:"port"`
	Uploaded     uint64 `json:"uploaded"`
	Downloaded   uint64 `json:"downloaded"` // fixed: struct tag was missing its closing quote
	Left         uint64 `json:"left"`
	LastAnnounce int64  `json:"last_announce"`
}
// Key returns the unique key used to look-up a peer in a swarm (i.e.
// Torrent.Seeders & Torrent.Leechers). The UserID is rendered base-36
// and joined to the peer ID with ':'.
func (p Peer) Key() string {
return p.ID + ":" + strconv.FormatUint(p.UserID, 36)
}
// Torrent is a swarm for a given torrent file.
// Seeders and Leechers are keyed by Peer.Key().
type Torrent struct {
ID uint64 `json:"id"`
Infohash string `json:"infohash"`
Active bool `json:"active"`
Seeders map[string]Peer `json:"seeders"`
Leechers map[string]Peer `json:"leechers"`
Snatches uint64 `json:"snatches"`
UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
LastAction int64 `json:"last_action"`
}
// InSeederPool reports whether p is within t's pool of seeders.
func (t *Torrent) InSeederPool(p *Peer) bool {
	if _, ok := t.Seeders[p.Key()]; ok {
		return true
	}
	return false
}
// InLeecherPool reports whether p is within t's pool of leechers.
func (t *Torrent) InLeecherPool(p *Peer) bool {
	if _, ok := t.Leechers[p.Key()]; ok {
		return true
	}
	return false
}
// NewPeer returns the Peer representation of an Announce. a must be
// non-nil (panics otherwise). A nil u or t yields a Peer with a zero
// UserID or TorrentID respectively, instead of dereferencing nil.
func NewPeer(a *Announce, u *User, t *Torrent) *Peer {
	if a == nil {
		panic("models: announce cannot equal nil")
	}
	var userID, torrentID uint64
	if u != nil {
		userID = u.ID
	}
	if t != nil {
		torrentID = t.ID
	}
	return &Peer{
		ID:           a.PeerID,
		UserID:       userID,
		TorrentID:    torrentID,
		IP:           a.IP,
		Port:         a.Port,
		Uploaded:     a.Uploaded,
		Downloaded:   a.Downloaded,
		Left:         a.Left,
		LastAnnounce: time.Now().Unix(),
	}
}
// User is a registered user for private trackers.
// Passkey authenticates announce/scrape requests via the URL path.
type User struct {
ID uint64 `json:"id"`
Passkey string `json:"passkey"`
UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
Snatches uint64 `json:"snatches"`
}
// Announce is an Announce by a Peer, parsed from an HTTP request by
// NewAnnounce together with the server config that received it.
type Announce struct {
Config *config.Config `json:"config"`
Request *http.Request `json:"request"`
Compact bool `json:"compact"`
Downloaded uint64 `json:"downloaded"`
Event string `json:"event"`
IP string `json:"ip"`
Infohash string `json:"infohash"`
Left uint64 `json:"left"`
NumWant int `json:"numwant"`
Passkey string `json:"passkey"`
PeerID string `json:"peer_id"`
Port uint64 `json:"port"`
Uploaded uint64 `json:"uploaded"`
}
// NewAnnounce parses an HTTP request and generates an Announce.
// It returns ErrMalformedRequest unless all required query parameters
// (info_hash, peer_id, port, uploaded, downloaded, left) parse, a client
// IP is resolvable, and the URL directory is exactly 34 characters —
// i.e. "/" + 32-char passkey + "/".
func NewAnnounce(r *http.Request, conf *config.Config) (*Announce, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
// compact defaults to true; only an explicit "0" disables it.
compact := q.Params["compact"] != "0"
downloaded, downloadedErr := q.Uint64("downloaded")
event, _ := q.Params["event"]
infohash, _ := q.Params["info_hash"]
ip, _ := q.RequestedIP(r)
left, leftErr := q.Uint64("left")
numWant := q.RequestedPeerCount(conf.NumWantFallback)
// dir is the "/<passkey>/" prefix of the request path.
dir, _ := path.Split(r.URL.Path)
peerID, _ := q.Params["peer_id"]
port, portErr := q.Uint64("port")
uploaded, uploadedErr := q.Uint64("uploaded")
if downloadedErr != nil ||
infohash == "" ||
leftErr != nil ||
peerID == "" ||
portErr != nil ||
uploadedErr != nil ||
ip == "" ||
len(dir) != 34 {
return nil, ErrMalformedRequest
}
return &Announce{
Config: conf,
Request: r,
Compact: compact,
Downloaded: downloaded,
Event: event,
IP: ip,
Infohash: infohash,
Left: left,
NumWant: numWant,
Passkey: dir[1:33],
PeerID: peerID,
Port: port,
Uploaded: uploaded,
}, nil
}
// ClientID returns the part of a PeerID that identifies a Peer's client
// software: Azureus-style IDs ("-AB1234-...") yield the six characters
// after the dash, Shadow-style IDs yield the first six characters.
func (a Announce) ClientID() (clientID string) {
	peerID := a.PeerID
	switch {
	case len(peerID) >= 7 && peerID[0] == '-':
		clientID = peerID[1:7]
	case len(peerID) >= 6 && peerID[0] != '-':
		clientID = peerID[0:6]
	}
	return
}
// AnnounceDelta contains the changes to a Peer's state. These changes are
// recorded by the backend driver.
type AnnounceDelta struct {
Peer *Peer
Torrent *Torrent
User *User
// Created is true if this announce created a new peer or changed an existing
// peer's address
Created bool
// Snatched is true if this announce completed the download
Snatched bool
// Uploaded contains the raw upload delta for this announce, in bytes
Uploaded uint64
// Downloaded contains the raw download delta for this announce, in bytes
Downloaded uint64
}
// NewAnnounceDelta calculates a Peer's download and upload deltas between
// Announces and generates an AnnounceDelta. Deltas are scaled by the
// user's and torrent's multipliers; download deltas are zero under
// freeleech.
//
// NOTE(review): deltas are computed as p.X - a.X — confirm this is the
// intended direction relative to the stored peer vs. the new announce.
func NewAnnounceDelta(a *Announce, p *Peer, u *User, t *Torrent, created, snatched bool) *AnnounceDelta {
	// Restarting a torrent may report counters lower than before. The
	// original clamped with `rawDelta < 0`, which is never true for a
	// uint64 — the subtraction silently wrapped around instead. Guard the
	// subtraction so the delta saturates at zero.
	var rawDeltaUp, rawDeltaDown uint64
	if p.Uploaded > a.Uploaded {
		rawDeltaUp = p.Uploaded - a.Uploaded
	}
	if !a.Config.Freeleech && p.Downloaded > a.Downloaded {
		rawDeltaDown = p.Downloaded - a.Downloaded
	}
	return &AnnounceDelta{
		Peer:     p,
		Torrent:  t,
		User:     u,
		Created:  created,
		Snatched: snatched,

		Uploaded:   uint64(float64(rawDeltaUp) * u.UpMultiplier * t.UpMultiplier),
		Downloaded: uint64(float64(rawDeltaDown) * u.DownMultiplier * t.DownMultiplier),
	}
}
// Scrape is a Scrape by a Peer: a request for swarm statistics on one or
// more infohashes, parsed by NewScrape.
type Scrape struct {
Config *config.Config `json:"config"`
Request *http.Request `json:"request"`
Passkey string
Infohashes []string
}
// NewScrape parses an HTTP request and generates a Scrape.
// On private trackers the URL directory must be "/" + 33-char segment,
// from which the passkey is taken; at least one infohash is required.
func NewScrape(r *http.Request, c *config.Config) (*Scrape, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
var passkey string
if c.Private {
dir, _ := path.Split(r.URL.Path)
if len(dir) != 34 {
return nil, ErrMalformedRequest
}
passkey = dir[1:34]
}
if q.Infohashes == nil {
if _, exists := q.Params["infohash"]; !exists {
// There aren't any infohashes.
return nil, ErrMalformedRequest
}
q.Infohashes = []string{q.Params["infohash"]}
}
return &Scrape{
Config: c,
Request: r,
Passkey: passkey,
Infohashes: q.Infohashes,
}, nil
}
Commit message: "NewPeer now handles nil" — the file below is the post-commit version of the same source.
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package models implements the models for an abstraction over the
// multiple data stores used by a BitTorrent tracker.
package models
import (
"errors"
"net/http"
"path"
"strconv"
"time"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/models/query"
)
var (
// ErrMalformedRequest is returned when an http.Request does no have the
// required parameters to create a model.
ErrMalformedRequest = errors.New("malformed request")
)
// Peer is a participant in a swarm.
type Peer struct {
	ID           string `json:"id"`
	UserID       uint64 `json:"user_id"`
	TorrentID    uint64 `json:"torrent_id"`
	IP           string `json:"ip"`
	Port         uint64 `json:"port"`
	Uploaded     uint64 `json:"uploaded"`
	Downloaded   uint64 `json:"downloaded"` // fixed: struct tag was missing its closing quote
	Left         uint64 `json:"left"`
	LastAnnounce int64  `json:"last_announce"`
}
// NewPeer returns the Peer representation of an Announce. When provided nil
// for the announce parameter, it panics. When provided nil for the user or
// torrent parameter, it returns a Peer{UserID: 0} or Peer{TorrentID: 0}
// respectively.
func NewPeer(a *Announce, u *User, t *Torrent) *Peer {
	if a == nil {
		panic("models: announce cannot equal nil")
	}
	// The original checks were inverted (`== nil`), which would have
	// dereferenced nil exactly when nil was passed, and the torrent branch
	// read u.ID instead of t.ID.
	var userID uint64
	if u != nil {
		userID = u.ID
	}
	var torrentID uint64
	if t != nil {
		torrentID = t.ID
	}
	return &Peer{
		ID:           a.PeerID,
		UserID:       userID,
		TorrentID:    torrentID,
		IP:           a.IP,
		Port:         a.Port,
		Uploaded:     a.Uploaded,
		Downloaded:   a.Downloaded,
		Left:         a.Left,
		LastAnnounce: time.Now().Unix(),
	}
}
// Key returns the unique key used to look-up a peer in a swarm (i.e.
// Torrent.Seeders & Torrent.Leechers). The UserID is rendered base-36
// and joined to the peer ID with ':'.
func (p Peer) Key() string {
return p.ID + ":" + strconv.FormatUint(p.UserID, 36)
}
// Torrent is a swarm for a given torrent file.
// Seeders and Leechers are keyed by Peer.Key().
type Torrent struct {
ID uint64 `json:"id"`
Infohash string `json:"infohash"`
Active bool `json:"active"`
Seeders map[string]Peer `json:"seeders"`
Leechers map[string]Peer `json:"leechers"`
Snatches uint64 `json:"snatches"`
UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
LastAction int64 `json:"last_action"`
}
// InSeederPool reports whether p is within t's pool of seeders.
func (t *Torrent) InSeederPool(p *Peer) bool {
	if _, ok := t.Seeders[p.Key()]; ok {
		return true
	}
	return false
}
// InLeecherPool reports whether p is within t's pool of leechers.
func (t *Torrent) InLeecherPool(p *Peer) bool {
	if _, ok := t.Leechers[p.Key()]; ok {
		return true
	}
	return false
}
// User is a registered user for private trackers.
// Passkey authenticates announce/scrape requests via the URL path.
type User struct {
ID uint64 `json:"id"`
Passkey string `json:"passkey"`
UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
Snatches uint64 `json:"snatches"`
}
// Announce is an Announce by a Peer, parsed from an HTTP request by
// NewAnnounce together with the server config that received it.
type Announce struct {
Config *config.Config `json:"config"`
Request *http.Request `json:"request"`
Compact bool `json:"compact"`
Downloaded uint64 `json:"downloaded"`
Event string `json:"event"`
IP string `json:"ip"`
Infohash string `json:"infohash"`
Left uint64 `json:"left"`
NumWant int `json:"numwant"`
Passkey string `json:"passkey"`
PeerID string `json:"peer_id"`
Port uint64 `json:"port"`
Uploaded uint64 `json:"uploaded"`
}
// NewAnnounce parses an HTTP request and generates an Announce.
// It returns ErrMalformedRequest unless all required query parameters
// (info_hash, peer_id, port, uploaded, downloaded, left) parse, a client
// IP is resolvable, and the URL directory is exactly 34 characters —
// i.e. "/" + 32-char passkey + "/".
func NewAnnounce(r *http.Request, conf *config.Config) (*Announce, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
// compact defaults to true; only an explicit "0" disables it.
compact := q.Params["compact"] != "0"
downloaded, downloadedErr := q.Uint64("downloaded")
event, _ := q.Params["event"]
infohash, _ := q.Params["info_hash"]
ip, _ := q.RequestedIP(r)
left, leftErr := q.Uint64("left")
numWant := q.RequestedPeerCount(conf.NumWantFallback)
// dir is the "/<passkey>/" prefix of the request path.
dir, _ := path.Split(r.URL.Path)
peerID, _ := q.Params["peer_id"]
port, portErr := q.Uint64("port")
uploaded, uploadedErr := q.Uint64("uploaded")
if downloadedErr != nil ||
infohash == "" ||
leftErr != nil ||
peerID == "" ||
portErr != nil ||
uploadedErr != nil ||
ip == "" ||
len(dir) != 34 {
return nil, ErrMalformedRequest
}
return &Announce{
Config: conf,
Request: r,
Compact: compact,
Downloaded: downloaded,
Event: event,
IP: ip,
Infohash: infohash,
Left: left,
NumWant: numWant,
Passkey: dir[1:33],
PeerID: peerID,
Port: port,
Uploaded: uploaded,
}, nil
}
// ClientID returns the part of a PeerID that identifies a Peer's client
// software: Azureus-style IDs ("-AB1234-...") yield the six characters
// after the dash, Shadow-style IDs yield the first six characters.
func (a Announce) ClientID() (clientID string) {
	peerID := a.PeerID
	switch {
	case len(peerID) >= 7 && peerID[0] == '-':
		clientID = peerID[1:7]
	case len(peerID) >= 6 && peerID[0] != '-':
		clientID = peerID[0:6]
	}
	return
}
// AnnounceDelta contains the changes to a Peer's state. These changes are
// recorded by the backend driver.
type AnnounceDelta struct {
Peer *Peer
Torrent *Torrent
User *User
// Created is true if this announce created a new peer or changed an existing
// peer's address
Created bool
// Snatched is true if this announce completed the download
Snatched bool
// Uploaded contains the raw upload delta for this announce, in bytes
Uploaded uint64
// Downloaded contains the raw download delta for this announce, in bytes
Downloaded uint64
}
// NewAnnounceDelta calculates a Peer's download and upload deltas between
// Announces and generates an AnnounceDelta. Deltas are scaled by the
// user's and torrent's multipliers; download deltas are zero under
// freeleech.
//
// NOTE(review): deltas are computed as p.X - a.X — confirm this is the
// intended direction relative to the stored peer vs. the new announce.
func NewAnnounceDelta(a *Announce, p *Peer, u *User, t *Torrent, created, snatched bool) *AnnounceDelta {
	// Restarting a torrent may report counters lower than before. The
	// original clamped with `rawDelta < 0`, which is never true for a
	// uint64 — the subtraction silently wrapped around instead. Guard the
	// subtraction so the delta saturates at zero.
	var rawDeltaUp, rawDeltaDown uint64
	if p.Uploaded > a.Uploaded {
		rawDeltaUp = p.Uploaded - a.Uploaded
	}
	if !a.Config.Freeleech && p.Downloaded > a.Downloaded {
		rawDeltaDown = p.Downloaded - a.Downloaded
	}
	return &AnnounceDelta{
		Peer:     p,
		Torrent:  t,
		User:     u,
		Created:  created,
		Snatched: snatched,

		Uploaded:   uint64(float64(rawDeltaUp) * u.UpMultiplier * t.UpMultiplier),
		Downloaded: uint64(float64(rawDeltaDown) * u.DownMultiplier * t.DownMultiplier),
	}
}
// Scrape is a Scrape by a Peer: a request for swarm statistics on one or
// more infohashes, parsed by NewScrape.
type Scrape struct {
Config *config.Config `json:"config"`
Request *http.Request `json:"request"`
Passkey string
Infohashes []string
}
// NewScrape parses an HTTP request and generates a Scrape.
// On private trackers the URL directory must be "/" + 33-char segment,
// from which the passkey is taken; at least one infohash is required.
func NewScrape(r *http.Request, c *config.Config) (*Scrape, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
var passkey string
if c.Private {
dir, _ := path.Split(r.URL.Path)
if len(dir) != 34 {
return nil, ErrMalformedRequest
}
passkey = dir[1:34]
}
if q.Infohashes == nil {
if _, exists := q.Params["infohash"]; !exists {
// There aren't any infohashes.
return nil, ErrMalformedRequest
}
q.Infohashes = []string{q.Params["infohash"]}
}
return &Scrape{
Config: c,
Request: r,
Passkey: passkey,
Infohashes: q.Infohashes,
}, nil
}
|
package models
import (
"github.com/pufferpanel/pufferpanel/v2"
"gopkg.in/go-playground/validator.v9"
"time"
)
// Server is the panel-side record of a game server instance, keyed by its
// 8-character Identifier and linked to the Node that hosts it.
// gorm tags define the schema; validate tags drive IsValid/BeforeSave.
type Server struct {
Name string `gorm:"UNIQUE_INDEX;size:20;NOT NULL" json:"-" validate:"required,printascii"`
Identifier string `gorm:"UNIQUE_INDEX;NOT NULL;PRIMARY_KEY;size:8" json:"-" validate:"required,printascii"`
NodeID uint `gorm:"NOT NULL" json:"-" validate:"required,min=1"`
Node Node `gorm:"ASSOCIATION_SAVE_REFERENCE:false" json:"-" validate:"-"`
IP string `gorm:"" json:"-" validate:"omitempty,ip|fqdn"`
Port uint16 `gorm:"" json:"-" validate:"omitempty"`
Type string `gorm:"NOT NULL;default='generic'" json:"-" validate:"required,printascii"`
CreatedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`
}
type Servers []*Server
// IsValid runs struct-tag validation on s, converting any failure into a
// pufferpanel validation message.
func (s *Server) IsValid() (err error) {
	if err = validator.New().Struct(s); err != nil {
		err = pufferpanel.GenerateValidationMessage(err)
	}
	return
}
// BeforeSave is the gorm hook that refuses to persist an invalid server.
func (s *Server) BeforeSave() (err error) {
	return s.IsValid()
}
Commit message: "Double names" — the file below is the post-commit version (the Name column width is doubled from size:20 to size:40).
package models
import (
"github.com/pufferpanel/pufferpanel/v2"
"gopkg.in/go-playground/validator.v9"
"time"
)
// Server is the panel-side record of a game server instance, keyed by its
// 8-character Identifier and linked to the Node that hosts it.
// gorm tags define the schema; validate tags drive IsValid/BeforeSave.
type Server struct {
Name string `gorm:"UNIQUE_INDEX;size:40;NOT NULL" json:"-" validate:"required,printascii"`
Identifier string `gorm:"UNIQUE_INDEX;NOT NULL;PRIMARY_KEY;size:8" json:"-" validate:"required,printascii"`
NodeID uint `gorm:"NOT NULL" json:"-" validate:"required,min=1"`
Node Node `gorm:"ASSOCIATION_SAVE_REFERENCE:false" json:"-" validate:"-"`
IP string `gorm:"" json:"-" validate:"omitempty,ip|fqdn"`
Port uint16 `gorm:"" json:"-" validate:"omitempty"`
Type string `gorm:"NOT NULL;default='generic'" json:"-" validate:"required,printascii"`
CreatedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`
}
type Servers []*Server
// IsValid runs struct-tag validation on s, converting any failure into a
// pufferpanel validation message.
func (s *Server) IsValid() (err error) {
	if err = validator.New().Struct(s); err != nil {
		err = pufferpanel.GenerateValidationMessage(err)
	}
	return
}
// BeforeSave is the gorm hook that refuses to persist an invalid server.
func (s *Server) BeforeSave() (err error) {
	return s.IsValid()
}
|
// Copyright 2014 Brian J. Downs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package formatifier
import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)
// ToPhone will format the provided string as a phone number. Only US
// numbers are supported: inputs normalizing to 10 digits become
// "(XXX) XXX<delimiter>YYYY"; 11 digits become "X (XXX) XXX<delimiter>YYYY".
// Any other digit count is now an error (the original silently returned "").
func ToPhone(theString string, delimiter string) (string, error) {
	if len(theString) < 10 {
		return "", errors.New(lengthError)
	}
	f := New(theString)
	f.removeNonDigits()
	var buffer bytes.Buffer
	count := 0
	switch len(f.theString) {
	case 10:
		buffer.WriteString("(")
		for _, i := range f.theString {
			count++
			buffer.WriteString(string(i))
			switch count {
			case 3:
				buffer.WriteString(") ")
			case 6:
				buffer.WriteString(delimiter) // was fmt.Sprintf("%s", delimiter): redundant
			}
		}
	case 11:
		for _, i := range f.theString {
			count++
			buffer.WriteString(string(i))
			switch count {
			case 1:
				buffer.WriteString(" (")
			case 4:
				buffer.WriteString(") ")
			case 7:
				buffer.WriteString(delimiter)
			}
		}
	default:
		return "", errors.New(lengthError)
	}
	return buffer.String(), nil
}
// ToURL will format the provided string as a URL. HTTP and HTTPS are the
// only supported protocols; an optional subdomain is prepended to the
// lower-cased host.
func ToURL(theString string, secure bool, subdomain string) (string, error) {
	// Assume at least a 1-character domain, a dot, and a two-char TLD.
	if len(theString) < 4 {
		return "", errors.New(lengthError)
	}
	f := New(theString)
	f.makeLower()
	scheme := "http"
	if secure {
		scheme = "https"
	}
	if subdomain != "" {
		return fmt.Sprintf("%s://%s.%s", scheme, subdomain, f.theString), nil
	}
	return fmt.Sprintf("%s://%s", scheme, f.theString), nil
}
// ToSSN will format the provided string as a SSN.
func ToSSN(theString string, delimiter string) (string, error) {
if len(theString) != 9 {
return "", errors.New(lengthError)
}
f := New(theString)
f.removeNonDigits()
var buffer bytes.Buffer
count := 0
for _, i := range f.theString {
count++
buffer.WriteString(string(i))
if count == 3 || count == 5 {
buffer.WriteString(delimiter)
}
}
return buffer.String(), nil
}
// ToLockCombo will format the provided string as a lock combination
// (XX<d>XX<d>XX). Fixed: the length check required 9 digits while the
// error message and the 2/4 delimiter positions clearly expect 6.
func ToLockCombo(theString string, delimiter string) (string, error) {
	f := New(theString)
	f.removeNonDigits()
	if len(f.theString) != 6 {
		return "", errors.New("ERROR: String needs to be 6 digits for Lock Combo format")
	}
	var buffer bytes.Buffer
	count := 0
	for _, i := range f.theString {
		count++
		buffer.WriteString(string(i))
		if count == 2 || count == 4 {
			buffer.WriteString(delimiter)
		}
	}
	return buffer.String(), nil
}
// ToISBN will format the provided 13-digit string in International
// Standard Book Number (ISBN-13) format, inserting the delimiter after
// digit positions 3, 4, 6 and 12.
func ToISBN(theString string, delimiter string) (string, error) {
	f := New(theString)
	f.removeNonDigits()
	if len(f.theString) != 13 {
		return "", errors.New("ERROR: string must be 13 characters")
	}
	var out bytes.Buffer
	pos := 0
	for _, r := range f.theString {
		pos++
		out.WriteString(string(r))
		switch pos {
		case 3, 4, 6, 12:
			out.WriteString(delimiter)
		}
	}
	return out.String(), nil
}
// ToMorseCode will format the provided string in Morse code using the
// package's `morse` lookup table. Characters without a mapping are
// dropped; spaces are written through as word gaps. Errors only on
// empty input.
func ToMorseCode(theString string) (string, error) {
f := New(theString)
if len(f.theString) < 1 {
return "", errors.New(lengthError)
}
f.makeLower()
var buffer bytes.Buffer
for _, i := range f.theString {
key := string(i)
if _, ok := morse[key]; ok {
buffer.WriteString(morse[key])
} else if key == " " {
buffer.WriteString(" ")
}
}
return buffer.String(), nil
}
// ToPirateSpeak will format the provided string in pirate speak by
// calling the remote translation endpoint in pirateLink and returning
// its raw response body. Requires network access; translation failures
// surface as a generic conversion error.
func ToPirateSpeak(theString string) (string, error) {
f := New(theString)
if len(f.theString) < 1 {
return "", errors.New(lengthError)
}
// Spaces must be URL-encoded before interpolation into the URL.
f.urlEncodeSpaces()
response, err := http.Get(fmt.Sprintf(pirateLink, f.theString))
if err != nil {
return "", errors.New("ERROR: Unable to convert")
}
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return "", err
}
return string(contents), nil
}
// ToIRSA will format the provided string in the International
// Radio-Telephony Spelling Alphabet ("NATO alphabet"), emitting one
// code word plus trailing space per mapped character; unmapped
// characters are dropped.
func ToIRSA(theString string) (string, error) {
	f := New(theString)
	if len(f.theString) < 1 {
		return "", errors.New(lengthError)
	}
	f.makeLower()
	var out bytes.Buffer
	for _, r := range f.theString {
		// theString is already lower-cased; ToLower kept for rune parity.
		if word, ok := irsa[strings.ToLower(string(r))]; ok {
			out.WriteString(word + " ")
		}
	}
	return out.String(), nil
}
// ToLeet will format the provided string in leet speak: whole words with
// an entry in the `leet` table are replaced by their first substitution;
// otherwise each mapped letter is substituted individually. A space
// follows every word.
// TODO(bdowns328) Make select post match random.
func ToLeet(theString string) (string, error) {
	f := New(theString)
	if len(f.theString) < 1 {
		return "", errors.New(lengthError)
	}
	f.makeLower()
	var out bytes.Buffer
	for _, word := range strings.Fields(f.theString) {
		if subs, ok := leet[word]; ok {
			// Whole-word match.
			out.WriteString(subs[0] + " ")
			continue
		}
		for _, r := range word {
			if subs, ok := leet[string(r)]; ok {
				out.WriteString(subs[0])
			}
		}
		out.WriteString(" ")
	}
	return out.String(), nil
}
Commit message: "fixed length check bug in ToLockCombo" — the file below is the post-commit version of the same source.
// Copyright 2014 Brian J. Downs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package formatifier
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
// ToPhone will format the provided string as a phone number. Only US
// numbers are supported: inputs normalizing to 10 digits become
// "(XXX) XXX<delimiter>YYYY"; 11 digits become "X (XXX) XXX<delimiter>YYYY".
// Any other digit count is now an error (the original silently returned "").
func ToPhone(theString string, delimiter string) (string, error) {
	if len(theString) < 10 {
		return "", errors.New(lengthError)
	}
	f := New(theString)
	f.removeNonDigits()
	var buffer bytes.Buffer
	count := 0
	switch len(f.theString) {
	case 10:
		buffer.WriteString("(")
		for _, i := range f.theString {
			count++
			buffer.WriteString(string(i))
			switch count {
			case 3:
				buffer.WriteString(") ")
			case 6:
				buffer.WriteString(delimiter) // was fmt.Sprintf("%s", delimiter): redundant
			}
		}
	case 11:
		for _, i := range f.theString {
			count++
			buffer.WriteString(string(i))
			switch count {
			case 1:
				buffer.WriteString(" (")
			case 4:
				buffer.WriteString(") ")
			case 7:
				buffer.WriteString(delimiter)
			}
		}
	default:
		return "", errors.New(lengthError)
	}
	return buffer.String(), nil
}
// ToURL will format the provided string as a URL. HTTP and HTTPS are the
// only supported protocols; an optional subdomain is prepended to the
// lower-cased host.
func ToURL(theString string, secure bool, subdomain string) (string, error) {
	// Assume at least a 1-character domain, a dot, and a two-char TLD.
	if len(theString) < 4 {
		return "", errors.New(lengthError)
	}
	f := New(theString)
	f.makeLower()
	scheme := "http"
	if secure {
		scheme = "https"
	}
	if subdomain != "" {
		return fmt.Sprintf("%s://%s.%s", scheme, subdomain, f.theString), nil
	}
	return fmt.Sprintf("%s://%s", scheme, f.theString), nil
}
// ToSSN will format the provided string as a US Social Security Number
// (AAA<d>GG<d>SSSS). Non-digits are stripped before the 9-digit check,
// so already-formatted input is accepted.
func ToSSN(theString string, delimiter string) (string, error) {
	f := New(theString)
	f.removeNonDigits()
	if len(f.theString) != 9 {
		return "", errors.New("ERROR: String needs to be 9 digits for Social Security Numbers")
	}
	var out bytes.Buffer
	pos := 0
	for _, r := range f.theString {
		pos++
		out.WriteString(string(r))
		switch pos {
		case 3, 5:
			out.WriteString(delimiter)
		}
	}
	return out.String(), nil
}
// ToLockCombo will format the provided string as a lock combination
// (XX<d>XX<d>XX); the input must normalize to exactly 6 digits.
func ToLockCombo(theString string, delimiter string) (string, error) {
	f := New(theString)
	f.removeNonDigits()
	if len(f.theString) != 6 {
		return "", errors.New("ERROR: String needs to be 6 digits for Lock Combo format")
	}
	var out bytes.Buffer
	pos := 0
	for _, r := range f.theString {
		pos++
		out.WriteString(string(r))
		switch pos {
		case 2, 4:
			out.WriteString(delimiter)
		}
	}
	return out.String(), nil
}
// ToISBN will format the provided 13-digit string in International
// Standard Book Number (ISBN-13) format, inserting the delimiter after
// digit positions 3, 4, 6 and 12.
func ToISBN(theString string, delimiter string) (string, error) {
	f := New(theString)
	f.removeNonDigits()
	if len(f.theString) != 13 {
		return "", errors.New("ERROR: string must be 13 characters")
	}
	var out bytes.Buffer
	pos := 0
	for _, r := range f.theString {
		pos++
		out.WriteString(string(r))
		switch pos {
		case 3, 4, 6, 12:
			out.WriteString(delimiter)
		}
	}
	return out.String(), nil
}
// ToMorseCode will format the provided string in Morse code using the
// package's `morse` lookup table. Characters without a mapping are
// dropped; spaces are written through as word gaps. Errors only on
// empty input.
func ToMorseCode(theString string) (string, error) {
f := New(theString)
if len(f.theString) < 1 {
return "", errors.New(lengthError)
}
f.makeLower()
var buffer bytes.Buffer
for _, i := range f.theString {
key := string(i)
if _, ok := morse[key]; ok {
buffer.WriteString(morse[key])
} else if key == " " {
buffer.WriteString(" ")
}
}
return buffer.String(), nil
}
// ToPirateSpeak will format the provided string in pirate speak by
// calling the remote translation endpoint in pirateLink and returning
// its raw response body. Requires network access; translation failures
// surface as a generic conversion error.
func ToPirateSpeak(theString string) (string, error) {
f := New(theString)
if len(f.theString) < 1 {
return "", errors.New(lengthError)
}
// Spaces must be URL-encoded before interpolation into the URL.
f.urlEncodeSpaces()
response, err := http.Get(fmt.Sprintf(pirateLink, f.theString))
if err != nil {
return "", errors.New("ERROR: Unable to convert")
}
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return "", err
}
return string(contents), nil
}
// ToIRSA will format the provided string in the International
// Radio-Telephony Spelling Alphabet ("NATO alphabet"), emitting one
// code word plus trailing space per mapped character; unmapped
// characters are dropped.
func ToIRSA(theString string) (string, error) {
	f := New(theString)
	if len(f.theString) < 1 {
		return "", errors.New(lengthError)
	}
	f.makeLower()
	var out bytes.Buffer
	for _, r := range f.theString {
		// theString is already lower-cased; ToLower kept for rune parity.
		if word, ok := irsa[strings.ToLower(string(r))]; ok {
			out.WriteString(word + " ")
		}
	}
	return out.String(), nil
}
// ToLeet will format the provided string in leet speak: whole words with
// an entry in the `leet` table are replaced by their first substitution;
// otherwise each mapped letter is substituted individually. A space
// follows every word.
// TODO(bdowns328) Make select post match random.
func ToLeet(theString string) (string, error) {
	f := New(theString)
	if len(f.theString) < 1 {
		return "", errors.New(lengthError)
	}
	f.makeLower()
	var out bytes.Buffer
	for _, word := range strings.Fields(f.theString) {
		if subs, ok := leet[word]; ok {
			// Whole-word match.
			out.WriteString(subs[0] + " ")
			continue
		}
		for _, r := range word {
			if subs, ok := leet[string(r)]; ok {
				out.WriteString(subs[0])
			}
		}
		out.WriteString(" ")
	}
	return out.String(), nil
}
|
package state
import (
"errors"
"log"
"strings"
"unicode/utf8"
"github.com/belak/irc"
"github.com/belak/seabird/bot"
)
// init registers this package as the "state" plugin with the bot
// framework; NewStatePlugin is the plugin constructor.
func init() {
bot.RegisterPlugin("state", NewStatePlugin)
}
/*
* TODO:
* Public interface
* Stop making assumptions about the number of params
*/
// State is a plugin which will track the state of users and channels.
type State struct {
// currentNick is the bot's own nick, as confirmed by the server.
currentNick string
// chanTypes holds the channel prefix characters (e.g. '#') from ISUPPORT.
chanTypes map[rune]bool
// chanModes holds the four ISUPPORT CHANMODES groups (A/B/C/D).
chanModes []map[rune]bool
// userModes holds the user modes advertised in RPL_MYINFO.
userModes map[rune]bool
// isupport stores raw RPL_ISUPPORT key/value tokens.
isupport map[string]string
// prefixModes / modePrefixes map between mode chars and their
// nick prefixes (e.g. 'o' <-> '@'), in both directions.
prefixModes map[rune]rune
modePrefixes map[rune]rune
}
// NewStatePlugin wires a fresh State tracker into the bot's event mux and
// returns it as a bot.Plugin. It resets defaults via clear() and then
// registers callbacks for membership events (JOIN/KICK/MODE/NICK/PART/QUIT)
// and the numeric replies needed to learn server capabilities and user
// lists (001, 004, 005, 352/315, 353/366).
func NewStatePlugin(b *bot.Bot) (bot.Plugin, error) {
s := &State{}
s.clear()
b.BasicMux.Event("JOIN", s.joinCallback)
b.BasicMux.Event("KICK", s.kickCallback)
b.BasicMux.Event("MODE", s.modeCallback)
b.BasicMux.Event("NICK", s.nickCallback)
b.BasicMux.Event("PART", s.partCallback)
b.BasicMux.Event("QUIT", s.quitCallback)
b.BasicMux.Event("001", s.callback001) // RPL_WELCOME
b.BasicMux.Event("004", s.callback004) // RPL_MYINFO
b.BasicMux.Event("005", s.callback005) // RPL_ISUPPORT
b.BasicMux.Event("352", s.callback352) // RPL_WHOREPLY
b.BasicMux.Event("315", s.callback315) // RPL_ENDOFWHO
b.BasicMux.Event("353", s.callback353) // RPL_NAMES
b.BasicMux.Event("366", s.callback366) // RPL_ENDOFNAMES
// b.BasicMux.Event("004", s.debugCallback)
// b.BasicMux.Event("005", s.debugCallback)
// TODO: CAP REQ multi-prefix
/* These are callbacks which might be useful eventually
b.BasicMux.Event("TOPIC", s.topicCallback)
b.BasicMux.Event("221", s.callback221) // RPL_UMODEIS
b.BasicMux.Event("305", s.callback305) // RPL_UNAWAY
b.BasicMux.Event("306", s.callback306) // RPL_NOWAWAY
b.BasicMux.Event("324", s.callback324) // RPL_CHANNELMODEIS
b.BasicMux.Event("328", s.callback328) // RPL_CHANNEL_URL
b.BasicMux.Event("329", s.callback329) // RPL_CREATIONTIME
b.BasicMux.Event("332", s.callback332) // RPL_TOPIC
b.BasicMux.Event("333", s.callback333) // RPL_TOPICWHOTIME
b.BasicMux.Event("346", s.callback346) // RPL_INVITELIST
b.BasicMux.Event("347", s.callback347) // RPL_ENDOFINVITELIST
b.BasicMux.Event("348", s.callback348) // RPL_EXCEPTLIST
b.BasicMux.Event("349", s.callback349) // RPL_ENDOFEXCEPTLIST
b.BasicMux.Event("367", s.callback367) // RPL_BANLIST
b.BasicMux.Event("368", s.callback368) // RPL_ENDOFBANLIST
*/
return s, nil
}
// clear resets all tracked state to empty maps, then replays synthetic
// 004/005 messages so that the normal callback code paths install the
// default values.
func (s *State) clear() {
	s.isupport = make(map[string]string)
	// Four CHANMODES classes: list-like, key-like, limit-like, settings.
	s.chanModes = []map[rune]bool{
		map[rune]bool{},
		map[rune]bool{},
		map[rune]bool{},
		map[rune]bool{},
	}
	s.chanTypes = make(map[rune]bool)
	s.userModes = make(map[rune]bool)
	s.prefixModes = make(map[rune]rune)
	s.modePrefixes = make(map[rune]rune)

	// Create a bogus message to send through callback004 to set
	// some defaults we're missing.
	m := &irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "004",
		Params:  []string{"", "", "", "Oiorw"},
	}
	s.callback004(nil, m)

	// Create a bogus message to send through callback005 so we
	// ensure any defaults which would have set special values
	// actually set things.
	m = &irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "005",
		Params:  []string{},
	}
	for k := range isupportDefaults {
		m.Params = append(m.Params, "-"+k)
	}
	m.Params = append(m.Params, "are supported by this server.")
	s.callback005(nil, m)
}
// debugCallback logs the full incoming message; development aid only
// (see the commented-out registrations in NewStatePlugin).
func (s *State) debugCallback(b *bot.Bot, m *irc.Message) {
	log.Printf("%+v", m)
}

// joinCallback handles JOIN. If the bot itself joined, it issues a WHO
// for the whole channel to learn its members; otherwise it WHOs just
// the joining user.
func (s *State) joinCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Prefix.Name

	log.Printf("%s joined channel %s\n", uname, cname)
	if uname == s.currentNick {
		log.Println("Joining new channel")
		// Queue up a WHO so we can get all the nicks in this
		// channel.
		//
		// TODO: This might not be needed if RPL_NAMES has
		// what we need.
		b.Writef("WHO :%s", cname)
	} else {
		// Run a WHO on the user to get the info we need
		b.Writef("WHO :%s", uname)
	}
}
// partCallback handles PART, logging users (including the bot itself)
// leaving a channel.
func (s *State) partCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Prefix.Name

	log.Printf("%s left channel %s", uname, cname)
	if uname == s.currentNick {
		log.Println("Bot has been left", cname)
	}
}
// IsChannel reports whether name looks like a channel, i.e. whether its
// first rune is one of the tracked channel-type prefixes.
func (s *State) IsChannel(name string) bool {
	first, width := utf8.DecodeRuneInString(name)
	if width == 0 {
		// Empty string: there is no leading rune to inspect.
		return false
	}
	return s.chanTypes[first]
}
// modeCallback handles MODE. Each mode char in the modestring is
// classified against the four CHANMODES classes (or the PREFIX modes)
// to decide whether it consumes a parameter; the results are currently
// only logged, not stored.
func (s *State) modeCallback(b *bot.Bot, m *irc.Message) {
	log.Printf("%+v", m)

	target := m.Params[0]
	modestring := m.Params[1]
	msgParams := m.Params[2:]

	isChannel := s.IsChannel(target)

	// Convenience function to modify the slice and pop the first param
	popParam := func() (string, error) {
		if len(msgParams) == 0 {
			return "", errors.New("No more params")
		}
		p := msgParams[0]
		msgParams = msgParams[1:]
		return p, nil
	}

	// state is '+' or '-' and carries across subsequent mode chars.
	state := '+'
	for _, v := range modestring {
		if v == '+' || v == '-' {
			state = v
		} else if isChannel {
			if ok := s.chanModes[0][v]; ok {
				// list-like (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Adding %s to list for mode %s", p, string(v))
				} else {
					log.Printf("Removing %s from list for mode %s", p, string(v))
				}
			} else if ok := s.chanModes[1][v]; ok {
				// key-like (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Setting mode %s with param %s", string(v), p)
				} else {
					log.Printf("Unsetting mode %s with param %s", string(v), p)
				}
			} else if ok := s.chanModes[2][v]; ok {
				// limit-like (take param if in + state)
				if state == '+' {
					p, err := popParam()
					if err != nil {
						continue
					}
					log.Printf("Setting mode %s to %s", string(v), p)
				} else {
					log.Printf("Unsetting mode %s", string(v))
				}
			} else if ok := s.chanModes[3][v]; ok {
				// settings (never take param)
				if state == '+' {
					log.Printf("Setting mode %s", string(v))
				} else {
					log.Printf("Unsetting mode %s", string(v))
				}
			} else if mp, ok := s.modePrefixes[v]; ok {
				// user prefix (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Setting prefix %s (%s) on user %s", string(mp), string(v), p)
				} else {
					log.Printf("Unsetting prefix %s (%s) on user %s", string(mp), string(v), p)
				}
			}
		} else {
			// Target is not a channel, so treat everything as a user mode.
			if state == '+' {
				log.Printf("Setting user mode %s", string(v))
			} else {
				log.Printf("Unsetting user mode %s", string(v))
			}
		}
	}
}
// quitCallback handles QUIT, logging the departing user.
func (s *State) quitCallback(b *bot.Bot, m *irc.Message) {
	uname := m.Prefix.Name

	log.Printf("%s has quit", uname)
	if uname == s.currentNick {
		log.Printf("Bot has quit. This is generally bad.")
		// TODO: Well, shit. At this point it probably doesn't
		// matter what we do.
	}
}

// kickCallback handles KICK, logging who was kicked from which channel.
func (s *State) kickCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Params[1]

	log.Printf("%s has been kicked from %s\n", uname, cname)
	if uname == s.currentNick {
		log.Println("Bot has been kicked from", cname)
	}
}

// nickCallback handles NICK, keeping currentNick in sync when the bot's
// own nick changes.
func (s *State) nickCallback(b *bot.Bot, m *irc.Message) {
	oldNick := m.Prefix.Name
	newNick := m.Params[0]

	log.Printf("%s changed nick to %s\n", oldNick, newNick)
	if oldNick == s.currentNick {
		log.Println("Updating current bot nick to", newNick)
		s.currentNick = newNick
	}
}
// RPL_WELCOME
//
// callback001 records the nick the server assigned us and resets all
// other tracked state for the new connection.
func (s *State) callback001(b *bot.Bot, m *irc.Message) {
	s.currentNick = m.Params[0]
	s.clear()
}

// RPL_MYINFO
//
// callback004 records the user modes the server advertises.
func (s *State) callback004(b *bot.Bot, m *irc.Message) {
	s.userModes = make(map[rune]bool)
	umodes := m.Params[3]
	for _, mode := range umodes {
		s.userModes[mode] = true
	}
}
// RPL_WHOREPLY
//
// callback352 handles one WHO response line, logging the here/away
// status and any channel-prefix flags reported for that user.
func (s *State) callback352(b *bot.Bot, m *irc.Message) {
	// <source> 352 <target> <channel> <user> <host> <server> <nick> <flags> :<distance> <realname>
	// :kenny.chatspike.net 352 guest #test grawity broken.symlink *.chatspike.net grawity H@%+ :0 Mantas M.
	var (
		// target = m.Params[0]
		channel = m.Params[1]
		user    = m.Params[2]
		host    = m.Params[3]
		// server = m.Params[4]
		nick  = m.Params[5]
		flags = m.Params[6]
		// rest = m.Params[7] // Or m.Trailing()
	)

	log.Printf("Flags for %s!%s@%s on %s: %s", nick, user, host, channel, flags)

	// Guard against a malformed reply with an empty flags field; the
	// previous code indexed flags[0] unconditionally and would panic.
	if len(flags) > 0 {
		if flags[0] == 'H' {
			log.Println("User is here")
			flags = flags[1:]
		} else if flags[0] == 'G' {
			log.Println("User is away")
			flags = flags[1:]
		}
	}

	for _, c := range flags {
		log.Printf("User has prefix %s (%s)", string(c), string(s.prefixModes[c]))
	}
}
// RPL_ENDOFWHO
//
// callback315 marks the end of a WHO listing for a channel or user.
func (s *State) callback315(b *bot.Bot, m *irc.Message) {
	// :kenny.chatspike.net 315 guest #test :End of /WHO list.
	log.Printf("End of WHO for %s", m.Params[1])
}
// RPL_NAMES
//
// callback353 parses one NAMES reply line, splitting each listed name
// into its prefix chars and the bare nick, and logging the result.
func (s *State) callback353(b *bot.Bot, m *irc.Message) {
	// :hades.arpa 353 guest = #tethys :~&@%+aji &@Attila @+alyx +KindOne Argure
	channel := m.Params[2]
	for _, name := range strings.Split(m.Trailing(), " ") {
		// Trim prefix chars from the left
		user := strings.TrimLeftFunc(name, func(r rune) bool {
			_, ok := s.prefixModes[r]
			return ok
		})

		// Grab just the modes from the original string
		modes := strings.TrimSuffix(name, user)

		// Loop through each of the modes
		for _, p := range modes {
			log.Printf("User %s has prefix %s (%s) in channel %s", user, string(p), string(s.prefixModes[p]), channel)
		}
	}
}

// RPL_ENDOFNAMES
//
// callback366 marks the end of a NAMES listing for a channel.
func (s *State) callback366(b *bot.Bot, m *irc.Message) {
	// :hades.arpa 366 guest #tethys :End of /NAMES list.
	log.Printf("End of NAMES for %s", m.Params[1])
}
Small updates
package state
import (
"errors"
"log"
"strings"
"unicode/utf8"
"github.com/belak/irc"
"github.com/belak/seabird/bot"
)
// init registers this plugin with the bot framework under the name
// "state".
func init() {
	bot.RegisterPlugin("state", NewStatePlugin)
}
/*
* TODO:
* Public interface
* Stop making assumptions about the number of params
*/
// State is a plugin which will track the state of users and channels.
type State struct {
	// currentNick is the nick the bot is currently using.
	currentNick string

	// These come from 004
	userModes map[rune]bool

	// These come from 005
	chanTypes map[rune]bool
	// chanModes holds the four CHANMODES classes in order: list-like,
	// key-like, limit-like, and plain settings.
	chanModes []map[rune]bool
	isupport  map[string]string

	// @ -> o
	prefixModes map[rune]rune
	// o -> @
	modePrefixes map[rune]rune
}
// NewStatePlugin creates the State plugin and wires its callbacks into
// the bot's basic event mux.
func NewStatePlugin(b *bot.Bot) (bot.Plugin, error) {
	s := &State{}

	// Initialize all maps and apply defaults before any events arrive.
	s.clear()

	b.BasicMux.Event("JOIN", s.joinCallback)
	b.BasicMux.Event("KICK", s.kickCallback)
	b.BasicMux.Event("MODE", s.modeCallback)
	b.BasicMux.Event("NICK", s.nickCallback)
	b.BasicMux.Event("PART", s.partCallback)
	b.BasicMux.Event("QUIT", s.quitCallback)

	b.BasicMux.Event("001", s.callback001) // RPL_WELCOME
	b.BasicMux.Event("004", s.callback004) // RPL_MYINFO
	b.BasicMux.Event("005", s.callback005) // RPL_ISUPPORT

	b.BasicMux.Event("352", s.callback352) // RPL_WHOREPLY
	b.BasicMux.Event("315", s.callback315) // RPL_ENDOFWHO

	b.BasicMux.Event("353", s.callback353) // RPL_NAMES
	b.BasicMux.Event("366", s.callback366) // RPL_ENDOFNAMES

	// TODO: CAP REQ multi-prefix

	/* These are callbacks which might be useful eventually
	b.BasicMux.Event("TOPIC", s.topicCallback)
	b.BasicMux.Event("221", s.callback221) // RPL_UMODEIS
	b.BasicMux.Event("305", s.callback305) // RPL_UNAWAY
	b.BasicMux.Event("306", s.callback306) // RPL_NOWAWAY
	b.BasicMux.Event("324", s.callback324) // RPL_CHANNELMODEIS
	b.BasicMux.Event("328", s.callback328) // RPL_CHANNEL_URL
	b.BasicMux.Event("329", s.callback329) // RPL_CREATIONTIME
	b.BasicMux.Event("332", s.callback332) // RPL_TOPIC
	b.BasicMux.Event("333", s.callback333) // RPL_TOPICWHOTIME
	b.BasicMux.Event("346", s.callback346) // RPL_INVITELIST
	b.BasicMux.Event("347", s.callback347) // RPL_ENDOFINVITELIST
	b.BasicMux.Event("348", s.callback348) // RPL_EXCEPTLIST
	b.BasicMux.Event("349", s.callback349) // RPL_ENDOFEXCEPTLIST
	b.BasicMux.Event("367", s.callback367) // RPL_BANLIST
	b.BasicMux.Event("368", s.callback368) // RPL_ENDOFBANLIST
	*/

	return s, nil
}
// clear resets all tracked state to empty maps, then replays synthetic
// 004/005 messages so that the normal callback code paths install the
// default values.
func (s *State) clear() {
	s.isupport = make(map[string]string)
	// Four CHANMODES classes: list-like, key-like, limit-like, settings.
	s.chanModes = []map[rune]bool{
		map[rune]bool{},
		map[rune]bool{},
		map[rune]bool{},
		map[rune]bool{},
	}
	s.chanTypes = make(map[rune]bool)
	s.userModes = make(map[rune]bool)
	s.prefixModes = make(map[rune]rune)
	s.modePrefixes = make(map[rune]rune)

	// Create a bogus message to send through callback004 to set
	// some defaults we're missing.
	m := &irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "004",
		Params:  []string{"", "", "", "Oiorw"},
	}
	s.callback004(nil, m)

	// Create a bogus message to send through callback005 so we
	// ensure any defaults which would have set special values
	// actually set things.
	m = &irc.Message{
		Prefix:  &irc.Prefix{},
		Command: "005",
		Params:  []string{},
	}
	for k := range isupportDefaults {
		m.Params = append(m.Params, "-"+k)
	}
	m.Params = append(m.Params, "are supported by this server.")
	s.callback005(nil, m)
}
// debugCallback logs the full incoming message; development aid only
// (not registered by default).
func (s *State) debugCallback(b *bot.Bot, m *irc.Message) {
	log.Printf("%+v", m)
}

// joinCallback handles JOIN. If the bot itself joined, it issues a WHO
// for the whole channel to learn its members; otherwise it WHOs just
// the joining user.
func (s *State) joinCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Prefix.Name

	log.Printf("%s joined channel %s\n", uname, cname)
	if uname == s.currentNick {
		log.Println("Joining new channel")
		// Queue up a WHO so we can get all the nicks in this
		// channel.
		//
		// TODO: This might not be needed if RPL_NAMES has
		// what we need.
		b.Writef("WHO :%s", cname)
	} else {
		// Run a WHO on the user to get the info we need
		b.Writef("WHO :%s", uname)
	}
}
// partCallback handles PART, logging users (including the bot itself)
// leaving a channel.
func (s *State) partCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Prefix.Name

	log.Printf("%s left channel %s", uname, cname)
	if uname == s.currentNick {
		log.Println("Bot has been left", cname)
	}
}
// IsChannel reports whether name looks like a channel, i.e. whether its
// first rune is one of the tracked channel-type prefixes.
func (s *State) IsChannel(name string) bool {
	first, width := utf8.DecodeRuneInString(name)
	if width == 0 {
		// Empty string: there is no leading rune to inspect.
		return false
	}
	return s.chanTypes[first]
}
// modeCallback handles MODE. Each mode char in the modestring is
// classified against the four CHANMODES classes (or the PREFIX modes)
// to decide whether it consumes a parameter; the results are currently
// only logged, not stored.
func (s *State) modeCallback(b *bot.Bot, m *irc.Message) {
	log.Printf("%+v", m)

	target := m.Params[0]
	modestring := m.Params[1]
	msgParams := m.Params[2:]

	isChannel := s.IsChannel(target)

	// Convenience function to modify the slice and pop the first param
	popParam := func() (string, error) {
		if len(msgParams) == 0 {
			return "", errors.New("No more params")
		}
		p := msgParams[0]
		msgParams = msgParams[1:]
		return p, nil
	}

	// state is '+' or '-' and carries across subsequent mode chars.
	state := '+'
	for _, v := range modestring {
		if v == '+' || v == '-' {
			state = v
		} else if isChannel {
			if ok := s.chanModes[0][v]; ok {
				// list-like (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Adding %s to list for mode %s", p, string(v))
				} else {
					log.Printf("Removing %s from list for mode %s", p, string(v))
				}
			} else if ok := s.chanModes[1][v]; ok {
				// key-like (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Setting mode %s with param %s", string(v), p)
				} else {
					log.Printf("Unsetting mode %s with param %s", string(v), p)
				}
			} else if ok := s.chanModes[2][v]; ok {
				// limit-like (take param if in + state)
				if state == '+' {
					p, err := popParam()
					if err != nil {
						continue
					}
					log.Printf("Setting mode %s to %s", string(v), p)
				} else {
					log.Printf("Unsetting mode %s", string(v))
				}
			} else if ok := s.chanModes[3][v]; ok {
				// settings (never take param)
				if state == '+' {
					log.Printf("Setting mode %s", string(v))
				} else {
					log.Printf("Unsetting mode %s", string(v))
				}
			} else if mp, ok := s.modePrefixes[v]; ok {
				// user prefix (always take param)
				p, err := popParam()
				if err != nil {
					continue
				}
				if state == '+' {
					log.Printf("Setting prefix %s (%s) on user %s", string(mp), string(v), p)
				} else {
					log.Printf("Unsetting prefix %s (%s) on user %s", string(mp), string(v), p)
				}
			}
		} else {
			// Target is not a channel, so treat everything as a user mode.
			if state == '+' {
				log.Printf("Setting user mode %s", string(v))
			} else {
				log.Printf("Unsetting user mode %s", string(v))
			}
		}
	}
}
// quitCallback handles QUIT, logging the departing user.
func (s *State) quitCallback(b *bot.Bot, m *irc.Message) {
	uname := m.Prefix.Name

	log.Printf("%s has quit", uname)
	if uname == s.currentNick {
		log.Printf("Bot has quit. This is generally bad.")
		// TODO: Well, shit. At this point it probably doesn't
		// matter what we do.
	}
}

// kickCallback handles KICK, logging who was kicked from which channel.
func (s *State) kickCallback(b *bot.Bot, m *irc.Message) {
	cname := m.Params[0]
	uname := m.Params[1]

	log.Printf("%s has been kicked from %s\n", uname, cname)
	if uname == s.currentNick {
		log.Println("Bot has been kicked from", cname)
	}
}

// nickCallback handles NICK, keeping currentNick in sync when the bot's
// own nick changes.
func (s *State) nickCallback(b *bot.Bot, m *irc.Message) {
	oldNick := m.Prefix.Name
	newNick := m.Params[0]

	log.Printf("%s changed nick to %s\n", oldNick, newNick)
	if oldNick == s.currentNick {
		log.Println("Updating current bot nick to", newNick)
		s.currentNick = newNick
	}
}
// RPL_WELCOME
//
// callback001 records the nick the server assigned us and resets all
// other tracked state for the new connection.
func (s *State) callback001(b *bot.Bot, m *irc.Message) {
	s.currentNick = m.Params[0]
	s.clear()
}

// RPL_MYINFO
//
// callback004 records the user modes the server advertises.
func (s *State) callback004(b *bot.Bot, m *irc.Message) {
	// NOTE: This would work for more than userModes, but we set a
	// number of other things in 005 so there's no point setting
	// them here. Plus, for compatibility, we can only get a
	// limited subset of what we can get from 005.
	s.userModes = make(map[rune]bool)
	umodes := m.Params[3]
	for _, mode := range umodes {
		s.userModes[mode] = true
	}
}
// RPL_WHOREPLY
//
// callback352 handles one WHO response line, logging the here/away
// status and any channel-prefix flags reported for that user.
func (s *State) callback352(b *bot.Bot, m *irc.Message) {
	// <source> 352 <target> <channel> <user> <host> <server> <nick> <flags> :<distance> <realname>
	// :kenny.chatspike.net 352 guest #test grawity broken.symlink *.chatspike.net grawity H@%+ :0 Mantas M.
	var (
		// target = m.Params[0]
		channel = m.Params[1]
		user    = m.Params[2]
		host    = m.Params[3]
		// server = m.Params[4]
		nick  = m.Params[5]
		flags = m.Params[6]
		// rest = m.Params[7] // Or m.Trailing()
	)

	log.Printf("Flags for %s!%s@%s on %s: %s", nick, user, host, channel, flags)

	// Guard against a malformed reply with an empty flags field; the
	// previous code indexed flags[0] unconditionally and would panic.
	if len(flags) > 0 {
		if flags[0] == 'H' {
			log.Println("User is here")
			flags = flags[1:]
		} else if flags[0] == 'G' {
			log.Println("User is away")
			flags = flags[1:]
		}
	}

	for _, c := range flags {
		log.Printf("User has prefix %s (%s)", string(c), string(s.prefixModes[c]))
	}
}
// RPL_ENDOFWHO
//
// callback315 marks the end of a WHO listing for a channel or user.
func (s *State) callback315(b *bot.Bot, m *irc.Message) {
	// :kenny.chatspike.net 315 guest #test :End of /WHO list.
	log.Printf("End of WHO for %s", m.Params[1])
}
// RPL_NAMES
//
// callback353 parses one NAMES reply line, splitting each listed name
// into its prefix chars and the bare nick, and logging the result.
func (s *State) callback353(b *bot.Bot, m *irc.Message) {
	// :hades.arpa 353 guest = #tethys :~&@%+aji &@Attila @+alyx +KindOne Argure
	channel := m.Params[2]
	for _, name := range strings.Split(m.Trailing(), " ") {
		// Trim prefix chars from the left
		user := strings.TrimLeftFunc(name, func(r rune) bool {
			_, ok := s.prefixModes[r]
			return ok
		})

		// Grab just the modes from the original string
		modes := strings.TrimSuffix(name, user)

		// Loop through each of the modes
		for _, p := range modes {
			log.Printf("User %s has prefix %s (%s) in channel %s", user, string(p), string(s.prefixModes[p]), channel)
		}
	}
}

// RPL_ENDOFNAMES
//
// callback366 marks the end of a NAMES listing for a channel.
func (s *State) callback366(b *bot.Bot, m *irc.Message) {
	// :hades.arpa 366 guest #tethys :End of /NAMES list.
	log.Printf("End of NAMES for %s", m.Params[1])
}
|
package boardgame
import (
"encoding/json"
"github.com/jkomoros/boardgame/errors"
"hash/fnv"
"log"
"math/rand"
"strconv"
)
//ImmutableState is a version of State, but minus any mutator methods. Because
//states may not be modified except by moves, in almost every case where a
//state is passed to game logic you define (whether on your GameDelegate
//methods, or Legal() on your move structs), an ImmutableState will be passed
//instead. If an ImmutableState is passed to your method, it's a strong signal
//that you shouldn't modify the state. Note that idiomatic use (e.g.
//concreteStates) will cast an ImmutableState to a State immediately in order
//to retrieve the concrete structs underneath, but if you do that you have to
//be careful not to inadvertently modify the state because the changes won't
//be persisted. See the documentation for State for more about states in
//general.
type ImmutableState interface {
	//ImmutableGameState is a reference to the underlying object returned
	//from your GameDelegate.GameStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way. The difference is that the object formally exposed
	//lacks the mutator methods, although when you cast back you'll get access
	//to the full struct--be careful not to mutate things as they will not be
	//persisted. See State.GameState for more.
	ImmutableGameState() ImmutableSubState
	//Each PlayerState is a reference to the underlying object returned
	//from your GameDelegate.PlayerStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way. The difference is that the object formally exposed
	//lacks the mutator methods, although when you cast back you'll get access
	//to the full struct--be careful not to mutate things as they will not be
	//persisted. See State.PlayerStates for more.
	ImmutablePlayerStates() []ImmutablePlayerState
	//Each SubState is a reference to the underlying object returned from
	//your GameDelegate.DynamicComponentValuesConstructor() for the deck with
	//that name, and can be safely cast back to that underlying struct so you
	//can access its methods directly in a type-checked way. The difference
	//is that the object formally exposed lacks the mutator methods, although
	//when you cast back you'll get access to the full struct--be careful not
	//to mutate things as they will not be persisted. DynamicComponentValues
	//returns a map of deck name to array of component values, one per
	//component in that deck. See State.DynamicComponentValues for more.
	ImmutableDynamicComponentValues() map[string][]ImmutableSubState
	//ImmutableCurrentPlayer returns the ImmutablePlayerState corresponding to
	//the result of delegate.CurrentPlayerIndex(), or nil if the index isn't
	//valid. This object is the same underlying struct that you returned from
	//GameDelegate.PlayerStateConstructor and can be cast back safely to
	//access the underlying methods. See State.CurrentPlayer for more.
	ImmutableCurrentPlayer() ImmutablePlayerState
	//CurrentPlayerIndex is a simple convenience wrapper around
	//delegate.CurrentPlayerIndex(state) for this state.
	CurrentPlayerIndex() PlayerIndex
	//Version returns the version number the state is (or will be once
	//committed).
	Version() int
	//Copy returns a deep copy of the State, including copied version of the
	//Game and Player States. Note that copying uses the
	//PropertyReadSetConfigurer interface, so any properties not enumerated
	//there or otherwise defined in the constructors on your GameDelegate will
	//not be copied.
	Copy(sanitized bool) (ImmutableState, error)
	//Diagram returns a basic, ascii rendering of the state for debug rendering.
	//It thunks out to Delegate.Diagram.
	Diagram() string
	//Sanitized will return false if this is a full-fidelity State object, or
	//true if it has been sanitized, which means that some properties might be
	//hidden or otherwise altered. This should return true if the object was
	//created with Copy(true)
	Sanitized() bool
	//Computed returns the computed properties for this state.
	computed() *computedProperties
	//SanitizedForPlayer produces a copy state object that has been sanitized
	//for the player at the given index. The state object returned will have
	//Sanitized() return true. Will call GameDelegate.SanitizationPolicy to
	//construct the effective policy to apply. See the documentation for
	//Policy for more on sanitization.
	SanitizedForPlayer(player PlayerIndex) ImmutableState
	//Game is the Game that this state is part of. Calling
	//Game.State(state.Version()) should return a state equivalent to this State
	//(modulo sanitization, if applied).
	Game() *Game
	//StorageRecord returns a StateStorageRecord representing the state.
	StorageRecord() StateStorageRecord
	//containingImmutableStack will return the stack and slot index for the associated
	//component, if that location is not sanitized. If no error is returned,
	//stack.ComponentAt(slotIndex) == c will evaluate to true.
	containingImmutableStack(c Component) (stack ImmutableStack, slotIndex int, err error)
}
//computedProperties holds computed properties for a state: one global
//collection plus one collection per player.
type computedProperties struct {
	Global  PropertyCollection
	Players []PropertyCollection
}

//StateGroupType is the top-level grouping object used in a StatePropertyRef.
type StateGroupType int

//The three groups a StatePropertyRef can point into.
const (
	StateGroupGame StateGroupType = iota
	StateGroupPlayer
	StateGroupDynamicComponentValues
)
//A StatePropertyRef is a reference to a particular property or item in a
//Property in a State, in a structured way. Currently used primarily as an
//input to your GameDelegate's SanitizationPolicy method. Get a new generic
//one, with all properties set to reasonable defaults, from
//NewStatePropertyRef.
type StatePropertyRef struct {
	//Group is which of Game, Player, or DynamicComponentValues this is a
	//reference to.
	Group StateGroupType
	//PropName is the specific property on the given SubStateObject specified
	//by the rest of the StatePropertyRef.
	PropName string
	//PlayerIndex is the index of the player, if Group is StateGroupPlayer.
	PlayerIndex int
	//DeckName is only used when Group is StateGroupDynamicComponentValues
	DeckName string
	//StackIndex specifies the index of the component within the stack (if it
	//is a stack) that is intended. Negative values signify "all components in
	//stack"
	StackIndex int
	//BoardIndex specifies the index of the Stack within the Board (if it is a
	//board) that is intended. Negative values signify "all stacks within the
	//board".
	BoardIndex int
	//DynamicComponentIndex is used only when the Group is
	//StateGroupDynamicComponentValues. Negative values mean "all values in
	//deck".
	DynamicComponentIndex int
}
//NewStatePropertyRef returns an initalized StatePropertyRef with all fields
//set to reasonable defaults. In particular, all of the Index properties are
//set to -1. It is rare for users of the library to need to create their own
//StatePropertyRefs.
func NewStatePropertyRef() StatePropertyRef {
	//Use keyed fields: the previous positional literal listed values in
	//the wrong order for the struct definition (it handed -1 to the
	//string field DeckName and "" to the int field BoardIndex), which
	//does not type-check.
	return StatePropertyRef{
		Group:                 StateGroupGame,
		PropName:              "",
		PlayerIndex:           -1,
		DeckName:              "",
		StackIndex:            -1,
		BoardIndex:            -1,
		DynamicComponentIndex: -1,
	}
}
//associatedReadSetter returns the PropertyReadSetter associated with the
//StatePropertyRef in the given state, or errors if the StatePropertyRef
//does not refer to a valid reader.
func (r StatePropertyRef) associatedReadSetter(st State) (PropertyReadSetter, error) {
	switch r.Group {
	case StateGroupGame:
		gameState := st.GameState()
		if gameState == nil {
			return nil, errors.New("GameState selected, but was nil")
		}
		return gameState.ReadSetter(), nil
	case StateGroupPlayer:
		players := st.PlayerStates()
		if len(players) == 0 {
			return nil, errors.New("PlayerState selected, but no players in state")
		}
		if r.PlayerIndex < 0 {
			return nil, errors.New("PlayerState selected, but negative value for PlayerIndex")
		}
		if r.PlayerIndex >= len(players) {
			return nil, errors.New("PlayerState selected, but with a non-existent PlayerIndex")
		}
		player := players[r.PlayerIndex]
		return player.ReadSetter(), nil
	case StateGroupDynamicComponentValues:
		allDecks := st.DynamicComponentValues()
		if allDecks == nil {
			return nil, errors.New("DynamicComponentValues selected, but was nil")
		}
		values, ok := allDecks[r.DeckName]
		if !ok {
			return nil, errors.New("DeckName did not refer to any component values: " + r.DeckName)
		}
		if r.DynamicComponentIndex < 0 || r.DynamicComponentIndex >= len(values) {
			return nil, errors.New("DynamicComponentIndex referred to a component that didn't exist")
		}
		return values[r.DynamicComponentIndex].ReadSetter(), nil
	}
	return nil, errors.New("Invalid Group type")
}
//PlayerIndex is an int that represents the index of a given player in a game.
//Normal values are [0, game.NumPlayers). Special values are AdminPlayerIndex
//and ObserverPlayerIndex.
type PlayerIndex int

//ObserverPlayerIndex is a special PlayerIndex that denotes that the player in
//question is not one of the normal players, but someone generically watching.
//All hidden state should be hidden to them, and GroupSelf will never trigger
//for them.
const ObserverPlayerIndex PlayerIndex = -1

//AdminPlayerIndex is a special PlayerIndex that denotes the omniscient admin
//who can see all state and make moves whenever they want. This PlayerIndex is
//used for example to apply moves that your GameDelegate.ProposeFixUpMove
//returns, as well as when Timer's fire. It is also used when the server is in
//debug mode, allowing the given player to operate as the admin.
const AdminPlayerIndex PlayerIndex = -2
//State represents the entire semantic state of a game at a given version. For
//your specific game, GameState and PlayerStates will actually be concrete
//structs to your particular game. State is a container of gameStates,
//playerStates, and dynamicComponentValues for your game. Games often define a
//top-level concreteStates() *myGameState, []*myPlayerState so at the top of
//methods that accept a State they can quickly get concrete, type-checked
//types with only a single conversion leap of faith at the top. States contain
//mutable references to their contained SubStates, whereas ImmutableState does
//not. Most of the methods you define that accept states from the core game
//engine will be an ImmutableState, because the only time States should be
//modified is when the game is initially being set up before the first move,
//and during a move's Apply() method.
type State interface {
	//State contains all of the methods of a read-only state.
	ImmutableState
	//GameState is a reference to the underlying object returned from your
	//GameDelegate.GameStateConstructor(), and can be safely cast back to that
	//underlying struct so you can access its methods directly in a
	//type-checked way.
	GameState() SubState
	//Each PlayerState is a reference to the underlying object returned
	//from your GameDelegate.PlayerStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way.
	PlayerStates() []PlayerState
	//Each SubState is a reference to the underlying object returned from
	//your GameDelegate.DynamicComponentValuesConstructor() for the deck with
	//that name, and can be safely cast back to that underlying struct so you
	//can access its methods directly in a type-checked way.
	DynamicComponentValues() map[string][]SubState
	//CurrentPlayer returns the PlayerState corresponding to the result of
	//delegate.CurrentPlayerIndex(), or nil if the index isn't valid. This
	//object is the same underlying struct that you returned from
	//GameDelegate.PlayerStateConstructor and can be cast back safely to
	//access the underlying methods.
	CurrentPlayer() PlayerState
	//Rand returns a source of randomness. All game logic should use this rand
	//source. It is deterministically seeded when it is created for this state
	//based on the game's ID, the game's secret salt, and the version number
	//of the state. Repeated calls to Rand() on the same state will return the
	//same random generator. If games use this source for all of their
	//randomness it allows the game to be played back deterministically, which
	//is useful in some testing scenarios. Rand is only available on State,
	//not ImmutableState, because all methods that aren't mutators in your
	//game logic should be deterministic.
	Rand() *rand.Rand
	//containingStack will return the stack and slot index for the
	//associated component, if that location is not sanitized. If no error is
	//returned, stack.ComponentAt(slotIndex) == c will evaluate to true.
	containingStack(c Component) (stack Stack, slotIndex int, err error)
}
//Valid returns true if the PlayerIndex's value is legal in the context of the
//current State--that is, it is either AdminPlayerIndex, ObserverPlayerIndex,
//or between 0 (inclusive) and game.NumPlayers().
func (p PlayerIndex) Valid(state ImmutableState) bool {
	switch {
	case p == AdminPlayerIndex, p == ObserverPlayerIndex:
		//The two special sentinel indexes are always legal.
		return true
	case state == nil:
		//Without a state we cannot bounds-check a normal index.
		return false
	default:
		return p >= 0 && int(p) < len(state.ImmutablePlayerStates())
	}
}
//Next returns the next PlayerIndex, wrapping around back to 0 if it
//overflows. PlayerIndexes of AdminPlayerIndex and Observer PlayerIndex will
//not be affected.
func (p PlayerIndex) Next(state ImmutableState) PlayerIndex {
	//The special sentinel indexes never advance.
	if p == AdminPlayerIndex || p == ObserverPlayerIndex {
		return p
	}
	next := p + 1
	if int(next) >= len(state.ImmutablePlayerStates()) {
		//Wrap around to the first player.
		return 0
	}
	return next
}
//Previous returns the previous PlayerIndex, wrapping around back to
//len(players - 1) if it goes below 0. PlayerIndexes of AdminPlayerIndex and
//Observer PlayerIndex will not be affected.
func (p PlayerIndex) Previous(state ImmutableState) PlayerIndex {
	//The special sentinel indexes never move.
	if p == AdminPlayerIndex || p == ObserverPlayerIndex {
		return p
	}
	prev := p - 1
	if prev < 0 {
		//Wrap around to the last player.
		return PlayerIndex(len(state.ImmutablePlayerStates()) - 1)
	}
	return prev
}
//Equivalent checks whether the two playerIndexes are equivalent. For most
//indexes it checks if both are the same. ObserverPlayerIndex returns false
//when compared to any other PlayerIndex. AdminPlayerIndex returns true when
//compared to any other index (other than ObserverPlayerIndex). This method is
//useful for verifying that a given TargerPlayerIndex is equivalent to the
//proposer PlayerIndex in a move's Legal method. moves.CurrentPlayer handles
//that logic for you.
func (p PlayerIndex) Equivalent(other PlayerIndex) bool {
	//Sanity check obviously-illegal values
	if p < AdminPlayerIndex || other < AdminPlayerIndex {
		return false
	}
	switch {
	case p == ObserverPlayerIndex, other == ObserverPlayerIndex:
		//Observers match nothing, not even another observer.
		return false
	case p == AdminPlayerIndex, other == AdminPlayerIndex:
		//Admin matches any remaining (non-observer) index.
		return true
	default:
		return p == other
	}
}

//String returns the int value of the PlayerIndex.
func (p PlayerIndex) String() string {
	return strconv.FormatInt(int64(p), 10)
}
//componentIndexItem represents one item in the componentIndex: which
//stack a component currently lives in, and at which slot within it.
type componentIndexItem struct {
	stack     Stack
	slotIndex int
}
//state implements both State and MutableState, so it can always be passed for
//either, and what it's interpreted as is primarily a function of what the
//method signature is that it's passed to
type state struct {
	gameState              ConfigurableSubState
	playerStates           []ConfigurablePlayerState
	computedValues         *computedProperties
	dynamicComponentValues map[string][]ConfigurableSubState
	//We hang onto these because otherwise we'd have to create them on the fly
	//whenever MutablePlayerStates() and MutableDynamicComponentValues are
	//called. They're populated in setStateForSubStates.
	mutablePlayerStates           []PlayerState
	mutableDynamicComponentValues map[string][]SubState
	secretMoveCount               map[string][]int
	sanitized                     bool
	version                       int
	game                          *Game
	//memoizedRand caches the deterministic rand source created by Rand().
	memoizedRand *rand.Rand
	//componentIndex keeps track of the current location of all components in
	//stacks in this state. It is not persisted, but is rebuilt the first time
	//it's asked for, and then all modifications are kept track of as things
	//move around.
	componentIndex map[Component]componentIndexItem
	//Set to true while computed properties are being calculated. Primarily so
	//if you marshal JSON in that time we know to just elide computed.
	calculatingComputed bool
	//If TimerProp.Start() is called, it prepares a timer, but doesn't
	//actually start ticking it until this state is committed. This is where
	//we accumulate the timers that still need to be fully started at that
	//point.
	timersToStart []string
}
//Rand returns this state's memoized random source. It is seeded
//deterministically from the game's Id and secret salt (when the game
//reference exists) plus this state's version number, so repeated calls
//return the same generator.
func (s *state) Rand() *rand.Rand {
	if s.memoizedRand == nil {
		input := "insecurestarterdefault"
		if game := s.game; game != nil {
			//Sometimes, like exampleState, we don't have the game reference.
			//But those are rare and it's OK to have deterministic behavior.
			input = game.Id() + game.secretSalt
		}
		input += strconv.Itoa(s.version)
		//Hash the seed material down to a 64-bit value for the source.
		hasher := fnv.New64()
		hasher.Write([]byte(input))
		val := hasher.Sum64()
		s.memoizedRand = rand.New(rand.NewSource(int64(val)))
	}
	return s.memoizedRand
}
//containingImmutableStack returns the stack and slot index for the associated
//component, if that location is not sanitized. It is the read-only view of
//containingStack.
func (s *state) containingImmutableStack(c Component) (stack ImmutableStack, slotIndex int, err error) {
	return s.containingStack(c)
}
//containingStack returns the stack and slot index that currently contain the
//given component, consulting (and lazily building) the componentIndex. If no
//error is returned, stack.ComponentAt(slotIndex) will be equivalent to c.
//Errors are returned for nil components, a deck's generic component, and
//locations that are not public information in a sanitized state.
func (s *state) containingStack(c Component) (stack Stack, slotIndex int, err error) {
	//Lazily build the index on first use.
	if s.componentIndex == nil {
		s.buildComponentIndex()
	}
	if c == nil {
		return nil, 0, errors.New("Nil component doesn't exist in any stack")
	}
	//A deck's generic component is a placeholder and is never in a stack.
	if c.Deck().GenericComponent().Equivalent(c) {
		return nil, 0, errors.New("The generic component for that deck isn't in any stack")
	}
	item, ok := s.componentIndex[c.ptr()]
	if !ok {
		//This can happen if the state is sanitized, after
		//buildComponentIndex, which won't be able to see the component.
		if s.Sanitized() {
			return nil, 0, errors.New("That component's location is not public information.")
		}
		//If this happened and the state isn't sanitized, then something bad happened.
		//TODO: remove this once debugging that it doesn't happen
		log.Println("WARNING: Component didn't exist in index")
		return nil, 0, errors.New("Unexpectedly that component was not found in the index")
	}
	//Sanity check that we're allowed to see that component in that location.
	otherC := item.stack.ComponentAt(item.slotIndex)
	if otherC == nil || otherC.Generic() {
		return nil, 0, errors.New("That component's location is not public information.")
	}
	//This check should always work if the stack has been sanitized, because
	//every Policy other than PolicyVisible replaces ComponentAt with generic
	//component.
	if !otherC.Equivalent(c) {
		//If this happened and the state isn't sanitized, then something bad happened.
		//TODO: remove this once debugging that it doesn't happen
		log.Println("WARNING: Component didn't exist; wrong component in index")
		return nil, 0, errors.New("Unexpectedly that component was not found in the index")
	}
	return item.stack, item.slotIndex, nil
}
//buildComponentIndex constructs the component index from scratch by walking
//the readers of every sub-state (game, players, and dynamic component
//values). Should be called whenever an operation needs the componentIndex but
//it is still nil.
func (s *state) buildComponentIndex() {
	s.componentIndex = make(map[Component]componentIndexItem)
	if s.gameState != nil {
		s.reportComponentLocationsForReader(s.gameState.ReadSetter())
	}
	for _, playerState := range s.playerStates {
		if playerState == nil {
			continue
		}
		s.reportComponentLocationsForReader(playerState.ReadSetter())
	}
	for _, deckValues := range s.dynamicComponentValues {
		for _, componentValues := range deckValues {
			if componentValues == nil {
				continue
			}
			s.reportComponentLocationsForReader(componentValues.ReadSetter())
		}
	}
}
//reportComponentLocationsForReader goes through the given reader, and for
//each component it finds in mutable Stack and Board properties, reports its
//location into the index. Used to help build up the index when it's first
//created.
func (s *state) reportComponentLocationsForReader(readSetter PropertyReadSetter) {
	for propName, propType := range readSetter.Props() {
		//Skip immutable properties; only mutable stacks/boards are indexed.
		if !readSetter.PropMutable(propName) {
			continue
		}
		if propType == TypeStack {
			stack, err := readSetter.StackProp(propName)
			if err != nil {
				continue
			}
			for i, c := range stack.Components() {
				//can't use updateIndexForAllComponents because we don't want
				//to call buildComponentIndex.
				s.componentAddedImpl(c, stack, i)
			}
		} else if propType == TypeBoard {
			board, err := readSetter.BoardProp(propName)
			if err != nil {
				continue
			}
			for _, stack := range board.Spaces() {
				//can't use updateIndexForAllComponents because we don't want
				//to call buildComponentIndex.
				for i, c := range stack.Components() {
					s.componentAddedImpl(c, stack, i)
				}
			}
		}
	}
}
//componentAddedImpl records the location of c as being the given stack and
//slot, skipping nil components and each deck's generic component. It assumes
//s.componentIndex is already non-nil.
func (s *state) componentAddedImpl(c Component, stack Stack, slotIndex int) {
	if c == nil {
		return
	}
	if deck := c.Deck(); deck != nil && deck.GenericComponent().Equivalent(c) {
		return
	}
	s.componentIndex[c.ptr()] = componentIndexItem{
		stack:     stack,
		slotIndex: slotIndex,
	}
}
//componentAdded should be called by stacks when a component is added to them,
//by non-merged stacks. It lazily builds the component index first if needed.
func (s *state) componentAdded(c Component, stack Stack, slotIndex int) {
	if s.componentIndex == nil {
		s.buildComponentIndex()
	}
	s.componentAddedImpl(c, stack, slotIndex)
}
//updateIndexForAllComponents re-reports the location of every component in
//the given stack into the component index.
func (s *state) updateIndexForAllComponents(stack Stack) {
	components := stack.Components()
	for i := 0; i < len(components); i++ {
		s.componentAdded(components[i], stack, i)
	}
}
//Version returns the version number the state is (or will be once committed).
func (s *state) Version() int {
	return s.version
}

//GameState returns the mutable game sub-state.
func (s *state) GameState() SubState {
	return s.gameState
}

//PlayerStates returns the cached mutable view of the player sub-states,
//populated in setStateForSubStates.
func (s *state) PlayerStates() []PlayerState {
	return s.mutablePlayerStates
}

//DynamicComponentValues returns the cached mutable view of the dynamic
//component values, populated in setStateForSubStates.
func (s *state) DynamicComponentValues() map[string][]SubState {
	return s.mutableDynamicComponentValues
}

//Game returns the Game this state is part of.
func (s *state) Game() *Game {
	return s.game
}

//ImmutableGameState returns the game sub-state as a read-only view.
func (s *state) ImmutableGameState() ImmutableSubState {
	return s.gameState
}
//ImmutablePlayerStates returns the player sub-states widened to their
//read-only interface, in player order.
func (s *state) ImmutablePlayerStates() []ImmutablePlayerState {
	result := make([]ImmutablePlayerState, 0, len(s.playerStates))
	for _, playerState := range s.playerStates {
		result = append(result, playerState)
	}
	return result
}
//ImmutableCurrentPlayer returns the read-only view of the current player's
//state, or nil if the current player index isn't a valid player index.
func (s *state) ImmutableCurrentPlayer() ImmutablePlayerState {
	return s.CurrentPlayer()
}

//CurrentPlayer returns the PlayerState corresponding to the result of
//CurrentPlayerIndex(), or nil if that index is out of range (for example
//AdminPlayerIndex or ObserverPlayerIndex).
func (s *state) CurrentPlayer() PlayerState {
	index := s.CurrentPlayerIndex()
	if index < 0 || int(index) >= len(s.playerStates) {
		return nil
	}
	return s.playerStates[index]
}

//CurrentPlayerIndex is a convenience wrapper around
//delegate.CurrentPlayerIndex for this state.
func (s *state) CurrentPlayerIndex() PlayerIndex {
	return s.game.manager.delegate.CurrentPlayerIndex(s)
}
//Copy returns a deep copy of the State, including copied versions of the
//Game and Player States. sanitized marks the copy as sanitized.
func (s *state) Copy(sanitized bool) (ImmutableState, error) {
	//TODO: just make copy() be public
	return s.copy(sanitized)
}

//copy creates a fresh empty state from the manager, deep-copies the secret
//move counts, copies scalar fields, then copies every reader (game, player,
//and dynamic component values) into the new state via copyReader.
func (s *state) copy(sanitized bool) (*state, error) {
	result, err := s.game.manager.emptyState(len(s.playerStates))
	if err != nil {
		return nil, err
	}
	//Deep-copy the per-deck secret move counts so the copy can diverge.
	moveCounts := make(map[string][]int)
	for deck, counts := range s.secretMoveCount {
		newCounts := make([]int, len(counts))
		for i, count := range counts {
			newCounts[i] = count
		}
		moveCounts[deck] = newCounts
	}
	result.secretMoveCount = moveCounts
	result.sanitized = sanitized
	result.version = s.version
	result.game = s.game
	//We copy this over, because this should only be set when computed is
	//being calculated, and during that time we'll be creating sanitized
	//copies of ourselves. However, if there are other copies created when
	//this flag is set that outlive the original flag being unset, that
	//state would be in a bad state long term...
	result.calculatingComputed = s.calculatingComputed
	//Note: we can't copy componentIndex, because all of those items point to
	//MutableStacks in the original state, and we don't have an easy way to
	//figure out which ones they correspond to in the new one.
	if err := copyReader(s.gameState.ReadSetter(), result.gameState.ReadSetter()); err != nil {
		return nil, err
	}
	for i := 0; i < len(s.playerStates); i++ {
		if err := copyReader(s.playerStates[i].ReadSetter(), result.playerStates[i].ReadSetter()); err != nil {
			return nil, err
		}
	}
	for deckName, values := range s.dynamicComponentValues {
		for i := 0; i < len(values); i++ {
			if err := copyReader(s.dynamicComponentValues[deckName][i].ReadSetter(), result.dynamicComponentValues[deckName][i].ReadSetter()); err != nil {
				return nil, err
			}
		}
	}
	return result, nil
}
//setStateForSubStates should be called when the state has all of its
//sub-states set. It gives every sub-state (game, player, and dynamic
//component values) a pointer back to this state via SetState and
//SetImmutableState, and builds the mutablePlayerStates and
//mutableDynamicComponentValues caches once, so PlayerStates() and
//DynamicComponentValues() don't have to recreate them on every call.
func (s *state) setStateForSubStates() {
	s.gameState.SetState(s)
	s.gameState.SetImmutableState(s)
	for _, playerState := range s.playerStates {
		playerState.SetState(s)
		playerState.SetImmutableState(s)
	}
	for _, deckValues := range s.dynamicComponentValues {
		for _, componentValues := range deckValues {
			componentValues.SetState(s)
			componentValues.SetImmutableState(s)
		}
	}
	players := make([]PlayerState, 0, len(s.playerStates))
	for _, playerState := range s.playerStates {
		players = append(players, playerState)
	}
	s.mutablePlayerStates = players
	dynamic := make(map[string][]SubState, len(s.dynamicComponentValues))
	for deckName, deckValues := range s.dynamicComponentValues {
		converted := make([]SubState, len(deckValues))
		for i, componentValues := range deckValues {
			converted[i] = componentValues
		}
		dynamic[deckName] = converted
	}
	s.mutableDynamicComponentValues = dynamic
}
//validateBeforeSave ensures that for all readers (game, player, and dynamic
//component values) the PlayerIndex properties are valid and the merged
//stacks validate. It also verifies that, if the delegate's PhaseEnum is a
//TreeEnum, the current phase is a leaf value.
func (s *state) validateBeforeSave() error {
	if err := validateReaderBeforeSave(s.GameState().Reader(), "Game", s); err != nil {
		return err
	}
	for i, player := range s.PlayerStates() {
		if err := validateReaderBeforeSave(player.Reader(), "Player "+strconv.Itoa(i), s); err != nil {
			return err
		}
	}
	for name, deck := range s.DynamicComponentValues() {
		for i, values := range deck {
			if err := validateReaderBeforeSave(values.Reader(), "DynamicComponentValues "+name+" "+strconv.Itoa(i), s); err != nil {
				return err
			}
		}
	}
	//If delegate.PhaseEnum returns a tree, ensure it's in a leaf state.
	delegate := s.Game().Manager().Delegate()
	e := delegate.PhaseEnum()
	if e == nil {
		return nil
	}
	t := e.TreeEnum()
	if t == nil {
		return nil
	}
	if t.IsLeaf(delegate.CurrentPhase(s)) {
		return nil
	}
	return errors.New("PhaseEnum is a TreeEnum, but CurrentPhase is not a leaf value.")
}
//validateReaderBeforeSave verifies that, for the given reader, every
//PlayerIndex property is valid in the context of state, and every merged
//stack property validates.
//NOTE(review): the name parameter is currently unused in the error messages
//— confirm whether errors should include it.
func validateReaderBeforeSave(reader PropertyReader, name string, state State) error {
	for propName, propType := range reader.Props() {
		switch propType {
		case TypePlayerIndex:
			val, err := reader.PlayerIndexProp(propName)
			if err != nil {
				return errors.New("Error reading property " + propName + ": " + err.Error())
			}
			if !val.Valid(state) {
				return errors.New(propName + " was an invalid PlayerIndex, with value " + strconv.Itoa(int(val)))
			}
		case TypeStack:
			stack, err := reader.ImmutableStackProp(propName)
			if err != nil {
				return errors.New("Error reading property " + propName + ": " + err.Error())
			}
			if merged := stack.MergedStack(); merged != nil {
				if err := merged.Valid(); err != nil {
					return errors.New(propName + " was a merged stack that did not validate: " + err.Error())
				}
			}
		}
		//We don't need to check TypeBoard here, because TypeBoard never has
		//merged stacks within it, and those are the only ones who could be
		//invalid here.
	}
	return nil
}
//committed is called right after the state has been committed to the database
//and we're sure it will stick. This is the time to do any actions that were
//triggered during the state manipulation. Currently that is only timers.
func (s *state) committed() {
	for _, id := range s.timersToStart {
		s.game.manager.timers.StartTimer(id)
	}
}

//StorageRecord returns a StateStorageRecord representing the state (indented
//JSON, without computed properties).
//NOTE(review): the marshal error is silently discarded; on failure the
//returned record will be nil — confirm callers tolerate that.
func (s *state) StorageRecord() StateStorageRecord {
	record, _ := s.customMarshalJSON(false, true)
	return record
}
//customMarshalJSON serializes the state to JSON. includeComputed controls
//whether the "Computed" key is included; indent selects indented output via
//DefaultMarshalJSON, otherwise compact output via json.Marshal.
func (s *state) customMarshalJSON(includeComputed bool, indent bool) ([]byte, error) {
	obj := map[string]interface{}{
		"Game":    s.gameState,
		"Players": s.playerStates,
		"Version": s.version,
	}
	if includeComputed {
		obj["Computed"] = s.computed()
	}
	//We emit the secretMoveCount only when the state isn't sanitized. Any
	//time the state is sent via StateForPlayer sanitized will be true, so
	//this has the effect of persisting SecretMoveCount when serialized for
	//storage layer, but not when sanitized state.
	if !s.sanitized {
		if len(s.secretMoveCount) > 0 {
			obj["SecretMoveCount"] = s.secretMoveCount
		}
	}
	//Always emit a "Components" key, even when there are no dynamic
	//component values, so consumers can rely on its presence.
	dynamic := s.DynamicComponentValues()
	if dynamic != nil && len(dynamic) != 0 {
		obj["Components"] = dynamic
	} else {
		obj["Components"] = map[string]interface{}{}
	}
	if indent {
		return DefaultMarshalJSON(obj)
	}
	return json.Marshal(obj)
}
//MarshalJSON implements json.Marshaler, emitting compact JSON that includes
//computed properties.
func (s *state) MarshalJSON() ([]byte, error) {
	return s.customMarshalJSON(true, false)
}

//Diagram returns a basic, ascii rendering of the state for debug rendering.
//It thunks out to Delegate.Diagram.
func (s *state) Diagram() string {
	return s.game.manager.delegate.Diagram(s)
}

//Sanitized returns false if this is a full-fidelity State object, or true if
//it has been sanitized.
func (s *state) Sanitized() bool {
	return s.sanitized
}
//ImmutableDynamicComponentValues returns the dynamic component values
//widened to their read-only interface, keyed by deck name.
func (s *state) ImmutableDynamicComponentValues() map[string][]ImmutableSubState {
	result := make(map[string][]ImmutableSubState, len(s.dynamicComponentValues))
	for deckName, deckValues := range s.dynamicComponentValues {
		converted := make([]ImmutableSubState, len(deckValues))
		for i, componentValues := range deckValues {
			converted[i] = componentValues
		}
		result[deckName] = converted
	}
	return result
}
//computed returns the computed properties for this state, calculating and
//memoizing them on first call by asking the delegate for global and
//per-player computed properties. Returns nil when called reentrantly while a
//computation is already in progress.
func (s *state) computed() *computedProperties {
	if s.calculatingComputed {
		//This might be called in a Compute() callback either directly, or
		//implicitly via MarshalJSON.
		return nil
	}
	if s.computedValues == nil {
		//Set the reentrancy guard (see above) for the duration of the
		//delegate callbacks.
		s.calculatingComputed = true
		playerProperties := make([]PropertyCollection, len(s.playerStates))
		for i, player := range s.playerStates {
			playerProperties[i] = s.game.manager.delegate.ComputedPlayerProperties(player)
		}
		s.computedValues = &computedProperties{
			Global:  s.game.manager.delegate.ComputedGlobalProperties(s),
			Players: playerProperties,
		}
		s.calculatingComputed = false
	}
	return s.computedValues
}
//SanitizedForPlayer is in sanitized.go
//Reader is the interface to fetch a PropertyReader from an object. See
//ConfigurableSubState and PropertyReadSetConfigurer for more.
type Reader interface {
	//Reader returns the read-only property accessor for this object.
	Reader() PropertyReader
}

//ReadSetter is the interface to fetch a PropertyReadSetter from an object.
//See ConfigurableSubState and PropertyReadSetConfigurer for more.
type ReadSetter interface {
	Reader
	//ReadSetter returns the read/write property accessor for this object.
	ReadSetter() PropertyReadSetter
}

//ReadSetConfigurer is the interface to fetch a PropertyReadSetConfigurer from
//an object. See ConfigurableSubState and PropertyReadSetConfigurer for more.
type ReadSetConfigurer interface {
	ReadSetter
	//ReadSetConfigurer returns the read/write/configure property accessor for
	//this object.
	ReadSetConfigurer() PropertyReadSetConfigurer
}
//ImmutableStateSetter is included in ImmutableSubState, SubState, and
//ConfigurableSubState as the way to keep track of which ImmutableState a
//given SubState is part of. See also StateSetter, which adds getters/setters
//for mutable States. Typically you use base.SubState to implement this
//automatically.
type ImmutableStateSetter interface {
	//SetImmutableState is called to give the SubState object a pointer back
	//to the State that contains it. You can implement it yourself, or
	//anonymously embed base.SubState to get it for free.
	SetImmutableState(state ImmutableState)
	//ImmutableState returns the state that was set via SetImmutableState().
	ImmutableState() ImmutableState
}

//StateSetter is included in SubState and ConfigurableSubState as the way to
//keep track of which State a given SubState is part of. See also
//ImmutableStateSetter, which adds getters/setters for ImmutableStates.
//Typically you use base.SubState to implement this automatically.
type StateSetter interface {
	ImmutableStateSetter
	//SetState gives the SubState a mutable pointer back to its containing
	//State.
	SetState(state State)
	//State returns the state that was set via SetState().
	State() State
}
//ImmutableSubState is the interface that all non-modifiable sub-state objects
//(PlayerStates, GameStates, and DynamicComponentValues) implement. It is like
//SubState, but minus any mutator methods. See ConfigurableSubState for more
//on the SubState type hierarchy.
type ImmutableSubState interface {
	ImmutableStateSetter
	Reader
}

//SubState is the interface that all sub-state objects (PlayerStates,
//GameStates, and DynamicComponentValues) implement. It is like
//ConfigurableSubState, but minus any configure methods. This means they can't
//be used to configure the substates at creation time but can be used to
//mutate them, for example in move.Apply(). See ConfigurableSubState for more
//on the SubState type hierarchy.
type SubState interface {
	StateSetter
	ReadSetter
}
/*
ConfigurableSubState is the interface for many types of structs that store
properties and configuration specific to your game type. The values returned
from your GameDelegate's GameStateConstructor, PlayerStateConstructor, and
DynamicComponentValues constructor must all implement this interface.
(PlayerStateConstructor also adds PlayerIndex())

A ConfigurableSubState is a struct that has a collection of properties all of
a given small set of legal types, enumerated in PropertyType. These are the
core objects to maintain state in your game type. The types of properties on
these objects are strictly defined to ensure the shapes of the objects are
simple and knowable.

The engine in general doesn't know the shape of your underlying structs, so it
uses the PropertyReadSetConfigurer interface to interact with your objects.
See the documentation for PropertyReadSetConfigurer for more.

Many legal property types, like string and int, are simple and can be Read and
Set as you'd expect. But some, called interface types, are more complex
because they denote objects that carry configuration information in their
instantiation. Stacks, Timers, and Enums are examples of these. These
interface types can be Read and have their sub-properties Set. But they also
must be able to be Configured, which is to say instantiated and set onto the
underlying struct.

ConfigurableSubState is the most powerful interface for interacting with these
types of objects, because it has methods to Read, Set, and Configure all
properties. In certain cases, however, for example with an ImmutableState, it
might not be appropriate to allow Setting or Configuring properties. For this
reason, the interfaces are split into a series of layers, building up from
only Reader methods up to adding Set properties, and then terminating by
layering on Configure methods.

ConfigurablePlayerState is an interface that extends ConfigurableSubState
with one extra method, PlayerIndex(). There are also player-state versions for
SubState and ImmutableSubState.

Typically your game's sub-states satisfy this interface by embedding
base.SubState, and then using `boardgame-util codegen` to generate the
underlying code for the PropertyReadSetConfigurer for your object type.
*/
type ConfigurableSubState interface {
	//Every SubState should be able to have its containing State set and read
	//back, so each sub-state knows how to reach up and over into other parts
	//of the over-arching state. You can implement this interface by embedding
	//base.SubState in your struct.
	StateSetter
	//ReadSetConfigurer defines the method to retrieve the
	//PropertyReadSetConfigurer for this object type. Typically this getter--
	//and the underlying PropertyReadSetConfigurer it returns--are generated
	//via `boardgame-util codegen`.
	ReadSetConfigurer
}
//PlayerIndexer is implemented by all PlayerStates, which differentiates them
//from a generic SubState.
type PlayerIndexer interface {
	//PlayerIndex encodes the index this user's state is in the containing
	//state object, allowing the SubState to know how to fetch itself from its
	//containing State.
	PlayerIndex() PlayerIndex
}

//PlayerState represents the state of a game associated with a specific user.
//It is just a SubState with the addition of a PlayerIndex(). See
//ConfigurableSubState for more on the SubState type hierarchy.
type PlayerState interface {
	PlayerIndexer
	SubState
}

//ImmutablePlayerState represents a PlayerState SubState that is not in a
//context where mutating is legal. It is simply an ImmutableSubState that also
//has a PlayerIndex method. See more on substates at the documentation for
//ConfigurableSubState.
type ImmutablePlayerState interface {
	PlayerIndexer
	ImmutableSubState
}

//A ConfigurablePlayerState is a PlayerState that is allowed to be mutated and
//configured. It is simply a ConfigurableSubState that also has a
//PlayerIndex() method. See ConfigurableSubState for more on this hierarchy of
//objects.
type ConfigurablePlayerState interface {
	PlayerIndexer
	ConfigurableSubState
}
//DefaultMarshalJSON is a simple wrapper around json.MarshalIndent, with the
//right defaults set. If your structs need to implement MarshalJSON to output
//JSON, use this to encode it.
func DefaultMarshalJSON(obj interface{}) ([]byte, error) {
	return json.MarshalIndent(obj, "", " ")
}
//Fix a compile error introduced in afda8a5d74b288d37aedb2653a3d160a84e7d8da
//when the StatePropertyRef field order was reordered for clarity. Part of #562.
package boardgame
import (
"encoding/json"
"github.com/jkomoros/boardgame/errors"
"hash/fnv"
"log"
"math/rand"
"strconv"
)
//ImmutableState is a version of State, but minus any mutator methods. Because
//states may not be modified except by moves, in almost every case where a
//state is passed to game logic you define (whether on your GameDelegate
//methods, or Legal() on your move structs), an ImmutableState will be passed
//instead. If an ImmutableState is passed to your method, it's a strong signal
//that you shouldn't modify the state. Note that idiomatic use (e.g.
//concreteStates) will cast an ImmutableState to a State immediately in order
//to retrieve the concrete structs underneath, but if you do that you have to
//be careful not to inadvertently modify the state because the changes won't
//be persisted. See the documentation for State for more about states in
//general.
type ImmutableState interface {
	//ImmutableGameState is a reference to the underlying object returned
	//from your GameDelegate.GameStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way. The difference is that the object formally exposed
	//lacks the mutator methods, although when you cast back you'll get access
	//to the full struct--be careful not to mutate things as they will not be
	//persisted. See State.GameState for more.
	ImmutableGameState() ImmutableSubState
	//Each PlayerState is a reference to the underlying object returned
	//from your GameDelegate.PlayerStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way. The difference is that the object formally exposed
	//lacks the mutator methods, although when you cast back you'll get access
	//to the full struct--be careful not to mutate things as they will not be
	//persisted. See State.PlayerStates for more.
	ImmutablePlayerStates() []ImmutablePlayerState
	//Each SubState is a reference to the underlying object returned from
	//your GameDelegate.DynamicComponentValuesConstructor() for the deck with
	//that name, and can be safely cast back to that underlying struct so you
	//can access its methods directly in a type-checked way. The difference
	//is that the object formally exposed lacks the mutator methods, although
	//when you cast back you'll get access to the full struct--be careful not
	//to mutate things as they will not be persisted. DynamicComponentValues
	//returns a map of deck name to array of component values, one per
	//component in that deck. See State.DynamicComponentValues for more.
	ImmutableDynamicComponentValues() map[string][]ImmutableSubState
	//ImmutableCurrentPlayer returns the ImmutablePlayerState corresponding to
	//the result of delegate.CurrentPlayerIndex(), or nil if the index isn't
	//valid. This object is the same underlying struct that you returned from
	//GameDelegate.PlayerStateConstructor and can be cast back safely to
	//access the underlying methods. See State.CurrentPlayer for more.
	ImmutableCurrentPlayer() ImmutablePlayerState
	//CurrentPlayerIndex is a simple convenience wrapper around
	//delegate.CurrentPlayerIndex(state) for this state.
	CurrentPlayerIndex() PlayerIndex
	//Version returns the version number the state is (or will be once
	//committed).
	Version() int
	//Copy returns a deep copy of the State, including copied version of the
	//Game and Player States. Note that copying uses the
	//PropertyReadSetConfigurer interface, so any properties not enumerated
	//there or otherwise defined in the constructors on your GameDelegate will
	//not be copied.
	Copy(sanitized bool) (ImmutableState, error)
	//Diagram returns a basic, ascii rendering of the state for debug rendering.
	//It thunks out to Delegate.Diagram.
	Diagram() string
	//Sanitized will return false if this is a full-fidelity State object, or
	//true if it has been sanitized, which means that some properties might be
	//hidden or otherwise altered. This should return true if the object was
	//created with Copy(true)
	Sanitized() bool
	//computed returns the computed properties for this state.
	computed() *computedProperties
	//SanitizedForPlayer produces a copy state object that has been sanitized
	//for the player at the given index. The state object returned will have
	//Sanitized() return true. Will call GameDelegate.SanitizationPolicy to
	//construct the effective policy to apply. See the documentation for
	//Policy for more on sanitization.
	SanitizedForPlayer(player PlayerIndex) ImmutableState
	//Game is the Game that this state is part of. Calling
	//Game.State(state.Version()) should return a state equivalent to this State
	//(modulo sanitization, if applied).
	Game() *Game
	//StorageRecord returns a StateStorageRecord representing the state.
	StorageRecord() StateStorageRecord
	//containingImmutableStack will return the stack and slot index for the associated
	//component, if that location is not sanitized. If no error is returned,
	//stack.ComponentAt(slotIndex) == c will evaluate to true.
	containingImmutableStack(c Component) (stack ImmutableStack, slotIndex int, err error)
}
//computedProperties holds the memoized computed property collections for a
//state: one global collection, and one collection per player.
type computedProperties struct {
	//Global holds the delegate's ComputedGlobalProperties result.
	Global PropertyCollection
	//Players holds the delegate's ComputedPlayerProperties results, one entry
	//per player.
	Players []PropertyCollection
}
//StateGroupType is the top-level grouping object used in a StatePropertyRef.
type StateGroupType int

const (
	//StateGroupGame refers to the game sub-state.
	StateGroupGame StateGroupType = iota
	//StateGroupPlayer refers to a player sub-state.
	StateGroupPlayer
	//StateGroupDynamicComponentValues refers to dynamic component values.
	StateGroupDynamicComponentValues
)
//A StatePropertyRef is a reference to a particular property or item in a
//Property in a State, in a structured way. Currently used primarily as an
//input to your GameDelegate's SanitizationPolicy method. Get a new generic
//one, with all properties set to reasonable defaults, from
//NewStatePropertyRef.
type StatePropertyRef struct {
	//Group is which of Game, Player, or DynamicComponentValues this is a
	//reference to.
	Group StateGroupType
	//PropName is the specific property on the given SubStateObject specified
	//by the rest of the StatePropertyRef.
	PropName string
	//PlayerIndex is the index of the player, if Group is StateGroupPlayer.
	PlayerIndex int
	//DeckName is only used when Group is StateGroupDynamicComponentValues
	DeckName string
	//StackIndex specifies the index of the component within the stack (if it
	//is a stack) that is intended. Negative values signify "all components in
	//stack"
	StackIndex int
	//BoardIndex specifies the index of the Stack within the Board (if it is a
	//board) that is intended. Negative values signify "all stacks within the
	//board".
	BoardIndex int
	//DynamicComponentIndex is used only when the Group is
	//StateGroupDynamicComponentValues. Negative values mean "all values in
	//deck".
	DynamicComponentIndex int
}
//NewStatePropertyRef returns an initialized StatePropertyRef with all fields
//set to reasonable defaults. In particular, all of the Index properties are
//set to -1. It is rare for users of the library to need to create their own
//StatePropertyRefs.
func NewStatePropertyRef() StatePropertyRef {
	//Use a keyed (not positional) struct literal so this continues to compile
	//and stay correct even if the fields of StatePropertyRef are reordered
	//again, which has broken this constructor before.
	return StatePropertyRef{
		Group:                 StateGroupGame,
		PropName:              "",
		PlayerIndex:           -1,
		DeckName:              "",
		StackIndex:            -1,
		BoardIndex:            -1,
		DynamicComponentIndex: -1,
	}
}
//associatedReadSetter returns the PropertyReadSetter associated with the
//StatePropertyRef in the given state, or errors if the StatePropertyRef does
//not refer to a valid reader.
func (r StatePropertyRef) associatedReadSetter(st State) (PropertyReadSetter, error) {
	switch r.Group {
	case StateGroupGame:
		gameState := st.GameState()
		if gameState == nil {
			return nil, errors.New("GameState selected, but was nil")
		}
		return gameState.ReadSetter(), nil
	case StateGroupPlayer:
		players := st.PlayerStates()
		if len(players) == 0 {
			return nil, errors.New("PlayerState selected, but no players in state")
		}
		if r.PlayerIndex < 0 {
			return nil, errors.New("PlayerState selected, but negative value for PlayerIndex")
		}
		if r.PlayerIndex >= len(players) {
			return nil, errors.New("PlayerState selected, but with a non-existent PlayerIndex")
		}
		player := players[r.PlayerIndex]
		return player.ReadSetter(), nil
	case StateGroupDynamicComponentValues:
		allDecks := st.DynamicComponentValues()
		if allDecks == nil {
			return nil, errors.New("DynamicComponentValues selected, but was nil")
		}
		values, ok := allDecks[r.DeckName]
		if !ok {
			return nil, errors.New("DeckName did not refer to any component values: " + r.DeckName)
		}
		if r.DynamicComponentIndex < 0 || r.DynamicComponentIndex >= len(values) {
			return nil, errors.New("DynamicComponentIndex referred to a component that didn't exist")
		}
		return values[r.DynamicComponentIndex].ReadSetter(), nil
	}
	return nil, errors.New("Invalid Group type")
}
//PlayerIndex is an int that represents the index of a given player in a game.
//Normal values are [0, game.NumPlayers). Special values are AdminPlayerIndex
//and ObserverPlayerIndex.
type PlayerIndex int

//ObserverPlayerIndex is a special PlayerIndex that denotes that the player in
//question is not one of the normal players, but someone generically watching.
//All hidden state should be hidden to them, and GroupSelf will never trigger
//for them.
const ObserverPlayerIndex PlayerIndex = -1

//AdminPlayerIndex is a special PlayerIndex that denotes the omniscient admin
//who can see all state and make moves whenever they want. This PlayerIndex is
//used for example to apply moves that your GameDelegate.ProposeFixUpMove
//returns, as well as when Timer's fire. It is also used when the server is in
//debug mode, allowing the given player to operate as the admin.
const AdminPlayerIndex PlayerIndex = -2
//State represents the entire semantic state of a game at a given version. For
//your specific game, GameState and PlayerStates will actually be concrete
//structs to your particular game. State is a container of gameStates,
//playerStates, and dynamicComponentValues for your game. Games often define a
//top-level concreteStates() *myGameState, []*myPlayerState so at the top of
//methods that accept a State they can quickly get concrete, type-checked
//types with only a single conversion leap of faith at the top. States contain
//mutable references to their contained SubStates, whereas ImmutableState does
//not. Most of the methods you define that accept states from the core game
//engine will be an ImmutableState, because the only time States should be
//modified is when the game is initially being set up before the first move,
//and during a move's Apply() method.
type State interface {
	//State contains all of the methods of a read-only state.
	ImmutableState
	//GameState is a reference to the underlying object returned from your
	//GameDelegate.GameStateConstructor(), and can be safely cast back to that
	//underlying struct so you can access its methods directly in a type-
	//checked way.
	GameState() SubState
	//Each PlayerState is a reference to the underlying object returned
	//from your GameDelegate.PlayerStateConstructor(), and can be safely cast
	//back to that underlying struct so you can access its methods directly in
	//a type-checked way.
	PlayerStates() []PlayerState
	//Each SubState is a reference to the underlying object returned from
	//your GameDelegate.DynamicComponentValuesConstructor() for the deck with
	//that name, and can be safely cast back to that underlying struct so you
	//can access its methods directly in a type-checked way.
	DynamicComponentValues() map[string][]SubState
	//CurrentPlayer returns the PlayerState corresponding to the result of
	//delegate.CurrentPlayerIndex(), or nil if the index isn't valid. This
	//object is the same underlying struct that you returned from
	//GameDelegate.PlayerStateConstructor and can be cast back safely to
	//access the underlying methods.
	CurrentPlayer() PlayerState
	//Rand returns a source of randomness. All game logic should use this rand
	//source. It is deterministically seeded when it is created for this state
	//based on the game's ID, the game's secret salt, and the version number
	//of the state. Repeated calls to Rand() on the same state will return the
	//same random generator. If games use this source for all of their
	//randomness it allows the game to be played back deterministically, which
	//is useful in some testing scenarios. Rand is only available on State,
	//not ImmutableState, because all methods that aren't mutators in your
	//game logic should be deterministic.
	Rand() *rand.Rand
	//containingStack will return the stack and slot index for the
	//associated component, if that location is not sanitized. If no error is
	//returned, stack.ComponentAt(slotIndex) == c will evaluate to true.
	containingStack(c Component) (stack Stack, slotIndex int, err error)
}
//Valid returns true if the PlayerIndex's value is legal in the context of the
//current State--that is, it is either AdminPlayerIndex, ObserverPlayerIndex,
//or between 0 (inclusive) and game.NumPlayers().
func (p PlayerIndex) Valid(state ImmutableState) bool {
	switch {
	case p == AdminPlayerIndex, p == ObserverPlayerIndex:
		//The two sentinel indexes are always legal, even with no state.
		return true
	case state == nil:
		return false
	default:
		//A concrete index must address an actual player slot.
		return p >= 0 && int(p) < len(state.ImmutablePlayerStates())
	}
}
//Next returns the next PlayerIndex, wrapping around back to 0 if it
//overflows. PlayerIndexes of AdminPlayerIndex and Observer PlayerIndex will
//not be affected.
func (p PlayerIndex) Next(state ImmutableState) PlayerIndex {
	if p == AdminPlayerIndex || p == ObserverPlayerIndex {
		//Sentinel indexes never advance.
		return p
	}
	next := p + 1
	if int(next) >= len(state.ImmutablePlayerStates()) {
		//Wrap around to the first player.
		return 0
	}
	return next
}
//Previous returns the previous PlayerIndex, wrapping around back to len(players -1) if it
//goes below 0. PlayerIndexes of AdminPlayerIndex and Observer PlayerIndex will
//not be affected.
func (p PlayerIndex) Previous(state ImmutableState) PlayerIndex {
	if p == AdminPlayerIndex || p == ObserverPlayerIndex {
		//Sentinel indexes never move.
		return p
	}
	prev := p - 1
	if int(prev) < 0 {
		//Wrap around to the last player.
		prev = PlayerIndex(len(state.ImmutablePlayerStates()) - 1)
	}
	return prev
}
//Equivalent checks whether the two playerIndexes are equivalent. For most
//indexes it checks if both are the same. ObserverPlayerIndex returns false
//when compared to any other PlayerIndex. AdminPlayerIndex returns true when
//compared to any other index (other than ObserverPlayerIndex). This method is
//useful for verifying that a given TargerPlayerIndex is equivalent to the
//proposer PlayerIndex in a move's Legal method. moves.CurrentPlayer handles
//that logic for you.
func (p PlayerIndex) Equivalent(other PlayerIndex) bool {
	//Reject obviously-illegal values up front.
	if p < AdminPlayerIndex || other < AdminPlayerIndex {
		return false
	}
	switch {
	case p == ObserverPlayerIndex || other == ObserverPlayerIndex:
		//Observer is never equivalent to anything, including itself.
		return false
	case p == AdminPlayerIndex || other == AdminPlayerIndex:
		//Admin matches any remaining (non-observer) index.
		return true
	default:
		return p == other
	}
}
//String returns the decimal string form of the PlayerIndex's underlying int
//value, satisfying fmt.Stringer.
func (p PlayerIndex) String() string {
	return strconv.Itoa(int(p))
}
//componentIndexItem represents one item in the componentIndex: the stack a
//component currently resides in, and which slot of that stack it occupies.
type componentIndexItem struct {
	stack     Stack
	slotIndex int
}
//state implements both State and MutableState, so it can always be passed for
//either, and what it's interpreted as is primarily a function of what the
//method signature is that it's passed to
type state struct {
	gameState              ConfigurableSubState
	playerStates           []ConfigurablePlayerState
	computedValues         *computedProperties
	dynamicComponentValues map[string][]ConfigurableSubState
	//We hang onto these because otherwise we'd have to create them on the fly
	//whenever MutablePlayerStates() and MutableDynamicComponentValues are
	//called. They're populated in setStateForSubStates.
	mutablePlayerStates           []PlayerState
	mutableDynamicComponentValues map[string][]SubState
	//secretMoveCount tracks per-deck counts used for secret moves; persisted
	//only when the state is not sanitized (see customMarshalJSON).
	secretMoveCount map[string][]int
	sanitized       bool
	version         int
	game            *Game
	//memoizedRand caches the deterministic rand source returned by Rand().
	memoizedRand *rand.Rand
	//componentIndex keeps track of the current location of all components in
	//stacks in this state. It is not persisted, but is rebuilt the first time
	//it's asked for, and then all modifications are kept track of as things
	//move around.
	componentIndex map[Component]componentIndexItem
	//Set to true while computed properties are being calculated. Primarily so
	//if you marshal JSON in that time we know to just elide computed.
	calculatingComputed bool
	//If TimerProp.Start() is called, it prepares a timer, but doesn't
	//actually start ticking it until this state is committed. This is where
	//we accumulate the timers that still need to be fully started at that
	//point.
	timersToStart []string
}
//Rand returns this state's memoized deterministic random source, creating it
//on first use. The seed is derived from the game's ID, its secret salt, and
//the state version, so replays of the same state observe the same stream.
func (s *state) Rand() *rand.Rand {
	if s.memoizedRand != nil {
		return s.memoizedRand
	}
	seedInput := "insecurestarterdefault"
	if s.game != nil {
		//Sometimes, like exampleState, we don't have the game reference.
		//But those are rare and it's OK to have deterministic behavior.
		seedInput = s.game.Id() + s.game.secretSalt
	}
	seedInput += strconv.Itoa(s.version)
	hasher := fnv.New64()
	hasher.Write([]byte(seedInput))
	s.memoizedRand = rand.New(rand.NewSource(int64(hasher.Sum64())))
	return s.memoizedRand
}
//containingImmutableStack is the read-only counterpart of containingStack:
//it returns the same result, widened to an ImmutableStack.
func (s *state) containingImmutableStack(c Component) (stack ImmutableStack, slotIndex int, err error) {
	return s.containingStack(c)
}
//containingStack returns the stack and slot index currently holding c,
//rebuilding the component index lazily if necessary. It errors for nil
//components, generic components, and components whose location is not public
//information in a sanitized state.
func (s *state) containingStack(c Component) (stack Stack, slotIndex int, err error) {
	if s.componentIndex == nil {
		s.buildComponentIndex()
	}
	if c == nil {
		return nil, 0, errors.New("Nil component doesn't exist in any stack")
	}
	//Guard against a nil deck before dereferencing it, mirroring the guard in
	//componentAddedImpl; previously a deck-less component would panic here.
	if c.Deck() != nil && c.Deck().GenericComponent().Equivalent(c) {
		return nil, 0, errors.New("The generic component for that deck isn't in any stack")
	}
	item, ok := s.componentIndex[c.ptr()]
	if !ok {
		//This can happen if the state is sanitized, after
		//buildComponentIndex, which won't be able to see the component.
		if s.Sanitized() {
			return nil, 0, errors.New("That component's location is not public information.")
		}
		//If this happened and the state isn't expected, then something bad happened.
		//TODO: remove this once debugging that it doesn't happen
		log.Println("WARNING: Component didn't exist in index")
		return nil, 0, errors.New("Unexpectedly that component was not found in the index")
	}
	//Sanity check that we're allowed to see that component in that location.
	otherC := item.stack.ComponentAt(item.slotIndex)
	if otherC == nil || otherC.Generic() {
		return nil, 0, errors.New("That component's location is not public information.")
	}
	//This check should always work if the stack has been sanitized, because
	//every Policy other than PolicyVisible replaces ComponentAt with generic
	//component.
	if !otherC.Equivalent(c) {
		//If this happened and the state isn't expected, then something bad happened.
		//TODO: remove this once debugging that it doesn't happen
		log.Println("WARNING: Component didn't exist; wrong component in index")
		return nil, 0, errors.New("Unexpectedly that component was not found in the index")
	}
	return item.stack, item.slotIndex, nil
}
//buildComponentIndex creates the component index by force. Should be called
//if an operation is called on the componentIndex but it's nil.
func (s *state) buildComponentIndex() {
	s.componentIndex = make(map[Component]componentIndexItem)
	//Walk every sub-state (game, players, dynamic component values) and
	//record the location of each component found in their readers.
	if s.gameState != nil {
		s.reportComponentLocationsForReader(s.gameState.ReadSetter())
	}
	for _, player := range s.playerStates {
		if player == nil {
			continue
		}
		s.reportComponentLocationsForReader(player.ReadSetter())
	}
	for _, deckValues := range s.dynamicComponentValues {
		for _, value := range deckValues {
			if value == nil {
				continue
			}
			s.reportComponentLocationsForReader(value.ReadSetter())
		}
	}
}
//reportComponentLocationsForReader goes through the given reader, and for
//each component it finds, reports its location into the index. Used to help
//build up the index when it's first created. Only mutable Stack and Board
//properties are inspected.
func (s *state) reportComponentLocationsForReader(readSetter PropertyReadSetter) {
	for propName, propType := range readSetter.Props() {
		//Immutable properties can't hold indexable components.
		if !readSetter.PropMutable(propName) {
			continue
		}
		if propType == TypeStack {
			stack, err := readSetter.StackProp(propName)
			if err != nil {
				continue
			}
			for i, c := range stack.Components() {
				//can't use updateIndexForAllComponents because we don't want
				//to call buildComponentIndex recursively.
				s.componentAddedImpl(c, stack, i)
			}
		} else if propType == TypeBoard {
			board, err := readSetter.BoardProp(propName)
			if err != nil {
				continue
			}
			for _, stack := range board.Spaces() {
				//can't use updateIndexForAllComponents because we don't want
				//to call buildComponentIndex recursively.
				for i, c := range stack.Components() {
					s.componentAddedImpl(c, stack, i)
				}
			}
		}
	}
}
//componentAddedImpl records c's new location in the index, skipping nil and
//generic components. It assumes componentIndex is non-nil.
func (s *state) componentAddedImpl(c Component, stack Stack, slotIndex int) {
	if c == nil {
		return
	}
	deck := c.Deck()
	if deck != nil && deck.GenericComponent().Equivalent(c) {
		//Generic components are placeholders and are never indexed.
		return
	}
	s.componentIndex[c.ptr()] = componentIndexItem{
		stack:     stack,
		slotIndex: slotIndex,
	}
}
//componentAdded should be called by stacks when a component is added to them,
//by non-merged stacks. It lazily builds the index before recording the move.
func (s *state) componentAdded(c Component, stack Stack, slotIndex int) {
	if s.componentIndex == nil {
		s.buildComponentIndex()
	}
	s.componentAddedImpl(c, stack, slotIndex)
}
//updateIndexForAllComponents re-records the location of every component
//currently in the given stack.
func (s *state) updateIndexForAllComponents(stack Stack) {
	for slotIndex, component := range stack.Components() {
		s.componentAdded(component, stack, slotIndex)
	}
}
//Version returns the version number of this state.
func (s *state) Version() int {
	return s.version
}

//GameState returns the game-wide sub-state as a mutable SubState.
func (s *state) GameState() SubState {
	return s.gameState
}

//PlayerStates returns the per-player sub-states, memoized by
//setStateForSubStates.
func (s *state) PlayerStates() []PlayerState {
	return s.mutablePlayerStates
}

//DynamicComponentValues returns the per-deck dynamic component sub-states,
//memoized by setStateForSubStates.
func (s *state) DynamicComponentValues() map[string][]SubState {
	return s.mutableDynamicComponentValues
}

//Game returns the Game this state belongs to (may be nil in rare cases like
//exampleState).
func (s *state) Game() *Game {
	return s.game
}

//ImmutableGameState returns the game-wide sub-state as a read-only view.
func (s *state) ImmutableGameState() ImmutableSubState {
	return s.gameState
}
//ImmutablePlayerStates returns the per-player sub-states widened to their
//read-only interface. A fresh slice is built on every call.
func (s *state) ImmutablePlayerStates() []ImmutablePlayerState {
	result := make([]ImmutablePlayerState, len(s.playerStates))
	for i, player := range s.playerStates {
		result[i] = player
	}
	return result
}
//ImmutableCurrentPlayer returns the current player's state as a read-only
//view; nil if the delegate's current player index is invalid.
func (s *state) ImmutableCurrentPlayer() ImmutablePlayerState {
	return s.CurrentPlayer()
}
//CurrentPlayer returns the PlayerState for the delegate's current player
//index, or nil if that index doesn't address a real player slot.
func (s *state) CurrentPlayer() PlayerState {
	i := s.CurrentPlayerIndex()
	if i >= 0 && int(i) < len(s.playerStates) {
		return s.playerStates[i]
	}
	return nil
}
//CurrentPlayerIndex delegates to the game delegate's notion of whose turn it
//is for this state.
func (s *state) CurrentPlayerIndex() PlayerIndex {
	return s.game.manager.delegate.CurrentPlayerIndex(s)
}
//Copy returns a deep copy of this state as an ImmutableState, optionally
//marked as sanitized. It is a thin public wrapper around copy().
func (s *state) Copy(sanitized bool) (ImmutableState, error) {
	//TODO: just make copy() be public
	return s.copy(sanitized)
}
//copy deep-copies this state into a freshly-constructed empty state from the
//manager, copying all reader properties and the secret move counts, and
//stamping the given sanitized flag.
func (s *state) copy(sanitized bool) (*state, error) {
	newState, err := s.game.manager.emptyState(len(s.playerStates))
	if err != nil {
		return nil, err
	}
	//Deep-copy the secret move counts so the copy can diverge independently.
	moveCounts := make(map[string][]int, len(s.secretMoveCount))
	for deck, counts := range s.secretMoveCount {
		newCounts := make([]int, len(counts))
		copy(newCounts, counts)
		moveCounts[deck] = newCounts
	}
	newState.secretMoveCount = moveCounts
	newState.sanitized = sanitized
	newState.version = s.version
	newState.game = s.game
	//We copy this over, because this should only be set when computed is
	//being calculated, and during that time we'll be creating sanitized
	//copies of ourselves. However, if there are other copies created when
	//this flag is set that outlive the original flag being unset, that
	//state would be in a bad state long term...
	newState.calculatingComputed = s.calculatingComputed
	//Note: we can't copy componentIndex, because all of those items point to
	//MutableStacks in the original state, and we don't have an easy way to
	//figure out which ones they correspond to in the new one.
	if err := copyReader(s.gameState.ReadSetter(), newState.gameState.ReadSetter()); err != nil {
		return nil, err
	}
	for i := range s.playerStates {
		if err := copyReader(s.playerStates[i].ReadSetter(), newState.playerStates[i].ReadSetter()); err != nil {
			return nil, err
		}
	}
	for deckName, values := range s.dynamicComponentValues {
		for i := range values {
			if err := copyReader(values[i].ReadSetter(), newState.dynamicComponentValues[deckName][i].ReadSetter()); err != nil {
				return nil, err
			}
		}
	}
	return newState, nil
}
//setStateForSubStates should be called when the state has all of its
//sub-states set. It goes through each subState on s and calls SetState and
//SetImmutableState on it, and also populates the memoized mutable*States
//slices/maps returned by PlayerStates() and DynamicComponentValues().
func (s *state) setStateForSubStates() {
	s.gameState.SetState(s)
	s.gameState.SetImmutableState(s)
	for i := 0; i < len(s.playerStates); i++ {
		s.playerStates[i].SetState(s)
		s.playerStates[i].SetImmutableState(s)
	}
	for _, dynamicComponents := range s.dynamicComponentValues {
		for _, component := range dynamicComponents {
			component.SetState(s)
			component.SetImmutableState(s)
		}
	}
	//Memoize the widened views so the getters don't rebuild them per call.
	mutablePlayerStates := make([]PlayerState, len(s.playerStates))
	for i := 0; i < len(s.playerStates); i++ {
		mutablePlayerStates[i] = s.playerStates[i]
	}
	s.mutablePlayerStates = mutablePlayerStates
	dynamicComponentValues := make(map[string][]SubState)
	for key, arr := range s.dynamicComponentValues {
		resultArr := make([]SubState, len(arr))
		for i := 0; i < len(arr); i++ {
			resultArr[i] = arr[i]
		}
		dynamicComponentValues[key] = resultArr
	}
	s.mutableDynamicComponentValues = dynamicComponentValues
}
//validateBeforeSave ensures that for all readers, the playerIndexes are
//valid, and the stacks are too. It also verifies that a tree-based PhaseEnum,
//if configured, is currently at a leaf value.
func (s *state) validateBeforeSave() error {
	if err := validateReaderBeforeSave(s.GameState().Reader(), "Game", s); err != nil {
		return err
	}
	for i, player := range s.PlayerStates() {
		if err := validateReaderBeforeSave(player.Reader(), "Player "+strconv.Itoa(i), s); err != nil {
			return err
		}
	}
	for name, deck := range s.DynamicComponentValues() {
		for i, values := range deck {
			if err := validateReaderBeforeSave(values.Reader(), "DynamicComponentValues "+name+" "+strconv.Itoa(i), s); err != nil {
				return err
			}
		}
	}
	//If delegate.PhaseEnum returns a tree, ensure it's in a leaf state.
	delegate := s.Game().Manager().Delegate()
	e := delegate.PhaseEnum()
	if e == nil {
		//No phase enum configured: nothing more to check.
		return nil
	}
	t := e.TreeEnum()
	if t == nil {
		//Flat enums have no leaf constraint.
		return nil
	}
	if t.IsLeaf(delegate.CurrentPhase(s)) {
		return nil
	}
	return errors.New("PhaseEnum is a TreeEnum, but CurrentPhase is not a leaf value.")
}
//validateReaderBeforeSave checks every property on the given reader: all
//PlayerIndex properties must be valid for the state, and all merged stacks
//must self-validate. name is used to label any error.
func validateReaderBeforeSave(reader PropertyReader, name string, state State) error {
	for propName, propType := range reader.Props() {
		switch propType {
		case TypePlayerIndex:
			val, err := reader.PlayerIndexProp(propName)
			if err != nil {
				return errors.New("Error reading property " + propName + ": " + err.Error())
			}
			if !val.Valid(state) {
				return errors.New(propName + " was an invalid PlayerIndex, with value " + strconv.Itoa(int(val)))
			}
		case TypeStack:
			stack, err := reader.ImmutableStackProp(propName)
			if err != nil {
				return errors.New("Error reading property " + propName + ": " + err.Error())
			}
			if merged := stack.MergedStack(); merged != nil {
				if err := merged.Valid(); err != nil {
					return errors.New(propName + " was a merged stack that did not validate: " + err.Error())
				}
			}
		}
		//We don't need to check TypeBoard here, because TypeBoard never has
		//merged stacks within it, and those are the only ones who could be
		//invalid here.
	}
	return nil
}
//committed is called right after the state has been committed to the database
//and we're sure it will stick. This is the time to do any actions that were
//triggered during the state manipulation. Currently that is only timers:
//every timer queued in timersToStart is actually started here.
func (s *state) committed() {
	for _, id := range s.timersToStart {
		s.game.manager.timers.StartTimer(id)
	}
}
//StorageRecord returns this state serialized for the storage layer (computed
//properties excluded, indented).
//NOTE(review): the marshal error is silently discarded here because the
//interface has no error return; on failure the record will be nil — confirm
//callers tolerate that.
func (s *state) StorageRecord() StateStorageRecord {
	record, _ := s.customMarshalJSON(false, true)
	return record
}
//customMarshalJSON serializes the state to JSON. includeComputed controls
//whether computed properties are embedded; indent selects pretty-printed
//output via DefaultMarshalJSON instead of compact json.Marshal.
func (s *state) customMarshalJSON(includeComputed bool, indent bool) ([]byte, error) {
	obj := map[string]interface{}{
		"Game":    s.gameState,
		"Players": s.playerStates,
		"Version": s.version,
	}
	if includeComputed {
		obj["Computed"] = s.computed()
	}
	//We emit the secretMoveCount only when the state isn't sanitized. Any
	//time the state is sent via StateForPlayer sanitized will be true, so
	//this has the effect of persisting SecretMoveCount when serialized for
	//storage layer, but not when sanitized state.
	if !s.sanitized {
		if len(s.secretMoveCount) > 0 {
			obj["SecretMoveCount"] = s.secretMoveCount
		}
	}
	dynamic := s.DynamicComponentValues()
	if dynamic != nil && len(dynamic) != 0 {
		obj["Components"] = dynamic
	} else {
		//Emit an empty object (not null) so consumers always see the key.
		obj["Components"] = map[string]interface{}{}
	}
	if indent {
		return DefaultMarshalJSON(obj)
	}
	return json.Marshal(obj)
}
//MarshalJSON implements json.Marshaler: compact output including computed
//properties.
func (s *state) MarshalJSON() ([]byte, error) {
	return s.customMarshalJSON(true, false)
}

//Diagram delegates to the game delegate's textual rendering of this state.
func (s *state) Diagram() string {
	return s.game.manager.delegate.Diagram(s)
}

//Sanitized reports whether this state was produced as a sanitized copy.
func (s *state) Sanitized() bool {
	return s.sanitized
}
//ImmutableDynamicComponentValues returns the dynamic component sub-states
//widened to their read-only interface. A fresh map is built on every call.
func (s *state) ImmutableDynamicComponentValues() map[string][]ImmutableSubState {
	result := make(map[string][]ImmutableSubState, len(s.dynamicComponentValues))
	for deckName, values := range s.dynamicComponentValues {
		converted := make([]ImmutableSubState, len(values))
		for i, value := range values {
			converted[i] = value
		}
		result[deckName] = converted
	}
	return result
}
//computed lazily calculates and caches the computed properties for this
//state via the game delegate. While calculation is in progress,
//calculatingComputed is set so reentrant calls (e.g. from MarshalJSON inside
//a Compute() callback) return nil instead of recursing.
func (s *state) computed() *computedProperties {
	if s.calculatingComputed {
		//This might be called in a Compute() callback either directly, or
		//implicitly via MarshalJSON.
		return nil
	}
	if s.computedValues == nil {
		s.calculatingComputed = true
		playerProperties := make([]PropertyCollection, len(s.playerStates))
		for i, player := range s.playerStates {
			playerProperties[i] = s.game.manager.delegate.ComputedPlayerProperties(player)
		}
		s.computedValues = &computedProperties{
			Global:  s.game.manager.delegate.ComputedGlobalProperties(s),
			Players: playerProperties,
		}
		s.calculatingComputed = false
	}
	return s.computedValues
}
//SanitizedForPlayer is in sanitized.go

//Reader is the interface to fetch a PropertyReader from an object. See
//ConfigurableSubState and PropertyReadSetConfigurer for more.
type Reader interface {
	Reader() PropertyReader
}

//ReadSetter is the interface to fetch a PropertyReadSetter from an object.
//It extends Reader. See ConfigurableSubState and PropertyReadSetConfigurer
//for more.
type ReadSetter interface {
	Reader
	ReadSetter() PropertyReadSetter
}

//ReadSetConfigurer is the interface to fetch a PropertyReadSetConfigurer from
//an object. It extends ReadSetter. See ConfigurableSubState and
//PropertyReadSetConfigurer for more.
type ReadSetConfigurer interface {
	ReadSetter
	ReadSetConfigurer() PropertyReadSetConfigurer
}
//ImmutableStateSetter is included in ImmutableSubState, SubState, and
//ConfigureableSubState as the way to keep track of which ImmutableState a
//given SubState is part of. See also StateSetter, which adds getters/setters
//for mutable States. Typically you use base.SubState to implement this
//automatically.
type ImmutableStateSetter interface {
	//SetImmutableState is called to give the SubState object a pointer back
	//to the State that contains it. You can implement it yourself, or
	//anonymously embed base.SubState to get it for free.
	SetImmutableState(state ImmutableState)
	//ImmutableState returns the state that was set via SetImmutableState().
	ImmutableState() ImmutableState
}

//StateSetter is included in SubState and ConfigureableSubState as the way to
//keep track of which State a given SubState is part of. See also
//ImmutableStateSetter, which adds getters/setters for ImmutableStates.
//Typically you use base.SubState to implement this automatically.
type StateSetter interface {
	ImmutableStateSetter
	//SetState gives the SubState a pointer back to its containing mutable
	//State; State() returns what was set.
	SetState(state State)
	State() State
}
//ImmutableSubState is the interface that all non-modifiable sub-state objects
//(PlayerStates, GameStates, and DynamicComponentValues) implement. It is like
//SubState, but minus any mutator methods. See ConfigurableSubState for more
//on the SubState type hierarchy.
type ImmutableSubState interface {
	ImmutableStateSetter
	Reader
}

//SubState is the interface that all sub-state objects (PlayerStates,
//GameStates, and DynamicComponentValues) implement. It is like
//ConfigurableSubState, but minus any configure methods. This means they can't
//be used to configure the substates at creation time but can be used to
//mutate them, for example in move.Apply(). See ConfigurableSubState for more
//on the SubState type hierarchy.
type SubState interface {
	StateSetter
	ReadSetter
}
/*
ConfigurableSubState is the interface for many types of structs that store
properties and configuration specific to your game type. The values returned
from your GameDelegate's GameStateConstructor, PlayerStateConstructor, and
DynamicComponentValues constructor must all implement this interface.
(PlayerStateConstructor also adds PlayerIndex())

A ConfigurableSubState is a struct that has a collection of properties all of
a given small set of legal types, enumerated in PropertyType. These are the
core objects to maintain state in your game type. The types of properties on
these objects are strictly defined to ensure the shapes of the objects are
simple and knowable.

The engine in general doesn't know the shape of your underlying structs, so it
uses the PropertyReadSetConfigurer interface to interact with your objects.
See the documentation for PropertyReadSetConfigurer for more.

Many legal property types, like string and int, are simple and can be Read and
Set as you'd expect. But some, called interface types, are more complex
because they denote objects that carry configuration information in their
instantiation. Stacks, Timers, and Enums are examples of these. These
interface types can be Read and have their sub-properties Set. But they also
must be able to be Configured, which is to say instantiated and set onto the
underlying struct.

ConfigurableSubState is the most powerful interface for interacting with these
types of objects, because it has methods to Read, Set, and Configure all
properties. In certain cases, however, for example with an ImmutableState, it
might not be appropriate to allow Setting or Configuring properties. For this
reason, the interfaces are split into a series of layers, building up from
only Reader methods up to adding Set properties, and then terminating by
layering on Configure methods.

ConfigurablePlayerSubState is an interface that extends ConfigurableSubState
with one extra method, PlayerIndex(). There are also player-state versions for
SubState and ImmutableSubState.

Typically your game's sub-states satisfy this interface by embedding
base.SubState, and then using `boardgame-util codegen` to generate the
underlying code for the PropertyReadSetConfigurer for your object type.
*/
type ConfigurableSubState interface {
	//Every SubState should be able to have its containing State set and read
	//back, so each sub-state knows how to reach up and over into other parts
	//of the over-arching state. You can implement this interface by embedding
	//base.SubState in your struct.
	StateSetter
	//ReadSetConfigurer defines the method to retrieve the
	//PropertyReadSetConfigurer for this object type. Typically this getter--
	//and the underlying PropertyReadSetConfigurer it returns--are generated
	//via `boardgame-util codegen`.
	ReadSetConfigurer
}
//PlayerIndexer is implemented by all PlayerStates, which differentiates them
//from a generic SubState.
type PlayerIndexer interface {
	//PlayerIndex encodes the index this user's state is in the containing
	//state object, allowing the SubState to know how to fetch itself from its
	//containing State.
	PlayerIndex() PlayerIndex
}

//PlayerState represents the state of a game associated with a specific user.
//It is just a SubState with the addition of a PlayerIndex(). See
//ConfigurableSubState for more on the SubState type hierarchy.
type PlayerState interface {
	PlayerIndexer
	SubState
}

//ImmutablePlayerState represents a PlayerState SubState that is not in a
//context where mutating is legal. It is simply an ImmutableSubState that also
//has a PlayerIndex method. See more on substates at the documentation for
//ConfigurableSubState.
type ImmutablePlayerState interface {
	PlayerIndexer
	ImmutableSubState
}

//A ConfigurablePlayerState is a PlayerState that is allowed to be mutated and
//configured. It is simply a ConfigurableSubState that also has a
//PlayerIndex() method. See ConfigurableSubState for more on this hierarchy of
//objects.
type ConfigurablePlayerState interface {
	PlayerIndexer
	ConfigurableSubState
}
//DefaultMarshalJSON is a simple wrapper around json.MarshalIndent, with the
//right defaults set. If your structs need to implement MarshalJSON to output
//JSON, use this to encode it.
func DefaultMarshalJSON(obj interface{}) ([]byte, error) {
	const prefix, indent = "", "  "
	return json.MarshalIndent(obj, prefix, indent)
}
|
package client
import (
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
pb "github.com/google/go-tpm-tools/proto/attest"
)
// AttestOpts allows for customizing the functionality of Attest.
type AttestOpts struct {
	// A unique, application-specific nonce used to guarantee freshness of the
	// attestation. This must not be empty, and should generally be long enough
	// to make brute force attacks infeasible.
	//
	// For security reasons, applications should not allow for attesting with
	// arbitrary, externally-provided nonces. The nonce should be prefixed or
	// otherwise bound (i.e. via a KDF) to application-specific data. For more
	// information on why this is an issue, see this paper on robust remote
	// attestation protocols:
	// https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.70.4562&rep=rep1&type=pdf
	Nonce []byte
	// TCG Canonical Event Log to add to the attestation.
	// Currently, we only support PCR replay for PCRs orthogonal to those in the
	// firmware event log, where PCRs 0-9 and 14 are often measured. If the two
	// logs overlap, server-side verification using this library may fail.
	CanonicalEventLog []byte
	// Indicates whether the AK certificate chain should be retrieved (over
	// HTTP, following the cert's issuing-certificate URLs) for validation.
	ValidateCertChain bool
}
// getCertificateChain constructs the certificate chain for the key's
// certificate by fetching each URL in the cert's IssuingCertificateURL list
// via http.Get. Each fetched blob is sanity-checked by parsing it as a DER
// certificate before being appended to the returned chain.
func (k *Key) getCertificateChain() ([][]byte, error) {
	var certs [][]byte
	for _, url := range k.cert.IssuingCertificateURL {
		resp, err := http.Get(url)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve certificate at %v: %v", url, err)
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			return nil, fmt.Errorf("certificate retrieval from %s returned non-OK status: %v", url, resp.StatusCode)
		}
		certBytes, err := ioutil.ReadAll(resp.Body)
		// Close eagerly inside the loop: the original deferred Close held
		// every response body open until the function returned.
		resp.Body.Close()
		if err != nil {
			return nil, fmt.Errorf("failed to read response body from %s: %v", url, err)
		}
		// Verify that the bytes can be parsed into a certificate.
		if _, err := x509.ParseCertificate(certBytes); err != nil {
			return nil, fmt.Errorf("error parsing certificate from %s: %v", url, err)
		}
		certs = append(certs, certBytes)
	}
	return certs, nil
}
// Attest generates an Attestation containing the TCG Event Log and a Quote over
// all PCR banks. The provided nonce can be used to guarantee freshness of the
// attestation. This function will return an error if the key is not a
// restricted signing key.
//
// AttestOpts is used for additional configuration of the Attestation process.
// This is primarily used to pass the attestation's nonce:
//
//   attestation, err := key.Attest(client.AttestOpts{Nonce: my_nonce})
func (k *Key) Attest(opts AttestOpts) (*pb.Attestation, error) {
	if len(opts.Nonce) == 0 {
		return nil, fmt.Errorf("provided nonce must not be empty")
	}
	sels, err := implementedPCRs(k.rw)
	if err != nil {
		return nil, err
	}
	attestation := pb.Attestation{}
	if attestation.AkPub, err = k.PublicArea().Encode(); err != nil {
		return nil, fmt.Errorf("failed to encode public area: %w", err)
	}
	attestation.AkCert = k.CertDERBytes()
	// Quote every implemented PCR bank with the caller's nonce.
	for _, sel := range sels {
		quote, err := k.Quote(sel, opts.Nonce)
		if err != nil {
			return nil, err
		}
		attestation.Quotes = append(attestation.Quotes, quote)
	}
	if attestation.EventLog, err = GetEventLog(k.rw); err != nil {
		return nil, fmt.Errorf("failed to retrieve TCG Event Log: %w", err)
	}
	if len(opts.CanonicalEventLog) != 0 {
		attestation.CanonicalEventLog = opts.CanonicalEventLog
	}
	// Construct certificate chain. Fix: the exported field is
	// ValidateCertChain; the lowercase opts.validateCertChain did not compile.
	if opts.ValidateCertChain {
		attestation.IntermediateCerts, err = k.getCertificateChain()
		if err != nil {
			return nil, fmt.Errorf("error creating intermediate cert chain: %v", err)
		}
	}
	return &attestation, nil
}
Fix usages of validateCertChain: the struct field is exported as ValidateCertChain, so references must use the capitalized name.
package client
import (
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
pb "github.com/google/go-tpm-tools/proto/attest"
)
// AttestOpts allows for customizing the functionality of Attest.
type AttestOpts struct {
	// A unique, application-specific nonce used to guarantee freshness of the
	// attestation. This must not be empty, and should generally be long enough
	// to make brute force attacks infeasible.
	//
	// For security reasons, applications should not allow for attesting with
	// arbitrary, externally-provided nonces. The nonce should be prefixed or
	// otherwise bound (i.e. via a KDF) to application-specific data. For more
	// information on why this is an issue, see this paper on robust remote
	// attestation protocols:
	// https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.70.4562&rep=rep1&type=pdf
	Nonce []byte
	// TCG Canonical Event Log to add to the attestation.
	// Currently, we only support PCR replay for PCRs orthogonal to those in the
	// firmware event log, where PCRs 0-9 and 14 are often measured. If the two
	// logs overlap, server-side verification using this library may fail.
	CanonicalEventLog []byte
	// Indicates whether the AK certificate chain should be retrieved (over
	// HTTP, following the cert's issuing-certificate URLs) for validation.
	ValidateCertChain bool
}
// getCertificateChain constructs the certificate chain for the key's
// certificate by fetching each URL in the cert's IssuingCertificateURL list
// via http.Get. Each fetched blob is sanity-checked by parsing it as a DER
// certificate before being appended to the returned chain.
func (k *Key) getCertificateChain() ([][]byte, error) {
	var certs [][]byte
	for _, url := range k.cert.IssuingCertificateURL {
		resp, err := http.Get(url)
		if err != nil {
			return nil, fmt.Errorf("failed to retrieve certificate at %v: %v", url, err)
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			return nil, fmt.Errorf("certificate retrieval from %s returned non-OK status: %v", url, resp.StatusCode)
		}
		certBytes, err := ioutil.ReadAll(resp.Body)
		// Close eagerly inside the loop: the original deferred Close held
		// every response body open until the function returned.
		resp.Body.Close()
		if err != nil {
			return nil, fmt.Errorf("failed to read response body from %s: %v", url, err)
		}
		// Verify that the bytes can be parsed into a certificate.
		if _, err := x509.ParseCertificate(certBytes); err != nil {
			return nil, fmt.Errorf("error parsing certificate from %s: %v", url, err)
		}
		certs = append(certs, certBytes)
	}
	return certs, nil
}
// Attest generates an Attestation containing the TCG Event Log and a Quote over
// all PCR banks. The provided nonce can be used to guarantee freshness of the
// attestation. This function will return an error if the key is not a
// restricted signing key.
//
// AttestOpts is used for additional configuration of the Attestation process.
// This is primarily used to pass the attestation's nonce:
//
//   attestation, err := key.Attest(client.AttestOpts{Nonce: my_nonce})
func (k *Key) Attest(opts AttestOpts) (*pb.Attestation, error) {
	if len(opts.Nonce) == 0 {
		return nil, fmt.Errorf("provided nonce must not be empty")
	}
	sels, err := implementedPCRs(k.rw)
	if err != nil {
		return nil, err
	}
	attestation := pb.Attestation{}
	if attestation.AkPub, err = k.PublicArea().Encode(); err != nil {
		return nil, fmt.Errorf("failed to encode public area: %w", err)
	}
	attestation.AkCert = k.CertDERBytes()
	// Quote every implemented PCR bank with the caller's nonce.
	for _, sel := range sels {
		quote, err := k.Quote(sel, opts.Nonce)
		if err != nil {
			return nil, err
		}
		attestation.Quotes = append(attestation.Quotes, quote)
	}
	if attestation.EventLog, err = GetEventLog(k.rw); err != nil {
		return nil, fmt.Errorf("failed to retrieve TCG Event Log: %w", err)
	}
	if len(opts.CanonicalEventLog) != 0 {
		attestation.CanonicalEventLog = opts.CanonicalEventLog
	}
	// Construct certificate chain, if the caller opted in.
	if opts.ValidateCertChain {
		attestation.IntermediateCerts, err = k.getCertificateChain()
		if err != nil {
			return nil, fmt.Errorf("error creating intermediate cert chain: %v", err)
		}
	}
	return &attestation, nil
}
|
// Discordgo - Discord bindings for Go
// Available at https://github.com/bwmarrin/discordgo
// Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains code related to state tracking. If enabled, state
// tracking will capture the initial READY packet and many other websocket
// events and maintain an in-memory state of of guilds, channels, users, and
// so forth. This information can be accessed through the Session.State struct.
package discordgo
import (
"errors"
"sync"
)
// ErrNilState is returned by State methods when the receiver is nil.
var ErrNilState = errors.New("State not instantiated, please use discordgo.New() or assign Session.State.")
// A State contains the current known state.
// As discord sends this in a READY blob, it seems reasonable to simply
// use that struct as the data store.
type State struct {
	sync.RWMutex
	Ready

	// MaxMessageCount limits how many messages are retained per channel.
	MaxMessageCount int
	// Track* flags control which categories of gateway events update state.
	TrackChannels  bool
	TrackEmojis    bool
	TrackMembers   bool
	TrackRoles     bool
	TrackVoice     bool
	TrackPresences bool

	// guildMap and channelMap are lookup indexes by ID; guarded by the
	// embedded RWMutex.
	guildMap   map[string]*Guild
	channelMap map[string]*Channel
}
// NewState creates an empty state with every tracking option enabled and the
// internal lookup maps initialized.
func NewState() *State {
	state := &State{
		Ready: Ready{
			PrivateChannels: []*Channel{},
			Guilds:          []*Guild{},
		},
		guildMap:   make(map[string]*Guild),
		channelMap: make(map[string]*Channel),
	}
	state.TrackChannels = true
	state.TrackEmojis = true
	state.TrackMembers = true
	state.TrackRoles = true
	state.TrackVoice = true
	state.TrackPresences = true
	return state
}
// GuildAdd adds a guild to the current world state, or
// updates it if it already exists.
func (s *State) GuildAdd(guild *Guild) error {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// Update the channels to point to the right guild, adding them to the channelMap as we go
	for _, c := range guild.Channels {
		s.channelMap[c.ID] = c
	}
	if g, ok := s.guildMap[guild.ID]; ok {
		// We are about to replace `g` in the state with `guild`, but first we need to
		// make sure we preserve any fields that the `guild` doesn't contain from `g`.
		// (Partial guild updates may omit slices the old snapshot still has.)
		if guild.Roles == nil {
			guild.Roles = g.Roles
		}
		if guild.Emojis == nil {
			guild.Emojis = g.Emojis
		}
		if guild.Members == nil {
			guild.Members = g.Members
		}
		if guild.Presences == nil {
			guild.Presences = g.Presences
		}
		if guild.Channels == nil {
			guild.Channels = g.Channels
		}
		if guild.VoiceStates == nil {
			guild.VoiceStates = g.VoiceStates
		}
		// Copy in place so existing pointers to the old *Guild stay valid.
		*g = *guild
		return nil
	}
	// New guild: append to the Ready slice and index it.
	s.Guilds = append(s.Guilds, guild)
	s.guildMap[guild.ID] = guild
	return nil
}
// GuildRemove removes a guild from current world state.
func (s *State) GuildRemove(guild *Guild) error {
	if s == nil {
		return ErrNilState
	}
	// s.Guild acquires its own RLock, so it must be called before s.Lock.
	_, err := s.Guild(guild.ID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Remove from both the index map and the Ready slice.
	delete(s.guildMap, guild.ID)
	for i, g := range s.Guilds {
		if g.ID == guild.ID {
			s.Guilds = append(s.Guilds[:i], s.Guilds[i+1:]...)
			return nil
		}
	}
	return nil
}
// Guild gets a guild by ID.
// Useful for querying if @me is in a guild:
// _, err := discordgo.Session.State.Guild(guildID)
// isInGuild := err == nil
func (s *State) Guild(guildID string) (*Guild, error) {
	if s == nil {
		return nil, ErrNilState
	}
	// Read-only map lookup under the read lock.
	s.RLock()
	defer s.RUnlock()
	if g, ok := s.guildMap[guildID]; ok {
		return g, nil
	}
	return nil, errors.New("Guild not found.")
}
// TODO: Consider moving Guild state update methods onto *Guild.
// MemberAdd adds a member to the current world state, or
// updates it if it already exists.
func (s *State) MemberAdd(member *Member) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(member.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// If the member already exists, merge the incoming fields into the
	// stored *Member rather than replacing it.
	for _, m := range guild.Members {
		if m.User.ID == member.User.ID {
			// Only overwrite fields the update actually carries.
			if member.JoinedAt != "" {
				m.JoinedAt = member.JoinedAt
			}
			if member.Roles != nil {
				m.Roles = member.Roles
			}
			// Seems to always be provided
			m.Nick = member.Nick
			m.User = member.User
			return nil
		}
	}
	guild.Members = append(guild.Members, member)
	return nil
}
// MemberRemove removes a member from current world state.
func (s *State) MemberRemove(member *Member) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(member.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the member out of the guild's slice by user ID.
	for i, m := range guild.Members {
		if m.User.ID == member.User.ID {
			guild.Members = append(guild.Members[:i], guild.Members[i+1:]...)
			return nil
		}
	}
	return errors.New("Member not found.")
}
// Member gets a member by ID from a guild.
func (s *State) Member(guildID, userID string) (*Member, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, m := range guild.Members {
		if m.User.ID == userID {
			return m, nil
		}
	}
	return nil, errors.New("Member not found.")
}
// PresenceAdd adds a presence to the current world state, or
// updates it if it already exists.
func (s *State) PresenceAdd(guildID string, presence *Presence) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	for _, p := range guild.Presences {
		if p.User.ID == presence.User.ID {
			// Game is always overwritten (a nil Game means "stopped playing").
			p.Game = presence.Game
			// Roles and Status are merged only when the update carries them.
			if presence.Roles != nil {
				p.Roles = presence.Roles
			}
			if presence.Status != "" {
				p.Status = presence.Status
			}
			return nil
		}
	}
	guild.Presences = append(guild.Presences, presence)
	return nil
}
// PresenceRemove removes a member from current world state.
func (s *State) PresenceRemove(guildID, userID string) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the presence out of the guild's slice by user ID.
	for i, m := range guild.Presences {
		if m.User.ID == userID {
			guild.Presences = append(guild.Presences[:i], guild.Presences[i+1:]...)
			return nil
		}
	}
	return errors.New("User not found.")
}
// Presence gets a presence by user ID from a guild.
func (s *State) Presence(guildID, userID string) (*Presence, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only lookup: take the read lock, consistent with the other
	// getters (Member, Role, Channel). The previous version took the full
	// write lock here, needlessly blocking concurrent readers.
	s.RLock()
	defer s.RUnlock()
	for _, p := range guild.Presences {
		if p.User.ID == userID {
			return p, nil
		}
	}
	return nil, errors.New("User not found.")
}
// RoleAdd adds a role to the current world state, or
// updates it if it already exists.
func (s *State) RoleAdd(guildID string, role *Role) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Replace an existing role with the same ID, otherwise append.
	for i, r := range guild.Roles {
		if r.ID == role.ID {
			guild.Roles[i] = role
			return nil
		}
	}
	guild.Roles = append(guild.Roles, role)
	return nil
}
// RoleRemove removes a role from current world state by ID.
func (s *State) RoleRemove(guildID, roleID string) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the role out of the guild's slice by ID.
	for i, r := range guild.Roles {
		if r.ID == roleID {
			guild.Roles = append(guild.Roles[:i], guild.Roles[i+1:]...)
			return nil
		}
	}
	return errors.New("Role not found.")
}
// Role gets a role by ID from a guild.
func (s *State) Role(guildID, roleID string) (*Role, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, r := range guild.Roles {
		if r.ID == roleID {
			return r, nil
		}
	}
	return nil, errors.New("Role not found.")
}
// ChannelAdd adds a guild to the current world state, or
// updates it if it already exists.
// Channels may exist either as PrivateChannels or inside
// a guild.
func (s *State) ChannelAdd(channel *Channel) error {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// If the channel exists, replace it
	if c, ok := s.channelMap[channel.ID]; ok {
		// Preserve slices the update does not carry (partial channel updates
		// may omit Messages / PermissionOverwrites).
		if channel.Messages == nil {
			channel.Messages = c.Messages
		}
		if channel.PermissionOverwrites == nil {
			channel.PermissionOverwrites = c.PermissionOverwrites
		}
		// Copy in place so existing pointers to the old *Channel stay valid.
		*c = *channel
		return nil
	}
	// New channel: private channels live on the State itself, guild channels
	// on their owning guild (which must already be tracked).
	if channel.IsPrivate {
		s.PrivateChannels = append(s.PrivateChannels, channel)
	} else {
		guild, ok := s.guildMap[channel.GuildID]
		if !ok {
			return errors.New("Guild for channel not found.")
		}
		guild.Channels = append(guild.Channels, channel)
	}
	s.channelMap[channel.ID] = channel
	return nil
}
// ChannelRemove removes a channel from current world state.
func (s *State) ChannelRemove(channel *Channel) error {
	if s == nil {
		return ErrNilState
	}
	// s.Channel takes its own RLock, so it must run before s.Lock below.
	_, err := s.Channel(channel.ID)
	if err != nil {
		return err
	}
	if channel.IsPrivate {
		s.Lock()
		defer s.Unlock()
		// Splice out of the private-channel slice.
		for i, c := range s.PrivateChannels {
			if c.ID == channel.ID {
				s.PrivateChannels = append(s.PrivateChannels[:i], s.PrivateChannels[i+1:]...)
				break
			}
		}
	} else {
		guild, err := s.Guild(channel.GuildID)
		if err != nil {
			return err
		}
		s.Lock()
		defer s.Unlock()
		// Splice out of the owning guild's channel slice.
		for i, c := range guild.Channels {
			if c.ID == channel.ID {
				guild.Channels = append(guild.Channels[:i], guild.Channels[i+1:]...)
				break
			}
		}
	}
	// Always drop the index entry (deferred Unlock still holds the lock here).
	delete(s.channelMap, channel.ID)
	return nil
}
// GuildChannel gets a channel by ID from a guild.
// This method is Deprecated, use Channel(channelID)
// GuildChannel gets a channel by ID from a guild.
// This method is Deprecated, use Channel(channelID)
func (s *State) GuildChannel(guildID, channelID string) (*Channel, error) {
	// guildID is ignored; channels are indexed globally by ID.
	channel, err := s.Channel(channelID)
	return channel, err
}
// PrivateChannel gets a private channel by ID.
// This method is Deprecated, use Channel(channelID)
// PrivateChannel gets a private channel by ID.
// This method is Deprecated, use Channel(channelID)
func (s *State) PrivateChannel(channelID string) (*Channel, error) {
	// Private channels share the global channel index.
	channel, err := s.Channel(channelID)
	return channel, err
}
// Channel gets a channel by ID, it will look in all guilds an private channels.
func (s *State) Channel(channelID string) (*Channel, error) {
	if s == nil {
		return nil, ErrNilState
	}
	// Read-only map lookup under the read lock; covers both guild and
	// private channels since both are indexed in channelMap.
	s.RLock()
	defer s.RUnlock()
	if c, ok := s.channelMap[channelID]; ok {
		return c, nil
	}
	return nil, errors.New("Channel not found.")
}
// Emoji returns an emoji for a guild and emoji id.
func (s *State) Emoji(guildID, emojiID string) (*Emoji, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, e := range guild.Emojis {
		if e.ID == emojiID {
			return e, nil
		}
	}
	return nil, errors.New("Emoji not found.")
}
// EmojiAdd adds an emoji to the current world state.
func (s *State) EmojiAdd(guildID string, emoji *Emoji) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Replace an existing emoji with the same ID, otherwise append.
	for i, e := range guild.Emojis {
		if e.ID == emoji.ID {
			guild.Emojis[i] = emoji
			return nil
		}
	}
	guild.Emojis = append(guild.Emojis, emoji)
	return nil
}
// EmojisAdd adds multiple emojis to the world state.
// EmojisAdd adds multiple emojis to the world state.
// It delegates to EmojiAdd per entry and stops at the first failure.
func (s *State) EmojisAdd(guildID string, emojis []*Emoji) error {
	for i := range emojis {
		err := s.EmojiAdd(guildID, emojis[i])
		if err != nil {
			return err
		}
	}
	return nil
}
// MessageAdd adds a message to the current world state, or updates it if it exists.
// If the channel cannot be found, the message is discarded.
// Messages are kept in state up to s.MaxMessageCount
func (s *State) MessageAdd(message *Message) error {
	if s == nil {
		return ErrNilState
	}
	c, err := s.Channel(message.ChannelID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// If the message exists, merge in the new message contents.
	for _, m := range c.Messages {
		if m.ID == message.ID {
			// Only overwrite fields the update actually carries.
			if message.Content != "" {
				m.Content = message.Content
			}
			if message.EditedTimestamp != "" {
				m.EditedTimestamp = message.EditedTimestamp
			}
			if message.Mentions != nil {
				m.Mentions = message.Mentions
			}
			if message.Embeds != nil {
				m.Embeds = message.Embeds
			}
			if message.Attachments != nil {
				m.Attachments = message.Attachments
			}
			if message.Timestamp != "" {
				m.Timestamp = message.Timestamp
			}
			if message.Author != nil {
				m.Author = message.Author
			}
			return nil
		}
	}
	c.Messages = append(c.Messages, message)
	// Trim to the newest MaxMessageCount messages.
	// NOTE(review): if MaxMessageCount is 0 this trims the list to empty on
	// every add; callers (onInterface) guard with MaxMessageCount != 0.
	if len(c.Messages) > s.MaxMessageCount {
		c.Messages = c.Messages[len(c.Messages)-s.MaxMessageCount:]
	}
	return nil
}
// MessageRemove removes a message from the world state.
func (s *State) MessageRemove(message *Message) error {
	if s == nil {
		return ErrNilState
	}
	c, err := s.Channel(message.ChannelID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the message out of the channel's slice by ID.
	for i, m := range c.Messages {
		if m.ID == message.ID {
			c.Messages = append(c.Messages[:i], c.Messages[i+1:]...)
			return nil
		}
	}
	return errors.New("Message not found.")
}
// voiceStateUpdate applies a VoiceStateUpdate event to the owning guild:
// an empty ChannelID means the user left voice (state removed); otherwise
// the user's voice state is replaced or appended.
func (s *State) voiceStateUpdate(update *VoiceStateUpdate) error {
	guild, err := s.Guild(update.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Handle Leaving Channel
	if update.ChannelID == "" {
		for i, state := range guild.VoiceStates {
			if state.UserID == update.UserID {
				guild.VoiceStates = append(guild.VoiceStates[:i], guild.VoiceStates[i+1:]...)
				return nil
			}
		}
	} else {
		// Joined or moved: replace the existing entry, or append a new one.
		for i, state := range guild.VoiceStates {
			if state.UserID == update.UserID {
				guild.VoiceStates[i] = update.VoiceState
				return nil
			}
		}
		guild.VoiceStates = append(guild.VoiceStates, update.VoiceState)
	}
	return nil
}
// Message gets a message by channel and message ID.
func (s *State) Message(channelID, messageID string) (*Message, error) {
	if s == nil {
		return nil, ErrNilState
	}
	c, err := s.Channel(channelID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, m := range c.Messages {
		if m.ID == messageID {
			return m, nil
		}
	}
	return nil, errors.New("Message not found.")
}
// OnReady takes a Ready event and updates all internal state.
// onReady takes a Ready event and updates all internal state.
func (s *State) onReady(se *Session, r *Ready) (err error) {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// We must track at least the current user for Voice, even
	// if state is disabled, store the bare essentials.
	if !se.StateEnabled {
		ready := Ready{
			Version:           r.Version,
			SessionID:         r.SessionID,
			HeartbeatInterval: r.HeartbeatInterval,
			User:              r.User,
		}
		s.Ready = ready
		return nil
	}
	// Full tracking: adopt the READY payload wholesale, then rebuild the
	// ID indexes for guilds and all (guild + private) channels.
	s.Ready = *r
	for _, g := range s.Guilds {
		s.guildMap[g.ID] = g
		for _, c := range g.Channels {
			s.channelMap[c.ID] = c
		}
	}
	for _, c := range s.PrivateChannels {
		s.channelMap[c.ID] = c
	}
	return nil
}
// onInterface handles all events related to states.
// onInterface handles all events related to states.
func (s *State) onInterface(se *Session, i interface{}) (err error) {
	if s == nil {
		return ErrNilState
	}
	// READY is handled even when state tracking is disabled (bare essentials).
	r, ok := i.(*Ready)
	if ok {
		return s.onReady(se, r)
	}
	if !se.StateEnabled {
		return nil
	}
	// Dispatch each tracked event type to the matching state mutator,
	// gated on the corresponding Track* flag.
	switch t := i.(type) {
	case *GuildCreate:
		err = s.GuildAdd(t.Guild)
	case *GuildUpdate:
		err = s.GuildAdd(t.Guild)
	case *GuildDelete:
		err = s.GuildRemove(t.Guild)
	case *GuildMemberAdd:
		if s.TrackMembers {
			err = s.MemberAdd(t.Member)
		}
	case *GuildMemberUpdate:
		if s.TrackMembers {
			err = s.MemberAdd(t.Member)
		}
	case *GuildMemberRemove:
		if s.TrackMembers {
			err = s.MemberRemove(t.Member)
		}
		// NOTE(review): when both flags are set, this second assignment
		// discards any error from MemberRemove above — confirm intended.
		if s.TrackPresences {
			err = s.PresenceRemove(t.GuildID, t.Member.User.ID)
		}
	case *PresenceUpdate:
		if s.TrackPresences {
			err = s.PresenceAdd(t.GuildID, &t.Presence)
		}
	case *GuildRoleCreate:
		if s.TrackRoles {
			err = s.RoleAdd(t.GuildID, t.Role)
		}
	case *GuildRoleUpdate:
		if s.TrackRoles {
			err = s.RoleAdd(t.GuildID, t.Role)
		}
	case *GuildRoleDelete:
		if s.TrackRoles {
			err = s.RoleRemove(t.GuildID, t.RoleID)
		}
	case *GuildEmojisUpdate:
		if s.TrackEmojis {
			err = s.EmojisAdd(t.GuildID, t.Emojis)
		}
	case *ChannelCreate:
		if s.TrackChannels {
			err = s.ChannelAdd(t.Channel)
		}
	case *ChannelUpdate:
		if s.TrackChannels {
			err = s.ChannelAdd(t.Channel)
		}
	case *ChannelDelete:
		if s.TrackChannels {
			err = s.ChannelRemove(t.Channel)
		}
	case *MessageCreate:
		if s.MaxMessageCount != 0 {
			err = s.MessageAdd(t.Message)
		}
	case *MessageUpdate:
		if s.MaxMessageCount != 0 {
			err = s.MessageAdd(t.Message)
		}
	case *MessageDelete:
		if s.MaxMessageCount != 0 {
			err = s.MessageRemove(t.Message)
		}
	case *VoiceStateUpdate:
		if s.TrackVoice {
			err = s.voiceStateUpdate(t)
		}
	}
	return
}
// UserChannelPermissions returns the permission of a user in a channel.
// userID : The ID of the user to calculate permissions for.
// channelID : The ID of the channel to calculate permission for.
func (s *State) UserChannelPermissions(userID, channelID string) (apermissions int, err error) {
	if s == nil {
		return 0, ErrNilState
	}
	channel, err := s.Channel(channelID)
	if err != nil {
		return
	}
	guild, err := s.Guild(channel.GuildID)
	if err != nil {
		return
	}
	// The guild owner implicitly has every permission.
	if userID == guild.OwnerID {
		apermissions = PermissionAll
		return
	}
	member, err := s.Member(guild.ID, userID)
	if err != nil {
		return
	}
	// Base permissions: the @everyone role (its ID equals the guild ID).
	for _, role := range guild.Roles {
		if role.ID == guild.ID {
			apermissions |= role.Permissions
			break
		}
	}
	// OR in the permissions of every role the member holds.
	for _, role := range guild.Roles {
		for _, roleID := range member.Roles {
			if role.ID == roleID {
				apermissions |= role.Permissions
				break
			}
		}
	}
	// Administrator grants everything at the guild level.
	if apermissions&PermissionAdministrator > 0 {
		apermissions |= PermissionAll
	}
	// Role and member overwites overrides the everyone role, so check for everyone role overwrite first
	for _, overwrite := range channel.PermissionOverwrites {
		if overwrite.Type == "role" && overwrite.ID == guild.ID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	// Member overwrites can override role overrides, so do two passes
	// (role overwrites for the member's roles first, then member overwrites).
	for _, overwrite := range channel.PermissionOverwrites {
		for _, roleID := range member.Roles {
			if overwrite.Type == "role" && roleID == overwrite.ID {
				apermissions &= ^overwrite.Deny
				apermissions |= overwrite.Allow
				break
			}
		}
	}
	// Member-specific overwrite wins last.
	for _, overwrite := range channel.PermissionOverwrites {
		if overwrite.Type == "member" && overwrite.ID == userID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	// Administrator also grants all channel-scoped permissions regardless
	// of overwrites applied above.
	if apermissions&PermissionAdministrator > 0 {
		apermissions |= PermissionAllChannel
	}
	return
}
keep deleted messages — the revision of this file that follows comments out the MessageDelete handling so that deleted messages remain in the tracked state.
// Discordgo - Discord bindings for Go
// Available at https://github.com/bwmarrin/discordgo
// Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains code related to state tracking. If enabled, state
// tracking will capture the initial READY packet and many other websocket
// events and maintain an in-memory state of of guilds, channels, users, and
// so forth. This information can be accessed through the Session.State struct.
package discordgo
import (
"errors"
"sync"
)
// ErrNilState is returned when a State method is invoked on a nil *State
// receiver (every exported method guards against this explicitly).
var ErrNilState = errors.New("State not instantiated, please use discordgo.New() or assign Session.State.")
// A State contains the current known state.
// As discord sends this in a READY blob, it seems reasonable to simply
// use that struct as the data store.
type State struct {
	sync.RWMutex // guards all fields below; getters take RLock, mutators take Lock
	Ready        // embedded READY payload: Guilds, PrivateChannels, User, SessionID, ...
	// MaxMessageCount caps how many messages are retained per channel; 0
	// disables message tracking (see onInterface).
	MaxMessageCount int
	// Track* flags select which gateway event categories update this state.
	TrackChannels  bool
	TrackEmojis    bool
	TrackMembers   bool
	TrackRoles     bool
	TrackVoice     bool
	TrackPresences bool
	// guildMap / channelMap index the embedded Ready slices by ID for O(1) lookup.
	guildMap   map[string]*Guild
	channelMap map[string]*Channel
}
// NewState creates an empty state.
// NewState creates an empty state with all tracking flags enabled and the
// lookup maps initialized.
func NewState() *State {
	state := new(State)
	state.PrivateChannels = []*Channel{}
	state.Guilds = []*Guild{}
	state.TrackChannels = true
	state.TrackEmojis = true
	state.TrackMembers = true
	state.TrackRoles = true
	state.TrackVoice = true
	state.TrackPresences = true
	state.guildMap = make(map[string]*Guild)
	state.channelMap = make(map[string]*Channel)
	return state
}
// GuildAdd adds a guild to the current world state, or
// updates it if it already exists.
func (s *State) GuildAdd(guild *Guild) error {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// Update the channels to point to the right guild, adding them to the channelMap as we go
	for _, c := range guild.Channels {
		s.channelMap[c.ID] = c
	}
	if g, ok := s.guildMap[guild.ID]; ok {
		// We are about to replace `g` in the state with `guild`, but first we need to
		// make sure we preserve any fields that the `guild` doesn't contain from `g`.
		// (Partial guild updates may omit slices the old snapshot still has.)
		if guild.Roles == nil {
			guild.Roles = g.Roles
		}
		if guild.Emojis == nil {
			guild.Emojis = g.Emojis
		}
		if guild.Members == nil {
			guild.Members = g.Members
		}
		if guild.Presences == nil {
			guild.Presences = g.Presences
		}
		if guild.Channels == nil {
			guild.Channels = g.Channels
		}
		if guild.VoiceStates == nil {
			guild.VoiceStates = g.VoiceStates
		}
		// Copy in place so existing pointers to the old *Guild stay valid.
		*g = *guild
		return nil
	}
	// New guild: append to the Ready slice and index it.
	s.Guilds = append(s.Guilds, guild)
	s.guildMap[guild.ID] = guild
	return nil
}
// GuildRemove removes a guild from current world state.
func (s *State) GuildRemove(guild *Guild) error {
	if s == nil {
		return ErrNilState
	}
	// s.Guild acquires its own RLock, so it must be called before s.Lock.
	_, err := s.Guild(guild.ID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Remove from both the index map and the Ready slice.
	delete(s.guildMap, guild.ID)
	for i, g := range s.Guilds {
		if g.ID == guild.ID {
			s.Guilds = append(s.Guilds[:i], s.Guilds[i+1:]...)
			return nil
		}
	}
	return nil
}
// Guild gets a guild by ID.
// Useful for querying if @me is in a guild:
// _, err := discordgo.Session.State.Guild(guildID)
// isInGuild := err == nil
func (s *State) Guild(guildID string) (*Guild, error) {
	if s == nil {
		return nil, ErrNilState
	}
	// Read-only map lookup under the read lock.
	s.RLock()
	defer s.RUnlock()
	if g, ok := s.guildMap[guildID]; ok {
		return g, nil
	}
	return nil, errors.New("Guild not found.")
}
// TODO: Consider moving Guild state update methods onto *Guild.
// MemberAdd adds a member to the current world state, or
// updates it if it already exists.
func (s *State) MemberAdd(member *Member) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(member.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// If the member already exists, merge the incoming fields into the
	// stored *Member rather than replacing it.
	for _, m := range guild.Members {
		if m.User.ID == member.User.ID {
			// Only overwrite fields the update actually carries.
			if member.JoinedAt != "" {
				m.JoinedAt = member.JoinedAt
			}
			if member.Roles != nil {
				m.Roles = member.Roles
			}
			// Seems to always be provided
			m.Nick = member.Nick
			m.User = member.User
			return nil
		}
	}
	guild.Members = append(guild.Members, member)
	return nil
}
// MemberRemove removes a member from current world state.
func (s *State) MemberRemove(member *Member) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(member.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the member out of the guild's slice by user ID.
	for i, m := range guild.Members {
		if m.User.ID == member.User.ID {
			guild.Members = append(guild.Members[:i], guild.Members[i+1:]...)
			return nil
		}
	}
	return errors.New("Member not found.")
}
// Member gets a member by ID from a guild.
func (s *State) Member(guildID, userID string) (*Member, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, m := range guild.Members {
		if m.User.ID == userID {
			return m, nil
		}
	}
	return nil, errors.New("Member not found.")
}
// PresenceAdd adds a presence to the current world state, or
// updates it if it already exists.
func (s *State) PresenceAdd(guildID string, presence *Presence) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	for _, p := range guild.Presences {
		if p.User.ID == presence.User.ID {
			// Game is always overwritten (a nil Game means "stopped playing").
			p.Game = presence.Game
			// Roles and Status are merged only when the update carries them.
			if presence.Roles != nil {
				p.Roles = presence.Roles
			}
			if presence.Status != "" {
				p.Status = presence.Status
			}
			return nil
		}
	}
	guild.Presences = append(guild.Presences, presence)
	return nil
}
// PresenceRemove removes a member from current world state.
func (s *State) PresenceRemove(guildID, userID string) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the presence out of the guild's slice by user ID.
	for i, m := range guild.Presences {
		if m.User.ID == userID {
			guild.Presences = append(guild.Presences[:i], guild.Presences[i+1:]...)
			return nil
		}
	}
	return errors.New("User not found.")
}
// Presence gets a presence by user ID from a guild.
func (s *State) Presence(guildID, userID string) (*Presence, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only lookup: take the read lock, consistent with the other
	// getters (Member, Role, Channel). The previous version took the full
	// write lock here, needlessly blocking concurrent readers.
	s.RLock()
	defer s.RUnlock()
	for _, p := range guild.Presences {
		if p.User.ID == userID {
			return p, nil
		}
	}
	return nil, errors.New("User not found.")
}
// RoleAdd adds a role to the current world state, or
// updates it if it already exists.
func (s *State) RoleAdd(guildID string, role *Role) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Replace an existing role with the same ID, otherwise append.
	for i, r := range guild.Roles {
		if r.ID == role.ID {
			guild.Roles[i] = role
			return nil
		}
	}
	guild.Roles = append(guild.Roles, role)
	return nil
}
// RoleRemove removes a role from current world state by ID.
func (s *State) RoleRemove(guildID, roleID string) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the role out of the guild's slice by ID.
	for i, r := range guild.Roles {
		if r.ID == roleID {
			guild.Roles = append(guild.Roles[:i], guild.Roles[i+1:]...)
			return nil
		}
	}
	return errors.New("Role not found.")
}
// Role gets a role by ID from a guild.
func (s *State) Role(guildID, roleID string) (*Role, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, r := range guild.Roles {
		if r.ID == roleID {
			return r, nil
		}
	}
	return nil, errors.New("Role not found.")
}
// ChannelAdd adds a guild to the current world state, or
// updates it if it already exists.
// Channels may exist either as PrivateChannels or inside
// a guild.
func (s *State) ChannelAdd(channel *Channel) error {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// If the channel exists, replace it
	if c, ok := s.channelMap[channel.ID]; ok {
		// Preserve slices the update does not carry (partial channel updates
		// may omit Messages / PermissionOverwrites).
		if channel.Messages == nil {
			channel.Messages = c.Messages
		}
		if channel.PermissionOverwrites == nil {
			channel.PermissionOverwrites = c.PermissionOverwrites
		}
		// Copy in place so existing pointers to the old *Channel stay valid.
		*c = *channel
		return nil
	}
	// New channel: private channels live on the State itself, guild channels
	// on their owning guild (which must already be tracked).
	if channel.IsPrivate {
		s.PrivateChannels = append(s.PrivateChannels, channel)
	} else {
		guild, ok := s.guildMap[channel.GuildID]
		if !ok {
			return errors.New("Guild for channel not found.")
		}
		guild.Channels = append(guild.Channels, channel)
	}
	s.channelMap[channel.ID] = channel
	return nil
}
// ChannelRemove removes a channel from current world state.
func (s *State) ChannelRemove(channel *Channel) error {
	if s == nil {
		return ErrNilState
	}
	// s.Channel takes its own RLock, so it must run before s.Lock below.
	_, err := s.Channel(channel.ID)
	if err != nil {
		return err
	}
	if channel.IsPrivate {
		s.Lock()
		defer s.Unlock()
		// Splice out of the private-channel slice.
		for i, c := range s.PrivateChannels {
			if c.ID == channel.ID {
				s.PrivateChannels = append(s.PrivateChannels[:i], s.PrivateChannels[i+1:]...)
				break
			}
		}
	} else {
		guild, err := s.Guild(channel.GuildID)
		if err != nil {
			return err
		}
		s.Lock()
		defer s.Unlock()
		// Splice out of the owning guild's channel slice.
		for i, c := range guild.Channels {
			if c.ID == channel.ID {
				guild.Channels = append(guild.Channels[:i], guild.Channels[i+1:]...)
				break
			}
		}
	}
	// Always drop the index entry (deferred Unlock still holds the lock here).
	delete(s.channelMap, channel.ID)
	return nil
}
// GuildChannel gets a channel by ID from a guild.
// This method is Deprecated, use Channel(channelID)
// GuildChannel gets a channel by ID from a guild.
// This method is Deprecated, use Channel(channelID)
func (s *State) GuildChannel(guildID, channelID string) (*Channel, error) {
	// guildID is ignored; channels are indexed globally by ID.
	channel, err := s.Channel(channelID)
	return channel, err
}
// PrivateChannel gets a private channel by ID.
// This method is Deprecated, use Channel(channelID)
// PrivateChannel gets a private channel by ID.
// This method is Deprecated, use Channel(channelID)
func (s *State) PrivateChannel(channelID string) (*Channel, error) {
	// Private channels share the global channel index.
	channel, err := s.Channel(channelID)
	return channel, err
}
// Channel gets a channel by ID, it will look in all guilds an private channels.
func (s *State) Channel(channelID string) (*Channel, error) {
	if s == nil {
		return nil, ErrNilState
	}
	// Read-only map lookup under the read lock; covers both guild and
	// private channels since both are indexed in channelMap.
	s.RLock()
	defer s.RUnlock()
	if c, ok := s.channelMap[channelID]; ok {
		return c, nil
	}
	return nil, errors.New("Channel not found.")
}
// Emoji returns an emoji for a guild and emoji id.
func (s *State) Emoji(guildID, emojiID string) (*Emoji, error) {
	if s == nil {
		return nil, ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, e := range guild.Emojis {
		if e.ID == emojiID {
			return e, nil
		}
	}
	return nil, errors.New("Emoji not found.")
}
// EmojiAdd adds an emoji to the current world state.
func (s *State) EmojiAdd(guildID string, emoji *Emoji) error {
	if s == nil {
		return ErrNilState
	}
	guild, err := s.Guild(guildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Replace an existing emoji with the same ID, otherwise append.
	for i, e := range guild.Emojis {
		if e.ID == emoji.ID {
			guild.Emojis[i] = emoji
			return nil
		}
	}
	guild.Emojis = append(guild.Emojis, emoji)
	return nil
}
// EmojisAdd adds multiple emojis to the world state.
// EmojisAdd adds multiple emojis to the world state.
// It delegates to EmojiAdd per entry and stops at the first failure.
func (s *State) EmojisAdd(guildID string, emojis []*Emoji) error {
	for i := range emojis {
		err := s.EmojiAdd(guildID, emojis[i])
		if err != nil {
			return err
		}
	}
	return nil
}
// MessageAdd adds a message to the current world state, or updates it if it exists.
// If the channel cannot be found, the message is discarded.
// Messages are kept in state up to s.MaxMessageCount
func (s *State) MessageAdd(message *Message) error {
	if s == nil {
		return ErrNilState
	}
	c, err := s.Channel(message.ChannelID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// If the message exists, merge in the new message contents.
	for _, m := range c.Messages {
		if m.ID == message.ID {
			// Only overwrite fields the update actually carries.
			if message.Content != "" {
				m.Content = message.Content
			}
			if message.EditedTimestamp != "" {
				m.EditedTimestamp = message.EditedTimestamp
			}
			if message.Mentions != nil {
				m.Mentions = message.Mentions
			}
			if message.Embeds != nil {
				m.Embeds = message.Embeds
			}
			if message.Attachments != nil {
				m.Attachments = message.Attachments
			}
			if message.Timestamp != "" {
				m.Timestamp = message.Timestamp
			}
			if message.Author != nil {
				m.Author = message.Author
			}
			return nil
		}
	}
	c.Messages = append(c.Messages, message)
	// Trim to the newest MaxMessageCount messages.
	// NOTE(review): if MaxMessageCount is 0 this trims the list to empty on
	// every add; callers (onInterface) guard with MaxMessageCount != 0.
	if len(c.Messages) > s.MaxMessageCount {
		c.Messages = c.Messages[len(c.Messages)-s.MaxMessageCount:]
	}
	return nil
}
// MessageRemove removes a message from the world state.
func (s *State) MessageRemove(message *Message) error {
	if s == nil {
		return ErrNilState
	}
	c, err := s.Channel(message.ChannelID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Splice the message out of the channel's slice by ID.
	for i, m := range c.Messages {
		if m.ID == message.ID {
			c.Messages = append(c.Messages[:i], c.Messages[i+1:]...)
			return nil
		}
	}
	return errors.New("Message not found.")
}
// voiceStateUpdate applies a VoiceStateUpdate event to the owning guild:
// an empty ChannelID means the user left voice (state removed); otherwise
// the user's voice state is replaced or appended.
func (s *State) voiceStateUpdate(update *VoiceStateUpdate) error {
	guild, err := s.Guild(update.GuildID)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Handle Leaving Channel
	if update.ChannelID == "" {
		for i, state := range guild.VoiceStates {
			if state.UserID == update.UserID {
				guild.VoiceStates = append(guild.VoiceStates[:i], guild.VoiceStates[i+1:]...)
				return nil
			}
		}
	} else {
		// Joined or moved: replace the existing entry, or append a new one.
		for i, state := range guild.VoiceStates {
			if state.UserID == update.UserID {
				guild.VoiceStates[i] = update.VoiceState
				return nil
			}
		}
		guild.VoiceStates = append(guild.VoiceStates, update.VoiceState)
	}
	return nil
}
// Message gets a message by channel and message ID.
func (s *State) Message(channelID, messageID string) (*Message, error) {
	if s == nil {
		return nil, ErrNilState
	}
	c, err := s.Channel(channelID)
	if err != nil {
		return nil, err
	}
	// Read-only linear scan under the read lock.
	s.RLock()
	defer s.RUnlock()
	for _, m := range c.Messages {
		if m.ID == messageID {
			return m, nil
		}
	}
	return nil, errors.New("Message not found.")
}
// OnReady takes a Ready event and updates all internal state.
// onReady takes a Ready event and updates all internal state.
func (s *State) onReady(se *Session, r *Ready) (err error) {
	if s == nil {
		return ErrNilState
	}
	s.Lock()
	defer s.Unlock()
	// We must track at least the current user for Voice, even
	// if state is disabled, store the bare essentials.
	if !se.StateEnabled {
		ready := Ready{
			Version:           r.Version,
			SessionID:         r.SessionID,
			HeartbeatInterval: r.HeartbeatInterval,
			User:              r.User,
		}
		s.Ready = ready
		return nil
	}
	// Full tracking: adopt the READY payload wholesale, then rebuild the
	// ID indexes for guilds and all (guild + private) channels.
	s.Ready = *r
	for _, g := range s.Guilds {
		s.guildMap[g.ID] = g
		for _, c := range g.Channels {
			s.channelMap[c.ID] = c
		}
	}
	for _, c := range s.PrivateChannels {
		s.channelMap[c.ID] = c
	}
	return nil
}
// onInterface handles all events related to states.
// onInterface handles all events related to states.
func (s *State) onInterface(se *Session, i interface{}) (err error) {
	if s == nil {
		return ErrNilState
	}
	// READY is handled even when state tracking is disabled (bare essentials).
	r, ok := i.(*Ready)
	if ok {
		return s.onReady(se, r)
	}
	if !se.StateEnabled {
		return nil
	}
	// Dispatch each tracked event type to the matching state mutator,
	// gated on the corresponding Track* flag.
	switch t := i.(type) {
	case *GuildCreate:
		err = s.GuildAdd(t.Guild)
	case *GuildUpdate:
		err = s.GuildAdd(t.Guild)
	case *GuildDelete:
		err = s.GuildRemove(t.Guild)
	case *GuildMemberAdd:
		if s.TrackMembers {
			err = s.MemberAdd(t.Member)
		}
	case *GuildMemberUpdate:
		if s.TrackMembers {
			err = s.MemberAdd(t.Member)
		}
	case *GuildMemberRemove:
		if s.TrackMembers {
			err = s.MemberRemove(t.Member)
		}
		// NOTE(review): when both flags are set, this second assignment
		// discards any error from MemberRemove above — confirm intended.
		if s.TrackPresences {
			err = s.PresenceRemove(t.GuildID, t.Member.User.ID)
		}
	case *PresenceUpdate:
		if s.TrackPresences {
			err = s.PresenceAdd(t.GuildID, &t.Presence)
		}
	case *GuildRoleCreate:
		if s.TrackRoles {
			err = s.RoleAdd(t.GuildID, t.Role)
		}
	case *GuildRoleUpdate:
		if s.TrackRoles {
			err = s.RoleAdd(t.GuildID, t.Role)
		}
	case *GuildRoleDelete:
		if s.TrackRoles {
			err = s.RoleRemove(t.GuildID, t.RoleID)
		}
	case *GuildEmojisUpdate:
		if s.TrackEmojis {
			err = s.EmojisAdd(t.GuildID, t.Emojis)
		}
	case *ChannelCreate:
		if s.TrackChannels {
			err = s.ChannelAdd(t.Channel)
		}
	case *ChannelUpdate:
		if s.TrackChannels {
			err = s.ChannelAdd(t.Channel)
		}
	case *ChannelDelete:
		if s.TrackChannels {
			err = s.ChannelRemove(t.Channel)
		}
	case *MessageCreate:
		if s.MaxMessageCount != 0 {
			err = s.MessageAdd(t.Message)
		}
	case *MessageUpdate:
		if s.MaxMessageCount != 0 {
			err = s.MessageAdd(t.Message)
		}
	// MessageDelete handling is intentionally disabled in this revision so
	// deleted messages remain available in the state cache.
	// case *MessageDelete:
	// if s.MaxMessageCount != 0 {
	// err = s.MessageRemove(t.Message)
	// }
	case *VoiceStateUpdate:
		if s.TrackVoice {
			err = s.voiceStateUpdate(t)
		}
	}
	return
}
// UserChannelPermissions returns the permission of a user in a channel.
// userID : The ID of the user to calculate permissions for.
// channelID : The ID of the channel to calculate permission for.
func (s *State) UserChannelPermissions(userID, channelID string) (apermissions int, err error) {
	if s == nil {
		return 0, ErrNilState
	}
	channel, err := s.Channel(channelID)
	if err != nil {
		return
	}
	guild, err := s.Guild(channel.GuildID)
	if err != nil {
		return
	}
	// The guild owner implicitly has every permission.
	if userID == guild.OwnerID {
		apermissions = PermissionAll
		return
	}
	member, err := s.Member(guild.ID, userID)
	if err != nil {
		return
	}
	// Base permissions: the @everyone role (its ID equals the guild ID).
	for _, role := range guild.Roles {
		if role.ID == guild.ID {
			apermissions |= role.Permissions
			break
		}
	}
	// OR in the permissions of every role the member holds.
	for _, role := range guild.Roles {
		for _, roleID := range member.Roles {
			if role.ID == roleID {
				apermissions |= role.Permissions
				break
			}
		}
	}
	// Administrator grants everything at the guild level.
	if apermissions&PermissionAdministrator > 0 {
		apermissions |= PermissionAll
	}
	// Role and member overwites overrides the everyone role, so check for everyone role overwrite first
	for _, overwrite := range channel.PermissionOverwrites {
		if overwrite.Type == "role" && overwrite.ID == guild.ID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	// Member overwrites can override role overrides, so do two passes
	// (role overwrites for the member's roles first, then member overwrites).
	for _, overwrite := range channel.PermissionOverwrites {
		for _, roleID := range member.Roles {
			if overwrite.Type == "role" && roleID == overwrite.ID {
				apermissions &= ^overwrite.Deny
				apermissions |= overwrite.Allow
				break
			}
		}
	}
	// Member-specific overwrite wins last.
	for _, overwrite := range channel.PermissionOverwrites {
		if overwrite.Type == "member" && overwrite.ID == userID {
			apermissions &= ^overwrite.Deny
			apermissions |= overwrite.Allow
			break
		}
	}
	// Administrator also grants all channel-scoped permissions regardless
	// of overwrites applied above.
	if apermissions&PermissionAdministrator > 0 {
		apermissions |= PermissionAllChannel
	}
	return
}
|
package client
import (
"crypto/tls"
"net/http"
"golang.org/x/net/http2"
)
// APNS
const (
Development = "https://api.development.push.apple.com"
Production = "https://api.push.apple.com"
)
type Client struct {
HTTPClient *http.Client
Certificate tls.Certificate
}
// NewClient builds an APNs client that authenticates with the supplied TLS
// client certificate. The transport is configured for HTTP/2, which the APNs
// provider API requires. Returns an error if HTTP/2 cannot be configured on
// the transport.
func NewClient(certificate tls.Certificate) (*Client, error) {
	config := &tls.Config{
		Certificates: []tls.Certificate{certificate},
	}
	// NOTE(review): BuildNameToCertificate is deprecated in modern Go; the
	// crypto/tls package now selects the certificate automatically. Kept for
	// compatibility with older Go versions — confirm minimum Go version.
	config.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: config}
	if err := http2.ConfigureTransport(transport); err != nil {
		return nil, err
	}
	client := &Client{
		HTTPClient:  &http.Client{Transport: transport},
		Certificate: certificate,
	}
	return client, nil
}
/*
// Config
config := &tls.Config{
Certificates: []tls.Certificate{t},
}
config.BuildNameToCertificate()
transport := &http.Transport{TLSClientConfig: config}
if err := http2.ConfigureTransport(transport); err != nil {
log.Fatal(err)
}
// Create http client with Transport with Go 1.6 Transport supports HTTP/2
client := &http.Client{Transport: transport}*/
Refactor: remove the commented-out example code from the client package; its logic now lives in NewClient.
package client
import (
"crypto/tls"
"net/http"
"golang.org/x/net/http2"
)
// APNS
const (
Development = "https://api.development.push.apple.com"
Production = "https://api.push.apple.com"
)
type Client struct {
HTTPClient *http.Client
Certificate tls.Certificate
}
// NewClient builds an APNs client that authenticates with the supplied TLS
// client certificate. The transport is configured for HTTP/2, which the APNs
// provider API requires. Returns an error if HTTP/2 cannot be configured on
// the transport.
func NewClient(certificate tls.Certificate) (*Client, error) {
	config := &tls.Config{
		Certificates: []tls.Certificate{certificate},
	}
	// NOTE(review): BuildNameToCertificate is deprecated in modern Go; the
	// crypto/tls package now selects the certificate automatically. Kept for
	// compatibility with older Go versions — confirm minimum Go version.
	config.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: config}
	if err := http2.ConfigureTransport(transport); err != nil {
		return nil, err
	}
	client := &Client{
		HTTPClient:  &http.Client{Transport: transport},
		Certificate: certificate,
	}
	return client, nil
}
|
package rbxmk
import (
"fmt"
"strings"
lua "github.com/anaminus/gopher-lua"
"github.com/anaminus/rbxmk/rtypes"
"github.com/robloxapi/types"
)
// FrameType indicates the kind of frame for a State.
type FrameType uint8
const (
// Frame is a regular function.
FunctionFrame FrameType = iota
// Frame is a method; exclude first argument.
MethodFrame
// Frame is an operator, avoid displaying arguments.
OperatorFrame
)
// State contains references to an environment surrounding a value.
type State struct {
*World
L *lua.LState
// FrameType provides a hint to how errors should be produced.
FrameType FrameType
// cycle is used to mark a table as having been traversed. This is non-nil
// only for types that can contain other types.
cycle map[interface{}]struct{}
}
// CycleGuard begins a guard against reference cycles when reflecting with the
// state. Returns false if a guard was already set up for the state. If true is
// returned, the guard must be cleared via CycleClear. For example:
//
//     if s.CycleGuard() {
//         defer s.CycleClear()
//     }
//
func (s *State) CycleGuard() bool {
	if s.cycle != nil {
		// A guard is already active; the caller must not clear it.
		return false
	}
	s.cycle = make(map[interface{}]struct{}, 4)
	return true
}
// CycleClear clears the cycle guard on the state. Panics if the state has no
// guard.
func (s *State) CycleClear() {
if s.cycle == nil {
panic("state has no cycle guard")
}
s.cycle = nil
}
// CycleMark marks t as visited, and returns whether t was already visited.
// Panics if the state has no guard.
func (s State) CycleMark(t interface{}) bool {
if s.cycle == nil {
panic("attempt to mark reference without cycle guard")
}
_, ok := s.cycle[t]
if !ok {
s.cycle[t] = struct{}{}
}
return ok
}
// Count returns the number of arguments in the stack frame.
func (s State) Count() int {
return s.L.GetTop()
}
// Push reflects v according to its type as registered with s.World, then pushes
// the results to s.L.
func (s State) Push(v types.Value) int {
rfl := s.MustReflector(v.Type())
lvs, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
for _, lv := range lvs {
s.L.Push(lv)
}
return len(lvs)
}
// Pull gets from s.L the values starting from n, and reflects a value from them
// according to type t registered with s.World. Raises an argument error at n
// if the reflector fails.
func (s State) Pull(n int, t string) types.Value {
	rfl := s.MustReflector(t)
	var v types.Value
	var err error
	if rfl.Count < 0 {
		// Variable-count type: consume every remaining value on the stack.
		lvs := make([]lua.LValue, 0, 4)
		for i := n; i <= s.Count(); i++ {
			lvs = append(lvs, s.L.Get(i))
		}
		v, err = rfl.PullFrom(s, lvs...)
	} else if rfl.Count > 1 {
		// Multi-value type: consume exactly rfl.Count values starting at
		// argument n. (Previously the loop bound was `i <= rfl.Count`, which
		// is only correct when n == 1.)
		lvs := make([]lua.LValue, 0, 4)
		for i := n; i < n+rfl.Count; i++ {
			lvs = append(lvs, s.CheckAny(i))
		}
		v, err = rfl.PullFrom(s, lvs...)
	} else {
		// Single-value type.
		v, err = rfl.PullFrom(s, s.CheckAny(n))
	}
	if err != nil {
		s.ArgError(n, err.Error())
		return nil
	}
	return v
}
// PullOpt gets from s.L the value at n, and reflects a value from it according
// to type t registered with s.World. If the value is nil, d is returned
// instead.
func (s State) PullOpt(n int, t string, d types.Value) types.Value {
rfl := s.MustReflector(t)
if rfl.Count < 0 {
panic("PullOpt cannot pull variable types")
} else if rfl.Count > 1 {
panic("PullOpt cannot pull multi-value types")
}
lv := s.L.Get(n)
if lv == lua.LNil {
return d
}
v, err := rfl.PullFrom(s, lv)
if err != nil {
s.ArgError(n, err.Error())
return d
}
return v
}
// listTypes returns each type listed in a natural sentence:
// "", "A", "A or B", "A, B, or C".
func listTypes(types []string) string {
	switch len(types) {
	case 0:
		return ""
	case 1:
		return types[0]
	case 2:
		return types[0] + " or " + types[1]
	}
	// Join every element but the last, then append the last with "or".
	// (Previously the slice stopped at len-2, dropping the second-to-last
	// element: ["a","b","c"] produced "a, or c".)
	return strings.Join(types[:len(types)-1], ", ") + ", or " + types[len(types)-1]
}
// ReflectorError raises an error indicating that a reflector pushed or pulled
// an unexpected type. Under normal circumstances, this error should be
// unreachable.
func (s State) ReflectorError(n int) int {
return s.ArgError(n, "unreachable error: reflector mismatch")
}
// PullAnyOf gets from s.L the values starting from n, and reflects a value from
// them according to any of the types in t registered with s.World. Returns the
// first successful reflection among the types in t. If no types succeeded, then
// a type error is thrown.
func (s State) PullAnyOf(n int, t ...string) types.Value {
if n > s.Count() {
// Every type must reflect at least one value, so no values is an
// immediate error.
s.ArgError(n, "value expected")
return nil
}
v := s.PullAnyOfOpt(n, t...)
if v == nil {
s.TypeError(n, listTypes(t), "")
}
return v
}
// PullAnyOfOpt gets from s.L the values starting from n, and reflects a value
// from them according to any of the types in t registered with s.World.
// Returns the first successful reflection among the types in t. If no types
// succeeded, then nil is returned.
func (s State) PullAnyOfOpt(n int, t ...string) types.Value {
	if n > s.Count() {
		// No values available to reflect from.
		return nil
	}
	// Find the maximum count among the given types. 0 is treated the same as 1.
	// <0 indicates an arbitrary number of values.
	max := 1
	ts := make([]Reflector, 0, 4)
	for _, t := range t {
		rfl := s.MustReflector(t)
		ts = append(ts, rfl)
		if rfl.Count > 1 {
			max = rfl.Count
		} else if rfl.Count < 0 {
			max = -1
			break
		}
	}
	switch max {
	case 1: // All types have 1 value.
		lv := s.CheckAny(n)
		for _, rfl := range ts {
			if v, err := rfl.PullFrom(s, lv); err == nil {
				return v
			}
		}
	case -1: // At least one type has arbitrary values.
		lvs := make([]lua.LValue, 0, 4)
		for _, rfl := range ts {
			lvs = lvs[:0]
			var v types.Value
			var err error
			if rfl.Count < 0 {
				// Append all remaining values.
				for i := n; i <= s.Count(); i++ {
					lvs = append(lvs, s.L.Get(i))
				}
				v, err = rfl.PullFrom(s, lvs...)
			} else if rfl.Count > 1 {
				// Append exactly rfl.Count values starting at argument n.
				for i := n; i < n+rfl.Count; i++ {
					lvs = append(lvs, s.CheckAny(i))
				}
				v, err = rfl.PullFrom(s, lvs...)
			} else {
				// Append single value.
				v, err = rfl.PullFrom(s, s.CheckAny(n))
			}
			if err != nil {
				continue
			}
			return v
		}
	default: // Constant maximum.
		lvs := make([]lua.LValue, 0, 4)
		for _, rfl := range ts {
			lvs = lvs[:0]
			// A count of 0 is treated the same as 1.
			c := rfl.Count
			if c == 0 {
				c = 1
			}
			// Pull exactly c values starting at argument n. (Previously the
			// loop started at rfl.Count rather than n, so it read from the
			// wrong stack position and appended at most one value.)
			for i := n; i < n+c; i++ {
				lvs = append(lvs, s.CheckAny(i))
			}
			v, err := rfl.PullFrom(s, lvs...)
			if err != nil {
				continue
			}
			return v
		}
	}
	return nil
}
// PushToTable reflects v according to its type as registered with s.World, then
// sets the result to table[field]. The type must be single-value. Does nothing
// if v is nil.
func (s State) PushToTable(table *lua.LTable, field lua.LValue, v types.Value) {
if v == nil {
return
}
rfl := s.MustReflector(v.Type())
if rfl.Count < 0 {
panic("PushToTable cannot push variable types")
} else if rfl.Count > 1 {
panic("PushToTable cannot push multi-value types")
}
lvs, err := rfl.PushTo(s, v)
if err != nil {
s.RaiseError("field %s: %s", field, err.Error())
return
}
table.RawSet(field, lvs[0])
}
// PullFromTable gets a value from table[field], and reflects a value from it to
// type t registered with s.World. Raises an error naming the field if the
// reflector fails. Panics if t reflects from multiple or variable values.
func (s State) PullFromTable(table *lua.LTable, field lua.LValue, t string) types.Value {
	rfl := s.MustReflector(t)
	// Panic messages previously said "push"; this function pulls.
	if rfl.Count < 0 {
		panic("PullFromTable cannot pull variable types")
	} else if rfl.Count > 1 {
		panic("PullFromTable cannot pull multi-value types")
	}
	v, err := rfl.PullFrom(s, table.RawGet(field))
	if err != nil {
		s.RaiseError("field %s: %s", field, err.Error())
		return nil
	}
	return v
}
// PullFromTableOpt gets a value from table[field], and reflects a value from it
// to type t registered with s.World. If the value is nil, d is returned
// instead.
func (s State) PullFromTableOpt(table *lua.LTable, field lua.LValue, t string, d types.Value) types.Value {
rfl := s.MustReflector(t)
if rfl.Count < 0 {
panic("PullFromTableOpt cannot pull variable types")
} else if rfl.Count > 1 {
panic("PullFromTableOpt cannot pull multi-value types")
}
lv := table.RawGet(field)
if lv == lua.LNil {
return d
}
v, err := rfl.PullFrom(s, lv)
if err != nil {
s.RaiseError("field %s: %s", field, err.Error())
return d
}
return v
}
// PushArrayOf pushes an rtypes.Array, ensuring that each element is reflected
// according to t.
func (s State) PushArrayOf(t string, v rtypes.Array) int {
if s.CycleGuard() {
defer s.CycleClear()
}
if s.CycleMark(&v) {
return s.RaiseError("arrays cannot be cyclic")
}
rfl := s.MustReflector(t)
table := s.L.CreateTable(len(v), 0)
for i, v := range v {
lv, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
table.RawSetInt(i+1, lv[0])
}
s.L.Push(table)
return 1
}
// PullArrayOf pulls an rtypes.Array from n, ensuring that each element is
// reflected according to t.
func (s State) PullArrayOf(n int, t string) rtypes.Array {
rfl := s.MustReflector(t)
lv := s.CheckAny(n)
if s.CycleGuard() {
defer s.CycleClear()
}
table, ok := lv.(*lua.LTable)
if !ok {
s.ArgError(n, TypeError{Want: "table", Got: lv.Type().String()}.Error())
return nil
}
if s.CycleMark(table) {
s.ArgError(n, "tables cannot be cyclic")
return nil
}
l := table.Len()
array := make(rtypes.Array, l)
for i := 1; i <= l; i++ {
var err error
if array[i-1], err = rfl.PullFrom(s, table.RawGetInt(i)); err != nil {
s.ArgError(n, err.Error())
return nil
}
}
return array
}
func (s State) PushDictionaryOf(n int, t string, v rtypes.Dictionary) int {
if s.CycleGuard() {
defer s.CycleClear()
}
if s.CycleMark(&v) {
return s.RaiseError("dictionaries cannot be cyclic")
}
rfl := s.MustReflector(t)
table := s.L.CreateTable(0, len(v))
for k, v := range v {
lv, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
table.RawSetString(k, lv[0])
}
s.L.Push(table)
return 1
}
func (s State) PullDictionaryOf(n int, t string) rtypes.Dictionary {
rfl := s.MustReflector(t)
lv := s.CheckAny(n)
if s.CycleGuard() {
defer s.CycleClear()
}
table, ok := lv.(*lua.LTable)
if !ok {
s.ArgError(n, TypeError{Want: "table", Got: lv.Type().String()}.Error())
return nil
}
if s.CycleMark(table) {
s.ArgError(n, "tables cannot be cyclic")
return nil
}
dict := make(rtypes.Dictionary)
err := table.ForEach(func(k, lv lua.LValue) error {
v, err := rfl.PullFrom(s, lv)
if err != nil {
return err
}
dict[k.String()] = v
return nil
})
if err != nil {
s.ArgError(n, err.Error())
return nil
}
return dict
}
// RaiseError is a shortcut for LState.RaiseError that returns 0.
func (s State) RaiseError(format string, args ...interface{}) int {
s.L.RaiseError(format, args...)
return 0
}
// ArgError raises an argument error depending on the state's frame type.
// msg is treated as a fmt format string only when v is non-empty. For method
// frames the reported index is shifted by one, since the first argument is
// the receiver rather than a user-visible argument.
func (s State) ArgError(n int, msg string, v ...interface{}) int {
	if len(v) > 0 {
		msg = fmt.Sprintf(msg, v...)
	}
	switch s.FrameType {
	case MethodFrame:
		if n <= 1 {
			s.RaiseError("bad method receiver: %s", msg)
		} else {
			s.L.ArgError(n-1, msg)
		}
	case OperatorFrame:
		// Pass msg as a value, not as the format string, so any '%' in the
		// already-formatted message is not reinterpreted by RaiseError.
		s.RaiseError("%s", msg)
	default:
		s.L.ArgError(n, msg)
	}
	return 0
}
// TypeError raises an argument type error depending on the state's frame type.
func (s State) TypeError(n int, want, got string) int {
err := TypeError{Want: want, Got: got}
switch s.FrameType {
case MethodFrame:
if n <= 1 {
s.RaiseError("bad method receiver: %s", err)
} else {
s.L.ArgError(n-1, err.Error())
}
case OperatorFrame:
s.RaiseError("%s", err.Error())
default:
s.L.ArgError(n, err.Error())
}
return 0
}
// CheckAny returns the nth argument, which can be any type as long as the
// argument exists.
func (s State) CheckAny(n int) lua.LValue {
if n > s.Count() {
s.ArgError(n, "value expected")
return nil
}
return s.L.Get(n)
}
// CheckBool returns the nth argument, expecting a boolean.
func (s State) CheckBool(n int) bool {
v := s.L.Get(n)
if lv, ok := v.(lua.LBool); ok {
return bool(lv)
}
s.TypeError(n, lua.LTBool.String(), v.Type().String())
return false
}
// CheckInt returns the nth argument as an int, expecting a number.
func (s State) CheckInt(n int) int {
v := s.L.Get(n)
if lv, ok := v.(lua.LNumber); ok {
return int(lv)
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// CheckInt64 returns the nth argument as an int64, expecting a number.
func (s State) CheckInt64(n int) int64 {
v := s.L.Get(n)
if lv, ok := v.(lua.LNumber); ok {
return int64(lv)
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// CheckNumber returns the nth argument, expecting a number.
func (s State) CheckNumber(n int) lua.LNumber {
v := s.L.Get(n)
if lv, ok := v.(lua.LNumber); ok {
return lv
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// CheckString returns the nth argument, expecting a string. Unlike
// LState.CheckString, it does not try to convert non-string values into a
// string.
func (s State) CheckString(n int) string {
v := s.L.Get(n)
if lv, ok := v.(lua.LString); ok {
return string(lv)
}
s.TypeError(n, lua.LTString.String(), v.Type().String())
return ""
}
// CheckTable returns the nth argument, expecting a table.
func (s State) CheckTable(n int) *lua.LTable {
v := s.L.Get(n)
if lv, ok := v.(*lua.LTable); ok {
return lv
}
s.TypeError(n, lua.LTTable.String(), v.Type().String())
return nil
}
// CheckFunction returns the nth argument, expecting a function.
func (s State) CheckFunction(n int) *lua.LFunction {
v := s.L.Get(n)
if lv, ok := v.(*lua.LFunction); ok {
return lv
}
s.TypeError(n, lua.LTFunction.String(), v.Type().String())
return nil
}
// CheckUserData returns the nth argument, expecting a userdata.
func (s State) CheckUserData(n int) *lua.LUserData {
v := s.L.Get(n)
if lv, ok := v.(*lua.LUserData); ok {
return lv
}
s.TypeError(n, lua.LTUserData.String(), v.Type().String())
return nil
}
// CheckThread returns the nth argument, expecting a thread.
func (s State) CheckThread(n int) *lua.LState {
v := s.L.Get(n)
if lv, ok := v.(*lua.LState); ok {
return lv
}
s.TypeError(n, lua.LTThread.String(), v.Type().String())
return nil
}
// OptBool returns the nth argument as a bool, or d if the argument is nil.
// Raises a type error if the argument is neither nil nor a boolean.
func (s State) OptBool(n int, d bool) bool {
	switch lv := s.L.Get(n).(type) {
	case *lua.LNilType:
		return d
	case lua.LBool:
		return bool(lv)
	default:
		s.TypeError(n, lua.LTBool.String(), lv.Type().String())
		return false
	}
}
// OptInt returns the nth argument as an int, or d if the argument is nil.
func (s State) OptInt(n int, d int) int {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(lua.LNumber); ok {
return int(lv)
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// OptInt64 returns the nth argument as an int64, or d if the argument is nil.
func (s State) OptInt64(n int, d int64) int64 {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(lua.LNumber); ok {
return int64(lv)
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// OptNumber returns the nth argument as a number, or d if the argument is nil.
func (s State) OptNumber(n int, d lua.LNumber) lua.LNumber {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(lua.LNumber); ok {
return lv
}
s.TypeError(n, lua.LTNumber.String(), v.Type().String())
return 0
}
// OptString returns the nth argument as a string, or d if the argument is nil.
func (s State) OptString(n int, d string) string {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(lua.LString); ok {
return string(lv)
}
s.TypeError(n, lua.LTString.String(), v.Type().String())
return ""
}
// OptTable returns the nth argument as a table, or d if the argument is nil.
func (s State) OptTable(n int, d *lua.LTable) *lua.LTable {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(*lua.LTable); ok {
return lv
}
s.TypeError(n, lua.LTTable.String(), v.Type().String())
return nil
}
// OptFunction returns the nth argument as a function, or d if the argument is
// nil.
func (s State) OptFunction(n int, d *lua.LFunction) *lua.LFunction {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(*lua.LFunction); ok {
return lv
}
s.TypeError(n, lua.LTFunction.String(), v.Type().String())
return nil
}
// OptUserData returns the nth argument as a userdata, or d if the argument is
// nil.
func (s State) OptUserData(n int, d *lua.LUserData) *lua.LUserData {
v := s.L.Get(n)
if v == lua.LNil {
return d
}
if lv, ok := v.(*lua.LUserData); ok {
return lv
}
s.TypeError(n, lua.LTUserData.String(), v.Type().String())
return nil
}
Change: PullAnyOf now displays the type of the value actually received when no candidate type matches, instead of an empty "got" field.
package rbxmk
import (
"fmt"
"strings"
lua "github.com/anaminus/gopher-lua"
"github.com/anaminus/rbxmk/rtypes"
"github.com/robloxapi/types"
)
// FrameType indicates the kind of frame for a State.
type FrameType uint8
const (
// Frame is a regular function.
FunctionFrame FrameType = iota
// Frame is a method; exclude first argument.
MethodFrame
// Frame is an operator, avoid displaying arguments.
OperatorFrame
)
// State contains references to an environment surrounding a value.
type State struct {
*World
L *lua.LState
// FrameType provides a hint to how errors should be produced.
FrameType FrameType
// cycle is used to mark a table as having been traversed. This is non-nil
// only for types that can contain other types.
cycle map[interface{}]struct{}
}
// CycleGuard begins a guard against reference cycles when reflecting with the
// state. Returns false if a guard was already set up for the state. If true is
// returned, the guard must be cleared via CycleClear. For example:
//
// if s.CycleGuard() {
// defer s.CycleClear()
// }
//
func (s *State) CycleGuard() bool {
if s.cycle == nil {
s.cycle = make(map[interface{}]struct{}, 4)
return true
}
return false
}
// CycleClear clears the cycle guard on the state. Panics if the state has no
// guard.
func (s *State) CycleClear() {
if s.cycle == nil {
panic("state has no cycle guard")
}
s.cycle = nil
}
// CycleMark marks t as visited, and returns whether t was already visited.
// Panics if the state has no guard.
func (s State) CycleMark(t interface{}) bool {
if s.cycle == nil {
panic("attempt to mark reference without cycle guard")
}
_, ok := s.cycle[t]
if !ok {
s.cycle[t] = struct{}{}
}
return ok
}
// Count returns the number of arguments in the stack frame.
func (s State) Count() int {
return s.L.GetTop()
}
// Push reflects v according to its type as registered with s.World, then pushes
// the results to s.L.
func (s State) Push(v types.Value) int {
rfl := s.MustReflector(v.Type())
lvs, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
for _, lv := range lvs {
s.L.Push(lv)
}
return len(lvs)
}
// Pull gets from s.L the values starting from n, and reflects a value from them
// according to type t registered with s.World. Raises an argument error at n
// if the reflector fails.
func (s State) Pull(n int, t string) types.Value {
	rfl := s.MustReflector(t)
	var v types.Value
	var err error
	if rfl.Count < 0 {
		// Variable-count type: consume every remaining value on the stack.
		lvs := make([]lua.LValue, 0, 4)
		for i := n; i <= s.Count(); i++ {
			lvs = append(lvs, s.L.Get(i))
		}
		v, err = rfl.PullFrom(s, lvs...)
	} else if rfl.Count > 1 {
		// Multi-value type: consume exactly rfl.Count values starting at
		// argument n. (Previously the loop bound was `i <= rfl.Count`, which
		// is only correct when n == 1.)
		lvs := make([]lua.LValue, 0, 4)
		for i := n; i < n+rfl.Count; i++ {
			lvs = append(lvs, s.CheckAny(i))
		}
		v, err = rfl.PullFrom(s, lvs...)
	} else {
		// Single-value type.
		v, err = rfl.PullFrom(s, s.CheckAny(n))
	}
	if err != nil {
		s.ArgError(n, err.Error())
		return nil
	}
	return v
}
// PullOpt gets from s.L the value at n, and reflects a value from it according
// to type t registered with s.World. If the value is nil, d is returned
// instead.
func (s State) PullOpt(n int, t string, d types.Value) types.Value {
rfl := s.MustReflector(t)
if rfl.Count < 0 {
panic("PullOpt cannot pull variable types")
} else if rfl.Count > 1 {
panic("PullOpt cannot pull multi-value types")
}
lv := s.L.Get(n)
if lv == lua.LNil {
return d
}
v, err := rfl.PullFrom(s, lv)
if err != nil {
s.ArgError(n, err.Error())
return d
}
return v
}
// listTypes returns each type listed in a natural sentence:
// "", "A", "A or B", "A, B, or C".
func listTypes(types []string) string {
	switch len(types) {
	case 0:
		return ""
	case 1:
		return types[0]
	case 2:
		return types[0] + " or " + types[1]
	}
	// Join every element but the last, then append the last with "or".
	// (Previously the slice stopped at len-2, dropping the second-to-last
	// element: ["a","b","c"] produced "a, or c".)
	return strings.Join(types[:len(types)-1], ", ") + ", or " + types[len(types)-1]
}
// ReflectorError raises an error indicating that a reflector pushed or pulled
// an unexpected type. Under normal circumstances, this error should be
// unreachable.
func (s State) ReflectorError(n int) int {
return s.ArgError(n, "unreachable error: reflector mismatch")
}
// PullAnyOf gets from s.L the values starting from n, and reflects a value from
// them according to any of the types in t registered with s.World. Returns the
// first successful reflection among the types in t. If no types succeeded, then
// a type error is thrown that reports the type of the value actually received.
func (s State) PullAnyOf(n int, t ...string) types.Value {
	if n > s.Count() {
		// Every type must reflect at least one value, so no values is an
		// immediate error.
		s.ArgError(n, "value expected")
		return nil
	}
	v := s.PullAnyOfOpt(n, t...)
	if v == nil {
		// Pull the argument as a Variant solely to name its type in the
		// error message. NOTE(review): assumes a "Variant" reflector is
		// registered with the World and accepts any single value — verify
		// against World setup.
		s.TypeError(n, listTypes(t), s.Pull(n, "Variant").Type())
	}
	return v
}
// PullAnyOfOpt gets from s.L the values starting from n, and reflects a value
// from them according to any of the types in t registered with s.World.
// Returns the first successful reflection among the types in t. If no types
// succeeded, then nil is returned.
func (s State) PullAnyOfOpt(n int, t ...string) types.Value {
	if n > s.Count() {
		// No values available to reflect from.
		return nil
	}
	// Find the maximum count among the given types. 0 is treated the same as 1.
	// <0 indicates an arbitrary number of values.
	max := 1
	ts := make([]Reflector, 0, 4)
	for _, t := range t {
		rfl := s.MustReflector(t)
		ts = append(ts, rfl)
		if rfl.Count > 1 {
			max = rfl.Count
		} else if rfl.Count < 0 {
			max = -1
			break
		}
	}
	switch max {
	case 1: // All types have 1 value.
		lv := s.CheckAny(n)
		for _, rfl := range ts {
			if v, err := rfl.PullFrom(s, lv); err == nil {
				return v
			}
		}
	case -1: // At least one type has arbitrary values.
		lvs := make([]lua.LValue, 0, 4)
		for _, rfl := range ts {
			lvs = lvs[:0]
			var v types.Value
			var err error
			if rfl.Count < 0 {
				// Append all remaining values.
				for i := n; i <= s.Count(); i++ {
					lvs = append(lvs, s.L.Get(i))
				}
				v, err = rfl.PullFrom(s, lvs...)
			} else if rfl.Count > 1 {
				// Append exactly rfl.Count values starting at argument n.
				for i := n; i < n+rfl.Count; i++ {
					lvs = append(lvs, s.CheckAny(i))
				}
				v, err = rfl.PullFrom(s, lvs...)
			} else {
				// Append single value.
				v, err = rfl.PullFrom(s, s.CheckAny(n))
			}
			if err != nil {
				continue
			}
			return v
		}
	default: // Constant maximum.
		lvs := make([]lua.LValue, 0, 4)
		for _, rfl := range ts {
			lvs = lvs[:0]
			// A count of 0 is treated the same as 1.
			c := rfl.Count
			if c == 0 {
				c = 1
			}
			// Pull exactly c values starting at argument n. (Previously the
			// loop started at rfl.Count rather than n, so it read from the
			// wrong stack position and appended at most one value.)
			for i := n; i < n+c; i++ {
				lvs = append(lvs, s.CheckAny(i))
			}
			v, err := rfl.PullFrom(s, lvs...)
			if err != nil {
				continue
			}
			return v
		}
	}
	return nil
}
// PushToTable reflects v according to its type as registered with s.World, then
// sets the result to table[field]. The type must be single-value. Does nothing
// if v is nil.
func (s State) PushToTable(table *lua.LTable, field lua.LValue, v types.Value) {
if v == nil {
return
}
rfl := s.MustReflector(v.Type())
if rfl.Count < 0 {
panic("PushToTable cannot push variable types")
} else if rfl.Count > 1 {
panic("PushToTable cannot push multi-value types")
}
lvs, err := rfl.PushTo(s, v)
if err != nil {
s.RaiseError("field %s: %s", field, err.Error())
return
}
table.RawSet(field, lvs[0])
}
// PullFromTable gets a value from table[field], and reflects a value from it to
// type t registered with s.World. Raises an error naming the field if the
// reflector fails. Panics if t reflects from multiple or variable values.
func (s State) PullFromTable(table *lua.LTable, field lua.LValue, t string) types.Value {
	rfl := s.MustReflector(t)
	// Panic messages previously said "push"; this function pulls.
	if rfl.Count < 0 {
		panic("PullFromTable cannot pull variable types")
	} else if rfl.Count > 1 {
		panic("PullFromTable cannot pull multi-value types")
	}
	v, err := rfl.PullFrom(s, table.RawGet(field))
	if err != nil {
		s.RaiseError("field %s: %s", field, err.Error())
		return nil
	}
	return v
}
// PullFromTableOpt gets a value from table[field], and reflects a value from it
// to type t registered with s.World. If the value is nil, d is returned
// instead.
func (s State) PullFromTableOpt(table *lua.LTable, field lua.LValue, t string, d types.Value) types.Value {
rfl := s.MustReflector(t)
if rfl.Count < 0 {
panic("PullFromTableOpt cannot pull variable types")
} else if rfl.Count > 1 {
panic("PullFromTableOpt cannot pull multi-value types")
}
lv := table.RawGet(field)
if lv == lua.LNil {
return d
}
v, err := rfl.PullFrom(s, lv)
if err != nil {
s.RaiseError("field %s: %s", field, err.Error())
return d
}
return v
}
// PushArrayOf pushes an rtypes.Array, ensuring that each element is reflected
// according to t.
func (s State) PushArrayOf(t string, v rtypes.Array) int {
if s.CycleGuard() {
defer s.CycleClear()
}
if s.CycleMark(&v) {
return s.RaiseError("arrays cannot be cyclic")
}
rfl := s.MustReflector(t)
table := s.L.CreateTable(len(v), 0)
for i, v := range v {
lv, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
table.RawSetInt(i+1, lv[0])
}
s.L.Push(table)
return 1
}
// PullArrayOf pulls an rtypes.Array from n, ensuring that each element is
// reflected according to t.
func (s State) PullArrayOf(n int, t string) rtypes.Array {
rfl := s.MustReflector(t)
lv := s.CheckAny(n)
if s.CycleGuard() {
defer s.CycleClear()
}
table, ok := lv.(*lua.LTable)
if !ok {
s.ArgError(n, TypeError{Want: "table", Got: lv.Type().String()}.Error())
return nil
}
if s.CycleMark(table) {
s.ArgError(n, "tables cannot be cyclic")
return nil
}
l := table.Len()
array := make(rtypes.Array, l)
for i := 1; i <= l; i++ {
var err error
if array[i-1], err = rfl.PullFrom(s, table.RawGetInt(i)); err != nil {
s.ArgError(n, err.Error())
return nil
}
}
return array
}
func (s State) PushDictionaryOf(n int, t string, v rtypes.Dictionary) int {
if s.CycleGuard() {
defer s.CycleClear()
}
if s.CycleMark(&v) {
return s.RaiseError("dictionaries cannot be cyclic")
}
rfl := s.MustReflector(t)
table := s.L.CreateTable(0, len(v))
for k, v := range v {
lv, err := rfl.PushTo(s, v)
if err != nil {
return s.RaiseError("%s", err)
}
table.RawSetString(k, lv[0])
}
s.L.Push(table)
return 1
}
func (s State) PullDictionaryOf(n int, t string) rtypes.Dictionary {
rfl := s.MustReflector(t)
lv := s.CheckAny(n)
if s.CycleGuard() {
defer s.CycleClear()
}
table, ok := lv.(*lua.LTable)
if !ok {
s.ArgError(n, TypeError{Want: "table", Got: lv.Type().String()}.Error())
return nil
}
if s.CycleMark(table) {
s.ArgError(n, "tables cannot be cyclic")
return nil
}
dict := make(rtypes.Dictionary)
err := table.ForEach(func(k, lv lua.LValue) error {
v, err := rfl.PullFrom(s, lv)
if err != nil {
return err
}
dict[k.String()] = v
return nil
})
if err != nil {
s.ArgError(n, err.Error())
return nil
}
return dict
}
// RaiseError is a shortcut for LState.RaiseError that returns 0.
func (s State) RaiseError(format string, args ...interface{}) int {
s.L.RaiseError(format, args...)
return 0
}
// ArgError raises an argument error depending on the state's frame type.
// msg is treated as a fmt format string only when v is non-empty. For method
// frames the reported index is shifted by one, since the first argument is
// the receiver rather than a user-visible argument.
func (s State) ArgError(n int, msg string, v ...interface{}) int {
	if len(v) > 0 {
		msg = fmt.Sprintf(msg, v...)
	}
	switch s.FrameType {
	case MethodFrame:
		if n <= 1 {
			s.RaiseError("bad method receiver: %s", msg)
		} else {
			s.L.ArgError(n-1, msg)
		}
	case OperatorFrame:
		// Pass msg as a value, not as the format string, so any '%' in the
		// already-formatted message is not reinterpreted by RaiseError.
		s.RaiseError("%s", msg)
	default:
		s.L.ArgError(n, msg)
	}
	return 0
}
// TypeError raises an argument type error depending on the state's frame type.
func (s State) TypeError(n int, want, got string) int {
err := TypeError{Want: want, Got: got}
switch s.FrameType {
case MethodFrame:
if n <= 1 {
s.RaiseError("bad method receiver: %s", err)
} else {
s.L.ArgError(n-1, err.Error())
}
case OperatorFrame:
s.RaiseError("%s", err.Error())
default:
s.L.ArgError(n, err.Error())
}
return 0
}
// CheckAny returns the nth argument, which can be any type as long as the
// argument exists.
func (s State) CheckAny(n int) lua.LValue {
if n > s.Count() {
s.ArgError(n, "value expected")
return nil
}
return s.L.Get(n)
}
// CheckBool returns the nth argument, expecting a boolean.
func (s State) CheckBool(n int) bool {
v := s.L.Get(n)
if lv, ok := v.(lua.LBool); ok {
return bool(lv)
}
s.TypeError(n, lua.LTBool.String(), v.Type().String())
return false
}
// CheckInt returns the nth argument as an int, expecting a number.
func (s State) CheckInt(n int) int {
	v := s.L.Get(n)
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return int(lv)
}
// CheckInt64 returns the nth argument as an int64, expecting a number.
func (s State) CheckInt64(n int) int64 {
	v := s.L.Get(n)
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return int64(lv)
}
// CheckNumber returns the nth argument, expecting a number.
func (s State) CheckNumber(n int) lua.LNumber {
	v := s.L.Get(n)
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return lv
}
// CheckString returns the nth argument, expecting a string. Unlike
// LState.CheckString, it does not try to convert non-string values into a
// string.
func (s State) CheckString(n int) string {
	v := s.L.Get(n)
	lv, ok := v.(lua.LString)
	if !ok {
		s.TypeError(n, lua.LTString.String(), v.Type().String())
		return ""
	}
	return string(lv)
}
// CheckTable returns the nth argument, expecting a table.
func (s State) CheckTable(n int) *lua.LTable {
	v := s.L.Get(n)
	lv, ok := v.(*lua.LTable)
	if !ok {
		s.TypeError(n, lua.LTTable.String(), v.Type().String())
		return nil
	}
	return lv
}
// CheckFunction returns the nth argument, expecting a function.
func (s State) CheckFunction(n int) *lua.LFunction {
	v := s.L.Get(n)
	lv, ok := v.(*lua.LFunction)
	if !ok {
		s.TypeError(n, lua.LTFunction.String(), v.Type().String())
		return nil
	}
	return lv
}
// CheckUserData returns the nth argument, expecting a userdata.
func (s State) CheckUserData(n int) *lua.LUserData {
	v := s.L.Get(n)
	lv, ok := v.(*lua.LUserData)
	if !ok {
		s.TypeError(n, lua.LTUserData.String(), v.Type().String())
		return nil
	}
	return lv
}
// CheckThread returns the nth argument, expecting a thread.
func (s State) CheckThread(n int) *lua.LState {
	v := s.L.Get(n)
	lv, ok := v.(*lua.LState)
	if !ok {
		s.TypeError(n, lua.LTThread.String(), v.Type().String())
		return nil
	}
	return lv
}
// OptBool returns the nth argument as a bool, or d if the argument is nil.
func (s State) OptBool(n int, d bool) bool {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(lua.LBool)
	if !ok {
		s.TypeError(n, lua.LTBool.String(), v.Type().String())
		return false
	}
	return bool(lv)
}
// OptInt returns the nth argument as an int, or d if the argument is nil.
func (s State) OptInt(n int, d int) int {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return int(lv)
}
// OptInt64 returns the nth argument as an int64, or d if the argument is nil.
func (s State) OptInt64(n int, d int64) int64 {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return int64(lv)
}
// OptNumber returns the nth argument as a number, or d if the argument is nil.
func (s State) OptNumber(n int, d lua.LNumber) lua.LNumber {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(lua.LNumber)
	if !ok {
		s.TypeError(n, lua.LTNumber.String(), v.Type().String())
		return 0
	}
	return lv
}
// OptString returns the nth argument as a string, or d if the argument is nil.
func (s State) OptString(n int, d string) string {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(lua.LString)
	if !ok {
		s.TypeError(n, lua.LTString.String(), v.Type().String())
		return ""
	}
	return string(lv)
}
// OptTable returns the nth argument as a table, or d if the argument is nil.
func (s State) OptTable(n int, d *lua.LTable) *lua.LTable {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(*lua.LTable)
	if !ok {
		s.TypeError(n, lua.LTTable.String(), v.Type().String())
		return nil
	}
	return lv
}
// OptFunction returns the nth argument as a function, or d if the argument is
// nil.
func (s State) OptFunction(n int, d *lua.LFunction) *lua.LFunction {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(*lua.LFunction)
	if !ok {
		s.TypeError(n, lua.LTFunction.String(), v.Type().String())
		return nil
	}
	return lv
}
// OptUserData returns the nth argument as a userdata, or d if the argument is
// nil.
func (s State) OptUserData(n int, d *lua.LUserData) *lua.LUserData {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	lv, ok := v.(*lua.LUserData)
	if !ok {
		s.TypeError(n, lua.LTUserData.String(), v.Type().String())
		return nil
	}
	return lv
}
|
package lm2
import "sync/atomic"
// Stats holds collection statistics.
// All counters are updated with sync/atomic, so they are safe for
// concurrent increments; read them via clone().
type Stats struct {
	RecordsWritten uint64
	RecordsRead    uint64
	CacheHits      uint64
	CacheMisses    uint64
}
// incRecordsWritten atomically adds count to RecordsWritten.
func (s *Stats) incRecordsWritten(count uint64) {
	atomic.AddUint64(&s.RecordsWritten, count)
}
// incRecordsRead atomically adds count to RecordsRead.
func (s *Stats) incRecordsRead(count uint64) {
	atomic.AddUint64(&s.RecordsRead, count)
}
// incCacheHits atomically adds count to CacheHits.
func (s *Stats) incCacheHits(count uint64) {
	atomic.AddUint64(&s.CacheHits, count)
}
// incCacheMisses atomically adds count to CacheMisses.
func (s *Stats) incCacheMisses(count uint64) {
	atomic.AddUint64(&s.CacheMisses, count)
}
// clone returns a snapshot of the stats taken with atomic loads.
// Each field is loaded individually, so the snapshot is not guaranteed to
// be mutually consistent across fields under concurrent updates.
func (s *Stats) clone() Stats {
	return Stats{
		RecordsWritten: atomic.LoadUint64(&s.RecordsWritten),
		RecordsRead:    atomic.LoadUint64(&s.RecordsRead),
		CacheHits:      atomic.LoadUint64(&s.CacheHits),
		CacheMisses:    atomic.LoadUint64(&s.CacheMisses),
	}
}
Add a doc comment to the Stats struct.
package lm2
import "sync/atomic"
// Stats holds collection statistics.
// Counters are maintained with sync/atomic; take a snapshot via clone().
type Stats struct {
	RecordsWritten uint64
	RecordsRead    uint64
	CacheHits      uint64
	CacheMisses    uint64
}
// incRecordsWritten atomically bumps RecordsWritten by n.
func (s *Stats) incRecordsWritten(n uint64) {
	atomic.AddUint64(&s.RecordsWritten, n)
}
// incRecordsRead atomically bumps RecordsRead by n.
func (s *Stats) incRecordsRead(n uint64) {
	atomic.AddUint64(&s.RecordsRead, n)
}
// incCacheHits atomically bumps CacheHits by n.
func (s *Stats) incCacheHits(n uint64) {
	atomic.AddUint64(&s.CacheHits, n)
}
// incCacheMisses atomically bumps CacheMisses by n.
func (s *Stats) incCacheMisses(n uint64) {
	atomic.AddUint64(&s.CacheMisses, n)
}
// clone snapshots all counters using atomic loads (fields are loaded one
// at a time, so the snapshot is per-field consistent only).
func (s *Stats) clone() Stats {
	var snap Stats
	snap.RecordsWritten = atomic.LoadUint64(&s.RecordsWritten)
	snap.RecordsRead = atomic.LoadUint64(&s.RecordsRead)
	snap.CacheHits = atomic.LoadUint64(&s.CacheHits)
	snap.CacheMisses = atomic.LoadUint64(&s.CacheMisses)
	return snap
}
|
/*
*
* GoStats
* A simple stats server for Go services
*
* (c)2013 Green Man Gaming Limited
*
*/
package gostats
import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/VividCortex/gohistogram"
"github.com/gorilla/mux"
)
// metric is one timing metric: a running sum and count plus a streaming
// histogram used to approximate percentiles.
type metric struct {
	histogram *gohistogram.NumericHistogram // lazily allocated on first Add
	Count     int64
	Sum       int64
	mutex     sync.Mutex // guards all fields
}
// Add records a single observation. The histogram is created lazily on the
// first sample (Count == 0), with 200 buckets — TODO confirm the argument
// semantics against gohistogram.NewHistogram.
func (m *metric) Add(value int64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.Count == 0 {
		m.histogram = gohistogram.NewHistogram(200)
	}
	m.Count += 1
	m.Sum += value
	m.histogram.Add(float64(value))
}
// MarshalJSON renders the metric as {"sum":…, "count":…, "avg":…, "pNN":…}.
// It locks the metric so a concurrent Add cannot mutate the fields or the
// histogram mid-serialization, and guards the Count == 0 case: the
// original divided by zero and dereferenced a nil histogram when a metric
// had been created but never sampled.
func (m *metric) MarshalJSON() ([]byte, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	build_out := make(map[string]interface{})
	build_out["sum"] = m.Sum
	build_out["count"] = m.Count
	if m.Count == 0 {
		// No samples yet: avg/percentiles are undefined; report avg 0
		// and omit percentiles instead of panicking.
		build_out["avg"] = int64(0)
		return json.Marshal(build_out)
	}
	build_out["avg"] = m.Sum / m.Count
	percentiles := []float64{0.25, 0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 0.9999}
	re := regexp.MustCompile("(0+)$")
	for _, percentile := range percentiles {
		// p50.00 -> "p50": strip the dot, then trailing zeros.
		p := strings.Replace(fmt.Sprintf("p%.2f", percentile*100), ".", "", -1)
		p = re.ReplaceAllString(p, "")
		build_out[p] = int64(m.histogram.Quantile(percentile))
	}
	return json.Marshal(build_out)
}
// StatServe is the stats server: it accumulates named counters, timing
// metrics and free-form labels, and serves them as JSON on /stats.
type StatServe struct {
	Addr         string       // listen address passed to http.Server
	server       *http.Server // built lazily by configure()
	counterMutex sync.Mutex   // guards counters
	counters     map[string]int
	gauges       map[string]int // NOTE(review): never written or served in this file — confirm it is used elsewhere
	labelMutex   sync.Mutex     // guards labels
	labels       map[string]string
	metricMutex  sync.Mutex // guards the metrics map (each metric has its own mutex)
	metrics      map[string]*metric
}
// FetchStatsFunc returns an http.HandlerFunc that serves a JSON snapshot
// of all counters, metrics and labels.
func (s *StatServe) FetchStatsFunc() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("content-type", "application/json")
		to_return := make(map[string]interface{})
		// Hold the locks while marshalling: the original read the maps
		// unlocked, racing with IncrementCounter/Time/Label writers
		// (concurrent map iteration and write is a runtime fault).
		s.counterMutex.Lock()
		s.metricMutex.Lock()
		s.labelMutex.Lock()
		to_return["counters"] = s.counters
		to_return["metrics"] = s.metrics
		to_return["labels"] = s.labels
		json_string, err := json.Marshal(to_return)
		s.labelMutex.Unlock()
		s.metricMutex.Unlock()
		s.counterMutex.Unlock()
		// The original discarded the marshal error and served an empty body.
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, string(json_string))
	}
}
// configure lazily builds the HTTP server and wires the /stats route.
// Calling it again once the server exists is a no-op.
func (s *StatServe) configure() {
	// If I'm already configured, there's no point doing it again
	if s.server != nil {
		return
	}
	// Set up the URLs
	r := mux.NewRouter()
	r.HandleFunc("/stats", s.FetchStatsFunc())
	handler := http.NewServeMux()
	handler.Handle("/", r)
	// Set up a server
	s.server = &http.Server{Addr: s.Addr, Handler: handler}
}
// ListenAndServe configures (if needed) and starts the HTTP server on
// s.Addr. It blocks, so it is usually run in its own goroutine; it returns
// whatever http.Server.ListenAndServe returns.
func (s *StatServe) ListenAndServe() (e error) {
	s.configure()
	return s.server.ListenAndServe()
}
/*
 * IncrementCounter increments a named counter, creating it on first use.
 * Updates are mutex-protected; it's best to call this as a goroutine so
 * that you can fire/forget.
 */
func (s *StatServe) IncrementCounter(name string) {
	s.counterMutex.Lock()
	defer s.counterMutex.Unlock()
	// Indexing a missing key yields the zero value, so the explicit
	// "create if absent" step the original performed was redundant.
	s.counters[name]++
}
/*
 * Time how long it takes to run code in function f. This is designed to work
 * as a wrapper as f takes/returns nothing.
 *
 * So:
 *
 *	func foo() int {
 *		return_value = 0
 *
 *		gostats.Stats.Time("foo", func() {
 *			return_value = 1
 *		})
 *
 *		return return_value
 *	}
 */
func (s *StatServe) Time(name string, f func()) {
	// Look up (or create) the metric while holding the lock. The original
	// re-read s.metrics[name] after unlocking, which races with concurrent
	// map writes from other Time() calls.
	s.metricMutex.Lock()
	m, ok := s.metrics[name]
	if !ok {
		m = new(metric)
		s.metrics[name] = m
	}
	s.metricMutex.Unlock()
	start := time.Now().UnixNano()
	f()
	end := time.Now().UnixNano()
	m.Add(end - start) // nanoseconds
}
/*
 * Set an arbitrary label with a value. Nothing fancy here; the map write
 * is mutex-protected so it is safe for concurrent use.
 */
func (s *StatServe) Label(name string, value string) {
	s.labelMutex.Lock()
	defer s.labelMutex.Unlock()
	s.labels[name] = value
}
// A default Stats singleton for us.
// NOTE(review): gauges is left nil here — a write to it would panic;
// confirm whether it is initialized elsewhere before use.
var Stats = &StatServe{
	counters: make(map[string]int),
	metrics:  make(map[string]*metric),
	labels:   make(map[string]string),
}
Add endpoint to reset a counter or a metric.
/*
*
* GoStats
* A simple stats server for Go services
*
* (c)2013 Green Man Gaming Limited
*
*/
package gostats
import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/VividCortex/gohistogram"
"github.com/gorilla/mux"
)
// metric is one timing metric: a running sum and count plus a streaming
// histogram used to approximate percentiles.
type metric struct {
	histogram *gohistogram.NumericHistogram // lazily allocated on first Add
	Count     int64
	Sum       int64
	mutex     sync.Mutex // guards all fields
}
// Add records a single observation. The histogram is created lazily on the
// first sample (Count == 0), with 200 buckets — TODO confirm the argument
// semantics against gohistogram.NewHistogram.
func (m *metric) Add(value int64) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.Count == 0 {
		m.histogram = gohistogram.NewHistogram(200)
	}
	m.Count += 1
	m.Sum += value
	m.histogram.Add(float64(value))
}
// MarshalJSON renders the metric as {"sum":…, "count":…, "avg":…, "pNN":…}.
// It locks the metric so a concurrent Add cannot mutate the fields or the
// histogram mid-serialization, and guards the Count == 0 case: the
// original divided by zero and dereferenced a nil histogram when a metric
// had been created but never sampled.
func (m *metric) MarshalJSON() ([]byte, error) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	build_out := make(map[string]interface{})
	build_out["sum"] = m.Sum
	build_out["count"] = m.Count
	if m.Count == 0 {
		// No samples yet: avg/percentiles are undefined; report avg 0
		// and omit percentiles instead of panicking.
		build_out["avg"] = int64(0)
		return json.Marshal(build_out)
	}
	build_out["avg"] = m.Sum / m.Count
	percentiles := []float64{0.25, 0.50, 0.75, 0.90, 0.95, 0.99, 0.999, 0.9999}
	re := regexp.MustCompile("(0+)$")
	for _, percentile := range percentiles {
		// p50.00 -> "p50": strip the dot, then trailing zeros.
		p := strings.Replace(fmt.Sprintf("p%.2f", percentile*100), ".", "", -1)
		p = re.ReplaceAllString(p, "")
		build_out[p] = int64(m.histogram.Quantile(percentile))
	}
	return json.Marshal(build_out)
}
// StatServe is the stats server: it accumulates named counters, timing
// metrics and free-form labels, and serves them as JSON on /stats.
type StatServe struct {
	Addr         string       // listen address passed to http.Server
	server       *http.Server // built lazily by configure()
	counterMutex sync.Mutex   // guards counters
	counters     map[string]int
	gauges       map[string]int // NOTE(review): never written or served in this file — confirm it is used elsewhere
	labelMutex   sync.Mutex     // guards labels
	labels       map[string]string
	metricMutex  sync.Mutex // guards the metrics map (each metric has its own mutex)
	metrics      map[string]*metric
}
// FetchStatsFunc returns an http.HandlerFunc that serves a JSON snapshot
// of all counters, metrics and labels.
func (s *StatServe) FetchStatsFunc() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("content-type", "application/json")
		to_return := make(map[string]interface{})
		// Hold the locks while marshalling: the original read the maps
		// unlocked, racing with IncrementCounter/Time/Label writers
		// (concurrent map iteration and write is a runtime fault).
		s.counterMutex.Lock()
		s.metricMutex.Lock()
		s.labelMutex.Lock()
		to_return["counters"] = s.counters
		to_return["metrics"] = s.metrics
		to_return["labels"] = s.labels
		json_string, err := json.Marshal(to_return)
		s.labelMutex.Unlock()
		s.metricMutex.Unlock()
		s.counterMutex.Unlock()
		// The original discarded the marshal error and served an empty body.
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, string(json_string))
	}
}
// configure lazily builds the HTTP server and wires the /stats route.
// Calling it again once the server exists is a no-op.
func (s *StatServe) configure() {
	// If I'm already configured, there's no point doing it again
	if s.server != nil {
		return
	}
	// Set up the URLs
	r := mux.NewRouter()
	r.HandleFunc("/stats", s.FetchStatsFunc())
	handler := http.NewServeMux()
	handler.Handle("/", r)
	// Set up a server
	s.server = &http.Server{Addr: s.Addr, Handler: handler}
}
// ListenAndServe configures (if needed) and starts the HTTP server on
// s.Addr. It blocks, so it is usually run in its own goroutine; it returns
// whatever http.Server.ListenAndServe returns.
func (s *StatServe) ListenAndServe() (e error) {
	s.configure()
	return s.server.ListenAndServe()
}
/*
 * IncrementCounter increments a named counter, creating it on first use.
 * Updates are mutex-protected; it's best to call this as a goroutine so
 * that you can fire/forget.
 */
func (s *StatServe) IncrementCounter(name string) {
	s.counterMutex.Lock()
	defer s.counterMutex.Unlock()
	// Indexing a missing key yields the zero value, so the explicit
	// "create if absent" step the original performed was redundant.
	s.counters[name]++
}
/*
 * Time how long it takes to run code in function f. This is designed to work
 * as a wrapper as f takes/returns nothing.
 *
 * So:
 *
 *	func foo() int {
 *		return_value = 0
 *
 *		gostats.Stats.Time("foo", func() {
 *			return_value = 1
 *		})
 *
 *		return return_value
 *	}
 */
func (s *StatServe) Time(name string, f func()) {
	// Look up (or create) the metric while holding the lock. The original
	// re-read s.metrics[name] after unlocking, which races with concurrent
	// map writes from other Time() calls.
	s.metricMutex.Lock()
	m, ok := s.metrics[name]
	if !ok {
		m = new(metric)
		s.metrics[name] = m
	}
	s.metricMutex.Unlock()
	start := time.Now().UnixNano()
	f()
	end := time.Now().UnixNano()
	m.Add(end - start) // nanoseconds
}
/*
 * Set an arbitrary label with a value. Nothing fancy here; the map write
 * is mutex-protected so it is safe for concurrent use.
 */
func (s *StatServe) Label(name string, value string) {
	s.labelMutex.Lock()
	defer s.labelMutex.Unlock()
	s.labels[name] = value
}
/*
 * ResetMetric deletes a metric entry. Safe for concurrent use.
 */
func (s *StatServe) ResetMetric(name string) {
	s.metricMutex.Lock()
	// delete is a no-op for absent keys, so no existence check is needed.
	delete(s.metrics, name)
	s.metricMutex.Unlock()
}
/*
 * ResetCounter deletes a counter. Safe for concurrent use.
 */
func (s *StatServe) ResetCounter(name string) {
	s.counterMutex.Lock()
	// delete is a no-op for absent keys, so no existence check is needed.
	delete(s.counters, name)
	s.counterMutex.Unlock()
}
// A default Stats singleton for us.
// NOTE(review): gauges is left nil here — a write to it would panic;
// confirm whether it is initialized elsewhere before use.
var Stats = &StatServe{
	counters: make(map[string]int),
	metrics:  make(map[string]*metric),
	labels:   make(map[string]string),
}
|
package main
import (
"sync"
)
// ProxyStats tracks how many clients have been served and how many bytes
// have been proxied. The embedded RWMutex makes it safe for concurrent use.
type ProxyStats struct {
	sync.RWMutex
	clients uint64
	bytes   uint64
}
// AddServed records one more served client.
func (ps *ProxyStats) AddServed() {
	ps.Lock()
	ps.clients++
	ps.Unlock()
}
// AddBytes adds bc proxied bytes; non-positive counts are ignored so a
// failed/negative byte count cannot corrupt the unsigned total.
func (ps *ProxyStats) AddBytes(bc int64) {
	if bc <= 0 {
		return
	}
	ps.Lock()
	ps.bytes += uint64(bc)
	ps.Unlock()
}
// GetStats returns the counters as (clients, bytes). The original returned
// them in (bytes, clients) order, which contradicted the declared stat
// order (issue #5).
func (ps *ProxyStats) GetStats() (uint64, uint64) {
	ps.RLock()
	defer ps.RUnlock()
	return ps.clients, ps.bytes
}
Put the GetStats return order back to (clients, bytes) as documented.
Fixes #5.
package main
import (
"sync"
)
// ProxyStats tracks served-client and proxied-byte totals; the embedded
// RWMutex makes it safe for concurrent use.
type ProxyStats struct {
	sync.RWMutex
	clients uint64
	bytes   uint64
}
// AddServed counts one more served client.
func (ps *ProxyStats) AddServed() {
	ps.Lock()
	defer ps.Unlock()
	ps.clients++
}
// AddBytes accumulates bc proxied bytes; values <= 0 are dropped.
func (ps *ProxyStats) AddBytes(bc int64) {
	if bc > 0 {
		ps.Lock()
		defer ps.Unlock()
		ps.bytes += uint64(bc)
	}
}
// GetStats returns the number of clients served and the bytes proxied,
// in that order.
func (ps *ProxyStats) GetStats() (uint64, uint64) {
	ps.RLock()
	defer ps.RUnlock()
	return ps.clients, ps.bytes
}
|
/* Copyright (c) 2014-2015, Daniel Martí <mvdan@mvdan.cc> */
/* See LICENSE for licensing information */
package main
import (
"errors"
"fmt"
"sync"
"github.com/mvdan/bytesize"
)
var (
	ErrReachedMaxNumber  = errors.New("reached maximum number of pastes")
	ErrReachedMaxStorage = errors.New("reached maximum storage of pastes")
)
// Stats tracks how many pastes are stored and how many bytes they occupy,
// checked against optional maximums (0 means unlimited).
type Stats struct {
	number, maxNumber   int
	storage, maxStorage int64
	sync.RWMutex
}
// makeSpaceFor reserves room for one paste of the given size, or reports
// which configured limit would be exceeded.
func (s *Stats) makeSpaceFor(size int64) error {
	s.Lock()
	defer s.Unlock()
	switch {
	case s.maxNumber > 0 && s.number >= s.maxNumber:
		return ErrReachedMaxNumber
	case s.maxStorage > 0 && s.storage+size > s.maxStorage:
		return ErrReachedMaxStorage
	}
	s.number++
	s.storage += size
	return nil
}
// freeSpace releases the accounting for one paste of the given size.
func (s *Stats) freeSpace(size int64) {
	s.Lock()
	defer s.Unlock()
	s.number--
	s.storage -= size
}
// reportNumber renders the current paste count, with percent-of-max when a
// maximum is configured. Caller is expected to hold at least a read lock.
func (s *Stats) reportNumber() string {
	if s.maxNumber > 0 {
		return fmt.Sprintf("%d (%.2f%% out of %d)", s.number,
			float64(s.number*100)/float64(s.maxNumber), s.maxNumber)
	}
	return fmt.Sprintf("%d", s.number)
}
// reportStorage renders current storage usage, with percent-of-max when a
// maximum is configured. Caller is expected to hold at least a read lock.
func (s *Stats) reportStorage() string {
	if s.maxStorage > 0 {
		// maxStorage must also be wrapped in bytesize.ByteSize: passing
		// the raw int64 to the %s verb printed "%!s(int64=...)".
		return fmt.Sprintf("%s (%.2f%% out of %s)", bytesize.ByteSize(s.storage),
			float64(s.storage*100)/float64(s.maxStorage), bytesize.ByteSize(s.maxStorage))
	}
	return fmt.Sprintf("%s", bytesize.ByteSize(s.storage))
}
// Report returns a one-line human-readable summary of paste count and
// storage usage, taken under a read lock.
func (s *Stats) Report() string {
	s.RLock()
	number := s.reportNumber()
	storage := s.reportStorage()
	s.RUnlock()
	return fmt.Sprintf("Have a total of %s pastes using %s", number, storage)
}
Fix remaining typo from the bytesize -> int64 switch
/* Copyright (c) 2014-2015, Daniel Martí <mvdan@mvdan.cc> */
/* See LICENSE for licensing information */
package main
import (
"errors"
"fmt"
"sync"
"github.com/mvdan/bytesize"
)
var (
	// ErrReachedMaxNumber is returned when the paste-count limit is hit.
	ErrReachedMaxNumber = errors.New("reached maximum number of pastes")
	// ErrReachedMaxStorage is returned when the byte-storage limit is hit.
	ErrReachedMaxStorage = errors.New("reached maximum storage of pastes")
)
// Stats tracks how many pastes are stored and how many bytes they occupy,
// checked against optional maximums (0 means unlimited).
type Stats struct {
	number, maxNumber   int
	storage, maxStorage int64
	sync.RWMutex
}
// makeSpaceFor reserves room for one paste of the given size, or reports
// which configured limit would be exceeded.
func (s *Stats) makeSpaceFor(size int64) error {
	s.Lock()
	defer s.Unlock()
	if s.maxNumber > 0 && s.number >= s.maxNumber {
		return ErrReachedMaxNumber
	}
	if s.maxStorage > 0 && s.storage+size > s.maxStorage {
		return ErrReachedMaxStorage
	}
	s.number++
	s.storage += size
	return nil
}
// freeSpace releases the accounting for one paste of the given size.
func (s *Stats) freeSpace(size int64) {
	s.Lock()
	s.number--
	s.storage -= size
	s.Unlock()
}
// reportNumber renders the current paste count, with percent-of-max when a
// maximum is configured. Caller is expected to hold at least a read lock.
func (s *Stats) reportNumber() string {
	if s.maxNumber > 0 {
		return fmt.Sprintf("%d (%.2f%% out of %d)", s.number,
			float64(s.number*100)/float64(s.maxNumber), s.maxNumber)
	}
	return fmt.Sprintf("%d", s.number)
}
// reportStorage renders current storage usage, with percent-of-max when a
// maximum is configured; both values are formatted via bytesize.ByteSize.
func (s *Stats) reportStorage() string {
	if s.maxStorage > 0 {
		return fmt.Sprintf("%s (%.2f%% out of %s)", bytesize.ByteSize(s.storage),
			float64(s.storage*100)/float64(s.maxStorage), bytesize.ByteSize(s.maxStorage))
	}
	return fmt.Sprintf("%s", bytesize.ByteSize(s.storage))
}
// Report returns a one-line human-readable summary of paste count and
// storage usage, taken under a read lock.
func (s *Stats) Report() string {
	s.RLock()
	number := s.reportNumber()
	storage := s.reportStorage()
	s.RUnlock()
	return fmt.Sprintf("Have a total of %s pastes using %s", number, storage)
}
|
// TODO Check not for odd numbers but for 2 pow n + 1
package main
import (
"errors"
"fmt"
"math/rand"
"time"
)
// CornerStartingHeight is the base height seeded into the four map corners.
const CornerStartingHeight = 50
// Roughness scales the initial random amplitude (randomFactor = Roughness*size/10).
const Roughness = 10
// FractalParams describes the sub-square of the height map being worked on:
// its corner coordinates plus the current random amplitude.
type FractalParams struct {
	LowX         int
	LowY         int
	HighX        int
	HighY        int
	RandomFactor float64
}
// Generate an height map using a fractal (midpoint-displacement) algorithm.
// Returns a size x size grid. size must be odd so every square has an exact
// center cell; per the TODO at the top of the file this should really
// require 2^n + 1.
func generate(size int) ([][]float64, error) {
	// check if size is odd
	if size%2 == 0 {
		return nil, errors.New("Size must be odd.")
	}
	var x, y = size, size
	// Generate our slice to hold the height map data
	heightMap := make([][]float64, y, y)
	for i := range heightMap {
		heightMap[i] = make([]float64, x, x)
	}
	// Initialize randomness
	randomGen := rand.New(rand.NewSource(time.Now().UnixNano()))
	randomFactor := float64(Roughness * x / 10)
	// Assign a basic value to each of the four corners
	heightMap[0][0] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[0][y-1] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[x-1][0] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[x-1][y-1] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	// Recursively generate height map over the whole grid
	fractalGeneration(heightMap, FractalParams{0, 0, x - 1, y - 1, randomFactor}, randomGen)
	return heightMap, nil
}
// Applies a fractal (midpoint-displacement) step to the sub-square of the
// map described by params, then recurses into its four quadrants.
func fractalGeneration(heightMap [][]float64, params FractalParams,
	randomGen *rand.Rand) {
	// assign center value step: average of the four corner heights
	var averageCenter = (heightMap[params.LowX][params.LowY] +
		heightMap[params.LowX][params.HighY] +
		heightMap[params.HighX][params.LowY] +
		heightMap[params.HighX][params.HighY]) / 4
	// we multiply the RandomFactor by 2 when we assign the center point because
	// the center points needs to be more randomized than the corner midpoints.
	// (Fix: the code previously passed RandomFactor unscaled, contradicting
	// this comment.)
	heightMap[(params.LowX+params.HighX)/2][(params.LowY+params.HighY)/2] = averageCenter +
		generateRandomNumber(params.RandomFactor*2, randomGen)
	xMidPoint := (params.LowX + params.HighX) / 2
	yMidPoint := (params.LowY + params.HighY) / 2
	// assign corner midpoints step; a cell equal to 0 is treated as "not
	// yet set" (edges are shared between neighboring quadrants)
	if heightMap[params.LowX][yMidPoint] == 0 {
		heightMap[params.LowX][yMidPoint] = (heightMap[params.LowX][params.LowY] +
			heightMap[params.LowX][params.HighY]) / 2 //+ generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[params.HighX][yMidPoint] == 0 {
		heightMap[params.HighX][yMidPoint] = (heightMap[params.HighX][params.LowY] +
			heightMap[params.HighX][params.HighY]) / 2 //+ generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[xMidPoint][params.LowY] == 0 {
		heightMap[xMidPoint][params.LowY] = (heightMap[params.LowX][params.LowY] +
			heightMap[params.HighX][params.LowY]) / 2 //+ generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[xMidPoint][params.HighY] == 0 {
		heightMap[xMidPoint][params.HighY] = (heightMap[params.LowX][params.HighY] +
			heightMap[params.HighX][params.HighY]) / 2 //+ generateRandomNumber(params.RandomFactor, randomGen)
	}
	// Recalculate RandomFactor so it gets lower with each iteration
	if params.RandomFactor > 1 {
		params.RandomFactor = params.RandomFactor / 2
	}
	// Recursively call fractal generation on the four quadrants until the
	// sub-square is too small to subdivide further
	if params.HighX-params.LowX > 2 {
		fractalGeneration(heightMap, FractalParams{params.LowX, params.LowY,
			xMidPoint, yMidPoint,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{xMidPoint, params.LowY,
			params.HighX, yMidPoint,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{params.LowX, yMidPoint,
			xMidPoint, params.HighY,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{xMidPoint, yMidPoint,
			params.HighX, params.HighY,
			params.RandomFactor}, randomGen)
	}
}
func generateRandomNumber(randomFactor float64, randomGen *rand.Rand) float64 {
randNum := randomGen.NormFloat64() * randomFactor
fmt.Printf("randonFactor = %d, randNum = %d \n", randomFactor, randNum)
return randNum
}
Cleaned up fractal_map.go: removed debug output, enabled midpoint randomness, and raised the corner starting height.
// TODO Check not for odd numbers but for 2 pow n + 1
package main
import (
"errors"
"math/rand"
"time"
)
// CornerStartingHeight is the base height seeded into the four map corners.
const CornerStartingHeight = 500
// Roughness scales the initial random amplitude (randomFactor = Roughness*size/10).
const Roughness = 10
// FractalParams describes the sub-square of the height map being worked on:
// its corner coordinates plus the current random amplitude.
type FractalParams struct {
	LowX         int
	LowY         int
	HighX        int
	HighY        int
	RandomFactor float64
}
// Generate an height map using a fractal (midpoint-displacement) algorithm.
// Returns a size x size grid. size must be odd so every square has an exact
// center cell; per the TODO at the top of the file this should really
// require 2^n + 1.
func generate(size int) ([][]float64, error) {
	// check if size is odd
	if size%2 == 0 {
		return nil, errors.New("Size must be odd.")
	}
	var x, y = size, size
	// Generate our slice to hold the height map data
	heightMap := make([][]float64, y, y)
	for i := range heightMap {
		heightMap[i] = make([]float64, x, x)
	}
	// Initialize randomness
	randomGen := rand.New(rand.NewSource(time.Now().UnixNano()))
	randomFactor := float64(Roughness * x / 10)
	// Assign a basic value to each of the four corners
	heightMap[0][0] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[0][y-1] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[x-1][0] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	heightMap[x-1][y-1] = CornerStartingHeight + generateRandomNumber(randomFactor, randomGen)
	// Recursively generate height map over the whole grid
	fractalGeneration(heightMap, FractalParams{0, 0, x - 1, y - 1, randomFactor}, randomGen)
	return heightMap, nil
}
// Applies a fractal (midpoint-displacement) step to the sub-section of the
// map described by params, then recurses into its four quadrants.
func fractalGeneration(heightMap [][]float64, params FractalParams,
	randomGen *rand.Rand) {
	// assign center value step: average of the four corner heights
	var averageCenter = (heightMap[params.LowX][params.LowY] +
		heightMap[params.LowX][params.HighY] +
		heightMap[params.HighX][params.LowY] +
		heightMap[params.HighX][params.HighY]) / 4
	// we multiply the RandomFactor by 2 when we assign the center point because
	// the center points needs to be more randomized than the corner midpoints
	heightMap[(params.LowX+params.HighX)/2][(params.LowY+params.HighY)/2] = averageCenter +
		generateRandomNumber(params.RandomFactor*2, randomGen)
	xMidPoint := (params.LowX + params.HighX) / 2
	yMidPoint := (params.LowY + params.HighY) / 2
	// assign corner midpoints step; a cell equal to 0 is treated as "not
	// yet set" (edges are shared between neighboring quadrants)
	if heightMap[params.LowX][yMidPoint] == 0 {
		heightMap[params.LowX][yMidPoint] = (heightMap[params.LowX][params.LowY]+
			heightMap[params.LowX][params.HighY])/2 + generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[params.HighX][yMidPoint] == 0 {
		heightMap[params.HighX][yMidPoint] = (heightMap[params.HighX][params.LowY]+
			heightMap[params.HighX][params.HighY])/2 + generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[xMidPoint][params.LowY] == 0 {
		heightMap[xMidPoint][params.LowY] = (heightMap[params.LowX][params.LowY]+
			heightMap[params.HighX][params.LowY])/2 + generateRandomNumber(params.RandomFactor, randomGen)
	}
	if heightMap[xMidPoint][params.HighY] == 0 {
		heightMap[xMidPoint][params.HighY] = (heightMap[params.LowX][params.HighY]+
			heightMap[params.HighX][params.HighY])/2 + generateRandomNumber(params.RandomFactor, randomGen)
	}
	// Recalculate RandomFactor so it gets lower with each iteration
	if params.RandomFactor > 1 {
		params.RandomFactor = params.RandomFactor / 2
	}
	// Recursively call fractal generation on the four quadrants until the
	// sub-square is too small to subdivide further
	if params.HighX-params.LowX > 2 {
		fractalGeneration(heightMap, FractalParams{params.LowX, params.LowY,
			xMidPoint, yMidPoint,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{xMidPoint, params.LowY,
			params.HighX, yMidPoint,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{params.LowX, yMidPoint,
			xMidPoint, params.HighY,
			params.RandomFactor}, randomGen)
		fractalGeneration(heightMap, FractalParams{xMidPoint, yMidPoint,
			params.HighX, params.HighY,
			params.RandomFactor}, randomGen)
	}
}
func generateRandomNumber(randomFactor float64, randomGen *rand.Rand) float64 {
randNum := randomGen.NormFloat64() * randomFactor
return randNum
}
|
package frame
import "sync"
import "bytes"
var (
	// bufPool recycles bytes.Buffers between frame operations.
	bufPool = sync.Pool{
		New: func() interface{} {
			return &bytes.Buffer{}
		},
	}
	// bytesPool recycles fixed 8-byte scratch arrays.
	bytesPool = sync.Pool{
		New: func() interface{} {
			return &[8]byte{}
		},
	}
	// voidBytes is an all-zero source used to scrub arrays before reuse.
	voidBytes = make([]byte, 8)
)
// getBuf returns a (possibly recycled, always empty) buffer from the pool.
func getBuf() *bytes.Buffer {
	return bufPool.Get().(*bytes.Buffer)
}
// returnBuf resets buf and hands it back to the pool.
func returnBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
// getBytes returns a scratch array from the pool.
func getBytes() *[8]byte {
	return bytesPool.Get().(*[8]byte)
}
// returnBytes zeroes p and hands it back to the pool.
func returnBytes(p *[8]byte) {
	copy((*p)[:], voidBytes)
	bytesPool.Put(p)
}
// assert panics if err is non-nil.
func assert(err error) {
	if err != nil {
		panic(err)
	}
}
Introduce a chunkSize constant for the pooled scratch arrays (8 B -> 32 KiB).
package frame
import "sync"
import "bytes"
const (
	// chunkSize is the size of the pooled scratch arrays (32 KiB).
	chunkSize = 1 << 15
)
var (
	// bufPool recycles bytes.Buffers between frame operations.
	bufPool = sync.Pool{
		New: func() interface{} { return new(bytes.Buffer) },
	}
	// bytesPool recycles chunkSize-byte scratch arrays.
	bytesPool = sync.Pool{
		New: func() interface{} { return new([chunkSize]byte) },
	}
	// voidBytes is an all-zero source used to scrub arrays before reuse.
	voidBytes = make([]byte, chunkSize)
)
// getBuf returns a (possibly recycled, always empty) buffer from the pool.
func getBuf() *bytes.Buffer {
	return bufPool.Get().(*bytes.Buffer)
}
// returnBuf empties buf and hands it back to the pool.
func returnBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
// getBytes returns a scratch array from the pool.
func getBytes() *[chunkSize]byte {
	return bytesPool.Get().(*[chunkSize]byte)
}
// returnBytes zeroes p and hands it back to the pool.
func returnBytes(p *[chunkSize]byte) {
	copy(p[:], voidBytes)
	bytesPool.Put(p)
}
// assert panics if err is non-nil.
func assert(err error) {
	if err != nil {
		panic(err)
	}
}
|
// Package rpc_proxy provides a proxy interface for Nomad Servers. The
// RpcProxy periodically shuffles which server a Nomad Client communicates
// with in order to redistribute load across Nomad Servers. Nomad Servers
// that fail an RPC request are automatically cycled to the end of the list
// until the server list is reshuffled.
//
// The servers package does not provide any external API guarantees and
// should be called only by `hashicorp/nomad`.
package rpc_proxy
import (
"fmt"
"log"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
	// apiMajorVersion is synchronized with `nomad/server.go` and
	// represents the API version supported by this client.
	//
	// TODO(sean@): This symbol should be exported somewhere.
	apiMajorVersion = 1
	// clientRPCJitterFraction determines the amount of jitter added to
	// clientRPCMinReuseDuration before a connection is expired and a new
	// connection is established in order to rebalance load across Nomad
	// servers. The cluster-wide number of connections per second from
	// rebalancing is applied after this jitter to ensure the CPU impact
	// is always finite. See newRebalanceConnsPerSecPerServer's comment
	// for additional commentary.
	//
	// For example, in a 10K Nomad cluster with 5x servers, this default
	// averages out to ~13 new connections from rebalancing per server
	// per second (each connection is reused for 120s to 180s).
	//
	// NOTE(review): the jitter window is presumably
	// clientRPCMinReuseDuration / clientRPCJitterFraction — confirm
	// against the rebalance logic that consumes these constants.
	clientRPCJitterFraction = 2
	// clientRPCMinReuseDuration controls the minimum amount of time RPC
	// queries are sent over an established connection to a single server
	clientRPCMinReuseDuration = 120 * time.Second
	// newRebalanceConnsPerSecPerServer limits the number of new
	// connections a server receives per second for connection
	// rebalancing. This limit caps the load caused by continual
	// rebalancing efforts when a cluster is in equilibrium. A lower value
	// comes at the cost of increased recovery time after a partition.
	// This parameter begins to take effect when there are more than ~48K
	// clients querying 5x servers or at lower server counts when there
	// is a partition.
	//
	// For example, in a 100K Nomad cluster with 5x servers, it will take
	// ~5min for all servers to rebalance their connections. If 99,995
	// agents are in the minority talking to only one server, it will
	// take ~26min for all servers to rebalance. A 10K cluster in the
	// same scenario will take ~2.6min to rebalance.
	newRebalanceConnsPerSecPerServer = 64
	// rpcAPIMismatchLogRate determines the rate at which log entries are
	// emitted when the client and server's API versions are mismatched.
	rpcAPIMismatchLogRate = 3 * time.Hour
)
// NomadConfigInfo is an interface wrapper around this Nomad Agent's
// configuration to prevent a cyclic import dependency.
type NomadConfigInfo interface {
	RPCVersion() int
	Region() string
}
// Pinger is an interface wrapping client.ConnPool to prevent a
// cyclic import dependency.
type Pinger interface {
	PingNomadServer(region string, version int, s *ServerEndpoint) (bool, error)
}
// serverList is an array of Nomad Servers. The first server in the list is
// the active server.
//
// NOTE(sean@): We are explicitly relying on the fact that serverList will be
// copied onto the stack by atomic.Value. Please keep this structure light.
type serverList struct {
	L []*ServerEndpoint
}
// RpcProxy maintains the set of Nomad Server endpoints this agent talks to,
// periodically reshuffling them to spread RPC load (see the package doc).
type RpcProxy struct {
	// activatedList manages the list of Nomad Servers that are eligible
	// to be queried by the Agent
	activatedList atomic.Value
	listLock      sync.Mutex // guards mutations of activatedList
	// primaryServers is a list of servers found in the last heartbeat.
	// primaryServers are periodically reshuffled. Covered by
	// serverListLock.
	primaryServers serverList
	// backupServers is a list of fallback servers. These servers are
	// appended to the RpcProxy's serverList, but are never shuffled with
	// the list of servers discovered via the Nomad heartbeat. Covered
	// by serverListLock.
	backupServers serverList
	// serverListLock covers both backupServers and primaryServers
	serverListLock sync.RWMutex
	leaderAddr     string
	numNodes       int
	// rebalanceTimer controls the duration of the rebalance interval
	rebalanceTimer *time.Timer
	// shutdownCh is a copy of the channel in nomad.Client
	shutdownCh chan struct{}
	logger     *log.Logger
	configInfo NomadConfigInfo
	// rpcAPIMismatchThrottle regulates the rate at which warning
	// messages are emitted in the event of an API mismatch between the
	// clients and servers.
	rpcAPIMismatchThrottle map[string]time.Time
	// connPoolPinger is used to test the health of a server in the
	// connection pool. Pinger is an interface that wraps
	// client.ConnPool.
	connPoolPinger Pinger
	// notifyFailedBarrier acts as a barrier to prevent queuing behind
	// serverListLock and acts as a TryLock().
	notifyFailedBarrier int32
}
// activateEndpoint adds an endpoint to the RpcProxy's active serverList.
// Returns true if the server was added, returns false if the server already
// existed in the RpcProxy's serverList (in which case its entry is
// refreshed with the new endpoint's metadata).
//
// NOTE(review): callers in this file (SetBackupServers, AddPrimaryServer)
// hold p.listLock around this read-copy-update — confirm that invariant
// before calling from elsewhere.
func (p *RpcProxy) activateEndpoint(s *ServerEndpoint) bool {
	l := p.getServerList()
	// Check if this server is known
	found := false
	for idx, existing := range l.L {
		if existing.Name == s.Name {
			// Copy-on-write so readers of the old slice are unaffected
			newServers := make([]*ServerEndpoint, len(l.L))
			copy(newServers, l.L)
			// Overwrite the existing server details in order to
			// possibly update metadata (e.g. server version)
			newServers[idx] = s
			l.L = newServers
			found = true
			break
		}
	}
	// Add to the list if not known
	if !found {
		newServers := make([]*ServerEndpoint, len(l.L), len(l.L)+1)
		copy(newServers, l.L)
		newServers = append(newServers, s)
		l.L = newServers
	}
	p.saveServerList(l)
	return !found
}
// SetBackupServers sets a list of Nomad Servers to be used in the event that
// the Nomad Agent lost contact with the list of Nomad Servers provided via
// the Nomad Agent's heartbeat. If available, the backup servers are
// populated via Consul.
func (p *RpcProxy) SetBackupServers(addrs []string) error {
	l := make([]*ServerEndpoint, 0, len(addrs))
	// Use distinct names for the address and the endpoint: the original
	// shadowed the loop variable (`s, err := newServer(s)`), so on failure
	// the log/error formatted the nil endpoint instead of the offending
	// address string.
	for _, addr := range addrs {
		s, err := newServer(addr)
		if err != nil {
			p.logger.Printf("[WARN] RPC Proxy: unable to create backup server %q: %v", addr, err)
			return fmt.Errorf("unable to create new backup server from %q: %v", addr, err)
		}
		l = append(l, s)
	}
	p.serverListLock.Lock()
	p.backupServers.L = l
	p.serverListLock.Unlock()
	p.listLock.Lock()
	defer p.listLock.Unlock()
	for _, s := range l {
		p.activateEndpoint(s)
	}
	return nil
}
// AddPrimaryServer takes the RPC address of a Nomad server, creates a new
// endpoint, and adds it to both the primaryServers list and the active
// serverList used in the RPC Proxy. If the endpoint is not known by the
// RpcProxy, appends the endpoint to the list. The new endpoint will begin
// seeing use after the rebalance timer fires (or enough servers fail
// organically). Any values in the primary server list are overridden by the
// next successful heartbeat. Returns nil if the address cannot be parsed.
//
// NOTE(review): primaryServers is appended to unconditionally (no dedup
// here, unlike activateEndpoint) — confirm the heartbeat overwrite makes
// duplicates harmless.
func (p *RpcProxy) AddPrimaryServer(rpcAddr string) *ServerEndpoint {
	s, err := newServer(rpcAddr)
	if err != nil {
		p.logger.Printf("[WARN] RPC Proxy: unable to create new primary server from endpoint %q", rpcAddr)
		return nil
	}
	p.serverListLock.Lock()
	p.primaryServers.L = append(p.primaryServers.L, s)
	p.serverListLock.Unlock()
	p.listLock.Lock()
	p.activateEndpoint(s)
	p.listLock.Unlock()
	return s
}
// cycleServer returns a new list of servers that has dequeued the first
// server and enqueued it at the end of the list. cycleServer assumes the
// caller is holding the listLock. cycleServer does not test or ping
// the next server inline. cycleServer may be called when the environment
// has just entered an unhealthy situation and blocking on a server test is
// less desirable than just returning the next server in the firing line. If
// the next server fails, it will fail fast enough and cycleServer will be
// called again.
//
// NOTE(review): with fewer than two servers this returns the nil zero
// slice, not the original list — callers must treat that as "no change";
// confirm at the (not visible here) call sites.
func (l *serverList) cycleServer() (servers []*ServerEndpoint) {
	numServers := len(l.L)
	if numServers < 2 {
		return servers // No action required
	}
	newServers := make([]*ServerEndpoint, 0, numServers)
	newServers = append(newServers, l.L[1:]...)
	newServers = append(newServers, l.L[0])
	return newServers
}
// removeServerByKey performs an inline removal of the first server whose
// key matches targetKey. The relative order of the remaining servers is
// preserved. A no-op when no server matches.
func (l *serverList) removeServerByKey(targetKey *EndpointKey) {
	for i, s := range l.L {
		if targetKey.Equal(s.Key()) {
			copy(l.L[i:], l.L[i+1:]) // shift the tail left by one
			l.L[len(l.L)-1] = nil    // clear the dangling reference for GC
			l.L = l.L[:len(l.L)-1]   // shrink the slice
			return
		}
	}
}
// shuffleServers performs an in-place Fisher-Yates shuffle of the server
// list using the package-level math/rand source.
func (l *serverList) shuffleServers() {
	for idx := len(l.L) - 1; idx > 0; idx-- {
		swap := rand.Int31n(int32(idx + 1))
		l.L[idx], l.L[swap] = l.L[swap], l.L[idx]
	}
}
// FindServer takes out an internal "read lock" (an atomic.Value load) and
// returns a presumed-healthy server, or nil when no servers are known.
// Actual health is policed elsewhere: heartbeats remove dead nodes, and a
// server that fails an RPC is rotated to the end of the list.
func (p *RpcProxy) FindServer() *ServerEndpoint {
	l := p.getServerList()
	if len(l.L) == 0 {
		p.logger.Printf("[WARN] RPC Proxy: No servers available")
		return nil
	}

	// The head of the list is assumed to be the longest-serving entry
	// (barring a rotation racing with an addition), so hand it out.
	return l.L[0]
}
// getServerList hides the atomic.Value locking semantics from the caller
// and returns a copy of the current active server list.
func (p *RpcProxy) getServerList() serverList {
	list := p.activatedList.Load().(serverList)
	return list
}
// saveServerList hides the atomic.Value locking semantics from the caller
// and publishes l as the new active server list.
func (p *RpcProxy) saveServerList(l serverList) {
	p.activatedList.Store(l)
}
// LeaderAddr returns the RPC address of the cluster leader as reported by
// the most recent heartbeat; guarded by listLock.
func (p *RpcProxy) LeaderAddr() string {
	p.listLock.Lock()
	addr := p.leaderAddr
	p.listLock.Unlock()
	return addr
}
// NewRpcProxy is the only way to safely create a new RpcProxy.
func NewRpcProxy(logger *log.Logger, shutdownCh chan struct{}, configInfo NomadConfigInfo, connPoolPinger Pinger) (p *RpcProxy) {
	p = new(RpcProxy)
	p.logger = logger
	p.configInfo = configInfo         // can't pass *nomad.Client: import cycle
	p.connPoolPinger = connPoolPinger // can't pass *nomad.ConnPool: import cycle
	p.rebalanceTimer = time.NewTimer(clientRPCMinReuseDuration)
	p.shutdownCh = shutdownCh

	// BUGFIX: initialize the throttle map up front.
	// UpdateFromNodeUpdateResponse writes to it on an API-version
	// mismatch, and a write to a nil map panics.
	p.rpcAPIMismatchThrottle = make(map[string]time.Time)

	// Publish an empty (non-nil) list so getServerList's type
	// assertion never sees an unset atomic.Value.
	l := serverList{}
	l.L = make([]*ServerEndpoint, 0)
	p.saveServerList(l)
	return p
}
// NotifyFailedServer marks the passed in server as "failed" by rotating it
// to the end of the server list.
func (p *RpcProxy) NotifyFailedServer(s *ServerEndpoint) {
	l := p.getServerList()

	// If the server being failed is not the first server on the list,
	// this is a noop. If, however, the server is failed and first on
	// the list, acquire the lock, retest, and take the penalty of moving
	// the server to the end of the list.

	// Only rotate the server list when there is more than one server
	if len(l.L) > 1 && l.L[0] == s &&
		// Use atomic.CAS to emulate a TryLock().
		atomic.CompareAndSwapInt32(&p.notifyFailedBarrier, 0, 1) {
		defer atomic.StoreInt32(&p.notifyFailedBarrier, 0)

		// Grab a lock, retest, and take the hit of cycling the first
		// server to the end.
		p.listLock.Lock()
		defer p.listLock.Unlock()
		// Re-load and re-check under the lock: another goroutine may
		// have already rotated the list between the unlocked test
		// above and here.
		l = p.getServerList()

		if len(l.L) > 1 && l.L[0] == s {
			l.L = l.cycleServer()
			p.saveServerList(l)
		}
	}
}
// NumNodes returns the cluster node count reported by the most recent
// heartbeat (resp.NumNodes in UpdateFromNodeUpdateResponse).
func (p *RpcProxy) NumNodes() int {
	nodes := p.numNodes
	return nodes
}
// NumServers takes out an internal "read lock" and returns the number of
// known servers, counting both healthy and unhealthy entries.
func (p *RpcProxy) NumServers() int {
	return len(p.getServerList().L)
}
// RebalanceServers shuffles the list of servers on this agent. The server
// at the front of the list is selected for the next RPC. RPC calls that
// fail for a particular server are rotated to the end of the list. This
// method reshuffles the list periodically in order to redistribute work
// across all known Nomad servers (i.e. guarantee that the order of servers
// in the server list is not positively correlated with the age of a server
// in the Nomad cluster). Periodically shuffling the server list prevents
// long-lived clients from fixating on long-lived servers.
//
// Unhealthy servers are removed from the server list during the next client
// heartbeat. Before the newly shuffled server list is saved, the new remote
// endpoint is tested to ensure its responsive.
func (p *RpcProxy) RebalanceServers() {
	var serverListLocked bool
	p.serverListLock.Lock()
	serverListLocked = true
	defer func() {
		if serverListLocked {
			p.serverListLock.Unlock()
		}
	}()

	// Early abort if there is nothing to shuffle
	if (len(p.primaryServers.L) + len(p.backupServers.L)) < 2 {
		return
	}

	// Shuffle server lists independently
	p.primaryServers.shuffleServers()
	p.backupServers.shuffleServers()

	// Create a new merged serverList
	type targetServer struct {
		server *ServerEndpoint
		// 'n' == Nomad Server
		// 'c' == Consul Server
		// 'b' == Both
		state byte
	}
	mergedList := make(map[EndpointKey]*targetServer, len(p.primaryServers.L)+len(p.backupServers.L))
	for _, s := range p.primaryServers.L {
		mergedList[*s.Key()] = &targetServer{server: s, state: 'n'}
	}
	for _, s := range p.backupServers.L {
		k := s.Key()
		_, found := mergedList[*k]
		if found {
			mergedList[*k].state = 'b'
		} else {
			mergedList[*k] = &targetServer{server: s, state: 'c'}
		}
	}

	// Seed the candidate list with all primary servers, then append the
	// backup-only ('c') entries from the merged map.
	l := &serverList{L: make([]*ServerEndpoint, 0, len(mergedList))}
	for _, s := range p.primaryServers.L {
		l.L = append(l.L, s)
	}
	for _, v := range mergedList {
		if v.state != 'c' {
			continue
		}
		l.L = append(l.L, v.server)
	}

	// Release the lock before we begin transition to operations on the
	// network timescale and attempt to ping servers. A copy of the
	// servers has been made at this point.
	p.serverListLock.Unlock()
	serverListLocked = false

	// Iterate through the shuffled server list to find an assumed
	// healthy server. NOTE: Do not iterate on the list directly because
	// this loop mutates the server list in-place.
	var foundHealthyServer bool
	for i := 0; i < len(l.L); i++ {
		// Always test the first server. Failed servers are cycled
		// and eventually removed from the list when Nomad heartbeats
		// detect the failed node.
		selectedServer := l.L[0]

		ok, err := p.connPoolPinger.PingNomadServer(p.configInfo.Region(), p.configInfo.RPCVersion(), selectedServer)
		if ok {
			foundHealthyServer = true
			break
		}
		p.logger.Printf(`[DEBUG] RPC Proxy: pinging server "%s" failed: %s`, selectedServer.String(), err)

		// BUGFIX: cycleServer returns a new rotated list and does not
		// mutate the receiver. The original discarded the result, so
		// the same failed server was re-pinged on every iteration.
		l.L = l.cycleServer()
	}

	// If no healthy servers were found, sleep and wait for the admin to
	// join this node to a server and begin receiving heartbeats with an
	// updated list of Nomad servers. Or Consul will begin advertising a
	// new server in the nomad-servers service.
	if !foundHealthyServer {
		p.logger.Printf("[DEBUG] RPC Proxy: No healthy servers during rebalance, aborting")
		return
	}

	// Verify that all servers are present. Reconcile will save the
	// final serverList.
	if p.reconcileServerList(l) {
		p.logger.Printf("[DEBUG] RPC Proxy: Rebalanced %d servers, next active server is %s", len(l.L), l.L[0].String())
	} else {
		// reconcileServerList failed because Nomad removed the
		// server that was at the front of the list that had
		// successfully been Ping'ed. Between the Ping and
		// reconcile, a Nomad heartbeat removed the node.
		//
		// Instead of doing any heroics, "freeze in place" and
		// continue to use the existing connection until the next
		// rebalance occurs.
	}

	return
}
// reconcileServerList returns true when the first server in serverList (l)
// exists in the receiver's serverList (m). If true, the merged serverList
// (l) is stored as the receiver's serverList (m). Returns false if the
// first server in m does not exist in the passed in list (l) (i.e. was
// removed by Nomad during a PingNomadServer() call. Newly added servers are
// appended to the list and other missing servers are removed from the list.
func (p *RpcProxy) reconcileServerList(l *serverList) bool {
	p.listLock.Lock()
	defer p.listLock.Unlock()

	// newServerList is a serverList that has been kept up-to-date with
	// join and leave events.
	newServerList := p.getServerList()

	// If a Nomad heartbeat removed all nodes, or there is no selected
	// server (zero nodes in serverList), abort early.
	if len(newServerList.L) == 0 || len(l.L) == 0 {
		return false
	}

	type targetServer struct {
		server *ServerEndpoint

		// 'b' == both
		// 'o' == original
		// 'n' == new
		state byte
	}
	// Merge the candidate list (l, seeded as 'o') against the
	// authoritative list (newServerList, 'n'); entries found in both
	// are promoted to 'b'.
	mergedList := make(map[EndpointKey]*targetServer, len(l.L))
	for _, s := range l.L {
		mergedList[*s.Key()] = &targetServer{server: s, state: 'o'}
	}
	for _, s := range newServerList.L {
		k := s.Key()
		_, found := mergedList[*k]
		if found {
			mergedList[*k].state = 'b'
		} else {
			mergedList[*k] = &targetServer{server: s, state: 'n'}
		}
	}

	// Ensure the selected server has not been removed by a heartbeat:
	// a head entry still in state 'o' exists only in the candidate list
	// and was removed out from under us, so abandon the reconcile.
	selectedServerKey := l.L[0].Key()
	if v, found := mergedList[*selectedServerKey]; found && v.state == 'o' {
		return false
	}

	// Append any new servers and remove any old servers
	for k, v := range mergedList {
		switch v.state {
		case 'b':
			// Do nothing, server exists in both
		case 'o':
			// Server has been removed
			l.removeServerByKey(&k)
		case 'n':
			// Server added
			l.L = append(l.L, v.server)
		default:
			panic("unknown merge list state")
		}
	}

	p.saveServerList(*l)
	return true
}
// RemoveServer takes out an internal write lock and removes a server
// (matched by Name) from the active server list. A no-op when the server
// is not known.
func (p *RpcProxy) RemoveServer(s *ServerEndpoint) {
	p.listLock.Lock()
	defer p.listLock.Unlock()

	l := p.getServerList()
	for i := range l.L {
		if l.L[i].Name != s.Name {
			continue
		}
		// Rebuild the slice rather than mutating in place, so the
		// list stored in atomic.Value is replaced atomically.
		trimmed := make([]*ServerEndpoint, 0, len(l.L)-1)
		trimmed = append(trimmed, l.L[:i]...)
		trimmed = append(trimmed, l.L[i+1:]...)
		l.L = trimmed
		p.saveServerList(l)
		return
	}
}
// refreshServerRebalanceTimer is only called once p.rebalanceTimer expires.
// It re-arms the timer with an interval scaled to the cluster size and
// returns the chosen duration.
func (p *RpcProxy) refreshServerRebalanceTimer() time.Duration {
	numServers := len(p.getServerList().L)

	// Limit this connection's life based on the size (and health) of
	// the cluster: never rebalance more often than the jittered minimum
	// reuse duration, and keep the cluster-wide reconnect rate under
	// clusterWideRebalanceConnsPerSec across numNodes members.
	clusterWideRebalanceConnsPerSec := float64(numServers * newRebalanceConnsPerSecPerServer)
	connReuseLowWatermarkDuration := clientRPCMinReuseDuration + lib.RandomStagger(clientRPCMinReuseDuration/clientRPCJitterFraction)
	connRebalanceTimeout := lib.RateScaledInterval(clusterWideRebalanceConnsPerSec, connReuseLowWatermarkDuration, p.numNodes)

	p.rebalanceTimer.Reset(connRebalanceTimeout)
	return connRebalanceTimeout
}
// ResetRebalanceTimer resets the rebalance timer to the minimum reuse
// duration. This method exists for testing and should not be used directly.
func (p *RpcProxy) ResetRebalanceTimer() {
	p.listLock.Lock()
	p.rebalanceTimer.Reset(clientRPCMinReuseDuration)
	p.listLock.Unlock()
}
// ServerRPCAddrs returns one RPC address per known server.
func (p *RpcProxy) ServerRPCAddrs() []string {
	l := p.getServerList()
	addrs := make([]string, 0, len(l.L))
	for _, server := range l.L {
		addrs = append(addrs, server.Addr.String())
	}
	return addrs
}
// Run is used to start and manage the task of automatically shuffling and
// rebalancing the list of Nomad servers. This maintenance only happens
// periodically based on the expiration of the timer. Failed servers are
// automatically cycled to the end of the list. New servers are appended to
// the list. The order of the server list must be shuffled periodically to
// distribute load across all known and available Nomad servers. Blocks
// until shutdownCh is closed; intended to run in its own goroutine.
func (p *RpcProxy) Run() {
	for {
		select {
		case <-p.rebalanceTimer.C:
			// Timer fired: reshuffle the list and re-arm the
			// timer based on the current cluster size.
			p.RebalanceServers()

			p.refreshServerRebalanceTimer()
		case <-p.shutdownCh:
			p.logger.Printf("[INFO] RPC Proxy: shutting down")
			return
		}
	}
}
// UpdateFromNodeUpdateResponse handles heartbeat responses from Nomad
// Servers. Heartbeats contain a list of Nomad Servers that the client
// should talk with for RPC requests. UpdateFromNodeUpdateResponse does not
// rebalance its serverList, that is handled elsewhere. New servers learned
// via the heartbeat are appended to the RpcProxy's serverList. Removed
// servers are removed immediately. Servers speaking a newer RPC version are
// filtered from the serverList.
func (p *RpcProxy) UpdateFromNodeUpdateResponse(resp *structs.NodeUpdateResponse) error {
	// Merge all servers found in the response. Servers in the response
	// with newer API versions are filtered from the list. If the list
	// is missing an address found in the RpcProxy's server list, remove
	// it from the RpcProxy.
	//
	// FIXME(sean@): This is not true. We rely on an outside pump to set
	// these values. In order to catch the orphaned clients where all
	// Nomad servers were rolled between the heartbeat interval, the
	// rebalance task queries Consul and adds the servers found in Consul
	// to the server list in order to reattach an orphan to a server.

	p.serverListLock.Lock()
	defer p.serverListLock.Unlock()

	// BUGFIX: lazily initialize the throttle map. It is written below
	// on an API-version mismatch, and a write to a nil map panics.
	if p.rpcAPIMismatchThrottle == nil {
		p.rpcAPIMismatchThrottle = make(map[string]time.Time)
	}

	// 1) Create a map to reconcile the difference between
	// p.primaryServers and resp.Servers.
	type targetServer struct {
		server *ServerEndpoint

		// 'b' == both
		// 'o' == original
		// 'n' == new
		state byte
	}
	mergedPrimaryMap := make(map[EndpointKey]*targetServer, len(p.primaryServers.L)+len(resp.Servers))
	numOldServers := 0
	for _, s := range p.primaryServers.L {
		mergedPrimaryMap[*s.Key()] = &targetServer{server: s, state: 'o'}
		numOldServers++
	}
	numBothServers := 0
	var newServers bool
	for _, s := range resp.Servers {
		// Filter out servers using a newer API version. Prevent
		// spamming the logs every heartbeat.
		//
		// TODO(sean@): Move the logging throttle logic into a
		// dedicated logging package so RpcProxy does not have to
		// perform this accounting.
		if int32(p.configInfo.RPCVersion()) < s.RPCVersion {
			now := time.Now()
			t, ok := p.rpcAPIMismatchThrottle[s.RPCAdvertiseAddr]
			if ok && t.After(now) {
				continue
			}

			// BUGFIX: log the client version actually used in the
			// comparison above (RPCVersion()), not apiMajorVersion.
			p.logger.Printf("[WARN] API mismatch between client (v%d) and server (v%d), ignoring server %q", p.configInfo.RPCVersion(), s.RPCVersion, s.RPCAdvertiseAddr)
			p.rpcAPIMismatchThrottle[s.RPCAdvertiseAddr] = now.Add(rpcAPIMismatchLogRate)
			continue
		}

		server, err := newServer(s.RPCAdvertiseAddr)
		if err != nil {
			p.logger.Printf("[WARN] Unable to create a server from %q: %v", s.RPCAdvertiseAddr, err)
			continue
		}
		k := server.Key()
		_, found := mergedPrimaryMap[*k]
		if found {
			mergedPrimaryMap[*k].state = 'b'
			numBothServers++
		} else {
			mergedPrimaryMap[*k] = &targetServer{server: server, state: 'n'}
			newServers = true
		}
	}

	// Short-circuit acquiring listLock if nothing changed
	if !newServers && numOldServers == numBothServers {
		return nil
	}

	p.listLock.Lock()
	defer p.listLock.Unlock()
	newServerCfg := p.getServerList()
	for k, v := range mergedPrimaryMap {
		switch v.state {
		case 'b':
			// Do nothing, server exists in both
		case 'o':
			// Server has been removed
			//
			// TODO(sean@): Teach Nomad servers how to remove
			// themselves from their heartbeat in order to
			// gracefully drain their clients over the next
			// cluster's max rebalanceTimer duration. Without
			// this enhancement, if a server being shutdown and
			// it is the first in serverList, the client will
			// fail its next RPC connection.
			p.primaryServers.removeServerByKey(&k)
			newServerCfg.removeServerByKey(&k)
		case 'n':
			// Server added. Append it to both lists
			// immediately. The server should only go into
			// active use in the event of a failure or after a
			// rebalance occurs.
			p.primaryServers.L = append(p.primaryServers.L, v.server)
			newServerCfg.L = append(newServerCfg.L, v.server)
		default:
			panic("unknown merge list state")
		}
	}

	p.numNodes = int(resp.NumNodes)
	p.leaderAddr = resp.LeaderRPCAddr
	p.saveServerList(newServerCfg)

	return nil
}
// TODO: Rename `mergedNomadMap` to `mergedPrimaryMap`. Gratuitous, but more correct.
// Package rpc_proxy provides a proxy interface for Nomad Servers. The
// RpcProxy periodically shuffles which server a Nomad Client communicates
// with in order to redistribute load across Nomad Servers. Nomad Servers
// that fail an RPC request are automatically cycled to the end of the list
// until the server list is reshuffled.
//
// The servers package does not provide any external API guarantees and
// should be called only by `hashicorp/nomad`.
package rpc_proxy
import (
"fmt"
"log"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
	// apiMajorVersion is synchronized with `nomad/server.go` and
	// represents the API version supported by this client.
	//
	// TODO(sean@): This symbol should be exported somewhere.
	apiMajorVersion = 1

	// clientRPCJitterFraction determines the amount of jitter added to
	// clientRPCMinReuseDuration before a connection is expired and a new
	// connection is established in order to rebalance load across Nomad
	// servers. The cluster-wide number of connections per second from
	// rebalancing is applied after this jitter to ensure the CPU impact
	// is always finite. See newRebalanceConnsPerSecPerServer's comment
	// for additional commentary.
	//
	// For example, in a 10K Nomad cluster with 5x servers, this default
	// averages out to ~13 new connections from rebalancing per server
	// per second (each connection is reused for 120s to 180s).
	clientRPCJitterFraction = 2

	// clientRPCMinReuseDuration controls the minimum amount of time RPC
	// queries are sent over an established connection to a single server.
	clientRPCMinReuseDuration = 120 * time.Second

	// newRebalanceConnsPerSecPerServer limits the number of new
	// connections a server receives per second for connection
	// rebalancing. This limit caps the load caused by continual
	// rebalancing efforts when a cluster is in equilibrium. A lower
	// value comes at the cost of increased recovery time after a
	// partition. This parameter begins to take effect when there are
	// more than ~48K clients querying 5x servers or at lower server
	// counts when there is a partition.
	//
	// For example, in a 100K Nomad cluster with 5x servers, it will take
	// ~5min for all servers to rebalance their connections. If 99,995
	// agents are in the minority talking to only one server, it will
	// take ~26min for all servers to rebalance. A 10K cluster in the
	// same scenario will take ~2.6min to rebalance.
	newRebalanceConnsPerSecPerServer = 64

	// rpcAPIMismatchLogRate determines the rate at which log entries are
	// emitted when the client and server's API versions are mismatched.
	rpcAPIMismatchLogRate = 3 * time.Hour
)
// NomadConfigInfo is an interface wrapper around this Nomad Agent's
// configuration, used to prevent a cyclic import dependency.
type NomadConfigInfo interface {
	// RPCVersion returns the RPC API version this client speaks.
	RPCVersion() int

	// Region returns the Nomad region this agent belongs to.
	Region() string
}
// Pinger is an interface wrapping client.ConnPool to prevent a
// cyclic import dependency.
type Pinger interface {
	// PingNomadServer probes a single server, returning whether it
	// responded and any error encountered.
	PingNomadServer(region string, version int, s *ServerEndpoint) (bool, error)
}
// serverList is an array of Nomad Servers. The first server in the list is
// the active server.
//
// NOTE(sean@): We are explicitly relying on the fact that serverList will be
// copied onto the stack by atomic.Value. Please keep this structure light.
type serverList struct {
	// L is the ordered slice of endpoints; index 0 is the active server.
	L []*ServerEndpoint
}
// RpcProxy tracks the set of Nomad Servers a client may talk to and
// selects/rotates among them. See the package comment for the shuffling
// and failover policy.
type RpcProxy struct {
	// activatedList manages the list of Nomad Servers that are eligible
	// to be queried by the Agent
	activatedList atomic.Value
	// listLock guards updates published through activatedList as well
	// as leaderAddr and the rebalance timer.
	listLock sync.Mutex

	// primaryServers is a list of servers found in the last heartbeat.
	// primaryServers are periodically reshuffled. Covered by
	// serverListLock.
	primaryServers serverList

	// backupServers is a list of fallback servers. These servers are
	// appended to the RpcProxy's serverList, but are never shuffled with
	// the list of servers discovered via the Nomad heartbeat. Covered
	// by serverListLock.
	backupServers serverList

	// serverListLock covers both backupServers and primaryServers
	serverListLock sync.RWMutex

	// leaderAddr is the last leader RPC address from a heartbeat.
	leaderAddr string
	// numNodes is the cluster size from the last heartbeat.
	numNodes int

	// rebalanceTimer controls the duration of the rebalance interval
	rebalanceTimer *time.Timer

	// shutdownCh is a copy of the channel in nomad.Client
	shutdownCh chan struct{}

	logger *log.Logger

	configInfo NomadConfigInfo

	// rpcAPIMismatchThrottle regulates the rate at which warning
	// messages are emitted in the event of an API mismatch between the
	// clients and servers.
	rpcAPIMismatchThrottle map[string]time.Time

	// connPoolPinger is used to test the health of a server in the
	// connection pool. Pinger is an interface that wraps
	// client.ConnPool.
	connPoolPinger Pinger

	// notifyFailedBarrier acts as a barrier to prevent queuing behind
	// serverListLock and acts as a TryLock().
	notifyFailedBarrier int32
}
// activateEndpoint adds an endpoint to the RpcProxy's active serverList.
// Returns true if the server was added, false if a server with the same
// Name already existed (in which case its entry is refreshed in place so
// metadata such as the server version can be updated).
func (p *RpcProxy) activateEndpoint(s *ServerEndpoint) bool {
	l := p.getServerList()

	// Replace an existing entry with the same name, if present.
	for idx, existing := range l.L {
		if existing.Name == s.Name {
			// Copy-on-write: never mutate the slice stored in
			// the atomic.Value.
			updated := make([]*ServerEndpoint, len(l.L))
			copy(updated, l.L)
			updated[idx] = s
			l.L = updated
			p.saveServerList(l)
			return false
		}
	}

	// Unknown server: publish a grown snapshot with the new entry.
	grown := make([]*ServerEndpoint, len(l.L), len(l.L)+1)
	copy(grown, l.L)
	grown = append(grown, s)
	l.L = grown
	p.saveServerList(l)
	return true
}
// SetBackupServers sets a list of Nomad Servers to be used in the event that
// the Nomad Agent lost contact with the list of Nomad Servers provided via
// the Nomad Agent's heartbeat. If available, the backup servers are
// populated via Consul.
func (p *RpcProxy) SetBackupServers(addrs []string) error {
	l := make([]*ServerEndpoint, 0, len(addrs))
	for _, addr := range addrs {
		// BUGFIX: the original shadowed the address string with the
		// (nil-on-error) *ServerEndpoint, so the log and error printed
		// a nil server instead of the offending address.
		s, err := newServer(addr)
		if err != nil {
			p.logger.Printf("[WARN] RPC Proxy: unable to create backup server %q: %v", addr, err)
			return fmt.Errorf("unable to create new backup server from %q: %v", addr, err)
		}
		l = append(l, s)
	}

	// Replace the backup list wholesale; covered by serverListLock.
	p.serverListLock.Lock()
	p.backupServers.L = l
	p.serverListLock.Unlock()

	// Make every backup server eligible for RPC selection.
	p.listLock.Lock()
	defer p.listLock.Unlock()
	for _, s := range l {
		p.activateEndpoint(s)
	}

	return nil
}
// AddPrimaryServer takes the RPC address of a Nomad server, creates a new
// endpoint, and adds it to both the primaryServers list and the active
// serverList used in the RPC Proxy. If the endpoint is not known by the
// RpcProxy, appends the endpoint to the list. The new endpoint will begin
// seeing use after the rebalance timer fires (or enough servers fail
// organically). Any values in the primary server list are overridden by the
// next successful heartbeat. Returns nil when rpcAddr cannot be parsed.
func (p *RpcProxy) AddPrimaryServer(rpcAddr string) *ServerEndpoint {
	server, err := newServer(rpcAddr)
	if err != nil {
		p.logger.Printf("[WARN] RPC Proxy: unable to create new primary server from endpoint %q", rpcAddr)
		return nil
	}

	// Record it in the heartbeat-sourced primary list.
	p.serverListLock.Lock()
	p.primaryServers.L = append(p.primaryServers.L, server)
	p.serverListLock.Unlock()

	// Make it eligible for RPC selection immediately.
	p.listLock.Lock()
	defer p.listLock.Unlock()
	p.activateEndpoint(server)

	return server
}
// cycleServer returns a new list of servers with the first server dequeued
// and re-enqueued at the end of the list. The caller is assumed to hold
// listLock. cycleServer does not test or ping the next server inline: when
// the environment has just become unhealthy, returning the next candidate
// immediately is preferable to blocking on a probe; a bad candidate will
// fail fast and cycleServer will simply be called again. Returns nil when
// there are fewer than two servers (nothing to rotate).
func (l *serverList) cycleServer() (servers []*ServerEndpoint) {
	if len(l.L) < 2 {
		return servers // No action required
	}

	// Build a fresh slice: tail first, then the old head at the end.
	rotated := make([]*ServerEndpoint, 0, len(l.L))
	rotated = append(rotated, l.L[1:]...)
	rotated = append(rotated, l.L[0])
	return rotated
}
// removeServerByKey performs an inline removal of the first server whose
// key matches targetKey. The relative order of the remaining servers is
// preserved. A no-op when no server matches.
func (l *serverList) removeServerByKey(targetKey *EndpointKey) {
	for i, s := range l.L {
		if targetKey.Equal(s.Key()) {
			copy(l.L[i:], l.L[i+1:]) // shift the tail left by one
			l.L[len(l.L)-1] = nil    // clear the dangling reference for GC
			l.L = l.L[:len(l.L)-1]   // shrink the slice
			return
		}
	}
}
// shuffleServers performs an in-place Fisher-Yates shuffle of the server
// list using the package-level math/rand source.
func (l *serverList) shuffleServers() {
	for idx := len(l.L) - 1; idx > 0; idx-- {
		swap := rand.Int31n(int32(idx + 1))
		l.L[idx], l.L[swap] = l.L[swap], l.L[idx]
	}
}
// FindServer takes out an internal "read lock" (an atomic.Value load) and
// returns a presumed-healthy server, or nil when no servers are known.
// Actual health is policed elsewhere: heartbeats remove dead nodes, and a
// server that fails an RPC is rotated to the end of the list.
func (p *RpcProxy) FindServer() *ServerEndpoint {
	l := p.getServerList()
	if len(l.L) == 0 {
		p.logger.Printf("[WARN] RPC Proxy: No servers available")
		return nil
	}

	// The head of the list is assumed to be the longest-serving entry
	// (barring a rotation racing with an addition), so hand it out.
	return l.L[0]
}
// getServerList hides the atomic.Value locking semantics from the caller
// and returns a copy of the current active server list.
func (p *RpcProxy) getServerList() serverList {
	list := p.activatedList.Load().(serverList)
	return list
}
// saveServerList hides the atomic.Value locking semantics from the caller
// and publishes l as the new active server list.
func (p *RpcProxy) saveServerList(l serverList) {
	p.activatedList.Store(l)
}
// LeaderAddr returns the RPC address of the cluster leader as reported by
// the most recent heartbeat; guarded by listLock.
func (p *RpcProxy) LeaderAddr() string {
	p.listLock.Lock()
	addr := p.leaderAddr
	p.listLock.Unlock()
	return addr
}
// NewRpcProxy is the only way to safely create a new RpcProxy.
func NewRpcProxy(logger *log.Logger, shutdownCh chan struct{}, configInfo NomadConfigInfo, connPoolPinger Pinger) (p *RpcProxy) {
	p = new(RpcProxy)
	p.logger = logger
	p.configInfo = configInfo         // can't pass *nomad.Client: import cycle
	p.connPoolPinger = connPoolPinger // can't pass *nomad.ConnPool: import cycle
	p.rebalanceTimer = time.NewTimer(clientRPCMinReuseDuration)
	p.shutdownCh = shutdownCh

	// BUGFIX: initialize the throttle map up front.
	// UpdateFromNodeUpdateResponse writes to it on an API-version
	// mismatch, and a write to a nil map panics.
	p.rpcAPIMismatchThrottle = make(map[string]time.Time)

	// Publish an empty (non-nil) list so getServerList's type
	// assertion never sees an unset atomic.Value.
	l := serverList{}
	l.L = make([]*ServerEndpoint, 0)
	p.saveServerList(l)
	return p
}
// NotifyFailedServer marks the passed in server as "failed" by rotating it
// to the end of the server list.
func (p *RpcProxy) NotifyFailedServer(s *ServerEndpoint) {
	l := p.getServerList()

	// If the server being failed is not the first server on the list,
	// this is a noop. If, however, the server is failed and first on
	// the list, acquire the lock, retest, and take the penalty of moving
	// the server to the end of the list.

	// Only rotate the server list when there is more than one server
	if len(l.L) > 1 && l.L[0] == s &&
		// Use atomic.CAS to emulate a TryLock().
		atomic.CompareAndSwapInt32(&p.notifyFailedBarrier, 0, 1) {
		defer atomic.StoreInt32(&p.notifyFailedBarrier, 0)

		// Grab a lock, retest, and take the hit of cycling the first
		// server to the end.
		p.listLock.Lock()
		defer p.listLock.Unlock()
		// Re-load and re-check under the lock: another goroutine may
		// have already rotated the list between the unlocked test
		// above and here.
		l = p.getServerList()

		if len(l.L) > 1 && l.L[0] == s {
			l.L = l.cycleServer()
			p.saveServerList(l)
		}
	}
}
// NumNodes returns the cluster node count reported by the most recent
// heartbeat (resp.NumNodes in UpdateFromNodeUpdateResponse).
func (p *RpcProxy) NumNodes() int {
	nodes := p.numNodes
	return nodes
}
// NumServers takes out an internal "read lock" and returns the number of
// known servers, counting both healthy and unhealthy entries.
func (p *RpcProxy) NumServers() int {
	return len(p.getServerList().L)
}
// RebalanceServers shuffles the list of servers on this agent. The server
// at the front of the list is selected for the next RPC. RPC calls that
// fail for a particular server are rotated to the end of the list. This
// method reshuffles the list periodically in order to redistribute work
// across all known Nomad servers (i.e. guarantee that the order of servers
// in the server list is not positively correlated with the age of a server
// in the Nomad cluster). Periodically shuffling the server list prevents
// long-lived clients from fixating on long-lived servers.
//
// Unhealthy servers are removed from the server list during the next client
// heartbeat. Before the newly shuffled server list is saved, the new remote
// endpoint is tested to ensure its responsive.
func (p *RpcProxy) RebalanceServers() {
	var serverListLocked bool
	p.serverListLock.Lock()
	serverListLocked = true
	defer func() {
		if serverListLocked {
			p.serverListLock.Unlock()
		}
	}()

	// Early abort if there is nothing to shuffle
	if (len(p.primaryServers.L) + len(p.backupServers.L)) < 2 {
		return
	}

	// Shuffle server lists independently
	p.primaryServers.shuffleServers()
	p.backupServers.shuffleServers()

	// Create a new merged serverList
	type targetServer struct {
		server *ServerEndpoint
		// 'n' == Nomad Server
		// 'c' == Consul Server
		// 'b' == Both
		state byte
	}
	mergedList := make(map[EndpointKey]*targetServer, len(p.primaryServers.L)+len(p.backupServers.L))
	for _, s := range p.primaryServers.L {
		mergedList[*s.Key()] = &targetServer{server: s, state: 'n'}
	}
	for _, s := range p.backupServers.L {
		k := s.Key()
		_, found := mergedList[*k]
		if found {
			mergedList[*k].state = 'b'
		} else {
			mergedList[*k] = &targetServer{server: s, state: 'c'}
		}
	}

	// Seed the candidate list with all primary servers, then append the
	// backup-only ('c') entries from the merged map.
	l := &serverList{L: make([]*ServerEndpoint, 0, len(mergedList))}
	for _, s := range p.primaryServers.L {
		l.L = append(l.L, s)
	}
	for _, v := range mergedList {
		if v.state != 'c' {
			continue
		}
		l.L = append(l.L, v.server)
	}

	// Release the lock before we begin transition to operations on the
	// network timescale and attempt to ping servers. A copy of the
	// servers has been made at this point.
	p.serverListLock.Unlock()
	serverListLocked = false

	// Iterate through the shuffled server list to find an assumed
	// healthy server. NOTE: Do not iterate on the list directly because
	// this loop mutates the server list in-place.
	var foundHealthyServer bool
	for i := 0; i < len(l.L); i++ {
		// Always test the first server. Failed servers are cycled
		// and eventually removed from the list when Nomad heartbeats
		// detect the failed node.
		selectedServer := l.L[0]

		ok, err := p.connPoolPinger.PingNomadServer(p.configInfo.Region(), p.configInfo.RPCVersion(), selectedServer)
		if ok {
			foundHealthyServer = true
			break
		}
		p.logger.Printf(`[DEBUG] RPC Proxy: pinging server "%s" failed: %s`, selectedServer.String(), err)

		// BUGFIX: cycleServer returns a new rotated list and does not
		// mutate the receiver. The original discarded the result, so
		// the same failed server was re-pinged on every iteration.
		l.L = l.cycleServer()
	}

	// If no healthy servers were found, sleep and wait for the admin to
	// join this node to a server and begin receiving heartbeats with an
	// updated list of Nomad servers. Or Consul will begin advertising a
	// new server in the nomad-servers service.
	if !foundHealthyServer {
		p.logger.Printf("[DEBUG] RPC Proxy: No healthy servers during rebalance, aborting")
		return
	}

	// Verify that all servers are present. Reconcile will save the
	// final serverList.
	if p.reconcileServerList(l) {
		p.logger.Printf("[DEBUG] RPC Proxy: Rebalanced %d servers, next active server is %s", len(l.L), l.L[0].String())
	} else {
		// reconcileServerList failed because Nomad removed the
		// server that was at the front of the list that had
		// successfully been Ping'ed. Between the Ping and
		// reconcile, a Nomad heartbeat removed the node.
		//
		// Instead of doing any heroics, "freeze in place" and
		// continue to use the existing connection until the next
		// rebalance occurs.
	}

	return
}
// reconcileServerList returns true when the first server in serverList (l)
// exists in the receiver's serverList (m). If true, the merged serverList
// (l) is stored as the receiver's serverList (m). Returns false if the
// first server in m does not exist in the passed in list (l) (i.e. was
// removed by Nomad during a PingNomadServer() call. Newly added servers are
// appended to the list and other missing servers are removed from the list.
func (p *RpcProxy) reconcileServerList(l *serverList) bool {
	p.listLock.Lock()
	defer p.listLock.Unlock()

	// newServerList is a serverList that has been kept up-to-date with
	// join and leave events.
	newServerList := p.getServerList()

	// If a Nomad heartbeat removed all nodes, or there is no selected
	// server (zero nodes in serverList), abort early.
	if len(newServerList.L) == 0 || len(l.L) == 0 {
		return false
	}

	type targetServer struct {
		server *ServerEndpoint

		// 'b' == both
		// 'o' == original
		// 'n' == new
		state byte
	}
	// Merge the candidate list (l, seeded as 'o') against the
	// authoritative list (newServerList, 'n'); entries found in both
	// are promoted to 'b'.
	mergedList := make(map[EndpointKey]*targetServer, len(l.L))
	for _, s := range l.L {
		mergedList[*s.Key()] = &targetServer{server: s, state: 'o'}
	}
	for _, s := range newServerList.L {
		k := s.Key()
		_, found := mergedList[*k]
		if found {
			mergedList[*k].state = 'b'
		} else {
			mergedList[*k] = &targetServer{server: s, state: 'n'}
		}
	}

	// Ensure the selected server has not been removed by a heartbeat:
	// a head entry still in state 'o' exists only in the candidate list
	// and was removed out from under us, so abandon the reconcile.
	selectedServerKey := l.L[0].Key()
	if v, found := mergedList[*selectedServerKey]; found && v.state == 'o' {
		return false
	}

	// Append any new servers and remove any old servers
	for k, v := range mergedList {
		switch v.state {
		case 'b':
			// Do nothing, server exists in both
		case 'o':
			// Server has been removed
			l.removeServerByKey(&k)
		case 'n':
			// Server added
			l.L = append(l.L, v.server)
		default:
			panic("unknown merge list state")
		}
	}

	p.saveServerList(*l)
	return true
}
// RemoveServer takes out an internal write lock and removes a server from
// the server list.
func (p *RpcProxy) RemoveServer(s *ServerEndpoint) {
	p.listLock.Lock()
	defer p.listLock.Unlock()

	l := p.getServerList()
	for idx, srv := range l.L {
		if srv.Name != s.Name {
			continue
		}
		// Rebuild the slice without the matched entry so the shared
		// backing array of the saved list is never mutated in place.
		trimmed := make([]*ServerEndpoint, 0, len(l.L)-1)
		trimmed = append(trimmed, l.L[:idx]...)
		trimmed = append(trimmed, l.L[idx+1:]...)
		l.L = trimmed
		p.saveServerList(l)
		return
	}
}
// refreshServerRebalanceTimer is only called once m.rebalanceTimer expires.
// It resets the timer to the next rebalance interval and returns that
// interval.
func (p *RpcProxy) refreshServerRebalanceTimer() time.Duration {
	l := p.getServerList()
	numServers := len(l.L)
	// Limit this connection's life based on the size (and health) of the
	// cluster. Never rebalance a connection more frequently than
	// connReuseLowWatermarkDuration, and make sure we never exceed
	// clusterWideRebalanceConnsPerSec operations/s across numLANMembers.
	clusterWideRebalanceConnsPerSec := float64(numServers * newRebalanceConnsPerSecPerServer)
	// Jitter the floor so all clients do not rebalance in lock-step.
	connReuseLowWatermarkDuration := clientRPCMinReuseDuration + lib.RandomStagger(clientRPCMinReuseDuration/clientRPCJitterFraction)
	numLANMembers := p.numNodes
	connRebalanceTimeout := lib.RateScaledInterval(clusterWideRebalanceConnsPerSec, connReuseLowWatermarkDuration, numLANMembers)
	p.rebalanceTimer.Reset(connRebalanceTimeout)
	return connRebalanceTimeout
}
// ResetRebalanceTimer resets the rebalance timer. This method exists for
// testing and should not be used directly.
func (p *RpcProxy) ResetRebalanceTimer() {
	p.listLock.Lock()
	defer p.listLock.Unlock()
	// Restart the timer at the minimum reuse duration rather than the
	// scaled interval used by refreshServerRebalanceTimer.
	p.rebalanceTimer.Reset(clientRPCMinReuseDuration)
}
// ServerRPCAddrs returns one RPC Address per server
func (p *RpcProxy) ServerRPCAddrs() []string {
	l := p.getServerList()
	addrs := make([]string, len(l.L))
	for i := range l.L {
		addrs[i] = l.L[i].Addr.String()
	}
	return addrs
}
// Run is used to start and manage the task of automatically shuffling and
// rebalancing the list of Nomad servers. This maintenance only happens
// periodically based on the expiration of the timer. Failed servers are
// automatically cycled to the end of the list. New servers are appended to
// the list. The order of the server list must be shuffled periodically to
// distribute load across all known and available Nomad servers.
func (p *RpcProxy) Run() {
	for {
		select {
		case <-p.rebalanceTimer.C:
			// Timer fired: shuffle/ping the list, then re-arm the
			// timer for the next interval.
			p.RebalanceServers()
			p.refreshServerRebalanceTimer()
		case <-p.shutdownCh:
			p.logger.Printf("[INFO] RPC Proxy: shutting down")
			return
		}
	}
}
// UpdateFromNodeUpdateResponse handles heartbeat responses from Nomad
// Servers. Heartbeats contain a list of Nomad Servers that the client
// should talk with for RPC requests. UpdateFromNodeUpdateResponse does not
// rebalance its serverList, that is handled elsewhere. New servers learned
// via the heartbeat are appended to the RpcProxy's serverList. Removed
// servers are removed immediately. Servers speaking a newer RPC version are
// filtered from the serverList.
func (p *RpcProxy) UpdateFromNodeUpdateResponse(resp *structs.NodeUpdateResponse) error {
	// Merge all servers found in the response. Servers in the response
	// with newer API versions are filtered from the list. If the list
	// is missing an address found in the RpcProxy's server list, remove
	// it from the RpcProxy.
	//
	// FIXME(sean@): This is not true. We rely on an outside pump to set
	// these values. In order to catch the orphaned clients where all
	// Nomad servers were rolled between the heartbeat interval, the
	// rebalance task queries Consul and adds the servers found in Consul
	// to the server list in order to reattach an orphan to a server.
	p.serverListLock.Lock()
	defer p.serverListLock.Unlock()

	// 1) Create a map to reconcile the difference between
	// m.primaryServers and resp.Servers.
	type targetServer struct {
		server *ServerEndpoint
		// 'b' == both
		// 'o' == original
		// 'n' == new
		state byte
	}
	mergedPrimaryMap := make(map[EndpointKey]*targetServer, len(p.primaryServers.L)+len(resp.Servers))
	numOldServers := 0
	for _, s := range p.primaryServers.L {
		mergedPrimaryMap[*s.Key()] = &targetServer{server: s, state: 'o'}
		numOldServers++
	}
	numBothServers := 0
	var newServers bool
	for _, s := range resp.Servers {
		// Filter out servers using a newer API version. Prevent
		// spamming the logs every heartbeat.
		//
		// TODO(sean@): Move the logging throttle logic into a
		// dedicated logging package so RpcProxy does not have to
		// perform this accounting.
		if int32(p.configInfo.RPCVersion()) < s.RPCVersion {
			now := time.Now()
			t, ok := p.rpcAPIMismatchThrottle[s.RPCAdvertiseAddr]
			if ok && t.After(now) {
				// Throttle window still open for this addr; skip the log.
				continue
			}
			p.logger.Printf("[WARN] API mismatch between client (v%d) and server (v%d), ignoring server %q", apiMajorVersion, s.RPCVersion, s.RPCAdvertiseAddr)
			p.rpcAPIMismatchThrottle[s.RPCAdvertiseAddr] = now.Add(rpcAPIMismatchLogRate)
			continue
		}
		server, err := newServer(s.RPCAdvertiseAddr)
		if err != nil {
			p.logger.Printf("[WARN] Unable to create a server from %q: %v", s.RPCAdvertiseAddr, err)
			continue
		}
		k := server.Key()
		_, found := mergedPrimaryMap[*k]
		if found {
			mergedPrimaryMap[*k].state = 'b'
			numBothServers++
		} else {
			mergedPrimaryMap[*k] = &targetServer{server: server, state: 'n'}
			newServers = true
		}
	}

	// Short-circuit acquiring a lock if nothing changed
	if !newServers && numOldServers == numBothServers {
		return nil
	}

	p.listLock.Lock()
	defer p.listLock.Unlock()
	newServerCfg := p.getServerList()
	for k, v := range mergedPrimaryMap {
		switch v.state {
		case 'b':
			// Do nothing, server exists in both
		case 'o':
			// Server has been removed
			// TODO(sean@): Teach Nomad servers how to remove
			// themselves from their heartbeat in order to
			// gracefully drain their clients over the next
			// cluster's max rebalanceTimer duration. Without
			// this enhancement, if a server being shutdown and
			// it is the first in serverList, the client will
			// fail its next RPC connection.
			p.primaryServers.removeServerByKey(&k)
			newServerCfg.removeServerByKey(&k)
		case 'n':
			// Server added. Append it to both lists
			// immediately. The server should only go into
			// active use in the event of a failure or after a
			// rebalance occurs.
			p.primaryServers.L = append(p.primaryServers.L, v.server)
			newServerCfg.L = append(newServerCfg.L, v.server)
		default:
			panic("unknown merge list state")
		}
	}

	p.numNodes = int(resp.NumNodes)
	p.leaderAddr = resp.LeaderRPCAddr
	p.saveServerList(newServerCfg)
	return nil
}
|
// Package store is a dead simple configuration manager for Go applications.
package store
import (
"bytes"
"encoding/json"
"fmt"
"github.com/BurntSushi/toml"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
)
// applicationName is the per-application directory segment used when
// composing config paths; empty until SetApplicationName is called.
var applicationName string

// SetApplicationName defines a unique application handle for file system.
//
// By default, Store puts all your config data to %APPDATA%/<appname> on Windows
// and to $XDG_CONFIG_HOME or $HOME on *unix systems.
//
// Warning: Store would panic on any sensitive calls if it's not set.
func SetApplicationName(handle string) {
	applicationName = handle
}
// Load reads a configuration from `path` and puts it into `v` pointer.
//
// Path is a full filename, with extension. Since Store currently supports
// TOML and JSON only, passing others would result in a corresponding error.
//
// If `path` doesn't exist, Load will create one and emptify `v` pointer by
// writing a zero value of v's element type through the pointer.
func Load(path string, v interface{}) error {
	if applicationName == "" {
		panic("store: application name not defined")
	}
	globalPath := buildPlatformPath(path)
	data, err := ioutil.ReadFile(globalPath)
	if err != nil {
		// There is a chance that file we are looking for
		// just doesn't exist. In this case we are supposed
		// to create an empty configuration file, based on v.
		//
		// BUG FIX: the previous code serialized `&empty` — a
		// *reflect.Value, not the user's type — and only reassigned
		// the local variable v, so the caller never saw the zero
		// value. Serialize the zero value itself and write it back
		// through the caller's pointer instead.
		rv := reflect.ValueOf(v)
		if rv.Kind() != reflect.Ptr || rv.IsNil() {
			return &stringError{"store: Load expects a non-nil pointer"}
		}
		empty := reflect.New(rv.Type().Elem())
		if innerErr := Save(path, empty.Interface()); innerErr != nil {
			// Must be smth with file system... returning error from read.
			return err
		}
		rv.Elem().Set(empty.Elem())
		return nil
	}
	contents := string(data)
	if strings.HasSuffix(path, ".toml") {
		if _, err := toml.Decode(contents, v); err != nil {
			return err
		}
	} else if strings.HasSuffix(path, ".json") {
		if err := json.Unmarshal(data, v); err != nil {
			return err
		}
	} else {
		return &stringError{"unknown configuration format"}
	}
	return nil
}
// Save puts a configuration from `v` pointer into a file `path`.
//
// Path is a full filename, with extension. Since Store currently supports
// TOML and JSON only, passing others would result in a corresponding error.
func Save(path string, v interface{}) error {
	if applicationName == "" {
		panic("store: application name not defined")
	}
	var data []byte
	if strings.HasSuffix(path, ".toml") {
		var b bytes.Buffer
		encoder := toml.NewEncoder(&b)
		if err := encoder.Encode(v); err != nil {
			// BUG FIX: this previously returned nil, silently
			// discarding TOML encoding failures.
			return err
		}
		data = b.Bytes()
	} else if strings.HasSuffix(path, ".json") {
		fileData, err := json.Marshal(v)
		if err != nil {
			return err
		}
		data = fileData
	} else {
		return &stringError{"unknown configuration format"}
	}
	globalPath := buildPlatformPath(path)
	// Make sure the application's config directory exists before writing.
	if err := os.MkdirAll(filepath.Dir(globalPath), os.ModePerm); err != nil {
		return err
	}
	if err := ioutil.WriteFile(globalPath, data, os.ModePerm); err != nil {
		return err
	}
	return nil
}
// buildPlatformPath builds a platform-dependent path for relative path given.
//
// On Windows: %APPDATA%\<app>\<path>. On *unix: $XDG_CONFIG_HOME/<app>/<path>,
// falling back to $HOME/.config/<app>/<path> when XDG_CONFIG_HOME is unset.
func buildPlatformPath(path string) string {
	if runtime.GOOS == "windows" {
		return fmt.Sprintf("%s\\%s\\%s", os.Getenv("APPDATA"),
			applicationName,
			path)
	}
	// BUG FIX: previously "/.config" was appended even when
	// $XDG_CONFIG_HOME was set, producing $XDG_CONFIG_HOME/.config/<app>.
	// Per the XDG spec, XDG_CONFIG_HOME already IS the config directory;
	// only the $HOME fallback needs the "/.config" suffix.
	var unixConfigDir string
	if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
		unixConfigDir = xdg
	} else {
		unixConfigDir = os.Getenv("HOME") + "/.config"
	}
	return fmt.Sprintf("%s/%s/%s", unixConfigDir,
		applicationName,
		path)
}
Fix a path-prefix typo from the previous commit
// Package store is a dead simple configuration manager for Go applications.
package store
import (
"bytes"
"encoding/json"
"fmt"
"github.com/BurntSushi/toml"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
)
// applicationName is the per-application directory segment used when
// composing config paths; empty until SetApplicationName is called.
var applicationName string

// SetApplicationName defines a unique application handle for file system.
//
// By default, Store puts all your config data to %APPDATA%/<appname> on Windows
// and to $XDG_CONFIG_HOME or $HOME on *unix systems.
//
// Warning: Store would panic on any sensitive calls if it's not set.
func SetApplicationName(handle string) {
	applicationName = handle
}
// Load reads a configuration from `path` and puts it into `v` pointer.
//
// Path is a full filename, with extension. Since Store currently supports
// TOML and JSON only, passing others would result in a corresponding error.
//
// If `path` doesn't exist, Load will create one and emptify `v` pointer by
// writing a zero value of v's element type through the pointer.
func Load(path string, v interface{}) error {
	if applicationName == "" {
		panic("store: application name not defined")
	}
	globalPath := buildPlatformPath(path)
	data, err := ioutil.ReadFile(globalPath)
	if err != nil {
		// There is a chance that file we are looking for
		// just doesn't exist. In this case we are supposed
		// to create an empty configuration file, based on v.
		//
		// BUG FIX: the previous code serialized `&empty` — a
		// *reflect.Value, not the user's type — and only reassigned
		// the local variable v, so the caller never saw the zero
		// value. Serialize the zero value itself and write it back
		// through the caller's pointer instead.
		rv := reflect.ValueOf(v)
		if rv.Kind() != reflect.Ptr || rv.IsNil() {
			return &stringError{"store: Load expects a non-nil pointer"}
		}
		empty := reflect.New(rv.Type().Elem())
		if innerErr := Save(path, empty.Interface()); innerErr != nil {
			// Must be smth with file system... returning error from read.
			return err
		}
		rv.Elem().Set(empty.Elem())
		return nil
	}
	contents := string(data)
	if strings.HasSuffix(path, ".toml") {
		if _, err := toml.Decode(contents, v); err != nil {
			return err
		}
	} else if strings.HasSuffix(path, ".json") {
		if err := json.Unmarshal(data, v); err != nil {
			return err
		}
	} else {
		return &stringError{"unknown configuration format"}
	}
	return nil
}
// Save puts a configuration from `v` pointer into a file `path`.
//
// Path is a full filename, with extension. Since Store currently supports
// TOML and JSON only, passing others would result in a corresponding error.
func Save(path string, v interface{}) error {
	if applicationName == "" {
		panic("store: application name not defined")
	}
	var data []byte
	if strings.HasSuffix(path, ".toml") {
		var b bytes.Buffer
		encoder := toml.NewEncoder(&b)
		if err := encoder.Encode(v); err != nil {
			// BUG FIX: this previously returned nil, silently
			// discarding TOML encoding failures.
			return err
		}
		data = b.Bytes()
	} else if strings.HasSuffix(path, ".json") {
		fileData, err := json.Marshal(v)
		if err != nil {
			return err
		}
		data = fileData
	} else {
		return &stringError{"unknown configuration format"}
	}
	globalPath := buildPlatformPath(path)
	// Make sure the application's config directory exists before writing.
	if err := os.MkdirAll(filepath.Dir(globalPath), os.ModePerm); err != nil {
		return err
	}
	if err := ioutil.WriteFile(globalPath, data, os.ModePerm); err != nil {
		return err
	}
	return nil
}
// buildPlatformPath builds a platform-dependent path for relative path given.
//
// Windows: %APPDATA%\<app>\<path>. *unix: $XDG_CONFIG_HOME/<app>/<path>,
// defaulting to $HOME/.config when XDG_CONFIG_HOME is unset.
func buildPlatformPath(path string) string {
	if runtime.GOOS == "windows" {
		return fmt.Sprintf("%s\\%s\\%s", os.Getenv("APPDATA"), applicationName, path)
	}
	configDir := os.Getenv("XDG_CONFIG_HOME")
	if configDir == "" {
		configDir = os.Getenv("HOME") + "/.config"
	}
	return fmt.Sprintf("%s/%s/%s", configDir, applicationName, path)
}
|
package bolster
import (
"fmt"
"os"
"reflect"
"strings"
"github.com/boltdb/bolt"
"github.com/nochso/bolster/bytesort"
"github.com/nochso/bolster/codec"
"github.com/nochso/bolster/codec/json"
"github.com/nochso/bolster/errlist"
)
// Struct-tag vocabulary recognized by bolster.
const (
	tagBolster       = "bolster" // struct tag key
	tagID            = "id"      // marks the ID field
	tagAutoIncrement = "inc"     // requests auto-incremented IDs
)

// Store wraps a bolt database together with the codec used to serialize
// values and the set of registered struct types.
type Store struct {
	codec codec.Interface
	db    *bolt.DB
	types map[reflect.Type]typeInfo
}
// Open opens the bolt database at path and returns a Store using the
// default JSON codec.
func Open(path string, mode os.FileMode, options *bolt.Options) (*Store, error) {
	db, err := bolt.Open(path, mode, options)
	if err != nil {
		return nil, err
	}
	return &Store{
		codec: json.Codec,
		db:    db,
		types: map[reflect.Type]typeInfo{},
	}, nil
}
// Close releases the underlying bolt database.
func (s *Store) Close() error {
	return s.db.Close()
}
// Read runs fn inside a read-only bolt transaction. Errors collected on
// the Tx are returned if fn itself succeeds.
func (s *Store) Read(fn func(*Tx) error) error {
	return s.db.View(func(btx *bolt.Tx) error {
		tx := &Tx{btx: btx, store: s}
		if err := fn(tx); err != nil {
			return err
		}
		return tx.errs.ErrorOrNil()
	})
}
// Write runs fn inside a writable bolt transaction. Errors collected on
// the Tx are returned if fn itself succeeds.
func (s *Store) Write(fn func(*Tx) error) error {
	return s.db.Update(func(btx *bolt.Tx) error {
		tx := &Tx{btx: btx, store: s}
		if err := fn(tx); err != nil {
			return err
		}
		return tx.errs.ErrorOrNil()
	})
}
// Register registers each given struct, collecting per-value errors into
// a single error list.
func (s *Store) Register(v ...interface{}) error {
	errs := errlist.New()
	for i := range v {
		errs = errs.Append(s.register(v[i]))
	}
	return errs.ErrorOrNil()
}
// register validates a single struct (or pointer to struct), records its
// typeInfo and creates its bucket if missing.
func (s *Store) register(v interface{}) error {
	t := reflect.TypeOf(v)
	// Accept both T and *T; work with the element type.
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return fmt.Errorf("expected struct, got %v", t.Kind())
	}
	if _, exists := s.types[t]; exists {
		return fmt.Errorf("%v: type is already registered", t)
	}
	ti, err := newTypeInfo(t)
	if err != nil {
		return err
	}
	s.types[ti.Type] = ti
	// Ensure a bucket named after the fully qualified type exists.
	return s.Write(func(tx *Tx) error {
		_, err := tx.btx.CreateBucketIfNotExists(ti.FullName)
		return err
	})
}
// typeInfo describes a registered struct type: its bucket name, the index
// of its ID field and whether IDs auto-increment.
type typeInfo struct {
	FullName      []byte // "<pkgpath>.<TypeName>", also the bucket name
	IDField       int    // struct field index of the ID field; -1 if unset
	AutoIncrement bool
	Type          reflect.Type
}
// newTypeInfo builds and validates the typeInfo for a struct type.
func newTypeInfo(t reflect.Type) (typeInfo, error) {
	ti := typeInfo{
		FullName: []byte(t.PkgPath() + "." + t.Name()),
		Type:     t,
		IDField:  -1,
	}
	if err := ti.validateIDField(); err != nil {
		return ti, err
	}
	return ti, ti.validateBytesort()
}
// validateIDField locates the ID field: a single field tagged `bolster:"id"`
// wins, otherwise a field literally named "ID". Errors on ambiguity or
// absence; records the auto-increment flag on success.
func (ti *typeInfo) validateIDField() error {
	tags := newTagList(ti.Type)
	idKeys := tags.filter(tagID)
	if len(idKeys) > 1 {
		return fmt.Errorf("%v: must not have multiple fields with tag %q", ti, tagID)
	} else if len(idKeys) == 1 {
		ti.IDField = idKeys[0]
	} else if idField, ok := ti.Type.FieldByName("ID"); ok {
		ti.IDField = idField.Index[0]
	}
	if ti.IDField != -1 {
		ti.AutoIncrement = tags.contains(ti.IDField, tagAutoIncrement)
		return nil
	}
	return fmt.Errorf("%v: unable to find ID field: field has to be named \"ID\" or tagged with `bolster:\"id\"`", ti)
}
type tagList [][]string
// newTagList returns a list of bolster tags for each struct field.
func newTagList(rt reflect.Type) tagList {
tl := make([][]string, 0, rt.NumField())
for i := 0; i < rt.NumField(); i++ {
ftags := strings.Split(rt.Field(i).Tag.Get(tagBolster), ",")
tl = append(tl, ftags)
}
return tl
}
// filter returns the positions of fields containing a tag s.
func (tl tagList) filter(s string) []int {
	positions := []int{}
	for idx := range tl {
		if !tl.contains(idx, s) {
			continue
		}
		positions = append(positions, idx)
	}
	return positions
}
// contains returns true when i'th field contains tag s.
func (tl tagList) contains(i int, s string) bool {
	words := tl[i]
	for idx := range words {
		if words[idx] == s {
			return true
		}
	}
	return false
}
// validateBytesort verifies the ID field's type can be encoded by
// bytesort (i.e. produces byte-sortable keys), by encoding its zero value.
func (ti *typeInfo) validateBytesort() error {
	f := ti.Type.Field(ti.IDField)
	zv := reflect.Zero(f.Type)
	_, err := bytesort.Encode(zv.Interface())
	if err != nil {
		err = fmt.Errorf("%v: ID field %q is not byte encodable: %s", ti, f.Name, err)
	}
	return err
}
// String returns the fully qualified type name used as the bucket name.
func (ti typeInfo) String() string {
	return string(ti.FullName)
}
Expose the underlying bolt.DB via a Bolt() accessor func
package bolster
import (
"fmt"
"os"
"reflect"
"strings"
"github.com/boltdb/bolt"
"github.com/nochso/bolster/bytesort"
"github.com/nochso/bolster/codec"
"github.com/nochso/bolster/codec/json"
"github.com/nochso/bolster/errlist"
)
// Struct-tag vocabulary recognized by bolster.
const (
	tagBolster       = "bolster" // struct tag key
	tagID            = "id"      // marks the ID field
	tagAutoIncrement = "inc"     // requests auto-incremented IDs
)

// Store wraps a bolt database together with the codec used to serialize
// values and the set of registered struct types.
type Store struct {
	codec codec.Interface
	db    *bolt.DB
	types map[reflect.Type]typeInfo
}
// Open opens the bolt database at path and returns a Store using the
// default JSON codec.
func Open(path string, mode os.FileMode, options *bolt.Options) (*Store, error) {
	db, err := bolt.Open(path, mode, options)
	if err != nil {
		return nil, err
	}
	st := &Store{
		codec: json.Codec,
		db:    db,
		types: make(map[reflect.Type]typeInfo),
	}
	return st, nil
}
// Bolt exposes the underlying *bolt.DB for direct, low-level access.
func (s *Store) Bolt() *bolt.DB {
	return s.db
}
// Close releases the underlying bolt database.
func (s *Store) Close() error {
	return s.db.Close()
}
// Read runs fn inside a read-only bolt transaction. Errors collected on
// the Tx are returned if fn itself succeeds.
func (s *Store) Read(fn func(*Tx) error) error {
	return s.db.View(func(btx *bolt.Tx) error {
		tx := &Tx{btx: btx, store: s}
		err := fn(tx)
		if err != nil {
			return err
		}
		return tx.errs.ErrorOrNil()
	})
}
// Write runs fn inside a writable bolt transaction. Errors collected on
// the Tx are returned if fn itself succeeds.
func (s *Store) Write(fn func(*Tx) error) error {
	return s.db.Update(func(btx *bolt.Tx) error {
		tx := &Tx{btx: btx, store: s}
		err := fn(tx)
		if err != nil {
			return err
		}
		return tx.errs.ErrorOrNil()
	})
}
// Register registers each given struct, collecting per-value errors into
// a single error list.
func (s *Store) Register(v ...interface{}) error {
	errs := errlist.New()
	for _, vv := range v {
		errs = errs.Append(s.register(vv))
	}
	return errs.ErrorOrNil()
}
// register validates a single struct (or pointer to struct), records its
// typeInfo and creates its bucket if missing.
func (s *Store) register(v interface{}) error {
	t := reflect.TypeOf(v)
	// Accept both T and *T; work with the element type.
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return fmt.Errorf("expected struct, got %v", t.Kind())
	}
	if _, exists := s.types[t]; exists {
		return fmt.Errorf("%v: type is already registered", t)
	}
	ti, err := newTypeInfo(t)
	if err != nil {
		return err
	}
	s.types[ti.Type] = ti
	// Ensure a bucket named after the fully qualified type exists.
	return s.Write(func(tx *Tx) error {
		_, err := tx.btx.CreateBucketIfNotExists(ti.FullName)
		return err
	})
}
// typeInfo describes a registered struct type: its bucket name, the index
// of its ID field and whether IDs auto-increment.
type typeInfo struct {
	FullName      []byte // "<pkgpath>.<TypeName>", also the bucket name
	IDField       int    // struct field index of the ID field; -1 if unset
	AutoIncrement bool
	Type          reflect.Type
}
// newTypeInfo builds and validates the typeInfo for a struct type.
func newTypeInfo(t reflect.Type) (typeInfo, error) {
	ti := &typeInfo{
		FullName: []byte(t.PkgPath() + "." + t.Name()),
		Type:     t,
		IDField:  -1,
	}
	err := ti.validateIDField()
	if err != nil {
		return *ti, err
	}
	err = ti.validateBytesort()
	return *ti, err
}
// validateIDField locates the ID field: a single field tagged `bolster:"id"`
// wins, otherwise a field literally named "ID". Errors on ambiguity or
// absence; records the auto-increment flag on success.
func (ti *typeInfo) validateIDField() error {
	tags := newTagList(ti.Type)
	idKeys := tags.filter(tagID)
	if len(idKeys) > 1 {
		return fmt.Errorf("%v: must not have multiple fields with tag %q", ti, tagID)
	} else if len(idKeys) == 1 {
		ti.IDField = idKeys[0]
	} else if idField, ok := ti.Type.FieldByName("ID"); ok {
		ti.IDField = idField.Index[0]
	}
	if ti.IDField != -1 {
		ti.AutoIncrement = tags.contains(ti.IDField, tagAutoIncrement)
		return nil
	}
	return fmt.Errorf("%v: unable to find ID field: field has to be named \"ID\" or tagged with `bolster:\"id\"`", ti)
}
type tagList [][]string
// newTagList returns a list of bolster tags for each struct field.
func newTagList(rt reflect.Type) tagList {
tl := make([][]string, 0, rt.NumField())
for i := 0; i < rt.NumField(); i++ {
ftags := strings.Split(rt.Field(i).Tag.Get(tagBolster), ",")
tl = append(tl, ftags)
}
return tl
}
// filter returns the positions of fields containing a tag s.
func (tl tagList) filter(s string) []int {
	keys := []int{}
	for i := range tl {
		if tl.contains(i, s) {
			keys = append(keys, i)
		}
	}
	return keys
}
// contains returns true when i'th field contains tag s.
func (tl tagList) contains(i int, s string) bool {
	for _, w := range tl[i] {
		if w == s {
			return true
		}
	}
	return false
}
// validateBytesort verifies the ID field's type can be encoded by
// bytesort (i.e. produces byte-sortable keys), by encoding its zero value.
func (ti *typeInfo) validateBytesort() error {
	f := ti.Type.Field(ti.IDField)
	zv := reflect.Zero(f.Type)
	_, err := bytesort.Encode(zv.Interface())
	if err != nil {
		err = fmt.Errorf("%v: ID field %q is not byte encodable: %s", ti, f.Name, err)
	}
	return err
}
// String returns the fully qualified type name used as the bucket name.
func (ti typeInfo) String() string {
	return string(ti.FullName)
}
|
package kademlia
import (
"crypto/sha1"
"sync"
"time"
)
// Store is the interface for implementing the storage mechanism for the
// DHT.
type Store interface {
	// Store should store a key/value pair for the local node with the
	// given replication and expiration times.
	Store(key []byte, data []byte, replication time.Time, expiration time.Time, publisher bool) error
	// Retrieve should return the local key/value if it exists. If it is
	// not found locally, the found return value will be false.
	Retrieve(key []byte) (data []byte, found bool)
	// Delete should delete a key/value pair from the Store
	Delete(key []byte)
	// Init initializes the Store
	Init()
	// GetAllKeysForReplication should return the keys of all data to be
	// replicated across the network. Typically all data should be
	// replicated every tReplicate seconds.
	GetAllKeysForReplication() [][]byte
	// ExpireKeys should expire all key/values due for expiration.
	ExpireKeys()
	// GetKey returns the key for data
	GetKey(data []byte) []byte
}
// MemoryStore is an in-memory Store implementation. All maps are keyed by
// the string form of the key bytes and guarded by mutex.
type MemoryStore struct {
	mutex        *sync.Mutex
	data         map[string][]byte
	replicateMap map[string]time.Time // next replication deadline per key
	expireMap    map[string]time.Time // expiration deadline per key
}
// GetAllKeysForReplication returns the keys of all data whose replication
// deadline has passed.
func (ms *MemoryStore) GetAllKeysForReplication() [][]byte {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	// Capture the clock once so every key is judged against the same
	// instant (the old code called time.Now() per iteration).
	now := time.Now()
	var keys [][]byte
	for k := range ms.data {
		if now.After(ms.replicateMap[k]) {
			keys = append(keys, []byte(k))
		}
	}
	return keys
}
// ExpireKeys removes every key/value whose expiration deadline has passed.
func (ms *MemoryStore) ExpireKeys() {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	// Capture the clock once so all keys are judged against the same
	// instant (the old code called time.Now() per iteration).
	now := time.Now()
	for k, v := range ms.expireMap {
		if now.After(v) {
			delete(ms.replicateMap, k)
			delete(ms.expireMap, k)
			delete(ms.data, k)
		}
	}
}
// Init allocates the mutex and all internal maps; must be called before use.
func (ms *MemoryStore) Init() {
	ms.mutex = &sync.Mutex{}
	ms.data = map[string][]byte{}
	ms.replicateMap = map[string]time.Time{}
	ms.expireMap = map[string]time.Time{}
}
// GetKey returns the SHA-1 digest of data as its key.
func (ms *MemoryStore) GetKey(data []byte) []byte {
	digest := sha1.Sum(data)
	return digest[:]
}
// Store records a key/value pair along with its replication and
// expiration deadlines. The publisher flag is currently unused here.
func (ms *MemoryStore) Store(key []byte, data []byte, replication time.Time, expiration time.Time, publisher bool) error {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	k := string(key)
	ms.replicateMap[k] = replication
	ms.expireMap[k] = expiration
	ms.data[k] = data
	return nil
}
// Retrieve returns the locally stored value for key, with found=false
// when the key is absent.
func (ms *MemoryStore) Retrieve(key []byte) (data []byte, found bool) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	data, found = ms.data[string(key)]
	return
}
// Delete removes a key/value pair and its bookkeeping entries.
func (ms *MemoryStore) Delete(key []byte) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	k := string(key)
	delete(ms.replicateMap, k)
	delete(ms.expireMap, k)
	delete(ms.data, k)
}
Fix Store interface documentation
package kademlia
import (
"crypto/sha1"
"sync"
"time"
)
// Store is the interface for implementing the storage mechanism for the
// DHT.
type Store interface {
	// Store should store a key/value pair for the local node with the
	// given replication and expiration times.
	Store(key []byte, data []byte, replication time.Time, expiration time.Time, publisher bool) error
	// Retrieve should return the local key/value if it exists.
	Retrieve(key []byte) (data []byte, found bool)
	// Delete should delete a key/value pair from the Store
	Delete(key []byte)
	// Init initializes the Store
	Init()
	// GetAllKeysForReplication should return the keys of all data to be
	// replicated across the network. Typically all data should be
	// replicated every tReplicate seconds.
	GetAllKeysForReplication() [][]byte
	// ExpireKeys should expire all key/values due for expiration.
	ExpireKeys()
	// GetKey returns the key for data
	GetKey(data []byte) []byte
}
// MemoryStore is an in-memory Store implementation. All maps are keyed by
// the string form of the key bytes and guarded by mutex.
type MemoryStore struct {
	mutex        *sync.Mutex
	data         map[string][]byte
	replicateMap map[string]time.Time // next replication deadline per key
	expireMap    map[string]time.Time // expiration deadline per key
}
// GetAllKeysForReplication returns the keys of all data whose replication
// deadline has passed.
func (ms *MemoryStore) GetAllKeysForReplication() [][]byte {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	// Capture the clock once so every key is judged against the same
	// instant (the old code called time.Now() per iteration).
	now := time.Now()
	var keys [][]byte
	for k := range ms.data {
		if now.After(ms.replicateMap[k]) {
			keys = append(keys, []byte(k))
		}
	}
	return keys
}
// ExpireKeys removes every key/value whose expiration deadline has passed.
func (ms *MemoryStore) ExpireKeys() {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	// Capture the clock once so all keys are judged against the same
	// instant (the old code called time.Now() per iteration).
	now := time.Now()
	for k, v := range ms.expireMap {
		if now.After(v) {
			delete(ms.replicateMap, k)
			delete(ms.expireMap, k)
			delete(ms.data, k)
		}
	}
}
// Init allocates the mutex and all internal maps; must be called before use.
func (ms *MemoryStore) Init() {
	ms.data = make(map[string][]byte)
	ms.mutex = &sync.Mutex{}
	ms.replicateMap = make(map[string]time.Time)
	ms.expireMap = make(map[string]time.Time)
}
// GetKey returns the SHA-1 digest of data as its key.
func (ms *MemoryStore) GetKey(data []byte) []byte {
	sha := sha1.Sum(data)
	return sha[:]
}
// Store records a key/value pair along with its replication and
// expiration deadlines. The publisher flag is currently unused here.
func (ms *MemoryStore) Store(key []byte, data []byte, replication time.Time, expiration time.Time, publisher bool) error {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	ms.replicateMap[string(key)] = replication
	ms.expireMap[string(key)] = expiration
	ms.data[string(key)] = data
	return nil
}
// Retrieve returns the locally stored value for key, with found=false
// when the key is absent.
func (ms *MemoryStore) Retrieve(key []byte) (data []byte, found bool) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	data, found = ms.data[string(key)]
	return data, found
}
// Delete removes a key/value pair and its bookkeeping entries.
func (ms *MemoryStore) Delete(key []byte) {
	ms.mutex.Lock()
	defer ms.mutex.Unlock()
	delete(ms.replicateMap, string(key))
	delete(ms.expireMap, string(key))
	delete(ms.data, string(key))
}
|
package main
import (
"os"
"time"
)
// Store persists job state as plain files under baseDir/jobs/<jobID>/.
type Store struct {
	baseDir string
}
// NewStore creates (if needed) the store directory tree rooted at
// cwd/relPath and returns a Store for it. Panics on any file-system error.
func NewStore(relPath string) *Store {
	// TODO: check if we need to add the full path
	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	base := cwd + "/" + relPath
	for _, dir := range []string{base, base + "/jobs"} {
		if mkErr := os.MkdirAll(dir, 0755); mkErr != nil {
			panic(mkErr)
		}
	}
	return &Store{base}
}
// Lock marks the store directory as in use by touching a lock file;
// panics if the directory is already locked.
func (s Store) Lock() {
	lockFile := s.baseDir + "/lock"
	if FileExists(lockFile) {
		// BUG FIX: message previously read "<dir>is locked!" with no
		// separating space.
		panic(s.baseDir + " is locked!\n can't run locked directory")
	} else {
		TouchFile(lockFile)
	}
}
// Unlock removes the lock file; panics if the directory is not locked.
func (s Store) Unlock() {
	lockFile := s.baseDir + "/lock"
	if !FileExists(lockFile) {
		// BUG FIX: message previously read "<dir>is not locked!" with
		// no separating space.
		panic(s.baseDir + " is not locked!\n can't unlock")
	} else {
		DeleteFile(lockFile)
	}
}
// getPath returns the path of a job's per-field file, e.g. .../jobs/<id>/cmd.
func (s Store) getPath(jobID string, field string) string {
	return s.baseDir + "/jobs/" + jobID + "/" + field
}
// Setup creates the job directory, records its command, ensures the
// output file exists, and marks the job NEW. Panics on file-system errors.
func (s Store) Setup(jobID string, cmd string) {
	jobsDir := s.baseDir + "/jobs/" + jobID
	err := os.MkdirAll(jobsDir, 0755)
	if err != nil {
		panic(err)
	}
	SetFile(s.getPath(jobID, "cmd"), cmd)
	// Create the output file up front so GetOutput never reads a
	// nonexistent file before the first SetOutput call.
	TouchFile(s.getPath(jobID, "output"))
	s.SetState(jobID, NEW)
}
// GetState reads a job's persisted state, returning UNKNOWN when the
// state file is missing.
func (s Store) GetState(jobID string) JobState {
	stateFile := s.getPath(jobID, "state")
	if !FileExists(stateFile) {
		return UNKNOWN
	}
	return JOBSTATEIDS[ReadFile(stateFile)]
}
// SetOutput appends output to the job's output file.
func (s Store) SetOutput(jobID string, output string) {
	AppendToFile(s.getPath(jobID, "output"), output)
}
// GetOutput returns the accumulated output of a job.
func (s Store) GetOutput(jobID string) string {
	return ReadFile(s.getPath(jobID, "output"))
}
// GetLastTouch returns when the job's state file was last modified.
func (s Store) GetLastTouch(jobID string) time.Time {
	return LastTouch(s.getPath(jobID, "state"))
}
// GetCmd returns the command recorded for a job.
func (s Store) GetCmd(jobID string) string {
	return ReadFile(s.getPath(jobID, "cmd"))
}
// GetJobIDs lists all job IDs (the entries of the jobs directory).
func (s Store) GetJobIDs() (result []string) {
	result = append(result, ListFiles(s.baseDir+"/jobs")...)
	return result
}
// SetState persists a job's state label to its state file.
func (s Store) SetState(jobID string, jobState JobState) {
	SetFile(s.getPath(jobID, "state"), STATELABELS[jobState])
}
// Reset marks every known job NEW again.
func (s Store) Reset() {
	for _, jobID := range s.GetJobIDs() {
		s.SetState(jobID, NEW)
	}
}
// Close removes the lock file unconditionally.
// NOTE(review): this calls RemoveFile while Unlock calls DeleteFile —
// presumably equivalent helpers; confirm and unify.
func (s Store) Close() {
	RemoveFile(s.baseDir + "/lock")
}
Create the output file at job creation time
package main
import (
"os"
"time"
)
// Store persists job state as plain files under baseDir/jobs/<jobID>/.
type Store struct {
	baseDir string
}
// NewStore creates (if needed) the store directory tree rooted at
// cwd/relPath and returns a Store for it. Panics on any file-system error.
func NewStore(relPath string) *Store {
	// TODO: check if we need to add the full path
	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	baseDir := cwd + "/" + relPath
	err = os.MkdirAll(baseDir, 0755)
	if err != nil {
		panic(err)
	}
	jobsDir := baseDir + "/jobs"
	err = os.MkdirAll(jobsDir, 0755)
	if err != nil {
		panic(err)
	}
	return &Store{baseDir}
}
// Lock marks the store directory as in use by touching a lock file;
// panics if the directory is already locked.
func (s Store) Lock() {
	lockFile := s.baseDir + "/lock"
	if FileExists(lockFile) {
		// BUG FIX: message previously read "<dir>is locked!" with no
		// separating space.
		panic(s.baseDir + " is locked!\n can't run locked directory")
	} else {
		TouchFile(lockFile)
	}
}
// Unlock removes the lock file; panics if the directory is not locked.
func (s Store) Unlock() {
	lockFile := s.baseDir + "/lock"
	if !FileExists(lockFile) {
		// BUG FIX: message previously read "<dir>is not locked!" with
		// no separating space.
		panic(s.baseDir + " is not locked!\n can't unlock")
	} else {
		DeleteFile(lockFile)
	}
}
// getPath returns the path of a job's per-field file, e.g. .../jobs/<id>/cmd.
func (s Store) getPath(jobID string, field string) string {
	return s.baseDir + "/jobs/" + jobID + "/" + field
}
// Setup creates the job directory, records its command, pre-creates the
// output file (so GetOutput works before any SetOutput), and marks the
// job NEW. Panics on file-system errors.
func (s Store) Setup(jobID string, cmd string) {
	jobsDir := s.baseDir + "/jobs/" + jobID
	err := os.MkdirAll(jobsDir, 0755)
	if err != nil {
		panic(err)
	}
	SetFile(s.getPath(jobID, "cmd"), cmd)
	TouchFile(s.getPath(jobID, "output"))
	s.SetState(jobID, NEW)
}
// GetState reads a job's persisted state, returning UNKNOWN when the
// state file is missing.
func (s Store) GetState(jobID string) JobState {
	stateFile := s.getPath(jobID, "state")
	if !FileExists(stateFile) {
		return UNKNOWN
	}
	contents := ReadFile(stateFile)
	return JOBSTATEIDS[contents]
}
// SetOutput appends output to the job's output file.
func (s Store) SetOutput(jobID string, output string) {
	AppendToFile(s.getPath(jobID, "output"), output)
}
// GetOutput returns the accumulated output of a job.
func (s Store) GetOutput(jobID string) string {
	return ReadFile(s.getPath(jobID, "output"))
}
// GetLastTouch returns when the job's state file was last modified.
func (s Store) GetLastTouch(jobID string) time.Time {
	return LastTouch(s.getPath(jobID, "state"))
}
// GetCmd returns the command recorded for a job.
func (s Store) GetCmd(jobID string) string {
	return ReadFile(s.getPath(jobID, "cmd"))
}
// GetJobIDs lists all job IDs (the entries of the jobs directory).
func (s Store) GetJobIDs() (result []string) {
	for _, filename := range ListFiles(s.baseDir + "/jobs") {
		result = append(result, filename)
	}
	return result
}
// SetState persists a job's state label to its state file.
func (s Store) SetState(jobID string, jobState JobState) {
	SetFile(s.getPath(jobID, "state"), STATELABELS[jobState])
}
// Reset marks every known job NEW again.
func (s Store) Reset() {
	for _, jobID := range s.GetJobIDs() {
		s.SetState(jobID, NEW)
	}
}
// Close removes the lock file unconditionally.
// NOTE(review): this calls RemoveFile while Unlock calls DeleteFile —
// presumably equivalent helpers; confirm and unify.
func (s Store) Close() {
	RemoveFile(s.baseDir + "/lock")
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"sync"
)
// drivers is the global registry of named database drivers.
var drivers = make(map[string]driver.Driver)

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
	if driver == nil {
		panic("sql: Register driver is nil")
	}
	if _, dup := drivers[name]; dup {
		panic("sql: Register called twice for driver " + name)
	}
	drivers[name] = driver
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the ScannerInto interface so
// it can be used as a scan destination:
//
//	var s NullString
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.String
//	} else {
//	   // NULL value
//	}
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}
// ScanInto implements the ScannerInto interface.
func (ns *NullString) ScanInto(value interface{}) error {
if value == nil {
ns.String, ns.Valid = "", false
return nil
}
ns.Valid = true
return convertAssign(&ns.String, value)
}
// SubsetValue implements the driver SubsetValuer interface.
func (ns NullString) SubsetValue() (interface{}, error) {
if !ns.Valid {
return nil, nil
}
return ns.String, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// ScanInto implements the ScannerInto interface.
func (n *NullInt64) ScanInto(value interface{}) error {
	if value == nil {
		// NULL in the database: clear both fields.
		n.Int64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}

// SubsetValue implements the driver SubsetValuer interface.
func (n NullInt64) SubsetValue() (interface{}, error) {
	if n.Valid {
		return n.Int64, nil
	}
	return nil, nil // NULL
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// ScanInto implements the ScannerInto interface.
func (n *NullFloat64) ScanInto(value interface{}) error {
	if value == nil {
		// NULL in the database: clear both fields.
		n.Float64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Float64, value)
}

// SubsetValue implements the driver SubsetValuer interface.
func (n NullFloat64) SubsetValue() (interface{}, error) {
	if n.Valid {
		return n.Float64, nil
	}
	return nil, nil // NULL
}
// NullBool represents a bool that may be null.
// NullBool implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// ScanInto implements the ScannerInto interface.
func (n *NullBool) ScanInto(value interface{}) error {
	if value == nil {
		// NULL in the database: clear both fields.
		n.Bool = false
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Bool, value)
}

// SubsetValue implements the driver SubsetValuer interface.
func (n NullBool) SubsetValue() (interface{}, error) {
	if n.Valid {
		return n.Bool, nil
	}
	return nil, nil // NULL
}
// ScannerInto is an interface used by Scan.
// Destination values implementing it take over conversion of the raw
// driver value (see Rows.Scan / convertAssign).
type ScannerInto interface {
// ScanInto assigns a value from a database driver.
//
// The value will be of one of the following restricted
// set of types:
//
//    int64
//    float64
//    bool
//    []byte
//    nil - for NULL values
//
// An error should be returned if the value can not be stored
// without loss of information.
ScanInto(value interface{}) error
}
// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
// Callers should compare against this sentinel to distinguish
// "no matching row" from real query errors.
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
type DB struct {
driver driver.Driver // the registered driver that opens connections
dsn    string        // driver-specific data source name
mu       sync.Mutex // protects freeConn and closed
freeConn []driver.Conn // idle connections available for reuse
closed   bool          // set by Close; no new conns once true
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB.
//
// Open does not dial the database; connections are created lazily
// by conn().
func Open(driverName, dataSourceName string) (*DB, error) {
driver, ok := drivers[driverName]
if !ok {
return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
}
return &DB{driver: driver, dsn: dataSourceName}, nil
}
// Close closes the database, releasing any open resources.
// It closes every idle connection and marks the DB closed so conn()
// refuses to hand out new connections. Only the last close error (if
// any) is returned.
func (db *DB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
var err error
for _, c := range db.freeConn {
err1 := c.Close()
if err1 != nil {
err = err1 // remember the most recent failure; keep closing the rest
}
}
db.freeConn = nil
db.closed = true
return err
}
// maxIdleConns returns the maximum number of idle connections kept in
// the free pool; connections returned beyond this are closed by putConn.
func (db *DB) maxIdleConns() int {
const defaultMaxIdleConns = 2
// TODO(bradfitz): ask driver, if supported, for its default preference
// TODO(bradfitz): let users override?
return defaultMaxIdleConns
}
// conn returns a newly-opened or cached driver.Conn.
// It pops the most recently returned idle connection if one exists,
// otherwise dials a new one via the driver (outside the lock).
func (db *DB) conn() (driver.Conn, error) {
db.mu.Lock()
if db.closed {
db.mu.Unlock()
return nil, errors.New("sql: database is closed")
}
if n := len(db.freeConn); n > 0 {
// Reuse the last-returned (warmest) idle connection.
conn := db.freeConn[n-1]
db.freeConn = db.freeConn[:n-1]
db.mu.Unlock()
return conn, nil
}
db.mu.Unlock()
return db.driver.Open(db.dsn)
}
// connIfFree removes wanted from the idle pool and returns it if it is
// currently free; (nil, false) if it is in use or unknown.
// The swap-with-last removal is O(1) but does not preserve pool order.
func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
db.mu.Lock()
defer db.mu.Unlock()
for n, conn := range db.freeConn {
if conn == wanted {
db.freeConn[n] = db.freeConn[len(db.freeConn)-1]
db.freeConn = db.freeConn[:len(db.freeConn)-1]
return wanted, true
}
}
return nil, false
}
// putConn returns a connection to the idle pool, or closes it when the
// DB is closed or the pool is already at maxIdleConns.
func (db *DB) putConn(c driver.Conn) {
db.mu.Lock()
defer db.mu.Unlock()
if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
db.freeConn = append(db.freeConn, c)
return
}
db.closeConn(c) // TODO(bradfitz): release lock before calling this?
}
// closeConn closes a driver connection that will not be pooled.
func (db *DB) closeConn(c driver.Conn) {
// TODO: check to see if we need this Conn for any prepared statements
// that are active.
c.Close()
}
// Prepare creates a prepared statement for later execution.
// The statement is prepared on one connection and remembered in the
// Stmt's css list; Stmt.connStmt re-prepares on other connections as
// needed when that one is busy.
func (db *DB) Prepare(query string) (*Stmt, error) {
// TODO: check if db.driver supports an optional
// driver.Preparer interface and call that instead, if so,
// otherwise we make a prepared statement that's bound
// to a connection, and to execute this prepared statement
// we either need to use this connection (if it's free), else
// get a new connection + re-prepare + execute on that one.
ci, err := db.conn()
if err != nil {
return nil, err
}
defer db.putConn(ci)
si, err := ci.Prepare(query)
if err != nil {
return nil, err
}
stmt := &Stmt{
db: db,
query: query,
css: []connStmt{{ci, si}},
}
return stmt, nil
}
// Exec executes a query without returning any rows.
// Fast path: if the driver connection implements driver.Execer, the
// query is executed directly; a driver.ErrSkip result falls through to
// the generic prepare/execute/close slow path.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
sargs, err := subsetTypeArgs(args)
if err != nil {
return nil, err
}
ci, err := db.conn()
if err != nil {
return nil, err
}
defer db.putConn(ci)
if execer, ok := ci.(driver.Execer); ok {
resi, err := execer.Exec(query, sargs)
if err != driver.ErrSkip {
if err != nil {
return nil, err
}
return result{resi}, nil
}
// driver.ErrSkip: fall through to the slow path below.
}
sti, err := ci.Prepare(query)
if err != nil {
return nil, err
}
defer sti.Close()
resi, err := sti.Exec(sargs)
if err != nil {
return nil, err
}
return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
// The one-shot Stmt it prepares is handed to the Rows (closeStmt) so
// it is closed when the Rows are closed.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
stmt, err := db.Prepare(query)
if err != nil {
return nil, err
}
rows, err := stmt.Query(args...)
if err != nil {
stmt.Close()
return nil, err
}
rows.closeStmt = stmt
return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	r, queryErr := db.Query(query, args...)
	return &Row{rows: r, err: queryErr}
}
// Begin starts a transaction. The isolation level is dependent on
// the driver.
// The connection is owned exclusively by the Tx until Commit/Rollback
// returns it via putConn.
func (db *DB) Begin() (*Tx, error) {
ci, err := db.conn()
if err != nil {
return nil, err
}
txi, err := ci.Begin()
if err != nil {
db.putConn(ci) // Begin failed: return the connection to the pool
return nil, fmt.Errorf("sql: failed to Begin transaction: %v", err)
}
return &Tx{
db: db,
ci: ci,
txi: txi,
}, nil
}
// Driver returns the database's underlying driver.
func (db *DB) Driver() driver.Driver {
return db.driver
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
db *DB
// ci is owned exclusively until Commit or Rollback, at which point
// it's returned with putConn.
ci  driver.Conn
txi driver.Tx
// cimu is held while somebody is using ci (between grabConn
// and releaseConn)
cimu sync.Mutex
// done transitions from false to true exactly once, on Commit
// or Rollback. once done, all operations fail with
// ErrTxDone.
done bool
}
// ErrTxDone is returned by any Tx operation after Commit or Rollback.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
// close marks the transaction done and returns its connection to the
// pool. Must be called exactly once, from Commit or Rollback.
func (tx *Tx) close() {
if tx.done {
panic("double close") // internal error
}
tx.done = true
tx.db.putConn(tx.ci)
tx.ci = nil
tx.txi = nil
}
// grabConn acquires exclusive use of the transaction's connection.
// On success cimu is held; the caller must call releaseConn.
func (tx *Tx) grabConn() (driver.Conn, error) {
if tx.done {
return nil, ErrTxDone
}
tx.cimu.Lock()
return tx.ci, nil
}
// releaseConn releases the exclusive hold taken by grabConn.
func (tx *Tx) releaseConn() {
tx.cimu.Unlock()
}
// Commit commits the transaction.
// The connection is returned to the pool (via close) whether or not
// the driver's Commit succeeds.
func (tx *Tx) Commit() error {
if tx.done {
return ErrTxDone
}
defer tx.close()
return tx.txi.Commit()
}
// Rollback aborts the transaction.
// The connection is returned to the pool (via close) whether or not
// the driver's Rollback succeeds.
func (tx *Tx) Rollback() error {
if tx.done {
return ErrTxDone
}
defer tx.close()
return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
//
// Unlike DB.Prepare, the resulting Stmt is pinned to the transaction's
// connection (tx/txsi set, css unused).
func (tx *Tx) Prepare(query string) (*Stmt, error) {
// TODO(bradfitz): We could be more efficient here and either
// provide a method to take an existing Stmt (created on
// perhaps a different Conn), and re-create it on this Conn if
// necessary. Or, better: keep a map in DB of query string to
// Stmts, and have Stmt.Execute do the right thing and
// re-prepare if the Conn in use doesn't have that prepared
// statement. But we'll want to avoid caching the statement
// in the case where we only call conn.Prepare implicitly
// (such as in db.Exec or tx.Exec), but the caller package
// can't be holding a reference to the returned statement.
// Perhaps just looking at the reference count (by noting
// Stmt.Close) would be enough. We might also want a finalizer
// on Stmt to drop the reference count.
ci, err := tx.grabConn()
if err != nil {
return nil, err
}
defer tx.releaseConn()
si, err := ci.Prepare(query)
if err != nil {
return nil, err
}
stmt := &Stmt{
db: tx.db,
tx: tx,
txsi: si,
query: query,
}
return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//  updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//  ...
//  tx, err := db.Begin()
//  ...
//  res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
//
// Errors (wrong DB, busy conn, prepare failure) are recorded as the
// returned Stmt's stickyErr and surface on first use.
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
// TODO(bradfitz): optimize this. Currently this re-prepares
// each time.  This is fine for now to illustrate the API but
// we should really cache already-prepared statements
// per-Conn. See also the big comment in Tx.Prepare.
if tx.db != stmt.db {
return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
}
ci, err := tx.grabConn()
if err != nil {
return &Stmt{stickyErr: err}
}
defer tx.releaseConn()
si, err := ci.Prepare(stmt.query)
return &Stmt{
db: tx.db,
tx: tx,
txsi: si,
query: stmt.query,
stickyErr: err,
}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
//
// Fix: if the connection implements driver.Execer but returns
// driver.ErrSkip, fall back to the generic prepare/execute slow path
// instead of returning ErrSkip to the caller (mirrors DB.Exec).
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()
	if execer, ok := ci.(driver.Execer); ok {
		resi, err := execer.Exec(query, args)
		if err == nil {
			return result{resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
		// err == driver.ErrSkip: driver declined the fast path;
		// fall through to prepare/execute below.
	}
	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()
	sargs, err := subsetTypeArgs(args)
	if err != nil {
		return nil, err
	}
	resi, err := sti.Exec(sargs)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
// The one-shot Stmt is attached to the Rows (closeStmt) so it is
// closed along with them.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
if tx.done {
return nil, ErrTxDone
}
stmt, err := tx.Prepare(query)
if err != nil {
return nil, err
}
rows, err := stmt.Query(args...)
if err == nil {
rows.closeStmt = stmt
}
return rows, err
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	r, queryErr := tx.Query(query, args...)
	return &Row{rows: r, err: queryErr}
}
// connStmt is a prepared statement on a particular connection.
// Stmt.css caches one of these per connection the query has been
// prepared on.
type connStmt struct {
ci driver.Conn
si driver.Stmt
}
// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
// Immutable:
db        *DB    // where we came from
query     string // that created the Stmt
stickyErr error  // if non-nil, this error is returned for all operations
// If in a transaction, else both nil:
tx   *Tx
txsi driver.Stmt
mu     sync.Mutex // protects the rest of the fields
closed bool
// css is a list of underlying driver statement interfaces
// that are valid on particular connections.  This is only
// used if tx == nil and one is found that has idle
// connections.  If tx != nil, txsi is always used.
css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
//
// Argument conversion: each arg may first convert itself via
// driver.SubsetValuer; then either the driver's ColumnConverter or the
// DefaultParameterConverter narrows it to a driver subset type.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
_, releaseConn, si, err := s.connStmt()
if err != nil {
return nil, err
}
defer releaseConn()
// -1 means the driver doesn't know how to count the number of
// placeholders, so we won't sanity check input here and instead let the
// driver deal with errors.
if want := si.NumInput(); want != -1 && len(args) != want {
return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
}
// Convert args to subset types.
if cc, ok := si.(driver.ColumnConverter); ok {
for n, arg := range args {
// First, see if the value itself knows how to convert
// itself to a driver type.  For example, a NullString
// struct changing into a string or nil.
if svi, ok := arg.(driver.SubsetValuer); ok {
sv, err := svi.SubsetValue()
if err != nil {
return nil, fmt.Errorf("sql: argument index %d from SubsetValue: %v", n, err)
}
if !driver.IsParameterSubsetType(sv) {
return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from SubsetValue", n, sv)
}
arg = sv
}
// Second, ask the column to sanity check itself. For
// example, drivers might use this to make sure that
// an int64 values being inserted into a 16-bit
// integer field is in range (before getting
// truncated), or that a nil can't go into a NOT NULL
// column before going across the network to get the
// same error.
args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
if err != nil {
return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
}
if !driver.IsParameterSubsetType(args[n]) {
return nil, fmt.Errorf("sql: driver ColumnConverter error converted %T to unsupported type %T",
arg, args[n])
}
}
} else {
for n, arg := range args {
args[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
if err != nil {
return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
}
}
}
resi, err := si.Exec(args)
if err != nil {
return nil, err
}
return result{resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
//
// Fix: the cached-statement scan previously probed cs.ci — the zero
// value of cs, i.e. always nil — instead of the candidate v.ci, so a
// cached prepared statement was never found and every call re-prepared
// on a fresh connection. Probe v.ci instead.
func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}
	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		releaseConn = func() { s.tx.releaseConn() }
		return ci, releaseConn, s.txsi, nil
	}
	var cs connStmt
	match := false
	for _, v := range s.css {
		// TODO(bradfitz): lazily clean up entries in this
		// list with dead conns while enumerating
		if _, match = s.db.connIfFree(v.ci); match {
			cs = v
			break
		}
	}
	s.mu.Unlock()
	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		ci, err := s.db.conn()
		if err != nil {
			return nil, nil, nil, err
		}
		si, err := ci.Prepare(s.query)
		if err != nil {
			return nil, nil, nil, err
		}
		s.mu.Lock()
		cs = connStmt{ci, si}
		s.css = append(s.css, cs)
		s.mu.Unlock()
	}
	conn := cs.ci
	releaseConn = func() { s.db.putConn(conn) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
// On success, ownership of the connection passes to the returned Rows,
// which must be closed to release it (via releaseConn).
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
ci, releaseConn, si, err := s.connStmt()
if err != nil {
return nil, err
}
// -1 means the driver doesn't know how to count the number of
// placeholders, so we won't sanity check input here and instead let the
// driver deal with errors.
if want := si.NumInput(); want != -1 && len(args) != want {
return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", si.NumInput(), len(args))
}
sargs, err := subsetTypeArgs(args)
if err != nil {
return nil, err
}
rowsi, err := si.Query(sargs)
if err != nil {
s.db.putConn(ci)
return nil, err
}
// Note: ownership of ci passes to the *Rows, to be freed
// with releaseConn.
rows := &Rows{
db: s.db,
ci: ci,
releaseConn: releaseConn,
rowsi: rowsi,
}
return rows, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//  var name string
//  err := nameByUseridStmt.QueryRow(id).Scan(&s)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
rows, err := s.Query(args...)
if err != nil {
return &Row{err: err}
}
return &Row{rows: rows}
}
// Close closes the statement.
// It closes each cached driver statement whose connection is currently
// idle; statements on busy connections are left open (see TODO).
func (s *Stmt) Close() error {
if s.stickyErr != nil {
return s.stickyErr
}
s.mu.Lock()
defer s.mu.Unlock()
if s.closed {
return nil
}
s.closed = true
if s.tx != nil {
s.txsi.Close()
} else {
for _, v := range s.css {
if ci, match := s.db.connIfFree(v.ci); match {
v.si.Close()
s.db.putConn(ci)
} else {
// TODO(bradfitz): care that we can't close
// this statement because the statement's
// connection is in use?
}
}
}
return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//     rows, err := db.Query("SELECT ...")
//     ...
//     for rows.Next() {
//         var id int
//         var name string
//         err = rows.Scan(&id, &name)
//         ...
//     }
//     err = rows.Err() // get any error encountered during iteration
//     ...
type Rows struct {
db          *DB
ci          driver.Conn // owned; must call putconn when closed to release
releaseConn func()
rowsi       driver.Rows
closed   bool
lastcols []interface{} // scratch row buffer reused by Next; read by Scan
lasterr  error         // sticky; io.EOF marks normal end of rows
closeStmt *Stmt // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
if rs.closed {
return false
}
if rs.lasterr != nil {
return false
}
if rs.lastcols == nil {
// Lazily allocate the row buffer, sized to the column count.
rs.lastcols = make([]interface{}, len(rs.rowsi.Columns()))
}
rs.lasterr = rs.rowsi.Next(rs.lastcols)
if rs.lasterr == io.EOF {
// End of result set: release resources eagerly.
rs.Close()
}
return rs.lasterr == nil
}
// Err returns the error, if any, that was encountered during iteration.
// io.EOF is the normal end-of-rows marker and is reported as nil.
func (rs *Rows) Err() error {
	if rs.lasterr != nil && rs.lasterr != io.EOF {
		return rs.lasterr
	}
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
if rs.closed {
return nil, errors.New("sql: Rows are closed")
}
if rs.rowsi == nil {
return nil, errors.New("sql: no Rows available")
}
return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
if rs.closed {
return errors.New("sql: Rows closed")
}
if rs.lasterr != nil {
return rs.lasterr
}
if rs.lastcols == nil {
return errors.New("sql: Scan called without calling Next")
}
if len(dest) != len(rs.lastcols) {
return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
}
// First pass: convert each driver value into its destination.
for i, sv := range rs.lastcols {
err := convertAssign(dest[i], sv)
if err != nil {
return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
}
}
// Second pass: defensively clone any *[]byte destinations, since the
// driver may reuse or invalidate its buffers on the next Next/Close.
for _, dp := range dest {
b, ok := dp.(*[]byte)
if !ok {
continue
}
if *b == nil {
// If the []byte is now nil (for a NULL value),
// don't fall through to below which would
// turn it into a non-nil 0-length byte slice
continue
}
if _, ok = dp.(*RawBytes); ok {
continue
}
clone := make([]byte, len(*b))
copy(clone, *b)
*b = clone
}
return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
// It closes the driver rows, returns the connection to the pool, and
// closes the one-shot statement (if this came from DB.Query/Tx.Query).
func (rs *Rows) Close() error {
if rs.closed {
return nil
}
rs.closed = true
err := rs.rowsi.Close()
rs.releaseConn()
if rs.closeStmt != nil {
rs.closeStmt.Close()
}
return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
// One of these two will be non-nil:
err  error // deferred error for easy chaining
rows *Rows
}
// Scan copies the columns from the matched row into the values
// pointed at by dest.  If more than one row matches the query,
// Scan uses the first row and discards the rest.  If no row matches
// the query, Scan returns ErrNoRows.
// *RawBytes destinations are rejected because the Rows are closed
// before Scan returns, invalidating driver-owned memory.
func (r *Row) Scan(dest ...interface{}) error {
if r.err != nil {
return r.err
}
// TODO(bradfitz): for now we need to defensively clone all
// []byte that the driver returned (not permitting
// *RawBytes in Rows.Scan), since we're about to close
// the Rows in our defer, when we return from this function.
// the contract with the driver.Next(...) interface is that it
// can return slices into read-only temporary memory that's
// only valid until the next Scan/Close.  But the TODO is that
// for a lot of drivers, this copy will be unnecessary.  We
// should provide an optional interface for drivers to
// implement to say, "don't worry, the []bytes that I return
// from Next will not be modified again." (for instance, if
// they were obtained from the network anyway) But for now we
// don't care.
for _, dp := range dest {
if _, ok := dp.(*RawBytes); ok {
return errors.New("sql: RawBytes isn't allowed on Row.Scan")
}
}
defer r.rows.Close()
if !r.rows.Next() {
return ErrNoRows
}
err := r.rows.Scan(dest...)
if err != nil {
return err
}
return nil
}
// A Result summarizes an executed SQL command.
type Result interface {
LastInsertId() (int64, error)
RowsAffected() (int64, error)
}
// result adapts a driver.Result to the public Result interface by
// embedding (the method sets are identical).
type result struct {
driver.Result
}
database/sql: support ErrSkip in Tx.Exec
If the database driver supports the Execer interface but returns
ErrSkip, calling Exec on a transaction returned that error to the
caller instead of falling back to the slow (prepare/execute) path.
R=golang-dev, bradfitz
CC=golang-dev
http://codereview.appspot.com/5654044
Committer: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"sync"
)
// NOTE(review): everything from here down duplicates the definitions
// above — the file appears to concatenate two revisions of package sql.
// drivers is the global registry of named database drivers.
var drivers = make(map[string]driver.Driver)
// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
if driver == nil {
panic("sql: Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("sql: Register called twice for driver " + name)
}
drivers[name] = driver
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
// Rows.Scan skips the defensive copy for *RawBytes destinations.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the ScannerInto interface so
// it can be used as a scan destination:
//
//  var s NullString
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//  ...
//  if s.Valid {
//     // use s.String
//  } else {
//     // NULL value
//  }
//
type NullString struct {
String string
Valid  bool // Valid is true if String is not NULL
}
// ScanInto implements the ScannerInto interface.
// A nil value (SQL NULL) clears both fields.
func (ns *NullString) ScanInto(value interface{}) error {
if value == nil {
ns.String, ns.Valid = "", false
return nil
}
ns.Valid = true
return convertAssign(&ns.String, value)
}
// SubsetValue implements the driver SubsetValuer interface.
// NULL is represented as a nil interface value.
func (ns NullString) SubsetValue() (interface{}, error) {
if !ns.Valid {
return nil, nil
}
return ns.String, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
Int64 int64
Valid bool // Valid is true if Int64 is not NULL
}
// ScanInto implements the ScannerInto interface.
// A nil value (SQL NULL) clears both fields.
func (n *NullInt64) ScanInto(value interface{}) error {
if value == nil {
n.Int64, n.Valid = 0, false
return nil
}
n.Valid = true
return convertAssign(&n.Int64, value)
}
// SubsetValue implements the driver SubsetValuer interface.
// NULL is represented as a nil interface value.
func (n NullInt64) SubsetValue() (interface{}, error) {
if !n.Valid {
return nil, nil
}
return n.Int64, nil
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
Float64 float64
Valid   bool // Valid is true if Float64 is not NULL
}
// ScanInto implements the ScannerInto interface.
// A nil value (SQL NULL) clears both fields.
func (n *NullFloat64) ScanInto(value interface{}) error {
if value == nil {
n.Float64, n.Valid = 0, false
return nil
}
n.Valid = true
return convertAssign(&n.Float64, value)
}
// SubsetValue implements the driver SubsetValuer interface.
// NULL is represented as a nil interface value.
func (n NullFloat64) SubsetValue() (interface{}, error) {
if !n.Valid {
return nil, nil
}
return n.Float64, nil
}
// NullBool represents a bool that may be null.
// NullBool implements the ScannerInto interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
Bool  bool
Valid bool // Valid is true if Bool is not NULL
}
// ScanInto implements the ScannerInto interface.
// A nil value (SQL NULL) clears both fields.
func (n *NullBool) ScanInto(value interface{}) error {
if value == nil {
n.Bool, n.Valid = false, false
return nil
}
n.Valid = true
return convertAssign(&n.Bool, value)
}
// SubsetValue implements the driver SubsetValuer interface.
// NULL is represented as a nil interface value.
func (n NullBool) SubsetValue() (interface{}, error) {
if !n.Valid {
return nil, nil
}
return n.Bool, nil
}
// ScannerInto is an interface used by Scan.
// Destination values implementing it take over conversion of the raw
// driver value.
type ScannerInto interface {
// ScanInto assigns a value from a database driver.
//
// The value will be of one of the following restricted
// set of types:
//
//    int64
//    float64
//    bool
//    []byte
//    nil - for NULL values
//
// An error should be returned if the value can not be stored
// without loss of information.
ScanInto(value interface{}) error
}
// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
// Compare against this sentinel to detect "no matching row".
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
type DB struct {
driver driver.Driver // the registered driver that opens connections
dsn    string        // driver-specific data source name
mu       sync.Mutex // protects freeConn and closed
freeConn []driver.Conn // idle connections available for reuse
closed   bool          // set by Close; no new conns once true
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB.
//
// Open does not dial the database; connections are created lazily.
func Open(driverName, dataSourceName string) (*DB, error) {
driver, ok := drivers[driverName]
if !ok {
return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
}
return &DB{driver: driver, dsn: dataSourceName}, nil
}
// Close closes the database, releasing any open resources.
// Only the last error encountered while closing idle connections is
// returned.
func (db *DB) Close() error {
db.mu.Lock()
defer db.mu.Unlock()
var err error
for _, c := range db.freeConn {
err1 := c.Close()
if err1 != nil {
err = err1
}
}
db.freeConn = nil
db.closed = true
return err
}
// maxIdleConns returns the cap on pooled idle connections (see putConn).
func (db *DB) maxIdleConns() int {
const defaultMaxIdleConns = 2
// TODO(bradfitz): ask driver, if supported, for its default preference
// TODO(bradfitz): let users override?
return defaultMaxIdleConns
}
// conn returns a newly-opened or cached driver.Conn.
// Pops the most recently returned idle connection, else dials anew.
func (db *DB) conn() (driver.Conn, error) {
db.mu.Lock()
if db.closed {
db.mu.Unlock()
return nil, errors.New("sql: database is closed")
}
if n := len(db.freeConn); n > 0 {
conn := db.freeConn[n-1]
db.freeConn = db.freeConn[:n-1]
db.mu.Unlock()
return conn, nil
}
db.mu.Unlock()
return db.driver.Open(db.dsn)
}
// connIfFree removes wanted from the idle pool and returns it if free;
// (nil, false) if it is in use or unknown.
func (db *DB) connIfFree(wanted driver.Conn) (conn driver.Conn, ok bool) {
db.mu.Lock()
defer db.mu.Unlock()
for n, conn := range db.freeConn {
if conn == wanted {
// O(1) removal: swap with the last element.
db.freeConn[n] = db.freeConn[len(db.freeConn)-1]
db.freeConn = db.freeConn[:len(db.freeConn)-1]
return wanted, true
}
}
return nil, false
}
// putConn returns a connection to the idle pool, or closes it when the
// DB is closed or the pool is full.
func (db *DB) putConn(c driver.Conn) {
db.mu.Lock()
defer db.mu.Unlock()
if n := len(db.freeConn); !db.closed && n < db.maxIdleConns() {
db.freeConn = append(db.freeConn, c)
return
}
db.closeConn(c) // TODO(bradfitz): release lock before calling this?
}
// closeConn closes a driver connection that will not be pooled.
func (db *DB) closeConn(c driver.Conn) {
// TODO: check to see if we need this Conn for any prepared statements
// that are active.
c.Close()
}
// Prepare creates a prepared statement for later execution.
// The statement is prepared on one connection and cached in css;
// Stmt.connStmt re-prepares elsewhere when that connection is busy.
func (db *DB) Prepare(query string) (*Stmt, error) {
// TODO: check if db.driver supports an optional
// driver.Preparer interface and call that instead, if so,
// otherwise we make a prepared statement that's bound
// to a connection, and to execute this prepared statement
// we either need to use this connection (if it's free), else
// get a new connection + re-prepare + execute on that one.
ci, err := db.conn()
if err != nil {
return nil, err
}
defer db.putConn(ci)
si, err := ci.Prepare(query)
if err != nil {
return nil, err
}
stmt := &Stmt{
db: db,
query: query,
css: []connStmt{{ci, si}},
}
return stmt, nil
}
// Exec executes a query without returning any rows.
// Uses the driver.Execer fast path when available; driver.ErrSkip
// falls through to the prepare/execute slow path.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
sargs, err := subsetTypeArgs(args)
if err != nil {
return nil, err
}
ci, err := db.conn()
if err != nil {
return nil, err
}
defer db.putConn(ci)
if execer, ok := ci.(driver.Execer); ok {
resi, err := execer.Exec(query, sargs)
if err != driver.ErrSkip {
if err != nil {
return nil, err
}
return result{resi}, nil
}
}
sti, err := ci.Prepare(query)
if err != nil {
return nil, err
}
defer sti.Close()
resi, err := sti.Exec(sargs)
if err != nil {
return nil, err
}
return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	stmt, err := db.Prepare(query)
	if err != nil {
		return nil, err
	}
	rows, qerr := stmt.Query(args...)
	if qerr != nil {
		stmt.Close()
		return nil, qerr
	}
	// The implicit statement is owned by the Rows and closed with it.
	rows.closeStmt = stmt
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	r, e := db.Query(query, args...)
	return &Row{rows: r, err: e}
}
// Begin starts a transaction. The isolation level is dependent on
// the driver.
func (db *DB) Begin() (*Tx, error) {
	conn, err := db.conn()
	if err != nil {
		return nil, err
	}
	txi, err := conn.Begin()
	if err != nil {
		// The transaction never started; give the conn back.
		db.putConn(conn)
		return nil, fmt.Errorf("sql: failed to Begin transaction: %v", err)
	}
	return &Tx{db: db, ci: conn, txi: txi}, nil
}
// Driver returns the database's underlying driver. This exposes the
// driver.Driver the DB was opened with so callers can reach
// driver-specific functionality.
func (db *DB) Driver() driver.Driver {
	return db.driver
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
	db *DB // the DB this transaction was started from

	// ci is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	ci  driver.Conn
	txi driver.Tx

	// cimu is held while somebody is using ci (between grabConn
	// and releaseConn)
	cimu sync.Mutex

	// done transitions from false to true exactly once, on Commit
	// or Rollback. once done, all operations fail with
	// ErrTxDone.
	done bool
}

// ErrTxDone is returned by any operation performed on a transaction
// that has already been committed or rolled back.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
// close releases the transaction's resources exactly once: it marks
// the Tx done and returns its connection to the pool. It must be
// called only after the driver-level Commit or Rollback has run.
func (tx *Tx) close() {
	if tx.done {
		panic("double close") // internal error
	}
	tx.done = true
	tx.db.putConn(tx.ci)
	tx.ci = nil
	tx.txi = nil
}
// grabConn acquires exclusive use of the transaction's connection,
// blocking until it is available. It fails with ErrTxDone once the
// transaction has ended. Callers must pair it with releaseConn.
func (tx *Tx) grabConn() (driver.Conn, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	tx.cimu.Lock()
	return tx.ci, nil
}

// releaseConn releases the exclusive hold taken by grabConn.
func (tx *Tx) releaseConn() {
	tx.cimu.Unlock()
}
// Commit commits the transaction.
func (tx *Tx) Commit() error {
	if tx.done {
		return ErrTxDone
	}
	// close (via defer) marks the tx done and returns the connection
	// to the pool after the driver-level Commit has run.
	defer tx.close()
	return tx.txi.Commit()
}
// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
	if tx.done {
		return ErrTxDone
	}
	// close (via defer) marks the tx done and returns the connection
	// to the pool after the driver-level Rollback has run.
	defer tx.close()
	return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// TODO(bradfitz): We could be more efficient here and either
	// provide a method to take an existing Stmt (created on
	// perhaps a different Conn), and re-create it on this Conn if
	// necessary. Or, better: keep a map in DB of query string to
	// Stmts, and have Stmt.Execute do the right thing and
	// re-prepare if the Conn in use doesn't have that prepared
	// statement. But we'll want to avoid caching the statement
	// in the case where we only call conn.Prepare implicitly
	// (such as in db.Exec or tx.Exec), but the caller package
	// can't be holding a reference to the returned statement.
	// Perhaps just looking at the reference count (by noting
	// Stmt.Close) would be enough. We might also want a finalizer
	// on Stmt to drop the reference count.
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()

	si, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}

	// tx being set marks the Stmt as transaction-bound: it uses txsi
	// on the transaction's connection instead of the css pool.
	stmt := &Stmt{
		db:    tx.db,
		tx:    tx,
		txsi:  si,
		query: query,
	}
	return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//  updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//  ...
//  tx, err := db.Begin()
//  ...
//  res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// TODO(bradfitz): optimize this. Currently this re-prepares
	// each time. This is fine for now to illustrate the API but
	// we should really cache already-prepared statements
	// per-Conn. See also the big comment in Tx.Prepare.
	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	ci, err := tx.grabConn()
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	defer tx.releaseConn()
	// A Prepare error is not returned here; it is recorded as the
	// statement's stickyErr and surfaces on first use.
	si, err := ci.Prepare(stmt.query)
	return &Stmt{
		db:        tx.db,
		tx:        tx,
		txsi:      si,
		query:     stmt.query,
		stickyErr: err,
	}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	ci, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	defer tx.releaseConn()

	// Convert the arguments to driver subset types up front so that
	// both the fast Execer path and the prepare+exec fallback see the
	// same validated values. Previously the raw args were handed to
	// driver.Execer, bypassing subsetTypeArgs entirely, so drivers on
	// the fast path received unconverted values.
	sargs, err := subsetTypeArgs(args)
	if err != nil {
		return nil, err
	}

	if execer, ok := ci.(driver.Execer); ok {
		resi, err := execer.Exec(query, sargs)
		if err == nil {
			return result{resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
		// driver.ErrSkip: fall through to the generic path below.
	}

	sti, err := ci.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer sti.Close()

	resi, err := sti.Exec(sargs)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// Query executes a query that returns rows, typically a SELECT.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	stmt, err := tx.Prepare(query)
	if err != nil {
		return nil, err
	}
	rows, err := stmt.Query(args...)
	if err != nil {
		return rows, err
	}
	// The implicit statement is owned by the Rows and closed with it.
	rows.closeStmt = stmt
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always return a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	r, e := tx.Query(query, args...)
	return &Row{rows: r, err: e}
}
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	ci driver.Conn
	si driver.Stmt
}

// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	// If in a transaction, else both nil:
	tx   *Tx
	txsi driver.Stmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if tx == nil and one is found that has idle
	// connections. If tx != nil, txsi is always used.
	css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
	_, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	defer releaseConn()

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want := si.NumInput(); want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
	}

	// Convert args to subset types.
	if cc, ok := si.(driver.ColumnConverter); ok {
		// The driver provides per-column converters; run each arg
		// through a two-step conversion.
		for n, arg := range args {
			// First, see if the value itself knows how to convert
			// itself to a driver type. For example, a NullString
			// struct changing into a string or nil.
			if svi, ok := arg.(driver.SubsetValuer); ok {
				sv, err := svi.SubsetValue()
				if err != nil {
					return nil, fmt.Errorf("sql: argument index %d from SubsetValue: %v", n, err)
				}
				if !driver.IsParameterSubsetType(sv) {
					return nil, fmt.Errorf("sql: argument index %d: non-subset type %T returned from SubsetValue", n, sv)
				}
				arg = sv
			}

			// Second, ask the column to sanity check itself. For
			// example, drivers might use this to make sure that
			// an int64 values being inserted into a 16-bit
			// integer field is in range (before getting
			// truncated), or that a nil can't go into a NOT NULL
			// column before going across the network to get the
			// same error.
			args[n], err = cc.ColumnConverter(n).ConvertValue(arg)
			if err != nil {
				return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
			}
			if !driver.IsParameterSubsetType(args[n]) {
				return nil, fmt.Errorf("sql: driver ColumnConverter error converted %T to unsupported type %T",
					arg, args[n])
			}
		}
	} else {
		// No ColumnConverter: apply the default conversion to each arg.
		for n, arg := range args {
			args[n], err = driver.DefaultParameterConverter.ConvertValue(arg)
			if err != nil {
				return nil, fmt.Errorf("sql: converting Exec argument #%d's type: %v", n, err)
			}
		}
	}

	resi, err := si.Exec(args)
	if err != nil {
		return nil, err
	}
	return result{resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt() (ci driver.Conn, releaseConn func(), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		releaseConn = func() { s.tx.releaseConn() }
		return ci, releaseConn, s.txsi, nil
	}

	var cs connStmt
	match := false
	for _, v := range s.css {
		// TODO(bradfitz): lazily clean up entries in this
		// list with dead conns while enumerating
		//
		// BUG FIX: this previously called s.db.connIfFree(cs.ci);
		// cs is still the zero connStmt at that point, so the cached
		// statements were never matched. The candidate being tested
		// is v, so check v.ci.
		if _, match = s.db.connIfFree(v.ci); match {
			cs = v
			break
		}
	}
	s.mu.Unlock()

	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		ci, err := s.db.conn()
		if err != nil {
			return nil, nil, nil, err
		}
		si, err := ci.Prepare(s.query)
		if err != nil {
			return nil, nil, nil, err
		}
		s.mu.Lock()
		cs = connStmt{ci, si}
		s.css = append(s.css, cs)
		s.mu.Unlock()
	}

	conn := cs.ci
	releaseConn = func() { s.db.putConn(conn) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
	ci, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want := si.NumInput(); want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", si.NumInput(), len(args))
	}
	sargs, err := subsetTypeArgs(args)
	if err != nil {
		return nil, err
	}
	rowsi, err := si.Query(sargs)
	if err != nil {
		// The query never started; return the connection directly.
		s.db.putConn(ci)
		return nil, err
	}
	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn.
	rows := &Rows{
		db:          s.db,
		ci:          ci,
		releaseConn: releaseConn,
		rowsi:       rowsi,
	}
	return rows, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//  var name string
//  err := nameByUseridStmt.QueryRow(id).Scan(&s)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
	rows, err := s.Query(args...)
	if err == nil {
		return &Row{rows: rows}
	}
	return &Row{err: err}
}
// Close closes the statement.
func (s *Stmt) Close() error {
	if s.stickyErr != nil {
		return s.stickyErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return nil
	}
	s.closed = true

	if s.tx != nil {
		// Transaction-bound: there is only the single tx statement.
		s.txsi.Close()
	} else {
		// Close the per-connection statements, but only on
		// connections that are currently idle.
		for _, v := range s.css {
			if ci, match := s.db.connIfFree(v.ci); match {
				v.si.Close()
				s.db.putConn(ci)
			} else {
				// TODO(bradfitz): care that we can't close
				// this statement because the statement's
				// connection is in use?
			}
		}
	}
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//     rows, err := db.Query("SELECT ...")
//     ...
//     for rows.Next() {
//         var id int
//         var name string
//         err = rows.Scan(&id, &name)
//         ...
//     }
//     err = rows.Err() // get any error encountered during iteration
//     ...
type Rows struct {
	db          *DB
	ci          driver.Conn // owned; must call putconn when closed to release
	releaseConn func()
	rowsi       driver.Rows

	closed    bool
	lastcols  []interface{} // scratch destinations reused by each Next call
	lasterr   error         // sticky; io.EOF marks normal end of iteration
	closeStmt *Stmt         // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
	if rs.closed || rs.lasterr != nil {
		return false
	}
	if rs.lastcols == nil {
		// Allocate the destination slice once, sized to the column count.
		rs.lastcols = make([]interface{}, len(rs.rowsi.Columns()))
	}
	if rs.lasterr = rs.rowsi.Next(rs.lastcols); rs.lasterr != nil {
		if rs.lasterr == io.EOF {
			// Normal end of the result set: release resources eagerly.
			rs.Close()
		}
		return false
	}
	return true
}
// Err returns the error, if any, that was encountered during iteration.
func (rs *Rows) Err() error {
	if rs.lasterr != io.EOF {
		return rs.lasterr
	}
	// io.EOF is the normal end of iteration, not an error.
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
	switch {
	case rs.closed:
		return nil, errors.New("sql: Rows are closed")
	case rs.rowsi == nil:
		return nil, errors.New("sql: no Rows available")
	}
	return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
	if rs.closed {
		return errors.New("sql: Rows closed")
	}
	if rs.lasterr != nil {
		return rs.lasterr
	}
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}
	for i, sv := range rs.lastcols {
		err := convertAssign(dest[i], sv)
		if err != nil {
			return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
		}
	}
	// Defensively clone []byte destinations, since the driver may
	// reuse the backing memory on the next Next/Scan/Close.
	for _, dp := range dest {
		b, ok := dp.(*[]byte)
		if !ok {
			continue
		}
		if *b == nil {
			// If the []byte is now nil (for a NULL value),
			// don't fall through to below which would
			// turn it into a non-nil 0-length byte slice
			continue
		}
		if _, ok = dp.(*RawBytes); ok {
			continue
		}
		clone := make([]byte, len(*b))
		copy(clone, *b)
		*b = clone
	}
	return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	err := rs.rowsi.Close()
	// Give the connection back before closing any statement that was
	// created implicitly for this query.
	rs.releaseConn()
	if stmt := rs.closeStmt; stmt != nil {
		stmt.Close()
	}
	return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}
// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}

	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	for _, target := range dest {
		if _, isRaw := target.(*RawBytes); isRaw {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}

	defer r.rows.Close()
	if !r.rows.Next() {
		return ErrNoRows
	}
	return r.rows.Scan(dest...)
}
// A Result summarizes an executed SQL command.
type Result interface {
	LastInsertId() (int64, error)
	RowsAffected() (int64, error)
}

// result is the trivial Result implementation: it delegates both
// methods to the embedded driver-level Result.
type result struct {
	driver.Result
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
//
// The sql package must be used in conjunction with a database driver.
// See http://golang.org/s/sqldrivers for a list of drivers.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"runtime"
"sync"
)
// drivers is the global name -> driver registry populated by Register.
var drivers = make(map[string]driver.Driver)

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
	if driver == nil {
		panic("sql: Register driver is nil")
	}
	if _, exists := drivers[name]; exists {
		panic("sql: Register called twice for driver " + name)
	}
	drivers[name] = driver
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
//
// Note that Row.Scan rejects *RawBytes destinations; it is only
// usable with Rows.Scan.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the Scanner interface so
// it can be used as a scan destination:
//
//  var s NullString
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//  ...
//  if s.Valid {
//     // use s.String
//  } else {
//     // NULL value
//  }
//
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullString) Scan(value interface{}) error {
	if value == nil {
		ns.String = ""
		ns.Valid = false
		return nil
	}
	ns.Valid = true
	return convertAssign(&ns.String, value)
}

// Value implements the driver Valuer interface.
func (ns NullString) Value() (driver.Value, error) {
	if ns.Valid {
		return ns.String, nil
	}
	return nil, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
	if value == nil {
		n.Int64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}

// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Int64, nil
	}
	return nil, nil
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullFloat64) Scan(value interface{}) error {
	if value == nil {
		n.Float64 = 0
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Float64, value)
}

// Value implements the driver Valuer interface.
func (n NullFloat64) Value() (driver.Value, error) {
	if n.Valid {
		return n.Float64, nil
	}
	return nil, nil
}
// NullBool represents a bool that may be null.
// NullBool implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// Scan implements the Scanner interface.
func (n *NullBool) Scan(value interface{}) error {
	if value == nil {
		n.Bool = false
		n.Valid = false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Bool, value)
}

// Value implements the driver Valuer interface.
func (n NullBool) Value() (driver.Value, error) {
	if n.Valid {
		return n.Bool, nil
	}
	return nil, nil
}
// Scanner is an interface used by Scan.
type Scanner interface {
	// Scan assigns a value from a database driver.
	//
	// The src value will be of one of the following restricted
	// set of types:
	//
	//    int64
	//    float64
	//    bool
	//    []byte
	//    string
	//    time.Time
	//    nil - for NULL values
	//
	// An error should be returned if the value can not be stored
	// without loss of information.
	Scan(src interface{}) error
}

// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
//
// The sql package creates and frees connections automatically; it
// also maintains a free pool of idle connections. If the database has
// a concept of per-connection state, such state can only be reliably
// observed within a transaction. Once DB.Begin is called, the
// returned Tx is bound to a single connection. Once Commit or
// Rollback is called on the transaction, that transaction's
// connection is returned to DB's idle connection pool. The pool size
// can be controlled with SetMaxIdleConns.
type DB struct {
	driver driver.Driver // driver the DB was opened with
	dsn    string        // data source name, passed to driver.Open

	mu       sync.Mutex             // protects following fields
	freeConn []*driverConn          // idle connections; most recently returned at the end
	closed   bool
	dep      map[finalCloser]depSet // dependency reference counts; see addDep/removeDep
	lastPut  map[*driverConn]string // stacktrace of last conn's put; debug only
	maxIdle  int                    // zero means defaultMaxIdleConns; negative means 0
}
// driverConn wraps a driver.Conn with a mutex, to
// be held during all calls into the Conn. (including any calls onto
// interfaces returned via that Conn, such as calls on Tx, Stmt,
// Result, Rows)
type driverConn struct {
	db *DB

	sync.Mutex  // guards following
	ci          driver.Conn
	closed      bool
	finalClosed bool                 // ci.Close has been called
	openStmt    map[driver.Stmt]bool // statements prepared on ci that are still open

	// guarded by db.mu
	inUse      bool
	onPut      []func() // code (with db.mu held) run when conn is next returned
	dbmuClosed bool     // same as closed, but guarded by db.mu, for connIfFree
}

// removeOpenStmt forgets si, so it is not re-closed when the
// connection is finally closed.
func (dc *driverConn) removeOpenStmt(si driver.Stmt) {
	dc.Lock()
	defer dc.Unlock()
	delete(dc.openStmt, si)
}

// prepareLocked prepares query on the connection and, on success,
// records the resulting statement in openStmt. Callers must hold
// dc's mutex.
func (dc *driverConn) prepareLocked(query string) (driver.Stmt, error) {
	si, err := dc.ci.Prepare(query)
	if err == nil {
		// Track each driverConn's open statements, so we can close them
		// before closing the conn.
		//
		// TODO(bradfitz): let drivers opt out of caring about
		// stmt closes if the conn is about to close anyway? For now
		// do the safe thing, in case stmts need to be closed.
		//
		// TODO(bradfitz): after Go 1.1, closing driver.Stmts
		// should be moved to driverStmt, using unique
		// *driverStmts everywhere (including from
		// *Stmt.connStmt, instead of returning a
		// driver.Stmt), using driverStmt as a pointer
		// everywhere, and making it a finalCloser.
		if dc.openStmt == nil {
			dc.openStmt = make(map[driver.Stmt]bool)
		}
		dc.openStmt[si] = true
	}
	return si, err
}

// the dc.db's Mutex is held.
func (dc *driverConn) closeDBLocked() error {
	dc.Lock()
	if dc.closed {
		dc.Unlock()
		return errors.New("sql: duplicate driverConn close")
	}
	dc.closed = true
	dc.Unlock() // not defer; removeDep finalClose calls may need to lock
	return dc.db.removeDepLocked(dc, dc)()
}

// Close marks the connection closed and drops its self-dependency;
// the underlying driver conn is closed by finalClose once the last
// dependency is removed.
func (dc *driverConn) Close() error {
	dc.Lock()
	if dc.closed {
		dc.Unlock()
		return errors.New("sql: duplicate driverConn close")
	}
	dc.closed = true
	dc.Unlock() // not defer; removeDep finalClose calls may need to lock

	// And now updates that require holding dc.mu.Lock.
	dc.db.mu.Lock()
	dc.dbmuClosed = true
	fn := dc.db.removeDepLocked(dc, dc)
	dc.db.mu.Unlock()
	return fn()
}

// finalClose closes any remaining open statements and then the
// underlying driver connection itself.
func (dc *driverConn) finalClose() error {
	dc.Lock()
	for si := range dc.openStmt {
		si.Close()
	}
	dc.openStmt = nil

	err := dc.ci.Close()
	dc.ci = nil
	dc.finalClosed = true
	dc.Unlock()
	return err
}
// driverStmt associates a driver.Stmt with the
// *driverConn from which it came, so the driverConn's lock can be
// held during calls.
type driverStmt struct {
	sync.Locker // the *driverConn
	si          driver.Stmt
}

// Close closes the statement with the owning connection's lock held.
func (ds *driverStmt) Close() error {
	ds.Lock()
	defer ds.Unlock()
	return ds.si.Close()
}

// depSet is a finalCloser's outstanding dependencies
type depSet map[interface{}]bool // set of true bools

// The finalCloser interface is used by (*DB).addDep and related
// dependency reference counting.
type finalCloser interface {
	// finalClose is called when the reference count of an object
	// goes to zero. (*DB).mu is not held while calling it.
	finalClose() error
}
// addDep notes that x now depends on dep, and x's finalClose won't be
// called until all of x's dependencies are removed with removeDep.
func (db *DB) addDep(x finalCloser, dep interface{}) {
	db.mu.Lock()
	defer db.mu.Unlock()
	db.addDepLocked(x, dep)
}

// addDepLocked is addDep with db.mu already held.
func (db *DB) addDepLocked(x finalCloser, dep interface{}) {
	if db.dep == nil {
		db.dep = make(map[finalCloser]depSet)
	}
	set, ok := db.dep[x]
	if !ok {
		set = make(depSet)
		db.dep[x] = set
	}
	set[dep] = true
}
// removeDep notes that x no longer depends on dep.
// If x still has dependencies, nil is returned.
// If x no longer has any dependencies, its finalClose method will be
// called and its error value will be returned.
func (db *DB) removeDep(x finalCloser, dep interface{}) error {
	db.mu.Lock()
	fn := db.removeDepLocked(x, dep)
	db.mu.Unlock()
	// Run the returned closure without db.mu held; finalClose may
	// need to take other locks.
	return fn()
}

// removeDepLocked is removeDep with db.mu held; it returns the work
// to run after the lock is released.
func (db *DB) removeDepLocked(x finalCloser, dep interface{}) func() error {
	if set, ok := db.dep[x]; ok {
		delete(set, dep)
		if len(set) == 0 {
			// Last dependency gone: x is ready for final close.
			delete(db.dep, x)
			return func() error {
				return x.finalClose()
			}
		}
	}
	return func() error { return nil }
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB. No database drivers are included
// in the Go standard library. See http://golang.org/s/sqldrivers for
// a list of third-party drivers.
//
// Open may just validate its arguments without creating a connection
// to the database. To verify that the data source name is valid, call
// Ping.
func Open(driverName, dataSourceName string) (*DB, error) {
	d, registered := drivers[driverName]
	if !registered {
		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
	}
	return &DB{
		driver:  d,
		dsn:     dataSourceName,
		lastPut: make(map[*driverConn]string),
	}, nil
}
// Ping verifies a connection to the database is still alive,
// establishing a connection if necessary.
func (db *DB) Ping() error {
	// TODO(bradfitz): give drivers an optional hook to implement
	// this in a more efficient or more reliable way, if they
	// have one.
	dc, err := db.conn()
	if err != nil {
		return err
	}
	// Successfully grabbing a connection is the whole check; return
	// it to the pool immediately with no error recorded.
	db.putConn(dc, nil)
	return nil
}
// Close closes the database, releasing any open resources.
func (db *DB) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()
	// Close every idle connection, remembering the last failure.
	var err error
	for _, dc := range db.freeConn {
		if cerr := dc.closeDBLocked(); cerr != nil {
			err = cerr
		}
	}
	db.freeConn = nil
	db.closed = true
	return err
}
// defaultMaxIdleConns is the pool size used when the user has not
// called SetMaxIdleConns.
const defaultMaxIdleConns = 2

// maxIdleConnsLocked returns the effective idle-pool limit.
// Callers must hold db.mu.
func (db *DB) maxIdleConnsLocked() int {
	if db.maxIdle < 0 {
		// SetMaxIdleConns(n <= 0): keep no idle connections.
		return 0
	}
	if db.maxIdle == 0 {
		// TODO(bradfitz): ask driver, if supported, for its default preference
		return defaultMaxIdleConns
	}
	return db.maxIdle
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
//
// If n <= 0, no idle connections are retained.
func (db *DB) SetMaxIdleConns(n int) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if n > 0 {
		db.maxIdle = n
	} else {
		// No idle connections.
		db.maxIdle = -1
	}
	// Trim the free list down to the new limit, closing surplus
	// connections on their own goroutines since Close may block.
	for len(db.freeConn) > 0 && len(db.freeConn) > n {
		nfree := len(db.freeConn)
		dc := db.freeConn[nfree-1]
		db.freeConn[nfree-1] = nil // allow the entry to be collected
		db.freeConn = db.freeConn[:nfree-1]
		go dc.Close()
	}
}
// conn returns a newly-opened or cached *driverConn
func (db *DB) conn() (*driverConn, error) {
	db.mu.Lock()
	if db.closed {
		db.mu.Unlock()
		return nil, errors.New("sql: database is closed")
	}
	// Prefer a cached connection from the free pool.
	if n := len(db.freeConn); n > 0 {
		conn := db.freeConn[n-1]
		db.freeConn = db.freeConn[:n-1]
		conn.inUse = true
		db.mu.Unlock()
		return conn, nil
	}
	db.mu.Unlock()

	// Pool empty: dial a fresh driver connection without holding db.mu.
	ci, err := db.driver.Open(db.dsn)
	if err != nil {
		return nil, err
	}
	dc := &driverConn{
		db: db,
		ci: ci,
	}
	db.mu.Lock()
	db.addDepLocked(dc, dc) // the conn depends on itself until closed
	dc.inUse = true
	db.mu.Unlock()
	return dc, nil
}
var (
	errConnClosed = errors.New("database/sql: internal sentinel error: conn is closed")
	errConnBusy   = errors.New("database/sql: internal sentinel error: conn is busy")
)

// connIfFree returns (wanted, nil) if wanted is still a valid conn and
// isn't in use.
//
// The error is errConnClosed if the requested connection is invalid
// because it's been closed.
//
// The error is errConnBusy if the connection is in use.
func (db *DB) connIfFree(wanted *driverConn) (*driverConn, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if wanted.inUse {
		return nil, errConnBusy
	}
	if wanted.dbmuClosed {
		return nil, errConnClosed
	}
	for i, conn := range db.freeConn {
		if conn != wanted {
			continue
		}
		// Found it: swap-remove from the free list and hand it out.
		db.freeConn[i] = db.freeConn[len(db.freeConn)-1]
		db.freeConn = db.freeConn[:len(db.freeConn)-1]
		wanted.inUse = true
		return wanted, nil
	}
	// TODO(bradfitz): shouldn't get here. After Go 1.1, change this to:
	// panic("connIfFree call requested a non-closed, non-busy, non-free conn")
	// Which passes all the tests, but I'm too paranoid to include this
	// late in Go 1.1.
	// Instead, treat it like a busy connection:
	return nil, errConnBusy
}
// putConnHook is a hook for testing.
var putConnHook func(*DB, *driverConn)

// noteUnusedDriverStatement notes that si is no longer used and should
// be closed whenever possible (when c is next not in use), unless c is
// already closed.
func (db *DB) noteUnusedDriverStatement(c *driverConn, si driver.Stmt) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if c.inUse {
		// Busy: defer closing the statement until the conn is next
		// returned to the pool (see putConn's onPut processing).
		c.onPut = append(c.onPut, func() {
			si.Close()
		})
	} else {
		c.Lock()
		defer c.Unlock()
		if !c.finalClosed {
			si.Close()
		}
	}
}
// debugGetPut determines whether getConn & putConn calls' stack traces
// are returned for more verbose crashes.
const debugGetPut = false

// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
func (db *DB) putConn(dc *driverConn, err error) {
	db.mu.Lock()
	if !dc.inUse {
		if debugGetPut {
			fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
		}
		panic("sql: connection returned that was never out")
	}
	if debugGetPut {
		db.lastPut[dc] = stack()
	}
	dc.inUse = false

	// Run any deferred work queued while the conn was in use
	// (e.g. statement closes noted by noteUnusedDriverStatement).
	for _, fn := range dc.onPut {
		fn()
	}
	dc.onPut = nil

	if err == driver.ErrBadConn {
		// Don't reuse bad connections.
		db.mu.Unlock()
		return
	}
	if putConnHook != nil {
		putConnHook(db, dc)
	}
	if n := len(db.freeConn); !db.closed && n < db.maxIdleConnsLocked() {
		db.freeConn = append(db.freeConn, dc)
		db.mu.Unlock()
		return
	}
	// Pool full or DB closed: drop the connection (without db.mu held).
	db.mu.Unlock()
	dc.Close()
}
// Prepare creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
func (db *DB) Prepare(query string) (*Stmt, error) {
	// Retry a bounded number of times when the pool hands us a
	// connection the driver has reported as bad.
	const maxBadConnRetries = 10
	var (
		stmt *Stmt
		err  error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		stmt, err = db.prepare(query)
		if err != driver.ErrBadConn {
			break
		}
	}
	return stmt, err
}
// prepare is the single-attempt implementation backing Prepare.
func (db *DB) prepare(query string) (*Stmt, error) {
	// TODO: check if db.driver supports an optional
	// driver.Preparer interface and call that instead, if so,
	// otherwise we make a prepared statement that's bound
	// to a connection, and to execute this prepared statement
	// we either need to use this connection (if it's free), else
	// get a new connection + re-prepare + execute on that one.
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	// Prepare under the conn's lock; prepareLocked also records the
	// statement in dc.openStmt so it can be closed with the conn.
	dc.Lock()
	si, err := dc.prepareLocked(query)
	dc.Unlock()
	if err != nil {
		db.putConn(dc, err)
		return nil, err
	}
	stmt := &Stmt{
		db:    db,
		query: query,
		css:   []connStmt{{dc, si}},
	}
	// The Stmt depends on itself for refcounting; finalClose runs
	// once the last dependency is removed (see removeDep).
	db.addDep(stmt, stmt)
	db.putConn(dc, nil)
	return stmt, nil
}
// Exec executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
	// Bad connections drawn from the pool are retried a bounded
	// number of times.
	const maxBadConnRetries = 10
	var (
		res Result
		err error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		res, err = db.exec(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return res, err
}
// exec runs query once on a single connection. A driver.ErrBadConn
// result tells the caller (Exec) to retry on a fresh connection.
func (db *DB) exec(query string, args []interface{}) (res Result, err error) {
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	// err is a named return: the deferred putConn observes the final
	// error, so bad connections are reported back to the pool. Note
	// the inner `:=` declarations below shadow err, but each return
	// statement still assigns the named return before the defer runs.
	defer func() {
		db.putConn(dc, err)
	}()
	// Fast path: drivers implementing Execer can execute without an
	// explicit prepare. ErrSkip means "fall back to prepare+exec".
	if execer, ok := dc.ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		dc.Lock()
		resi, err := execer.Exec(query, dargs)
		dc.Unlock()
		if err != driver.ErrSkip {
			if err != nil {
				return nil, err
			}
			return driverResult{dc, resi}, nil
		}
	}
	// Slow path: prepare, execute, then close the one-shot statement.
	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}
	defer withLock(dc, func() { si.Close() })
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// Query executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	// Bad connections drawn from the pool are retried a bounded
	// number of times.
	const maxBadConnRetries = 10
	var (
		rows *Rows
		err  error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		rows, err = db.query(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return rows, err
}
// query is the single-attempt implementation backing Query.
func (db *DB) query(query string, args []interface{}) (*Rows, error) {
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	// Ownership of dc is handed to queryConn; the release callback
	// returns it to the pool when the resulting Rows are closed.
	return db.queryConn(dc, func(err error) { db.putConn(dc, err) }, query, args)
}
// queryConn executes a query on the given connection.
// The connection gets released by the releaseConn function.
func (db *DB) queryConn(dc *driverConn, releaseConn func(error), query string, args []interface{}) (*Rows, error) {
	// Fast path: drivers implementing Queryer can run the query
	// without an explicit prepare. ErrSkip means "use the generic
	// prepare+query path below".
	if queryer, ok := dc.ci.(driver.Queryer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			releaseConn(err)
			return nil, err
		}
		dc.Lock()
		rowsi, err := queryer.Query(query, dargs)
		dc.Unlock()
		if err != driver.ErrSkip {
			if err != nil {
				releaseConn(err)
				return nil, err
			}
			// Note: ownership of dc passes to the *Rows, to be freed
			// with releaseConn.
			rows := &Rows{
				db:          db,
				dc:          dc,
				releaseConn: releaseConn,
				rowsi:       rowsi,
			}
			return rows, nil
		}
	}
	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		releaseConn(err)
		return nil, err
	}
	ds := driverStmt{dc, si}
	rowsi, err := rowsiFromStatement(ds, args...)
	if err != nil {
		// Statement was prepared above; close it before bailing.
		releaseConn(err)
		dc.Lock()
		si.Close()
		dc.Unlock()
		return nil, err
	}
	// Note: ownership of dc passes to the *Rows, to be freed
	// with releaseConn. closeStmt makes Rows.Close also close the
	// implicitly-prepared statement.
	rows := &Rows{
		db:          db,
		dc:          dc,
		releaseConn: releaseConn,
		rowsi:       rowsi,
		closeStmt:   si,
	}
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	rows, err := db.Query(query, args...)
	return &Row{rows: rows, err: err}
}
// Begin starts a transaction. The isolation level is dependent on
// the driver.
func (db *DB) Begin() (*Tx, error) {
	// Bad connections drawn from the pool are retried a bounded
	// number of times.
	const maxBadConnRetries = 10
	var (
		tx  *Tx
		err error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		tx, err = db.begin()
		if err != driver.ErrBadConn {
			break
		}
	}
	return tx, err
}
// begin starts a transaction on a single connection. The conn is owned
// exclusively by the returned Tx until Commit or Rollback returns it
// to the pool.
func (db *DB) begin() (tx *Tx, err error) {
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	dc.Lock()
	txi, err := dc.ci.Begin()
	dc.Unlock()
	if err != nil {
		// Failed to begin: hand the conn (and error) back to the pool.
		db.putConn(dc, err)
		return nil, err
	}
	return &Tx{
		db:  db,
		dc:  dc,
		txi: txi,
	}, nil
}
// Driver returns the database's underlying driver, as registered under
// the name passed to Open.
func (db *DB) Driver() driver.Driver {
	return db.driver
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
	db *DB
	// dc is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	dc  *driverConn
	txi driver.Tx
	// done transitions from false to true exactly once, on Commit
	// or Rollback. once done, all operations fail with
	// ErrTxDone.
	done bool
}

// ErrTxDone is returned by any Tx operation performed after the
// transaction has been committed or rolled back.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")

// close marks the transaction done and releases its connection back to
// the pool. It must be called at most once (panics on a double close).
func (tx *Tx) close() {
	if tx.done {
		panic("double close") // internal error
	}
	tx.done = true
	tx.db.putConn(tx.dc, nil)
	tx.dc = nil
	tx.txi = nil
}

// grabConn returns the transaction's dedicated connection, or
// ErrTxDone if the transaction has already ended.
func (tx *Tx) grabConn() (*driverConn, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	return tx.dc, nil
}
// Commit commits the transaction.
func (tx *Tx) Commit() error {
	if tx.done {
		return ErrTxDone
	}
	// The deferred close marks the Tx done and returns the conn to
	// the pool after the driver call below completes.
	defer tx.close()
	tx.dc.Lock()
	defer tx.dc.Unlock()
	return tx.txi.Commit()
}

// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
	if tx.done {
		return ErrTxDone
	}
	defer tx.close()
	tx.dc.Lock()
	defer tx.dc.Unlock()
	return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// TODO(bradfitz): We could be more efficient here and either
	// provide a method to take an existing Stmt (created on
	// perhaps a different Conn), and re-create it on this Conn if
	// necessary. Or, better: keep a map in DB of query string to
	// Stmts, and have Stmt.Execute do the right thing and
	// re-prepare if the Conn in use doesn't have that prepared
	// statement. But we'll want to avoid caching the statement
	// in the case where we only call conn.Prepare implicitly
	// (such as in db.Exec or tx.Exec), but the caller package
	// can't be holding a reference to the returned statement.
	// Perhaps just looking at the reference count (by noting
	// Stmt.Close) would be enough. We might also want a finalizer
	// on Stmt to drop the reference count.
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}
	// txsi (not css) marks the Stmt as transaction-scoped; it is
	// always bound to this transaction's connection.
	stmt := &Stmt{
		db: tx.db,
		tx: tx,
		txsi: &driverStmt{
			Locker: dc,
			si:     si,
		},
		query: query,
	}
	return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// TODO(bradfitz): optimize this. Currently this re-prepares
	// each time. This is fine for now to illustrate the API but
	// we should really cache already-prepared statements
	// per-Conn. See also the big comment in Tx.Prepare.
	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	dc, err := tx.grabConn()
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	dc.Lock()
	si, err := dc.ci.Prepare(stmt.query)
	dc.Unlock()
	// NOTE(review): if Prepare failed, si is nil but is still stored
	// in txsi; stickyErr makes every operation on the returned Stmt
	// report err before txsi is ever used — verify against Stmt users.
	return &Stmt{
		db: tx.db,
		tx: tx,
		txsi: &driverStmt{
			Locker: dc,
			si:     si,
		},
		query:     stmt.query,
		stickyErr: err,
	}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	// Fast path: drivers implementing Execer skip the prepare step.
	// ErrSkip means "fall back to prepare+exec below".
	if execer, ok := dc.ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		dc.Lock()
		resi, err := execer.Exec(query, dargs)
		dc.Unlock()
		if err == nil {
			return driverResult{dc, resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
	}
	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}
	// Close the one-shot statement when done, holding the conn lock.
	defer withLock(dc, func() { si.Close() })
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// Query executes a query that returns rows, typically a SELECT.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	// The Tx retains ownership of its connection, so releasing it
	// when the Rows close is a no-op.
	noRelease := func(error) {}
	return tx.db.queryConn(dc, noRelease, query, args)
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	rows, err := tx.Query(query, args...)
	return &Row{rows: rows, err: err}
}
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	dc *driverConn
	si driver.Stmt
}

// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	closemu sync.RWMutex // held exclusively during close, for read otherwise.

	// If in a transaction, else both nil:
	tx   *Tx
	txsi *driverStmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if tx == nil and one is found that has idle
	// connections. If tx != nil, txsi is always used.
	css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
	// Holding closemu (read) blocks a concurrent Stmt.Close for the
	// duration of the call.
	s.closemu.RLock()
	defer s.closemu.RUnlock()
	dc, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	// Return the conn to the pool when finished (no-op inside a Tx).
	defer releaseConn(nil)
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// resultFromStatement executes ds with args, validating the argument
// count against the driver's NumInput when the driver reports one.
func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) {
	ds.Lock()
	want := ds.si.NumInput()
	ds.Unlock()
	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
	}
	dargs, err := driverArgs(&ds, args)
	if err != nil {
		return nil, err
	}
	ds.Lock()
	resi, err := ds.si.Exec(dargs)
	ds.Unlock()
	if err != nil {
		return nil, err
	}
	// The result keeps the conn's Locker so its methods can hold the
	// conn lock during driver calls.
	return driverResult{ds.Locker, resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt() (ci *driverConn, releaseConn func(error), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}
	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		// The Tx owns its conn; releasing it here is a no-op.
		releaseConn = func(error) {}
		return ci, releaseConn, s.txsi.si, nil
	}
	// Look for an already-prepared statement on a currently-free conn.
	var cs connStmt
	match := false
	for i := 0; i < len(s.css); i++ {
		v := s.css[i]
		_, err := s.db.connIfFree(v.dc)
		if err == nil {
			match = true
			cs = v
			break
		}
		if err == errConnClosed {
			// Lazily remove dead conn from our freelist.
			s.css[i] = s.css[len(s.css)-1]
			s.css = s.css[:len(s.css)-1]
			i--
		}
	}
	s.mu.Unlock()
	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		for i := 0; ; i++ {
			dc, err := s.db.conn()
			if err != nil {
				return nil, nil, nil, err
			}
			dc.Lock()
			si, err := dc.prepareLocked(s.query)
			dc.Unlock()
			// Bounded retry when the pool keeps returning bad conns.
			if err == driver.ErrBadConn && i < 10 {
				continue
			}
			if err != nil {
				return nil, nil, nil, err
			}
			s.mu.Lock()
			cs = connStmt{dc, si}
			s.css = append(s.css, cs)
			s.mu.Unlock()
			break
		}
	}
	conn := cs.dc
	releaseConn = func(err error) { s.db.putConn(conn, err) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
	// Holding closemu (read) blocks a concurrent Stmt.Close.
	s.closemu.RLock()
	defer s.closemu.RUnlock()
	dc, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	ds := driverStmt{dc, si}
	rowsi, err := rowsiFromStatement(ds, args...)
	if err != nil {
		releaseConn(err)
		return nil, err
	}
	// Note: ownership of dc passes to the *Rows, to be freed
	// with releaseConn.
	rows := &Rows{
		db:    s.db,
		dc:    dc,
		rowsi: rowsi,
		// releaseConn set below
	}
	// The open Rows keep the Stmt alive (dependency) until closed.
	s.db.addDep(s, rows)
	rows.releaseConn = func(err error) {
		releaseConn(err)
		s.db.removeDep(s, rows)
	}
	return rows, nil
}
// rowsiFromStatement runs a query on ds with args, validating the
// argument count against the driver's NumInput when it reports one.
func rowsiFromStatement(ds driverStmt, args ...interface{}) (driver.Rows, error) {
	ds.Lock()
	want := ds.si.NumInput()
	ds.Unlock()
	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", want, len(args))
	}
	dargs, err := driverArgs(&ds, args)
	if err != nil {
		return nil, err
	}
	ds.Lock()
	rowsi, err := ds.si.Query(dargs)
	ds.Unlock()
	if err != nil {
		return nil, err
	}
	return rowsi, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//	var name string
//	err := nameByUseridStmt.QueryRow(id).Scan(&name)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
	rows, err := s.Query(args...)
	if err == nil {
		return &Row{rows: rows}
	}
	return &Row{err: err}
}
// Close closes the statement.
func (s *Stmt) Close() error {
	// closemu is held exclusively, so no Exec/Query is mid-flight.
	s.closemu.Lock()
	defer s.closemu.Unlock()
	if s.stickyErr != nil {
		return s.stickyErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return nil
	}
	s.closed = true
	if s.tx != nil {
		// Transaction-scoped statement: close its per-Tx driver stmt.
		s.txsi.Close()
		return nil
	}
	// Drop the self-dependency; finalClose runs once all other
	// dependents (open Rows) are gone.
	return s.db.removeDep(s, s)
}

// finalClose releases every per-connection driver statement once the
// Stmt's reference count reaches zero.
func (s *Stmt) finalClose() error {
	for _, v := range s.css {
		s.db.noteUnusedDriverStatement(v.dc, v.si)
		v.dc.removeOpenStmt(v.si)
		s.db.removeDep(v.dc, s)
	}
	s.css = nil
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//	rows, err := db.Query("SELECT ...")
//	...
//	for rows.Next() {
//	    var id int
//	    var name string
//	    err = rows.Scan(&id, &name)
//	    ...
//	}
//	err = rows.Err() // get any error encountered during iteration
//	...
type Rows struct {
	db          *DB
	dc          *driverConn // owned; must call releaseConn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows

	closed    bool
	lastcols  []driver.Value // scratch row buffer reused by each Next
	lasterr   error          // sticky; io.EOF means normal end of rows
	closeStmt driver.Stmt    // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
	if rs.closed {
		return false
	}
	if rs.lasterr != nil {
		return false
	}
	// Lazily allocate the scratch row on first use; the column count
	// is fixed for the life of the result set.
	if rs.lastcols == nil {
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}
	rs.lasterr = rs.rowsi.Next(rs.lastcols)
	if rs.lasterr == io.EOF {
		// Normal end of rows: auto-close, releasing the connection.
		rs.Close()
	}
	return rs.lasterr == nil
}
// Err returns the error, if any, that was encountered during iteration.
func (rs *Rows) Err() error {
	// io.EOF is the normal end-of-rows signal, not a user-visible error.
	if err := rs.lasterr; err != io.EOF {
		return err
	}
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
	switch {
	case rs.closed:
		return nil, errors.New("sql: Rows are closed")
	case rs.rowsi == nil:
		return nil, errors.New("sql: no Rows available")
	}
	return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
	if rs.closed {
		return errors.New("sql: Rows closed")
	}
	if rs.lasterr != nil {
		return rs.lasterr
	}
	// lastcols is allocated by the first Next call; nil means Next
	// was never called.
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}
	// Convert each driver value into the caller's destination type.
	for i, sv := range rs.lastcols {
		err := convertAssign(dest[i], sv)
		if err != nil {
			return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
		}
	}
	return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	err := rs.rowsi.Close()
	if rs.closeStmt != nil {
		// The statement was implicitly prepared for this query;
		// close it along with the rows.
		rs.closeStmt.Close()
	}
	// Return the connection to its owner, passing along any error
	// from closing the driver rows.
	rs.releaseConn(err)
	return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}

// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}
	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	for _, dp := range dest {
		if _, ok := dp.(*RawBytes); ok {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}
	// The Rows are always closed before returning, releasing the conn.
	defer r.rows.Close()
	if !r.rows.Next() {
		return ErrNoRows
	}
	err := r.rows.Scan(dest...)
	if err != nil {
		return err
	}
	return nil
}
// A Result summarizes an executed SQL command.
type Result interface {
	LastInsertId() (int64, error)
	RowsAffected() (int64, error)
}

// driverResult wraps a driver.Result, holding the originating conn's
// lock during each method call.
type driverResult struct {
	sync.Locker // the *driverConn
	resi        driver.Result
}

func (dr driverResult) LastInsertId() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.LastInsertId()
}

func (dr driverResult) RowsAffected() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.RowsAffected()
}
// stack formats the calling goroutine's stack trace for debug output.
func stack() string {
	buf := make([]byte, 2<<10)
	n := runtime.Stack(buf, false)
	return string(buf[:n])
}
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
lk.Lock()
fn()
lk.Unlock()
}
database/sql: remove an unused field from Rows
Found while debugging memory usage. Nobody accesses this field
anymore.
R=golang-dev, i.caught.air, adg, r
CC=golang-dev
https://codereview.appspot.com/9108043
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sql provides a generic interface around SQL (or SQL-like)
// databases.
//
// The sql package must be used in conjunction with a database driver.
// Drivers make themselves available by calling Register, typically
// from an init function.
// See http://golang.org/s/sqldrivers for a list of drivers.
package sql
import (
"database/sql/driver"
"errors"
"fmt"
"io"
"runtime"
"sync"
)
// drivers maps a driver name to its implementation, as installed by
// Register and read by Open. Access is not synchronized, so all
// Register calls should happen during program initialization.
var drivers = make(map[string]driver.Driver)

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver driver.Driver) {
	if driver == nil {
		panic("sql: Register driver is nil")
	}
	if _, dup := drivers[name]; dup {
		panic("sql: Register called twice for driver " + name)
	}
	drivers[name] = driver
}
// RawBytes is a byte slice that holds a reference to memory owned by
// the database itself. After a Scan into a RawBytes, the slice is only
// valid until the next call to Next, Scan, or Close.
// It avoids a copy, at the cost of the aliasing restriction above.
type RawBytes []byte
// NullString represents a string that may be null.
// NullString implements the Scanner interface so
// it can be used as a scan destination:
//
//	var s NullString
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
//	...
//	if s.Valid {
//	   // use s.String
//	} else {
//	   // NULL value
//	}
type NullString struct {
	String string
	Valid  bool // Valid is true if String is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullString) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset to the zero string and mark invalid.
		ns.String, ns.Valid = "", false
		return nil
	}
	ns.Valid = true
	return convertAssign(&ns.String, value)
}

// Value implements the driver Valuer interface.
func (ns NullString) Value() (driver.Value, error) {
	if !ns.Valid {
		// NULL is represented by a nil driver.Value.
		return nil, nil
	}
	return ns.String, nil
}
// NullInt64 represents an int64 that may be null.
// NullInt64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset to zero and mark invalid.
		n.Int64, n.Valid = 0, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Int64, value)
}

// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
	if !n.Valid {
		// NULL is represented by a nil driver.Value.
		return nil, nil
	}
	return n.Int64, nil
}
// NullFloat64 represents a float64 that may be null.
// NullFloat64 implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL
}

// Scan implements the Scanner interface.
func (n *NullFloat64) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset to zero and mark invalid.
		n.Float64, n.Valid = 0, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Float64, value)
}

// Value implements the driver Valuer interface.
func (n NullFloat64) Value() (driver.Value, error) {
	if !n.Valid {
		// NULL is represented by a nil driver.Value.
		return nil, nil
	}
	return n.Float64, nil
}
// NullBool represents a bool that may be null.
// NullBool implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL
}

// Scan implements the Scanner interface.
func (n *NullBool) Scan(value interface{}) error {
	if value == nil {
		// Database NULL: reset to false and mark invalid.
		n.Bool, n.Valid = false, false
		return nil
	}
	n.Valid = true
	return convertAssign(&n.Bool, value)
}

// Value implements the driver Valuer interface.
func (n NullBool) Value() (driver.Value, error) {
	if !n.Valid {
		// NULL is represented by a nil driver.Value.
		return nil, nil
	}
	return n.Bool, nil
}
// Scanner is an interface used by Scan.
type Scanner interface {
	// Scan assigns a value from a database driver.
	//
	// The src value will be of one of the following restricted
	// set of types:
	//
	//    int64
	//    float64
	//    bool
	//    []byte
	//    string
	//    time.Time
	//    nil - for NULL values
	//
	// An error should be returned if the value can not be stored
	// without loss of information.
	Scan(src interface{}) error
}

// ErrNoRows is returned by Scan when QueryRow doesn't return a
// row. In such a case, QueryRow returns a placeholder *Row value that
// defers this error until a Scan.
// Callers should test for it with ==.
var ErrNoRows = errors.New("sql: no rows in result set")
// DB is a database handle. It's safe for concurrent use by multiple
// goroutines.
//
// The sql package creates and frees connections automatically; it
// also maintains a free pool of idle connections. If the database has
// a concept of per-connection state, such state can only be reliably
// observed within a transaction. Once DB.Begin is called, the
// returned Tx is bound to a single connection. Once Commit or
// Rollback is called on the transaction, that transaction's
// connection is returned to DB's idle connection pool. The pool size
// can be controlled with SetMaxIdleConns.
type DB struct {
	driver driver.Driver // implementation, looked up by name in Open
	dsn    string        // data source name passed to driver.Open

	mu       sync.Mutex // protects following fields
	freeConn []*driverConn
	closed   bool
	dep      map[finalCloser]depSet
	lastPut  map[*driverConn]string // stacktrace of last conn's put; debug only
	maxIdle  int                    // zero means defaultMaxIdleConns; negative means 0
}
// driverConn wraps a driver.Conn with a mutex, to
// be held during all calls into the Conn. (including any calls onto
// interfaces returned via that Conn, such as calls on Tx, Stmt,
// Result, Rows)
type driverConn struct {
	db *DB

	sync.Mutex  // guards following
	ci          driver.Conn
	closed      bool
	finalClosed bool // ci.Close has been called
	openStmt    map[driver.Stmt]bool

	// guarded by db.mu
	inUse      bool
	onPut      []func() // code (with db.mu held) run when conn is next returned
	dbmuClosed bool     // same as closed, but guarded by db.mu, for connIfFree
}
// removeOpenStmt forgets si in the conn's set of open statements.
func (dc *driverConn) removeOpenStmt(si driver.Stmt) {
	dc.Lock()
	delete(dc.openStmt, si) // delete on a nil map is a no-op
	dc.Unlock()
}
// prepareLocked prepares query on the conn. Caller holds dc's mutex.
func (dc *driverConn) prepareLocked(query string) (driver.Stmt, error) {
	si, err := dc.ci.Prepare(query)
	if err == nil {
		// Track each driverConn's open statements, so we can close them
		// before closing the conn.
		//
		// TODO(bradfitz): let drivers opt out of caring about
		// stmt closes if the conn is about to close anyway? For now
		// do the safe thing, in case stmts need to be closed.
		//
		// TODO(bradfitz): after Go 1.1, closing driver.Stmts
		// should be moved to driverStmt, using unique
		// *driverStmts everywhere (including from
		// *Stmt.connStmt, instead of returning a
		// driver.Stmt), using driverStmt as a pointer
		// everywhere, and making it a finalCloser.
		if dc.openStmt == nil {
			dc.openStmt = make(map[driver.Stmt]bool)
		}
		dc.openStmt[si] = true
	}
	return si, err
}
// closeDBLocked closes the conn while the dc.db's Mutex is held.
func (dc *driverConn) closeDBLocked() error {
	dc.Lock()
	if dc.closed {
		dc.Unlock()
		return errors.New("sql: duplicate driverConn close")
	}
	dc.closed = true
	dc.Unlock() // not defer; removeDep finalClose calls may need to lock
	return dc.db.removeDepLocked(dc, dc)()
}

// Close marks the conn closed and drops its self-dependency; the
// underlying driver conn is closed by finalClose once its reference
// count reaches zero.
func (dc *driverConn) Close() error {
	dc.Lock()
	if dc.closed {
		dc.Unlock()
		return errors.New("sql: duplicate driverConn close")
	}
	dc.closed = true
	dc.Unlock() // not defer; removeDep finalClose calls may need to lock
	// And now updates that require holding dc.mu.Lock.
	dc.db.mu.Lock()
	dc.dbmuClosed = true
	fn := dc.db.removeDepLocked(dc, dc)
	dc.db.mu.Unlock()
	return fn()
}

// finalClose closes all open statements and the driver conn itself.
// Called (via removeDep) once nothing references this conn.
func (dc *driverConn) finalClose() error {
	dc.Lock()
	for si := range dc.openStmt {
		si.Close()
	}
	dc.openStmt = nil
	err := dc.ci.Close()
	dc.ci = nil
	dc.finalClosed = true
	dc.Unlock()
	return err
}
// driverStmt associates a driver.Stmt with the
// *driverConn from which it came, so the driverConn's lock can be
// held during calls.
type driverStmt struct {
	sync.Locker // the *driverConn
	si          driver.Stmt
}

func (ds *driverStmt) Close() error {
	ds.Lock()
	defer ds.Unlock()
	return ds.si.Close()
}

// depSet is a finalCloser's outstanding dependencies
type depSet map[interface{}]bool // set of true bools

// The finalCloser interface is used by (*DB).addDep and related
// dependency reference counting.
type finalCloser interface {
	// finalClose is called when the reference count of an object
	// goes to zero. (*DB).mu is not held while calling it.
	finalClose() error
}
// addDep notes that x now depends on dep, and x's finalClose won't be
// called until all of x's dependencies are removed with removeDep.
// It is the locking wrapper around addDepLocked.
func (db *DB) addDep(x finalCloser, dep interface{}) {
	//println(fmt.Sprintf("addDep(%T %p, %T %p)", x, x, dep, dep))
	db.mu.Lock()
	defer db.mu.Unlock()
	db.addDepLocked(x, dep)
}
// addDepLocked records dep in x's dependency set. Caller holds db.mu.
func (db *DB) addDepLocked(x finalCloser, dep interface{}) {
	if db.dep == nil {
		db.dep = make(map[finalCloser]depSet)
	}
	set, ok := db.dep[x]
	if !ok {
		set = make(depSet)
		db.dep[x] = set
	}
	set[dep] = true
}
// removeDep notes that x no longer depends on dep.
// If x still has dependencies, nil is returned.
// If x no longer has any dependencies, its finalClose method will be
// called and its error value will be returned.
func (db *DB) removeDep(x finalCloser, dep interface{}) error {
	db.mu.Lock()
	fn := db.removeDepLocked(x, dep)
	db.mu.Unlock()
	// fn is invoked outside db.mu: finalClose implementations may
	// need to take locks of their own.
	return fn()
}

// removeDepLocked removes dep from x's dependency set and returns a
// closure to run once db.mu is released: either a no-op, or x's
// finalClose if this was the last dependency.
func (db *DB) removeDepLocked(x finalCloser, dep interface{}) func() error {
	//println(fmt.Sprintf("removeDep(%T %p, %T %p)", x, x, dep, dep))
	done := false
	xdep := db.dep[x]
	if xdep != nil {
		delete(xdep, dep)
		if len(xdep) == 0 {
			delete(db.dep, x)
			done = true
		}
	}
	if !done {
		return func() error { return nil }
	}
	return func() error {
		//println(fmt.Sprintf("calling final close on %T %v (%#v)", x, x, x))
		return x.finalClose()
	}
}
// Open opens a database specified by its database driver name and a
// driver-specific data source name, usually consisting of at least a
// database name and connection information.
//
// Most users will open a database via a driver-specific connection
// helper function that returns a *DB. No database drivers are included
// in the Go standard library. See http://golang.org/s/sqldrivers for
// a list of third-party drivers.
//
// Open may just validate its arguments without creating a connection
// to the database. To verify that the data source name is valid, call
// Ping.
func Open(driverName, dataSourceName string) (*DB, error) {
	driveri, ok := drivers[driverName]
	if !ok {
		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
	}
	// No connection is dialed here; conn() opens them on demand.
	// lastPut is only consulted when debugGetPut is enabled.
	db := &DB{
		driver:  driveri,
		dsn:     dataSourceName,
		lastPut: make(map[*driverConn]string),
	}
	return db, nil
}
// Ping verifies a connection to the database is still alive,
// establishing a connection if necessary.
func (db *DB) Ping() error {
	// TODO(bradfitz): give drivers an optional hook to implement
	// this in a more efficient or more reliable way, if they
	// have one.
	dc, err := db.conn()
	if err != nil {
		return err
	}
	// Merely obtaining a conn is the liveness check; return it unused.
	db.putConn(dc, nil)
	return nil
}
// Close closes the database, releasing any open resources.
func (db *DB) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()
	var err error
	// Close every idle connection, remembering the last error seen.
	// Connections currently in use are not touched here.
	for _, dc := range db.freeConn {
		err1 := dc.closeDBLocked()
		if err1 != nil {
			err = err1
		}
	}
	db.freeConn = nil
	db.closed = true // future conn() calls will fail
	return err
}
const defaultMaxIdleConns = 2

// maxIdleConnsLocked reports how many idle connections to retain.
// db.mu must be held.
func (db *DB) maxIdleConnsLocked() int {
	n := db.maxIdle
	if n > 0 {
		return n
	}
	if n < 0 {
		// Negative means "retain none" (set by SetMaxIdleConns).
		return 0
	}
	// n == 0: unset; use the package default.
	// TODO(bradfitz): ask driver, if supported, for its default preference
	return defaultMaxIdleConns
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
//
// If n <= 0, no idle connections are retained.
func (db *DB) SetMaxIdleConns(n int) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if n > 0 {
		db.maxIdle = n
	} else {
		// No idle connections.
		// -1 distinguishes "none" from the 0 "unset/use default" state.
		db.maxIdle = -1
	}
	// Shed surplus idle connections, newest-first. Each close runs on
	// its own goroutine so this setter never blocks on driver Close.
	for len(db.freeConn) > 0 && len(db.freeConn) > n {
		nfree := len(db.freeConn)
		dc := db.freeConn[nfree-1]
		db.freeConn[nfree-1] = nil // allow GC of the entry
		db.freeConn = db.freeConn[:nfree-1]
		go dc.Close()
	}
}
// conn returns a newly-opened or cached *driverConn
func (db *DB) conn() (*driverConn, error) {
	db.mu.Lock()
	if db.closed {
		db.mu.Unlock()
		return nil, errors.New("sql: database is closed")
	}
	// Fast path: reuse the most recently freed connection.
	if n := len(db.freeConn); n > 0 {
		conn := db.freeConn[n-1]
		db.freeConn = db.freeConn[:n-1]
		conn.inUse = true
		db.mu.Unlock()
		return conn, nil
	}
	// Slow path: dial a new connection. The driver call runs without
	// holding db.mu since it may be slow (e.g. network I/O).
	db.mu.Unlock()

	ci, err := db.driver.Open(db.dsn)
	if err != nil {
		return nil, err
	}
	dc := &driverConn{
		db: db,
		ci: ci,
	}
	db.mu.Lock()
	// A connection depends on itself until it is finally closed.
	db.addDepLocked(dc, dc)
	dc.inUse = true
	db.mu.Unlock()
	return dc, nil
}
var (
	// Internal sentinel errors used by connIfFree; never returned to
	// package users.
	errConnClosed = errors.New("database/sql: internal sentinel error: conn is closed")
	errConnBusy   = errors.New("database/sql: internal sentinel error: conn is busy")
)

// connIfFree returns (wanted, nil) if wanted is still a valid conn and
// isn't in use.
//
// The error is errConnClosed if the requested connection is invalid
// because it's been closed.
//
// The error is errConnBusy if the connection is in use.
func (db *DB) connIfFree(wanted *driverConn) (*driverConn, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if wanted.inUse {
		return nil, errConnBusy
	}
	if wanted.dbmuClosed {
		return nil, errConnClosed
	}
	// Look for wanted in the free list; if present, claim it.
	for i, conn := range db.freeConn {
		if conn != wanted {
			continue
		}
		// Remove by swapping in the last element (order is irrelevant).
		db.freeConn[i] = db.freeConn[len(db.freeConn)-1]
		db.freeConn = db.freeConn[:len(db.freeConn)-1]
		wanted.inUse = true
		return wanted, nil
	}
	// TODO(bradfitz): shouldn't get here. After Go 1.1, change this to:
	// panic("connIfFree call requested a non-closed, non-busy, non-free conn")
	// Which passes all the tests, but I'm too paranoid to include this
	// late in Go 1.1.
	// Instead, treat it like a busy connection:
	return nil, errConnBusy
}
// putConnHook is a hook for testing.
var putConnHook func(*DB, *driverConn)

// noteUnusedDriverStatement notes that si is no longer used and should
// be closed whenever possible (when c is next not in use), unless c is
// already closed.
func (db *DB) noteUnusedDriverStatement(c *driverConn, si driver.Stmt) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if c.inUse {
		// Busy: defer the close until the conn is returned to the
		// pool (putConn runs the onPut callbacks).
		c.onPut = append(c.onPut, func() {
			si.Close()
		})
	} else {
		// Idle: close now, unless the conn itself was already finally
		// closed.
		c.Lock()
		defer c.Unlock()
		if !c.finalClosed {
			si.Close()
		}
	}
}
// debugGetPut determines whether getConn & putConn calls' stack traces
// are returned for more verbose crashes.
const debugGetPut = false

// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
func (db *DB) putConn(dc *driverConn, err error) {
	db.mu.Lock()
	if !dc.inUse {
		if debugGetPut {
			fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
		}
		panic("sql: connection returned that was never out")
	}
	if debugGetPut {
		db.lastPut[dc] = stack()
	}
	dc.inUse = false
	// Run any work deferred while the conn was in use (e.g. statement
	// closes queued by noteUnusedDriverStatement).
	for _, fn := range dc.onPut {
		fn()
	}
	dc.onPut = nil

	if err == driver.ErrBadConn {
		// Don't reuse bad connections.
		db.mu.Unlock()
		return
	}
	if putConnHook != nil {
		putConnHook(db, dc)
	}
	if n := len(db.freeConn); !db.closed && n < db.maxIdleConnsLocked() {
		db.freeConn = append(db.freeConn, dc)
		db.mu.Unlock()
		return
	}
	// Pool full (or DB closed): really close the connection, outside
	// db.mu since the driver's Close may block.
	db.mu.Unlock()
	dc.Close()
}
// Prepare creates a prepared statement for later queries or executions.
// Multiple queries or executions may be run concurrently from the
// returned statement.
func (db *DB) Prepare(query string) (*Stmt, error) {
	// Retry a bounded number of times when the driver reports a
	// stale/bad connection.
	const maxBadConnRetries = 10
	var (
		stmt *Stmt
		err  error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		stmt, err = db.prepare(query)
		if err != driver.ErrBadConn {
			break
		}
	}
	return stmt, err
}
// prepare performs one attempt at preparing query on some connection
// from the pool. May return driver.ErrBadConn, in which case the
// caller (Prepare) retries.
func (db *DB) prepare(query string) (*Stmt, error) {
	// TODO: check if db.driver supports an optional
	// driver.Preparer interface and call that instead, if so,
	// otherwise we make a prepared statement that's bound
	// to a connection, and to execute this prepared statement
	// we either need to use this connection (if it's free), else
	// get a new connection + re-prepare + execute on that one.
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	dc.Lock()
	si, err := dc.prepareLocked(query)
	dc.Unlock()
	if err != nil {
		db.putConn(dc, err)
		return nil, err
	}
	// Seed the statement's conn/stmt list with the pair just prepared.
	stmt := &Stmt{
		db:    db,
		query: query,
		css:   []connStmt{{dc, si}},
	}
	// A Stmt depends on itself until Stmt.Close removes the dep,
	// eventually triggering finalClose.
	db.addDep(stmt, stmt)
	db.putConn(dc, nil)
	return stmt, nil
}
// Exec executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
	// Retry a bounded number of times when the driver reports a
	// stale/bad connection.
	const maxBadConnRetries = 10
	var (
		res Result
		err error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		res, err = db.exec(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return res, err
}
// exec performs one attempt at executing query, using the driver's
// Execer fast path when available and falling back to an implicit
// prepare/exec otherwise.
func (db *DB) exec(query string, args []interface{}) (res Result, err error) {
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	// The named result err lets this defer report the final error
	// (including driver.ErrBadConn) to the pool.
	defer func() {
		db.putConn(dc, err)
	}()

	// Fast path: the driver supports Execer, skipping Prepare.
	if execer, ok := dc.ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		dc.Lock()
		resi, err := execer.Exec(query, dargs)
		dc.Unlock()
		if err != driver.ErrSkip {
			if err != nil {
				return nil, err
			}
			return driverResult{dc, resi}, nil
		}
		// driver.ErrSkip: fall through to prepare/exec below.
	}

	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}
	// Close the one-shot statement when done, holding the conn lock.
	defer withLock(dc, func() { si.Close() })
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// Query executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
	// Retry a bounded number of times when the driver reports a
	// stale/bad connection.
	const maxBadConnRetries = 10
	var (
		rows *Rows
		err  error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		rows, err = db.query(query, args)
		if err != driver.ErrBadConn {
			break
		}
	}
	return rows, err
}
// query performs one attempt at running query on a pooled connection.
func (db *DB) query(query string, args []interface{}) (*Rows, error) {
	ci, err := db.conn()
	if err != nil {
		return nil, err
	}
	// The returned *Rows owns the connection; releaseConn is how it
	// eventually returns it to the pool.
	releaseConn := func(err error) { db.putConn(ci, err) }
	return db.queryConn(ci, releaseConn, query, args)
}
// queryConn executes a query on the given connection.
// The connection gets released by the releaseConn function.
func (db *DB) queryConn(dc *driverConn, releaseConn func(error), query string, args []interface{}) (*Rows, error) {
	// Fast path: the driver supports Queryer, skipping Prepare.
	if queryer, ok := dc.ci.(driver.Queryer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			releaseConn(err)
			return nil, err
		}
		dc.Lock()
		rowsi, err := queryer.Query(query, dargs)
		dc.Unlock()
		if err != driver.ErrSkip {
			if err != nil {
				releaseConn(err)
				return nil, err
			}
			// Note: ownership of dc passes to the *Rows, to be freed
			// with releaseConn.
			rows := &Rows{
				dc:          dc,
				releaseConn: releaseConn,
				rowsi:       rowsi,
			}
			return rows, nil
		}
		// driver.ErrSkip: fall through to prepare/query below.
	}

	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		releaseConn(err)
		return nil, err
	}

	ds := driverStmt{dc, si}
	rowsi, err := rowsiFromStatement(ds, args...)
	if err != nil {
		// Clean up both the connection and the one-shot statement.
		releaseConn(err)
		dc.Lock()
		si.Close()
		dc.Unlock()
		return nil, err
	}

	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn.
	rows := &Rows{
		dc:          dc,
		releaseConn: releaseConn,
		rowsi:       rowsi,
		closeStmt:   si, // implicit statement; closed with the Rows
	}
	return rows, nil
}
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (db *DB) QueryRow(query string, args ...interface{}) *Row {
	rows, err := db.Query(query, args...)
	// Any error is carried inside the Row and surfaced by Scan.
	return &Row{rows: rows, err: err}
}
// Begin starts a transaction. The isolation level is dependent on
// the driver.
func (db *DB) Begin() (*Tx, error) {
	// Retry a bounded number of times when the driver reports a
	// stale/bad connection.
	const maxBadConnRetries = 10
	var (
		tx  *Tx
		err error
	)
	for attempt := 0; attempt < maxBadConnRetries; attempt++ {
		tx, err = db.begin()
		if err != driver.ErrBadConn {
			break
		}
	}
	return tx, err
}
// begin performs one attempt at starting a driver-level transaction
// on a pooled connection.
func (db *DB) begin() (tx *Tx, err error) {
	dc, err := db.conn()
	if err != nil {
		return nil, err
	}
	dc.Lock()
	txi, err := dc.ci.Begin()
	dc.Unlock()
	if err != nil {
		db.putConn(dc, err)
		return nil, err
	}
	// The Tx owns dc exclusively until Commit/Rollback returns it to
	// the pool via putConn.
	return &Tx{
		db:  db,
		dc:  dc,
		txi: txi,
	}, nil
}

// Driver returns the database's underlying driver.
func (db *DB) Driver() driver.Driver {
	return db.driver
}
// Tx is an in-progress database transaction.
//
// A transaction must end with a call to Commit or Rollback.
//
// After a call to Commit or Rollback, all operations on the
// transaction fail with ErrTxDone.
type Tx struct {
	db *DB

	// dc is owned exclusively until Commit or Rollback, at which point
	// it's returned with putConn.
	dc  *driverConn
	txi driver.Tx

	// done transitions from false to true exactly once, on Commit
	// or Rollback. once done, all operations fail with
	// ErrTxDone.
	done bool
}

// ErrTxDone is returned by any operation performed on a transaction
// that has already been committed or rolled back.
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
// close marks the transaction done and returns its connection to the
// pool. It must be called exactly once, by Commit or Rollback.
func (tx *Tx) close() {
	if tx.done {
		panic("double close") // internal error
	}
	tx.done = true
	tx.db.putConn(tx.dc, nil)
	// Clear references so later misuse fails fast.
	tx.dc = nil
	tx.txi = nil
}

// grabConn returns the connection the transaction was started on, or
// ErrTxDone if the transaction has already ended.
func (tx *Tx) grabConn() (*driverConn, error) {
	if tx.done {
		return nil, ErrTxDone
	}
	return tx.dc, nil
}
// Commit commits the transaction.
func (tx *Tx) Commit() error {
	if tx.done {
		return ErrTxDone
	}
	// Mark the Tx done and release its connection even if the
	// driver's Commit fails.
	defer tx.close()
	tx.dc.Lock()
	defer tx.dc.Unlock()
	return tx.txi.Commit()
}

// Rollback aborts the transaction.
func (tx *Tx) Rollback() error {
	if tx.done {
		return ErrTxDone
	}
	// Mark the Tx done and release its connection even if the
	// driver's Rollback fails.
	defer tx.close()
	tx.dc.Lock()
	defer tx.dc.Unlock()
	return tx.txi.Rollback()
}
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and can no longer
// be used once the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	// TODO(bradfitz): We could be more efficient here and either
	// provide a method to take an existing Stmt (created on
	// perhaps a different Conn), and re-create it on this Conn if
	// necessary. Or, better: keep a map in DB of query string to
	// Stmts, and have Stmt.Execute do the right thing and
	// re-prepare if the Conn in use doesn't have that prepared
	// statement. But we'll want to avoid caching the statement
	// in the case where we only call conn.Prepare implicitly
	// (such as in db.Exec or tx.Exec), but the caller package
	// can't be holding a reference to the returned statement.
	// Perhaps just looking at the reference count (by noting
	// Stmt.Close) would be enough. We might also want a finalizer
	// on Stmt to drop the reference count.
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}

	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}

	// A transaction statement uses txsi (bound to the Tx's single
	// connection) rather than the css free-conn list.
	stmt := &Stmt{
		db: tx.db,
		tx: tx,
		txsi: &driverStmt{
			Locker: dc,
			si:     si,
		},
		query: query,
	}
	return stmt, nil
}
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//  updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//  ...
//  tx, err := db.Begin()
//  ...
//  res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	// TODO(bradfitz): optimize this. Currently this re-prepares
	// each time. This is fine for now to illustrate the API but
	// we should really cache already-prepared statements
	// per-Conn. See also the big comment in Tx.Prepare.

	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	dc, err := tx.grabConn()
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	dc.Lock()
	si, err := dc.ci.Prepare(stmt.query)
	dc.Unlock()
	// Any Prepare error is deferred via stickyErr and returned by the
	// first operation on the returned Stmt.
	return &Stmt{
		db: tx.db,
		tx: tx,
		txsi: &driverStmt{
			Locker: dc,
			si:     si,
		},
		query:     stmt.query,
		stickyErr: err,
	}
}
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
func (tx *Tx) Exec(query string, args ...interface{}) (Result, error) {
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}

	// Fast path: the driver supports Execer, skipping Prepare.
	if execer, ok := dc.ci.(driver.Execer); ok {
		dargs, err := driverArgs(nil, args)
		if err != nil {
			return nil, err
		}
		dc.Lock()
		resi, err := execer.Exec(query, dargs)
		dc.Unlock()
		if err == nil {
			return driverResult{dc, resi}, nil
		}
		if err != driver.ErrSkip {
			return nil, err
		}
		// driver.ErrSkip: fall through to prepare/exec below.
	}

	dc.Lock()
	si, err := dc.ci.Prepare(query)
	dc.Unlock()
	if err != nil {
		return nil, err
	}
	// Close the one-shot statement when done, holding the conn lock.
	defer withLock(dc, func() { si.Close() })
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// Query executes a query that returns rows, typically a SELECT.
func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
	dc, err := tx.grabConn()
	if err != nil {
		return nil, err
	}
	// The Tx retains ownership of its connection, so releasing is a
	// no-op; the conn goes back to the pool on Commit/Rollback.
	releaseConn := func(error) {}
	return tx.db.queryConn(dc, releaseConn, query, args)
}

// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
func (tx *Tx) QueryRow(query string, args ...interface{}) *Row {
	rows, err := tx.Query(query, args...)
	return &Row{rows: rows, err: err}
}
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	dc *driverConn
	si driver.Stmt
}

// Stmt is a prepared statement. Stmt is safe for concurrent use by multiple goroutines.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	closemu sync.RWMutex // held exclusively during close, for read otherwise.

	// If in a transaction, else both nil:
	tx   *Tx
	txsi *driverStmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if tx == nil and one is found that has idle
	// connections. If tx != nil, txsi is always used.
	css []connStmt
}
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
func (s *Stmt) Exec(args ...interface{}) (Result, error) {
	// Hold closemu as a reader so Stmt.Close can't run mid-Exec.
	s.closemu.RLock()
	defer s.closemu.RUnlock()
	dc, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}
	defer releaseConn(nil)
	return resultFromStatement(driverStmt{dc, si}, args...)
}
// resultFromStatement executes ds with args and wraps the driver's
// result, holding the statement's lock around each driver call.
func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) {
	ds.Lock()
	want := ds.si.NumInput()
	ds.Unlock()

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(args))
	}

	dargs, err := driverArgs(&ds, args)
	if err != nil {
		return nil, err
	}

	ds.Lock()
	resi, err := ds.si.Exec(dargs)
	ds.Unlock()
	if err != nil {
		return nil, err
	}
	return driverResult{ds.Locker, resi}, nil
}
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt() (ci *driverConn, releaseConn func(error), si driver.Stmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction, we always use the connection that the
	// transaction was created on.
	if s.tx != nil {
		s.mu.Unlock()
		ci, err = s.tx.grabConn() // blocks, waiting for the connection.
		if err != nil {
			return
		}
		// The Tx owns its conn; releasing is a no-op here.
		releaseConn = func(error) {}
		return ci, releaseConn, s.txsi.si, nil
	}

	// Look for an already-prepared statement whose connection is free.
	var cs connStmt
	match := false
	for i := 0; i < len(s.css); i++ {
		v := s.css[i]
		_, err := s.db.connIfFree(v.dc)
		if err == nil {
			match = true
			cs = v
			break
		}
		if err == errConnClosed {
			// Lazily remove dead conn from our freelist.
			s.css[i] = s.css[len(s.css)-1]
			s.css = s.css[:len(s.css)-1]
			i-- // re-examine the element swapped into slot i
		}
	}
	s.mu.Unlock()

	// Make a new conn if all are busy.
	// TODO(bradfitz): or wait for one? make configurable later?
	if !match {
		for i := 0; ; i++ {
			dc, err := s.db.conn()
			if err != nil {
				return nil, nil, nil, err
			}
			dc.Lock()
			si, err := dc.prepareLocked(s.query)
			dc.Unlock()
			if err == driver.ErrBadConn && i < 10 {
				// Bounded retry on stale connections.
				continue
			}
			if err != nil {
				return nil, nil, nil, err
			}
			// Remember the new conn/stmt pair for future calls.
			s.mu.Lock()
			cs = connStmt{dc, si}
			s.css = append(s.css, cs)
			s.mu.Unlock()
			break
		}
	}

	conn := cs.dc
	releaseConn = func(err error) { s.db.putConn(conn, err) }
	return conn, releaseConn, cs.si, nil
}
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
	// Hold closemu as a reader so Stmt.Close can't run mid-Query.
	s.closemu.RLock()
	defer s.closemu.RUnlock()

	dc, releaseConn, si, err := s.connStmt()
	if err != nil {
		return nil, err
	}

	ds := driverStmt{dc, si}
	rowsi, err := rowsiFromStatement(ds, args...)
	if err != nil {
		releaseConn(err)
		return nil, err
	}

	// Note: ownership of ci passes to the *Rows, to be freed
	// with releaseConn.
	rows := &Rows{
		dc:    dc,
		rowsi: rowsi,
		// releaseConn set below
	}
	// The Rows depend on s: Stmt.finalClose must not run until all
	// Rows derived from this statement have been closed.
	s.db.addDep(s, rows)
	rows.releaseConn = func(err error) {
		releaseConn(err)
		s.db.removeDep(s, rows)
	}
	return rows, nil
}
// rowsiFromStatement runs a query on ds and returns the driver's row
// iterator, holding the statement's lock around each driver call.
func rowsiFromStatement(ds driverStmt, args ...interface{}) (driver.Rows, error) {
	ds.Lock()
	want := ds.si.NumInput()
	ds.Unlock()

	// -1 means the driver doesn't know how to count the number of
	// placeholders, so we won't sanity check input here and instead let the
	// driver deal with errors.
	if want != -1 && len(args) != want {
		return nil, fmt.Errorf("sql: statement expects %d inputs; got %d", want, len(args))
	}

	dargs, err := driverArgs(&ds, args)
	if err != nil {
		return nil, err
	}

	ds.Lock()
	rowsi, err := ds.si.Query(dargs)
	ds.Unlock()
	if err != nil {
		return nil, err
	}
	return rowsi, nil
}
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//  var name string
//  err := nameByUseridStmt.QueryRow(id).Scan(&name)
func (s *Stmt) QueryRow(args ...interface{}) *Row {
	rows, err := s.Query(args...)
	if err != nil {
		// Defer the error to Row.Scan.
		return &Row{err: err}
	}
	return &Row{rows: rows}
}
// Close closes the statement.
func (s *Stmt) Close() error {
	// Exclusive closemu: wait for in-flight Exec/Query readers.
	s.closemu.Lock()
	defer s.closemu.Unlock()

	if s.stickyErr != nil {
		// A permanently-broken Stmt holds no resources to release.
		return s.stickyErr
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return nil
	}
	s.closed = true

	if s.tx != nil {
		// Transaction statements are bound to the Tx's connection.
		s.txsi.Close()
		return nil
	}

	// Dropping the self-dependency triggers finalClose once all
	// outstanding Rows created from this Stmt are closed.
	return s.db.removeDep(s, s)
}

// finalClose releases every per-connection driver statement held by
// s. It is invoked (via the dependency system) after the last
// dependency on s is removed.
func (s *Stmt) finalClose() error {
	for _, v := range s.css {
		s.db.noteUnusedDriverStatement(v.dc, v.si)
		v.dc.removeOpenStmt(v.si)
		s.db.removeDep(v.dc, s)
	}
	s.css = nil
	return nil
}
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance through the rows:
//
//     rows, err := db.Query("SELECT ...")
//     ...
//     for rows.Next() {
//         var id int
//         var name string
//         err = rows.Scan(&id, &name)
//         ...
//     }
//     err = rows.Err() // get any error encountered during iteration
//     ...
type Rows struct {
	dc          *driverConn // owned; must call releaseConn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows

	closed   bool
	lastcols []driver.Value // scratch row filled by rowsi.Next
	lasterr  error          // sticky; io.EOF means normal end of rows
	closeStmt driver.Stmt   // if non-nil, statement to Close on close
}
// Next prepares the next result row for reading with the Scan method.
// It returns true on success, false if there is no next result row.
// Every call to Scan, even the first one, must be preceded by a call
// to Next.
func (rs *Rows) Next() bool {
	if rs.closed {
		return false
	}
	if rs.lasterr != nil {
		// A prior error (including io.EOF) permanently ends iteration.
		return false
	}
	if rs.lastcols == nil {
		// Lazily allocate the scratch row on first use.
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}
	rs.lasterr = rs.rowsi.Next(rs.lastcols)
	if rs.lasterr == io.EOF {
		// Normal end of result set: auto-close, releasing the conn.
		rs.Close()
	}
	return rs.lasterr == nil
}
// Err returns the error, if any, that was encountered during iteration.
func (rs *Rows) Err() error {
	if rs.lasterr != io.EOF {
		return rs.lasterr
	}
	// io.EOF marks the normal end of iteration, not an error.
	return nil
}
// Columns returns the column names.
// Columns returns an error if the rows are closed, or if the rows
// are from QueryRow and there was a deferred error.
func (rs *Rows) Columns() ([]string, error) {
	if rs.closed {
		return nil, errors.New("sql: Rows are closed")
	}
	if rs.rowsi == nil {
		// No driver rows: e.g. a QueryRow whose query failed.
		return nil, errors.New("sql: no Rows available")
	}
	return rs.rowsi.Columns(), nil
}
// Scan copies the columns in the current row into the values pointed
// at by dest.
//
// If an argument has type *[]byte, Scan saves in that argument a copy
// of the corresponding data. The copy is owned by the caller and can
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. If the value
// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
	if rs.closed {
		return errors.New("sql: Rows closed")
	}
	if rs.lasterr != nil {
		return rs.lasterr
	}
	if rs.lastcols == nil {
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}
	// Convert each driver value into the caller's destination type.
	for i, sv := range rs.lastcols {
		err := convertAssign(dest[i], sv)
		if err != nil {
			return fmt.Errorf("sql: Scan error on column index %d: %v", i, err)
		}
	}
	return nil
}
// Close closes the Rows, preventing further enumeration. If the
// end is encountered, the Rows are closed automatically. Close
// is idempotent.
func (rs *Rows) Close() error {
	if rs.closed {
		return nil
	}
	rs.closed = true
	err := rs.rowsi.Close()
	if rs.closeStmt != nil {
		// Also close the implicit statement created for this query.
		rs.closeStmt.Close()
	}
	// Return the owned connection to the pool, reporting the close error.
	rs.releaseConn(err)
	return err
}
// Row is the result of calling QueryRow to select a single row.
type Row struct {
	// One of these two will be non-nil:
	err  error // deferred error for easy chaining
	rows *Rows
}
// Scan copies the columns from the matched row into the values
// pointed at by dest. If more than one row matches the query,
// Scan uses the first row and discards the rest. If no row matches
// the query, Scan returns ErrNoRows.
func (r *Row) Scan(dest ...interface{}) error {
	if r.err != nil {
		return r.err
	}

	// TODO(bradfitz): for now we need to defensively clone all
	// []byte that the driver returned (not permitting
	// *RawBytes in Rows.Scan), since we're about to close
	// the Rows in our defer, when we return from this function.
	// the contract with the driver.Next(...) interface is that it
	// can return slices into read-only temporary memory that's
	// only valid until the next Scan/Close. But the TODO is that
	// for a lot of drivers, this copy will be unnecessary. We
	// should provide an optional interface for drivers to
	// implement to say, "don't worry, the []bytes that I return
	// from Next will not be modified again." (for instance, if
	// they were obtained from the network anyway) But for now we
	// don't care.
	for _, target := range dest {
		if _, isRaw := target.(*RawBytes); isRaw {
			return errors.New("sql: RawBytes isn't allowed on Row.Scan")
		}
	}

	defer r.rows.Close()
	if !r.rows.Next() {
		return ErrNoRows
	}
	return r.rows.Scan(dest...)
}
// A Result summarizes an executed SQL command.
type Result interface {
	LastInsertId() (int64, error)
	RowsAffected() (int64, error)
}

// driverResult implements Result by delegating to a driver.Result,
// holding the connection's lock around each delegated call.
type driverResult struct {
	sync.Locker // the *driverConn
	resi        driver.Result
}

func (dr driverResult) LastInsertId() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.LastInsertId()
}

func (dr driverResult) RowsAffected() (int64, error) {
	dr.Lock()
	defer dr.Unlock()
	return dr.resi.RowsAffected()
}
// stack returns a formatted stack trace of the calling goroutine,
// truncated to 2 KiB.
func stack() string {
	var buf [2 << 10]byte
	n := runtime.Stack(buf[:], false)
	return string(buf[:n])
}
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
lk.Lock()
fn()
lk.Unlock()
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File
	errors  scanner.ErrorList // accumulated scan/parse errors
	scanner scanner.Scanner

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode & Trace != 0)
	indent uint // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	// Ordinary identifier scopes
	pkgScope   *ast.Scope        // pkgScope.Outer == nil
	topScope   *ast.Scope        // top-most scope; may be pkgScope
	unresolved []*ast.Ident      // unresolved identifiers
	imports    []*ast.ImportSpec // list of imports

	// Label scope
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser to parse src for the named file: it
// registers the file with fset, installs the scanner's error handler,
// primes the one-token look-ahead, and opens the package and label
// scopes.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	// Funnel scanner errors into the parser's error list.
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)

	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)

	p.next()

	// set up the pkgScope here (as opposed to in parseFile) because
	// there are other parser entry points (ParseExpr, etc.)
	p.openScope()
	p.pkgScope = p.topScope

	// for the same reason, set up a label scope
	p.openLabelScope()
}
// ----------------------------------------------------------------------------
// Scoping support

// openScope pushes a new identifier scope.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}

// closeScope pops the current identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}

// openLabelScope pushes a new label scope and a fresh slot for that
// scope's forward-referenced (not yet resolved) labels.
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}

// closeLabelScope resolves the labels collected in the current label
// scope and then pops it.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare declares each of idents in the given scope with the given
// kind, recording decl/data on the new object. Redeclarations are
// reported when DeclarationErrors mode is set. The blank identifier
// is given an object but never inserted into the scope.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		assert(ident.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		ident.Obj = obj
		if ident.Name != "_" {
			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
				}
				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}
// shortVarDecl declares the identifiers on the left side of a short
// variable declaration (:=) in the current scope, reusing the existing
// object for redeclared names, and reports an error if no new
// variables are introduced.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			p.errorExpected(x.Pos(), "identifier")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)

// resolve connects an identifier to the object it denotes, searching
// the open scopes innermost-first. Identifiers not found locally are
// marked with the unresolved sentinel and collected in p.unresolved
// for later resolution against file/package/universe scopes.
func (p *parser) resolve(x ast.Expr) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	ident.Obj = unresolved
	p.unresolved = append(p.unresolved, ident)
}
// ----------------------------------------------------------------------------
// Parsing support

// printTrace prints one trace line, prefixed by the current source
// position and indented by 2*p.indent dot-columns.
func (p *parser) printTrace(a ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = uint(len(dots))
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	// Emit whole dot-runs for deep indentation, then the remainder.
	for ; i > n; i -= n {
		fmt.Print(dots)
	}
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

// trace prints the opening trace line for msg, increases the
// indentation, and returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}

	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()

	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. An empty line or non-comment
// token terminates a comment group.
//
func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	// "Adjacent" means on the same line as, or the line immediately
	// after, the previous comment's last line.
	for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
// next advances to the next non-comment token and records any lead/line
// comment groups encountered in p.leadComment and p.lineComment (see the
// comment above for the lead/line distinction).
func (p *parser) next() {
    p.leadComment = nil
    p.lineComment = nil
    line := p.file.Line(p.pos) // current line
    p.next0()
    if p.tok == token.COMMENT {
        var comment *ast.CommentGroup
        var endline int
        if p.file.Line(p.pos) == line {
            // The comment is on same line as the previous token; it
            // cannot be a lead comment but may be a line comment.
            comment, endline = p.consumeCommentGroup()
            if p.file.Line(p.pos) != endline {
                // The next token is on a different line, thus
                // the last comment group is a line comment.
                p.lineComment = comment
            }
        }
        // consume successor comments, if any
        endline = -1 // sentinel: stays -1 if the loop below never runs
        for p.tok == token.COMMENT {
            comment, endline = p.consumeCommentGroup()
        }
        if endline+1 == p.file.Line(p.pos) {
            // The next token is following on the line immediately after the
            // comment group, thus the last comment group is a lead comment.
            p.leadComment = comment
        }
    }
}
// error records a parse error with message msg at position pos.
func (p *parser) error(pos token.Pos, msg string) {
    p.errors.Add(p.file.Position(pos), msg)
}
// errorExpected records an "expected ..." error at pos. If pos is the
// current token position, the message also names the token actually found.
func (p *parser) errorExpected(pos token.Pos, msg string) {
    msg = "expected " + msg
    if pos == p.pos {
        // the error happened at the current position;
        // make the error message more specific
        if p.tok == token.SEMICOLON && p.lit == "\n" {
            // an automatically inserted semicolon
            msg += ", found newline"
        } else {
            msg += ", found '" + p.tok.String() + "'"
            if p.tok.IsLiteral() {
                msg += " " + p.lit
            }
        }
    }
    p.error(pos, msg)
}
// expect consumes the current token, reporting an error first if it is
// not tok. It returns the position of the (expected) token and always
// advances so the parser makes progress.
func (p *parser) expect(tok token.Token) token.Pos {
    pos := p.pos
    if p.tok != tok {
        p.errorExpected(pos, "'"+tok.String()+"'")
    }
    p.next() // make progress
    return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
func (p *parser) expectClosing(tok token.Token, construct string) token.Pos {
    if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
        // an automatically inserted semicolon where a ',' was likely intended
        p.error(p.pos, "missing ',' before newline in "+construct)
        p.next()
    }
    return p.expect(tok)
}
// expectSemi consumes a semicolon unless the current token is ')' or '}',
// in which case the semicolon is optional per the Go grammar.
func (p *parser) expectSemi() {
    if p.tok != token.RPAREN && p.tok != token.RBRACE {
        p.expect(token.SEMICOLON)
    }
}
// assert panics with an internal-error message containing msg when cond
// is false. It documents and enforces parser invariants.
func assert(cond bool, msg string) {
    if cond {
        return
    }
    panic("go/parser internal error: " + msg)
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses an identifier. If the current token is not an
// identifier, an error is reported and a placeholder "_" identifier is
// returned so parsing can continue.
func (p *parser) parseIdent() *ast.Ident {
    pos := p.pos
    name := "_"
    if p.tok == token.IDENT {
        name = p.lit
        p.next()
    } else {
        p.expect(token.IDENT) // use expect() error handling
    }
    return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated list of one or more identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
    if p.trace {
        defer un(trace(p, "IdentList"))
    }
    list = append(list, p.parseIdent())
    for p.tok == token.COMMA {
        p.next()
        list = append(list, p.parseIdent())
    }
    return
}
// ----------------------------------------------------------------------------
// Common productions
// If lhs is set, result list elements which are identifiers are not resolved.
// parseExprList parses a comma-separated list of one or more expressions.
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
    if p.trace {
        defer un(trace(p, "ExpressionList"))
    }
    list = append(list, p.checkExpr(p.parseExpr(lhs)))
    for p.tok == token.COMMA {
        p.next()
        list = append(list, p.checkExpr(p.parseExpr(lhs)))
    }
    return
}
// parseLhsList parses an expression list appearing on the left-hand side
// of an assignment, label declaration, or communication clause. Whether
// identifiers are resolved depends on the token following the list.
func (p *parser) parseLhsList() []ast.Expr {
    list := p.parseExprList(true)
    switch p.tok {
    case token.DEFINE:
        // lhs of a short variable declaration
        // but doesn't enter scope until later:
        // caller must call p.shortVarDecl(p.makeIdentList(list))
        // at appropriate time.
    case token.COLON:
        // lhs of a label declaration or a communication clause of a select
        // statement (parseLhsList is not called when parsing the case clause
        // of a switch statement):
        // - labels are declared by the caller of parseLhsList
        // - for communication clauses, if there is a stand-alone identifier
        //   followed by a colon, we have a syntax error; there is no need
        //   to resolve the identifier in that case
    default:
        // identifiers must be declared elsewhere
        for _, x := range list {
            p.resolve(x)
        }
    }
    return list
}
// parseRhsList parses a right-hand-side expression list; identifiers in
// the result are resolved.
func (p *parser) parseRhsList() []ast.Expr {
    return p.parseExprList(false)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type. If no type is found, an error is reported and
// a BadExpr covering the offending token is returned.
func (p *parser) parseType() ast.Expr {
    if p.trace {
        defer un(trace(p, "Type"))
    }
    typ := p.tryType()
    if typ == nil {
        pos := p.pos
        p.errorExpected(pos, "type")
        p.next() // make progress
        return &ast.BadExpr{From: pos, To: p.pos}
    }
    return typ
}
// If the result is an identifier, it is not resolved.
// parseTypeName parses a (possibly qualified) type name: Ident or
// Ident.Ident. If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
    if p.trace {
        defer un(trace(p, "TypeName"))
    }
    ident := p.parseIdent()
    // don't resolve ident yet - it may be a parameter or field name
    if p.tok == token.PERIOD {
        // ident is a package name
        p.next()
        p.resolve(ident)
        sel := p.parseIdent()
        return &ast.SelectorExpr{X: ident, Sel: sel}
    }
    return ident
}
// parseArrayType parses an array or slice type "[len]T" / "[]T"; if
// ellipsisOk is set, the "[...]T" form (array length inferred from a
// composite literal) is also accepted.
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "ArrayType"))
    }
    lbrack := p.expect(token.LBRACK)
    var len ast.Expr // NOTE: shadows the builtin len inside this function
    if ellipsisOk && p.tok == token.ELLIPSIS {
        len = &ast.Ellipsis{Ellipsis: p.pos}
        p.next()
    } else if p.tok != token.RBRACK {
        len = p.parseRhs()
    }
    p.expect(token.RBRACK)
    elt := p.parseType()
    return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
// makeIdentList converts a list of expressions to a list of identifiers.
// Non-identifier entries are reported as errors and replaced by "_"
// placeholders at the same position.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
    idents := make([]*ast.Ident, len(list))
    for i, x := range list {
        ident, isIdent := x.(*ast.Ident)
        if !isIdent {
            pos := x.Pos()
            p.errorExpected(pos, "identifier")
            ident = &ast.Ident{NamePos: pos, Name: "_"}
        }
        idents[i] = ident
    }
    return idents
}
// parseFieldDecl parses a single struct field declaration (named fields
// with a type, or an anonymous/embedded field), with an optional tag,
// and declares the field names in scope.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
    if p.trace {
        defer un(trace(p, "FieldDecl"))
    }
    doc := p.leadComment
    // fields
    list, typ := p.parseVarList(false)
    // optional tag
    var tag *ast.BasicLit
    if p.tok == token.STRING {
        tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
        p.next()
    }
    // analyze case
    var idents []*ast.Ident
    if typ != nil {
        // IdentifierList Type
        idents = p.makeIdentList(list)
    } else {
        // ["*"] TypeName (AnonymousField)
        typ = list[0] // we always have at least one element
        p.resolve(typ)
        if n := len(list); n > 1 || !isTypeName(deref(typ)) {
            pos := typ.Pos()
            p.errorExpected(pos, "anonymous field")
            typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
        }
    }
    p.expectSemi() // call before accessing p.linecomment
    field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
    p.declare(field, nil, scope, ast.Var, idents...)
    return field
}
// parseStructType parses "struct { ... }" with its field declarations;
// the fields are declared in a fresh struct scope.
func (p *parser) parseStructType() *ast.StructType {
    if p.trace {
        defer un(trace(p, "StructType"))
    }
    pos := p.expect(token.STRUCT)
    lbrace := p.expect(token.LBRACE)
    scope := ast.NewScope(nil) // struct scope
    var list []*ast.Field
    for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
        // a field declaration cannot start with a '(' but we accept
        // it here for more robust parsing and better error messages
        // (parseFieldDecl will check and complain if necessary)
        list = append(list, p.parseFieldDecl(scope))
    }
    rbrace := p.expect(token.RBRACE)
    return &ast.StructType{
        Struct: pos,
        Fields: &ast.FieldList{
            Opening: lbrace,
            List:    list,
            Closing: rbrace,
        },
    }
}
// parsePointerType parses a pointer type "*T".
func (p *parser) parsePointerType() *ast.StarExpr {
    if p.trace {
        defer un(trace(p, "PointerType"))
    }
    star := p.expect(token.MUL)
    base := p.parseType()
    return &ast.StarExpr{Star: star, X: base}
}
// tryVarType attempts to parse a type in a variable/parameter list; it
// returns nil if none is present. If isParam is set, a "...T" (variadic)
// parameter type is also accepted.
func (p *parser) tryVarType(isParam bool) ast.Expr {
    if isParam && p.tok == token.ELLIPSIS {
        pos := p.pos
        p.next()
        typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
        if typ == nil {
            p.error(pos, "'...' parameter is missing type")
            typ = &ast.BadExpr{From: pos, To: p.pos}
        }
        return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
    }
    return p.tryIdentOrType(false)
}
// parseVarType is like tryVarType but reports an error and returns a
// BadExpr when no type is found, so it never returns nil.
func (p *parser) parseVarType(isParam bool) ast.Expr {
    typ := p.tryVarType(isParam)
    if typ == nil {
        pos := p.pos
        p.errorExpected(pos, "type")
        p.next() // make progress
        typ = &ast.BadExpr{From: pos, To: p.pos}
    }
    return typ
}
// parseVarList parses a comma-separated list of types or identifiers,
// optionally followed by a trailing type. If typ is non-nil on return,
// list holds identifiers and typ is their type; otherwise list holds
// the types themselves.
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
    if p.trace {
        defer un(trace(p, "VarList"))
    }
    // a list of identifiers looks like a list of type names
    //
    // parse/tryVarType accepts any type (including parenthesized
    // ones) even though the syntax does not permit them here: we
    // accept them all for more robust parsing and complain later
    // NOTE: the loop variable typ shadows the result parameter typ
    for typ := p.parseVarType(isParam); typ != nil; {
        list = append(list, typ)
        if p.tok != token.COMMA {
            break
        }
        p.next()
        typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
    }
    // if we had a list of identifiers, it must be followed by a type
    if typ = p.tryVarType(isParam); typ != nil {
        p.resolve(typ)
    }
    return
}
// parseParameterList parses the fields of a parameter list: either
// "ident, ident T, ..." groups (named parameters, declared in scope) or
// a bare list of types (anonymous parameters).
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
    if p.trace {
        defer un(trace(p, "ParameterList"))
    }
    list, typ := p.parseVarList(ellipsisOk)
    if typ != nil {
        // IdentifierList Type
        idents := p.makeIdentList(list)
        field := &ast.Field{Names: idents, Type: typ}
        params = append(params, field)
        // Go spec: The scope of an identifier denoting a function
        // parameter or result variable is the function body.
        p.declare(field, nil, scope, ast.Var, idents...)
        if p.tok == token.COMMA {
            p.next()
        }
        // remaining groups: each is "identList type"
        for p.tok != token.RPAREN && p.tok != token.EOF {
            idents := p.parseIdentList()
            typ := p.parseVarType(ellipsisOk)
            field := &ast.Field{Names: idents, Type: typ}
            params = append(params, field)
            // Go spec: The scope of an identifier denoting a function
            // parameter or result variable is the function body.
            p.declare(field, nil, scope, ast.Var, idents...)
            if p.tok != token.COMMA {
                break
            }
            p.next()
        }
    } else {
        // Type { "," Type } (anonymous parameters)
        params = make([]*ast.Field, len(list))
        for i, x := range list {
            p.resolve(x)
            params[i] = &ast.Field{Type: x}
        }
    }
    return
}
// parseParameters parses a parenthesized (possibly empty) parameter list.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
    if p.trace {
        defer un(trace(p, "Parameters"))
    }
    var params []*ast.Field
    lparen := p.expect(token.LPAREN)
    if p.tok != token.RPAREN {
        params = p.parseParameterList(scope, ellipsisOk)
    }
    rparen := p.expect(token.RPAREN)
    return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
}
// parseResult parses a function result: either a parenthesized parameter
// list, a single unparenthesized type, or nothing (returns nil).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
    if p.trace {
        defer un(trace(p, "Result"))
    }
    if p.tok == token.LPAREN {
        return p.parseParameters(scope, false)
    }
    typ := p.tryType()
    if typ != nil {
        // single unnamed result type
        list := make([]*ast.Field, 1)
        list[0] = &ast.Field{Type: typ}
        return &ast.FieldList{List: list}
    }
    return nil
}
// parseSignature parses a function signature: parameters followed by an
// optional result; results may be nil.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
    if p.trace {
        defer un(trace(p, "Signature"))
    }
    params = p.parseParameters(scope, true)
    results = p.parseResult(scope)
    return
}
// parseFuncType parses "func(...) ..." and returns the type together
// with the new function scope in which the parameters were declared.
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
    if p.trace {
        defer un(trace(p, "FuncType"))
    }
    pos := p.expect(token.FUNC)
    scope := ast.NewScope(p.topScope) // function scope
    params, results := p.parseSignature(scope)
    return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses one entry of an interface body: either a method
// (name plus signature) or an embedded interface type name; the result
// is declared in the interface scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
    if p.trace {
        defer un(trace(p, "MethodSpec"))
    }
    doc := p.leadComment
    var idents []*ast.Ident
    var typ ast.Expr
    x := p.parseTypeName()
    if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
        // method
        idents = []*ast.Ident{ident}
        scope := ast.NewScope(nil) // method scope
        params, results := p.parseSignature(scope)
        typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
    } else {
        // embedded interface
        typ = x
        p.resolve(typ)
    }
    p.expectSemi() // call before accessing p.linecomment
    spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
    p.declare(spec, nil, scope, ast.Fun, idents...)
    return spec
}
// parseInterfaceType parses "interface { ... }" with its method specs;
// methods are declared in a fresh interface scope.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
    if p.trace {
        defer un(trace(p, "InterfaceType"))
    }
    pos := p.expect(token.INTERFACE)
    lbrace := p.expect(token.LBRACE)
    scope := ast.NewScope(nil) // interface scope
    var list []*ast.Field
    for p.tok == token.IDENT {
        list = append(list, p.parseMethodSpec(scope))
    }
    rbrace := p.expect(token.RBRACE)
    return &ast.InterfaceType{
        Interface: pos,
        Methods: &ast.FieldList{
            Opening: lbrace,
            List:    list,
            Closing: rbrace,
        },
    }
}
// parseMapType parses "map[K]V".
func (p *parser) parseMapType() *ast.MapType {
    if p.trace {
        defer un(trace(p, "MapType"))
    }
    pos := p.expect(token.MAP)
    p.expect(token.LBRACK)
    key := p.parseType()
    p.expect(token.RBRACK)
    value := p.parseType()
    return &ast.MapType{Map: pos, Key: key, Value: value}
}
// parseChanType parses a channel type: "chan T" (bidirectional),
// "chan<- T" (send-only), or "<-chan T" (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
    if p.trace {
        defer un(trace(p, "ChanType"))
    }
    pos := p.pos
    dir := ast.SEND | ast.RECV // bidirectional unless narrowed below
    if p.tok == token.CHAN {
        p.next()
        if p.tok == token.ARROW {
            // chan<- T
            p.next()
            dir = ast.SEND
        }
    } else {
        // <-chan T
        p.expect(token.ARROW)
        p.expect(token.CHAN)
        dir = ast.RECV
    }
    value := p.parseType()
    return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
}
// If the result is an identifier, it is not resolved.
// tryIdentOrType attempts to parse a type (dispatching on the current
// token) and returns nil if no type is present.
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
    switch p.tok {
    case token.IDENT:
        return p.parseTypeName()
    case token.LBRACK:
        return p.parseArrayType(ellipsisOk)
    case token.STRUCT:
        return p.parseStructType()
    case token.MUL:
        return p.parsePointerType()
    case token.FUNC:
        typ, _ := p.parseFuncType() // discard the scope; no body follows
        return typ
    case token.INTERFACE:
        return p.parseInterfaceType()
    case token.MAP:
        return p.parseMapType()
    case token.CHAN, token.ARROW:
        return p.parseChanType()
    case token.LPAREN:
        // parenthesized type
        lparen := p.pos
        p.next()
        typ := p.parseType()
        rparen := p.expect(token.RPAREN)
        return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
    }
    // no type found
    return nil
}
// tryType is like tryIdentOrType but resolves the result (if any).
func (p *parser) tryType() ast.Expr {
    typ := p.tryIdentOrType(false)
    if typ != nil {
        p.resolve(typ)
    }
    return typ
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list (case, default, '}', or EOF) is reached.
func (p *parser) parseStmtList() (list []ast.Stmt) {
    if p.trace {
        defer un(trace(p, "StatementList"))
    }
    for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
        list = append(list, p.parseStmt())
    }
    return
}
// parseBody parses a function body using the given (already created)
// function scope; note the asymmetry: the scope is opened by assignment
// here but closed via closeScope, since the caller created it.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
    if p.trace {
        defer un(trace(p, "Body"))
    }
    lbrace := p.expect(token.LBRACE)
    p.topScope = scope // open function scope
    p.openLabelScope()
    list := p.parseStmtList()
    p.closeLabelScope()
    p.closeScope()
    rbrace := p.expect(token.RBRACE)
    return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a braced statement block in its own new scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
    if p.trace {
        defer un(trace(p, "BlockStmt"))
    }
    lbrace := p.expect(token.LBRACE)
    p.openScope()
    list := p.parseStmtList()
    p.closeScope()
    rbrace := p.expect(token.RBRACE)
    return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type, and, if a body follows,
// a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
    if p.trace {
        defer un(trace(p, "FuncTypeOrLit"))
    }
    typ, scope := p.parseFuncType()
    if p.tok != token.LBRACE {
        // function type only
        return typ
    }
    p.exprLev++ // the body is nested inside an expression
    body := p.parseBody(scope)
    p.exprLev--
    return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
// parseOperand parses a primary operand: identifier, basic literal,
// parenthesized expression/type, function type or literal, or a type
// (for composite literals and conversions). On failure it reports an
// error and returns a BadExpr.
func (p *parser) parseOperand(lhs bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "Operand"))
    }
    switch p.tok {
    case token.IDENT:
        x := p.parseIdent()
        if !lhs {
            p.resolve(x)
        }
        return x
    case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
        x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
        p.next()
        return x
    case token.LPAREN:
        lparen := p.pos
        p.next()
        p.exprLev++
        x := p.parseRhsOrType() // types may be parenthesized: (some type)
        p.exprLev--
        rparen := p.expect(token.RPAREN)
        return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
    case token.FUNC:
        return p.parseFuncTypeOrLit()
    default:
        if typ := p.tryIdentOrType(true); typ != nil {
            // could be type for composite literal or conversion
            _, isIdent := typ.(*ast.Ident)
            assert(!isIdent, "type cannot be identifier") // IDENT was handled above
            return typ
        }
    }
    // nothing matched: report and make progress
    pos := p.pos
    p.errorExpected(pos, "operand")
    p.next() // make progress
    return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the selector part of "x.Sel" (the '.' has
// already been consumed by the caller).
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "Selector"))
    }
    sel := p.parseIdent()
    return &ast.SelectorExpr{X: x, Sel: sel}
}
// parseTypeAssertion parses "x.(T)" or the type-switch form "x.(type)"
// (the '.' has already been consumed); for "x.(type)" the resulting
// Type field is nil.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "TypeAssertion"))
    }
    p.expect(token.LPAREN)
    var typ ast.Expr
    if p.tok == token.TYPE {
        // type switch: typ == nil
        p.next()
    } else {
        typ = p.parseType()
    }
    p.expect(token.RPAREN)
    return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses "x[i]" or "x[low:high]"; the presence of a
// ':' decides between an IndexExpr and a SliceExpr.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "IndexOrSlice"))
    }
    lbrack := p.expect(token.LBRACK)
    p.exprLev++
    var low, high ast.Expr
    isSlice := false
    if p.tok != token.COLON {
        low = p.parseRhs()
    }
    if p.tok == token.COLON {
        isSlice = true
        p.next()
        if p.tok != token.RBRACK {
            high = p.parseRhs()
        }
    }
    p.exprLev--
    rbrack := p.expect(token.RBRACK)
    if isSlice {
        return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
    }
    return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses the argument list of a call or conversion
// "fun(...)"; a trailing "..." marks a variadic call and terminates the
// argument list.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
    if p.trace {
        defer un(trace(p, "CallOrConversion"))
    }
    lparen := p.expect(token.LPAREN)
    p.exprLev++
    var list []ast.Expr
    var ellipsis token.Pos
    for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
        list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
        if p.tok == token.ELLIPSIS {
            ellipsis = p.pos
            p.next()
        }
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    p.exprLev--
    rparen := p.expectClosing(token.RPAREN, "argument list")
    return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal: a nested
// literal value, a plain value, or (when keyOk is set) a key: value pair.
func (p *parser) parseElement(keyOk bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "Element"))
    }
    if p.tok == token.LBRACE {
        // nested composite literal without explicit type
        return p.parseLiteralValue(nil)
    }
    x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key
    if keyOk {
        if p.tok == token.COLON {
            colon := p.pos
            p.next()
            return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
        }
        p.resolve(x) // not a map key
    }
    return x
}
// parseElementList parses the comma-separated element list of a
// composite literal (up to the closing '}' or EOF).
func (p *parser) parseElementList() (list []ast.Expr) {
    if p.trace {
        defer un(trace(p, "ElementList"))
    }
    for p.tok != token.RBRACE && p.tok != token.EOF {
        list = append(list, p.parseElement(true))
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    return
}
// parseLiteralValue parses the braced value part "{ ... }" of a
// composite literal; typ is the (possibly nil) literal type already
// parsed by the caller.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "LiteralValue"))
    }
    lbrace := p.expect(token.LBRACE)
    var elts []ast.Expr
    p.exprLev++
    if p.tok != token.RBRACE {
        elts = p.parseElementList()
    }
    p.exprLev--
    rbrace := p.expectClosing(token.RBRACE, "composite literal")
    return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type).
// checkExpr checks that x is an expression (and not a type). If it is
// not, an error is reported and a BadExpr spanning x is returned.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
    // empty cases: these node kinds are valid expressions as-is
    switch unparen(x).(type) {
    case *ast.BadExpr:
    case *ast.Ident:
    case *ast.BasicLit:
    case *ast.FuncLit:
    case *ast.CompositeLit:
    case *ast.ParenExpr:
        panic("unreachable") // unparen already removed all parentheses
    case *ast.SelectorExpr:
    case *ast.IndexExpr:
    case *ast.SliceExpr:
    case *ast.TypeAssertExpr:
        // If t.Type == nil we have a type assertion of the form
        // y.(type), which is only allowed in type switch expressions.
        // It's hard to exclude those but for the case where we are in
        // a type switch. Instead be lenient and test this in the type
        // checker.
    case *ast.CallExpr:
    case *ast.StarExpr:
    case *ast.UnaryExpr:
    case *ast.BinaryExpr:
    default:
        // all other nodes are not proper expressions
        p.errorExpected(x.Pos(), "expression")
        x = &ast.BadExpr{From: x.Pos(), To: x.End()}
    }
    return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T); the [...] form is only valid
// inside a composite literal, which is handled elsewhere.
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
    switch t := unparen(x).(type) {
    case *ast.ParenExpr:
        panic("unreachable") // unparen already removed all parentheses
    case *ast.UnaryExpr:
    case *ast.ArrayType:
        if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
            p.error(len.Pos(), "expected array length, found '...'")
            x = &ast.BadExpr{From: x.Pos(), To: x.End()}
        }
    }
    // all other nodes are expressions or types
    return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors/type assertions (.), index/slice ([), calls ((), and
// composite literal values ({). If lhs is set and the result is an
// identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "PrimaryExpr"))
    }
    x := p.parseOperand(lhs)
L:
    for {
        switch p.tok {
        case token.PERIOD:
            p.next()
            if lhs {
                // a selector operand is used, not assigned: resolve now
                p.resolve(x)
            }
            switch p.tok {
            case token.IDENT:
                x = p.parseSelector(p.checkExpr(x))
            case token.LPAREN:
                x = p.parseTypeAssertion(p.checkExpr(x))
            default:
                pos := p.pos
                p.next() // make progress
                p.errorExpected(pos, "selector or type assertion")
                x = &ast.BadExpr{From: pos, To: p.pos}
            }
        case token.LBRACK:
            if lhs {
                p.resolve(x)
            }
            x = p.parseIndexOrSlice(p.checkExpr(x))
        case token.LPAREN:
            if lhs {
                p.resolve(x)
            }
            x = p.parseCallOrConversion(p.checkExprOrType(x))
        case token.LBRACE:
            // only treat '{' as a composite literal when x can be a
            // literal type and we are not in a control-clause header
            // (exprLev < 0) with a plain type name
            if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
                if lhs {
                    p.resolve(x)
                }
                x = p.parseLiteralValue(x)
            } else {
                break L
            }
        default:
            break L
        }
        lhs = false // no need to try to resolve again
    }
    return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// parseUnaryExpr parses a unary expression: +x, -x, !x, ^x, &x, <-x
// (receive or receive-only channel type), *x (deref or pointer type),
// or a primary expression. If lhs is set and the result is an
// identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "UnaryExpr"))
    }
    switch p.tok {
    case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
        pos, op := p.pos, p.tok
        p.next()
        x := p.parseUnaryExpr(false)
        return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
    case token.ARROW:
        // channel type or receive expression
        pos := p.pos
        p.next()
        if p.tok == token.CHAN {
            // <-chan T: receive-only channel type
            p.next()
            value := p.parseType()
            return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
        }
        x := p.parseUnaryExpr(false)
        return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
    case token.MUL:
        // pointer type or unary "*" expression
        pos := p.pos
        p.next()
        x := p.parseUnaryExpr(false)
        return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
    }
    return p.parsePrimaryExpr(lhs)
}
// If lhs is set and the result is an identifier, it is not resolved.
// parseBinaryExpr parses a binary expression with operators of
// precedence >= prec1, using precedence climbing. If lhs is set and the
// result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
    if p.trace {
        defer un(trace(p, "BinaryExpr"))
    }
    x := p.parseUnaryExpr(lhs)
    // consume operators from the current (highest applicable) precedence
    // down to prec1; right operands bind at the next higher precedence
    for prec := p.tok.Precedence(); prec >= prec1; prec-- {
        for p.tok.Precedence() == prec {
            pos, op := p.pos, p.tok
            p.next()
            if lhs {
                // x is used as an operand, so it must be resolved now
                p.resolve(x)
                lhs = false
            }
            y := p.parseBinaryExpr(false, prec+1)
            x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
        }
    }
    return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
// parseExpr parses a full expression. If lhs is set and the result is an
// identifier, it is not resolved. The result may be a type or even a raw
// type ([...]int); callers must check with checkExpr or checkExprOrType.
func (p *parser) parseExpr(lhs bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "Expression"))
    }
    return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a right-hand-side expression (identifiers resolved,
// result checked to be an expression, not a type).
func (p *parser) parseRhs() ast.Expr {
    return p.checkExpr(p.parseExpr(false))
}
// parseRhsOrType is like parseRhs but also accepts types (needed for
// conversions and type arguments of builtins such as make).
func (p *parser) parseRhsOrType() ast.Expr {
    return p.checkExprOrType(p.parseExpr(false))
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
    basic = iota // no special handling
    labelOk      // a labeled statement is allowed
    rangeOk      // a range clause is allowed on the rhs
)
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
// parseSimpleStmt parses a simple statement (assignment, short variable
// declaration, labeled statement, send, inc/dec, or expression statement).
// See the comment above for the meaning of mode and the range-clause
// result convention.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
    if p.trace {
        defer un(trace(p, "SimpleStmt"))
    }
    x := p.parseLhsList()
    switch p.tok {
    case
        token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
        token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
        token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
        token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
        // assignment statement, possibly part of a range clause
        pos, tok := p.pos, p.tok
        p.next()
        var y []ast.Expr
        isRange := false
        if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
            // wrap "range x" as a unary expression on the rhs
            pos := p.pos
            p.next()
            y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
            isRange = true
        } else {
            y = p.parseRhsList()
        }
        as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
        if tok == token.DEFINE {
            // declare the lhs identifiers now that the rhs is parsed
            p.shortVarDecl(as, x)
        }
        return as, isRange
    }
    if len(x) > 1 {
        p.errorExpected(x[0].Pos(), "1 expression")
        // continue with first expression
    }
    switch p.tok {
    case token.COLON:
        // labeled statement
        colon := p.pos
        p.next()
        if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
            // Go spec: The scope of a label is the body of the function
            // in which it is declared and excludes the body of any nested
            // function.
            stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
            p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
            return stmt, false
        }
        // The label declaration typically starts at x[0].Pos(), but the label
        // declaration may be erroneous due to a token after that position (and
        // before the ':'). If SpuriousErrors is not set, the (only) error re-
        // ported for the line is the illegal label error instead of the token
        // before the ':' that caused the problem. Thus, use the (latest) colon
        // position for error reporting.
        p.error(colon, "illegal label declaration")
        return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
    case token.ARROW:
        // send statement
        arrow := p.pos
        p.next()
        y := p.parseRhs()
        return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
    case token.INC, token.DEC:
        // increment or decrement
        s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
        p.next()
        return s, false
    }
    // expression
    return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression that must be a call (used by go and
// defer statements); on anything else it reports an error and returns nil.
func (p *parser) parseCallExpr() *ast.CallExpr {
    x := p.parseRhsOrType() // could be a conversion: (some type)(x)
    if call, isCall := x.(*ast.CallExpr); isCall {
        return call
    }
    p.errorExpected(x.Pos(), "function/method call")
    return nil
}
// parseGoStmt parses "go call"; a non-call operand yields a BadStmt.
func (p *parser) parseGoStmt() ast.Stmt {
    if p.trace {
        defer un(trace(p, "GoStmt"))
    }
    pos := p.expect(token.GO)
    call := p.parseCallExpr()
    p.expectSemi()
    if call == nil {
        return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
    }
    return &ast.GoStmt{Go: pos, Call: call}
}
// parseDeferStmt parses "defer call"; a non-call operand yields a BadStmt.
func (p *parser) parseDeferStmt() ast.Stmt {
    if p.trace {
        defer un(trace(p, "DeferStmt"))
    }
    pos := p.expect(token.DEFER)
    call := p.parseCallExpr()
    p.expectSemi()
    if call == nil {
        return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
    }
    return &ast.DeferStmt{Defer: pos, Call: call}
}
// parseReturnStmt parses "return" with an optional result list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
    if p.trace {
        defer un(trace(p, "ReturnStmt"))
    }
    pos := p.pos
    p.expect(token.RETURN)
    var x []ast.Expr
    if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
        x = p.parseRhsList()
    }
    p.expectSemi()
    return &ast.ReturnStmt{Return: pos, Results: x}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement (tok selects which); break/continue/goto may carry a label,
// which is recorded for later resolution against declared labels.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
    if p.trace {
        defer un(trace(p, "BranchStmt"))
    }
    pos := p.expect(tok)
    var label *ast.Ident
    if tok != token.FALLTHROUGH && p.tok == token.IDENT {
        label = p.parseIdent()
        // add to list of unresolved targets
        n := len(p.targetStack) - 1
        p.targetStack[n] = append(p.targetStack[n], label)
    }
    p.expectSemi()
    return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a simple statement into the condition expression of
// an if/switch; only expression statements qualify, anything else is an
// error and yields a BadExpr. A nil statement maps to a nil expression.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
    if s == nil {
        return nil
    }
    if es, isExpr := s.(*ast.ExprStmt); isExpr {
        return p.checkExpr(es.X)
    }
    p.error(s.Pos(), "expected condition, found simple statement")
    return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with optional init statement and
// optional else branch; the header is parsed with exprLev = -1 so that
// composite literals of plain type names are not consumed as the body.
func (p *parser) parseIfStmt() *ast.IfStmt {
    if p.trace {
        defer un(trace(p, "IfStmt"))
    }
    pos := p.expect(token.IF)
    p.openScope()
    defer p.closeScope()
    var s ast.Stmt
    var x ast.Expr
    {
        prevLev := p.exprLev
        p.exprLev = -1 // inside a control clause header
        if p.tok == token.SEMICOLON {
            // "if ; cond" — empty init statement
            p.next()
            x = p.parseRhs()
        } else {
            s, _ = p.parseSimpleStmt(basic)
            if p.tok == token.SEMICOLON {
                // "if init; cond"
                p.next()
                x = p.parseRhs()
            } else {
                // no init: the simple statement was the condition
                x = p.makeExpr(s)
                s = nil
            }
        }
        p.exprLev = prevLev
    }
    body := p.parseBlockStmt()
    var else_ ast.Stmt
    if p.tok == token.ELSE {
        p.next()
        else_ = p.parseStmt()
    } else {
        p.expectSemi()
    }
    return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a comma-separated list of one or more types
// (used for the case lists of type switches).
func (p *parser) parseTypeList() (list []ast.Expr) {
    if p.trace {
        defer un(trace(p, "TypeList"))
    }
    list = append(list, p.parseType())
    for p.tok == token.COMMA {
        p.next()
        list = append(list, p.parseType())
    }
    return
}
// parseCaseClause parses one "case ...:" or "default:" clause of a
// switch statement; typeSwitch selects whether the case list holds
// types or expressions. The clause body gets its own scope.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
    if p.trace {
        defer un(trace(p, "CaseClause"))
    }
    pos := p.pos
    var list []ast.Expr
    if p.tok == token.CASE {
        p.next()
        if typeSwitch {
            list = p.parseTypeList()
        } else {
            list = p.parseRhsList()
        }
    } else {
        p.expect(token.DEFAULT)
    }
    colon := p.expect(token.COLON)
    p.openScope()
    body := p.parseStmtList()
    p.closeScope()
    return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type-switch guard:
// either "x.(type)" or "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
    switch stmt := s.(type) {
    case *ast.ExprStmt:
        // x.(type)
        return isTypeSwitchAssert(stmt.X)
    case *ast.AssignStmt:
        // v := x.(type): exactly one variable, ':=' token, one rhs
        if len(stmt.Lhs) != 1 || stmt.Tok != token.DEFINE || len(stmt.Rhs) != 1 {
            return false
        }
        return isTypeSwitchAssert(stmt.Rhs[0])
    default:
        return false
    }
}
// parseSwitchStmt parses a switch statement (expression switch or type
// switch, decided by inspecting the parsed header) and returns the
// appropriate node kind.
func (p *parser) parseSwitchStmt() ast.Stmt {
    if p.trace {
        defer un(trace(p, "SwitchStmt"))
    }
    pos := p.expect(token.SWITCH)
    p.openScope()
    defer p.closeScope()
    var s1, s2 ast.Stmt
    if p.tok != token.LBRACE {
        prevLev := p.exprLev
        p.exprLev = -1 // inside a control clause header
        if p.tok != token.SEMICOLON {
            s2, _ = p.parseSimpleStmt(basic)
        }
        if p.tok == token.SEMICOLON {
            // what we parsed first was the init statement; the tag
            // (or guard) statement follows the ';'
            p.next()
            s1 = s2
            s2 = nil
            if p.tok != token.LBRACE {
                // A TypeSwitchGuard may declare a variable in addition
                // to the variable declared in the initial SimpleStmt.
                // Introduce extra scope to avoid redeclaration errors:
                //
                //	switch t := 0; t := x.(T) { ... }
                //
                // (this code is not valid Go because the first t
                // cannot be accessed and thus is never used, the extra
                // scope is needed for the correct error message).
                //
                // If we don't have a type switch, s2 must be an expression.
                // Having the extra nested but empty scope won't affect it.
                p.openScope()
                defer p.closeScope()
                s2, _ = p.parseSimpleStmt(basic)
            }
        }
        p.exprLev = prevLev
    }
    typeSwitch := isTypeSwitchGuard(s2)
    lbrace := p.expect(token.LBRACE)
    var list []ast.Stmt
    for p.tok == token.CASE || p.tok == token.DEFAULT {
        list = append(list, p.parseCaseClause(typeSwitch))
    }
    rbrace := p.expect(token.RBRACE)
    p.expectSemi()
    body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
    if typeSwitch {
        return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
    }
    return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one communication clause of a select statement:
// "case" followed by a send statement, a receive statement (possibly with
// assignment), or "default".
func (p *parser) parseCommClause() *ast.CommClause {
    if p.trace {
        defer un(trace(p, "CommClause"))
    }
    p.openScope()
    pos := p.pos
    var comm ast.Stmt
    if p.tok == token.CASE {
        p.next()
        lhs := p.parseLhsList()
        if p.tok == token.ARROW {
            // SendStmt
            if len(lhs) > 1 {
                p.errorExpected(lhs[0].Pos(), "1 expression")
                // continue with first expression
            }
            arrow := p.pos
            p.next()
            rhs := p.parseRhs()
            comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
        } else {
            // RecvStmt
            if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
                // RecvStmt with assignment
                if len(lhs) > 2 {
                    p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
                    // continue with first two expressions
                    lhs = lhs[0:2]
                }
                pos := p.pos
                p.next()
                rhs := p.parseRhs()
                as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
                if tok == token.DEFINE {
                    // ":=" declares new variables in the clause scope
                    p.shortVarDecl(as, lhs)
                }
                comm = as
            } else {
                // lhs must be single receive operation
                if len(lhs) > 1 {
                    p.errorExpected(lhs[0].Pos(), "1 expression")
                    // continue with first expression
                }
                comm = &ast.ExprStmt{X: lhs[0]}
            }
        }
    } else {
        p.expect(token.DEFAULT) // "default" clause has no communication
    }
    colon := p.expect(token.COLON)
    body := p.parseStmtList()
    p.closeScope()
    return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement: "select" followed by a
// braced list of communication clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
    if p.trace {
        defer un(trace(p, "SelectStmt"))
    }
    pos := p.expect(token.SELECT)
    lbrace := p.expect(token.LBRACE)
    var list []ast.Stmt
    for p.tok == token.CASE || p.tok == token.DEFAULT {
        list = append(list, p.parseCommClause())
    }
    rbrace := p.expect(token.RBRACE)
    p.expectSemi()
    body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
    return &ast.SelectStmt{Select: pos, Body: body}
}
// parseForStmt parses a "for" statement, returning either an
// *ast.ForStmt (plain/3-clause form) or an *ast.RangeStmt ("range" form).
func (p *parser) parseForStmt() ast.Stmt {
    if p.trace {
        defer un(trace(p, "ForStmt"))
    }
    pos := p.expect(token.FOR)
    p.openScope()
    defer p.closeScope()
    var s1, s2, s3 ast.Stmt
    var isRange bool
    if p.tok != token.LBRACE {
        prevLev := p.exprLev
        p.exprLev = -1 // in control clause: composite literals need parentheses
        if p.tok != token.SEMICOLON {
            s2, isRange = p.parseSimpleStmt(rangeOk)
        }
        if !isRange && p.tok == token.SEMICOLON {
            // full 3-clause form: what we parsed first was the init statement
            p.next()
            s1 = s2
            s2 = nil
            if p.tok != token.SEMICOLON {
                s2, _ = p.parseSimpleStmt(basic)
            }
            p.expectSemi()
            if p.tok != token.LBRACE {
                s3, _ = p.parseSimpleStmt(basic)
            }
        }
        p.exprLev = prevLev
    }
    body := p.parseBlockStmt()
    p.expectSemi()
    if isRange {
        as := s2.(*ast.AssignStmt)
        // check lhs
        var key, value ast.Expr
        switch len(as.Lhs) {
        case 2:
            key, value = as.Lhs[0], as.Lhs[1]
        case 1:
            key = as.Lhs[0]
        default:
            p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
            return &ast.BadStmt{From: pos, To: body.End()}
        }
        // parseSimpleStmt returned a right-hand side that
        // is a single unary expression of the form "range x"
        x := as.Rhs[0].(*ast.UnaryExpr).X
        return &ast.RangeStmt{
            For: pos,
            Key: key,
            Value: value,
            TokPos: as.TokPos,
            Tok: as.Tok,
            X: x,
            Body: body,
        }
    }
    // regular for statement
    return &ast.ForStmt{
        For: pos,
        Init: s1,
        Cond: p.makeExpr(s2),
        Post: s3,
        Body: body,
    }
}
// parseStmt parses a single statement, dispatching on the current token.
// On a token that cannot start a statement it records an error, consumes
// the token to guarantee progress, and returns an *ast.BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
    if p.trace {
        defer un(trace(p, "Statement"))
    }
    switch p.tok {
    case token.CONST, token.TYPE, token.VAR:
        s = &ast.DeclStmt{Decl: p.parseDecl()}
    case
        // tokens that may start an expression
        token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
        token.LBRACK, token.STRUCT, // composite types
        token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
        s, _ = p.parseSimpleStmt(labelOk)
        // because of the required look-ahead, labeled statements are
        // parsed by parseSimpleStmt - don't expect a semicolon after
        // them
        if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
            p.expectSemi()
        }
    case token.GO:
        s = p.parseGoStmt()
    case token.DEFER:
        s = p.parseDeferStmt()
    case token.RETURN:
        s = p.parseReturnStmt()
    case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
        s = p.parseBranchStmt(p.tok)
    case token.LBRACE:
        s = p.parseBlockStmt()
        p.expectSemi()
    case token.IF:
        s = p.parseIfStmt()
    case token.SWITCH:
        s = p.parseSwitchStmt()
    case token.SELECT:
        s = p.parseSelectStmt()
    case token.FOR:
        s = p.parseForStmt()
    case token.SEMICOLON:
        s = &ast.EmptyStmt{Semicolon: p.pos}
        p.next()
    case token.RBRACE:
        // a semicolon may be omitted before a closing "}"
        s = &ast.EmptyStmt{Semicolon: p.pos}
    default:
        // no statement found
        pos := p.pos
        p.errorExpected(pos, "statement")
        p.next() // make progress
        s = &ast.BadStmt{From: pos, To: p.pos}
    }
    return
}
// ----------------------------------------------------------------------------
// Declarations
// parseSpecFunction parses a single declaration spec (import, const, type,
// or var); iota is the index of the spec within a parenthesized group.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
// isValidImport reports whether lit is a valid import path literal:
// after unquoting it must be non-empty and contain only graphic,
// non-space Unicode characters outside the set the Go spec disallows.
func isValidImport(lit string) bool {
    const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
    path, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
    if path == "" {
        return false
    }
    for _, ch := range path {
        bad := !unicode.IsGraphic(ch) || unicode.IsSpace(ch) || strings.ContainsRune(illegalChars, ch)
        if bad {
            return false
        }
    }
    return true
}
// parseImportSpec parses one import spec: an optional local name
// ("." or an identifier) followed by a string path. The spec is also
// recorded in p.imports. The iota parameter is unused for imports.
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
    if p.trace {
        defer un(trace(p, "ImportSpec"))
    }
    var ident *ast.Ident
    switch p.tok {
    case token.PERIOD:
        ident = &ast.Ident{NamePos: p.pos, Name: "."}
        p.next()
    case token.IDENT:
        ident = p.parseIdent()
    }
    var path *ast.BasicLit
    if p.tok == token.STRING {
        if !isValidImport(p.lit) {
            p.error(p.pos, "invalid import path: "+p.lit)
        }
        path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
        p.next()
    } else {
        p.expect(token.STRING) // use expect() error handling
    }
    p.expectSemi() // call before accessing p.lineComment
    // collect imports
    spec := &ast.ImportSpec{
        Doc: doc,
        Name: ident,
        Path: path,
        Comment: p.lineComment,
    }
    p.imports = append(p.imports, spec)
    return spec
}
// parseConstSpec parses one const spec. Values are required for the
// first spec of a group (iota == 0) or when a type or "=" is present;
// later specs may omit them (implicit iota repetition).
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
    if p.trace {
        defer un(trace(p, "ConstSpec"))
    }
    idents := p.parseIdentList()
    typ := p.tryType()
    var values []ast.Expr
    if typ != nil || p.tok == token.ASSIGN || iota == 0 {
        p.expect(token.ASSIGN)
        values = p.parseRhsList()
    }
    p.expectSemi() // call before accessing p.lineComment
    // Go spec: The scope of a constant or variable identifier declared inside
    // a function begins at the end of the ConstSpec or VarSpec and ends at
    // the end of the innermost containing block.
    // (Global identifiers are resolved in a separate phase after parsing.)
    spec := &ast.ValueSpec{
        Doc: doc,
        Names: idents,
        Type: typ,
        Values: values,
        Comment: p.lineComment,
    }
    p.declare(spec, iota, p.topScope, ast.Con, idents...)
    return spec
}
// parseTypeSpec parses one type spec. The name is declared before the
// type is parsed so that self-referential types (e.g. recursive structs
// via pointers) resolve correctly.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
    if p.trace {
        defer un(trace(p, "TypeSpec"))
    }
    ident := p.parseIdent()
    // Go spec: The scope of a type identifier declared inside a function begins
    // at the identifier in the TypeSpec and ends at the end of the innermost
    // containing block.
    // (Global identifiers are resolved in a separate phase after parsing.)
    spec := &ast.TypeSpec{Doc: doc, Name: ident}
    p.declare(spec, nil, p.topScope, ast.Typ, ident)
    spec.Type = p.parseType()
    p.expectSemi() // call before accessing p.lineComment
    spec.Comment = p.lineComment
    return spec
}
// parseVarSpec parses one var spec: an identifier list with a type,
// initializer values, or both (values are required when no type is given).
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
    if p.trace {
        defer un(trace(p, "VarSpec"))
    }
    idents := p.parseIdentList()
    typ := p.tryType()
    var values []ast.Expr
    if typ == nil || p.tok == token.ASSIGN {
        p.expect(token.ASSIGN)
        values = p.parseRhsList()
    }
    p.expectSemi() // call before accessing p.lineComment
    // Go spec: The scope of a constant or variable identifier declared inside
    // a function begins at the end of the ConstSpec or VarSpec and ends at
    // the end of the innermost containing block.
    // (Global identifiers are resolved in a separate phase after parsing.)
    spec := &ast.ValueSpec{
        Doc: doc,
        Names: idents,
        Type: typ,
        Values: values,
        Comment: p.lineComment,
    }
    p.declare(spec, nil, p.topScope, ast.Var, idents...)
    return spec
}
// parseGenDecl parses a general declaration (import, const, type, or var),
// either a single spec or a parenthesized group of specs, using f to parse
// each individual spec.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
    if p.trace {
        defer un(trace(p, "GenDecl("+keyword.String()+")"))
    }
    doc := p.leadComment
    pos := p.expect(keyword)
    var lparen, rparen token.Pos
    var list []ast.Spec
    if p.tok == token.LPAREN {
        // grouped form: keyword ( spec; spec; ... )
        lparen = p.pos
        p.next()
        for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
            list = append(list, f(p, p.leadComment, iota))
        }
        rparen = p.expect(token.RPAREN)
        p.expectSemi()
    } else {
        // single (ungrouped) spec
        list = append(list, f(p, nil, 0))
    }
    return &ast.GenDecl{
        Doc: doc,
        TokPos: pos,
        Tok: keyword,
        Lparen: lparen,
        Specs: list,
        Rparen: rparen,
    }
}
// parseReceiver parses a method receiver and checks that it is exactly
// one field whose type is of the form ["*"] identifier; on error the
// field list is replaced by a single bad field so callers can proceed.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
    if p.trace {
        defer un(trace(p, "Receiver"))
    }
    par := p.parseParameters(scope, false)
    // must have exactly one receiver
    if par.NumFields() != 1 {
        p.errorExpected(par.Opening, "exactly one receiver")
        par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
        return par
    }
    // recv type must be of the form ["*"] identifier
    recv := par.List[0]
    base := deref(recv.Type)
    if _, isIdent := base.(*ast.Ident); !isIdent {
        p.errorExpected(base.Pos(), "(unqualified) identifier")
        par.List = []*ast.Field{{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}}}
    }
    return par
}
// parseFuncDecl parses a function or method declaration. Top-level
// functions (but not methods, and not init functions) are declared
// in the package scope.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
    if p.trace {
        defer un(trace(p, "FunctionDecl"))
    }
    doc := p.leadComment
    pos := p.expect(token.FUNC)
    scope := ast.NewScope(p.topScope) // function scope
    var recv *ast.FieldList
    if p.tok == token.LPAREN {
        recv = p.parseReceiver(scope)
    }
    ident := p.parseIdent()
    params, results := p.parseSignature(scope)
    var body *ast.BlockStmt
    if p.tok == token.LBRACE {
        // a body is optional (e.g. forward declarations for assembly funcs)
        body = p.parseBody(scope)
    }
    p.expectSemi()
    decl := &ast.FuncDecl{
        Doc: doc,
        Recv: recv,
        Name: ident,
        Type: &ast.FuncType{
            Func: pos,
            Params: params,
            Results: results,
        },
        Body: body,
    }
    if recv == nil {
        // Go spec: The scope of an identifier denoting a constant, type,
        // variable, or function (but not method) declared at top level
        // (outside any function) is the package block.
        //
        // init() functions cannot be referred to and there may
        // be more than one - don't put them in the pkgScope
        if ident.Name != "init" {
            p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
        }
    }
    return decl
}
// parseDecl parses a top-level declaration, dispatching on the current
// token. On a token that cannot start a declaration it records an error,
// consumes the token to guarantee progress, and returns an *ast.BadDecl.
func (p *parser) parseDecl() ast.Decl {
    if p.trace {
        defer un(trace(p, "Declaration"))
    }
    var f parseSpecFunction
    switch p.tok {
    case token.CONST:
        f = parseConstSpec
    case token.TYPE:
        f = parseTypeSpec
    case token.VAR:
        f = parseVarSpec
    case token.FUNC:
        return p.parseFuncDecl()
    default:
        pos := p.pos
        p.errorExpected(pos, "declaration")
        p.next() // make progress
        decl := &ast.BadDecl{From: pos, To: p.pos}
        return decl
    }
    return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, and remaining declarations (subject to the parse mode),
// then resolves file-level identifiers against the package scope.
func (p *parser) parseFile() *ast.File {
    if p.trace {
        defer un(trace(p, "File"))
    }
    // package clause
    doc := p.leadComment
    pos := p.expect(token.PACKAGE)
    // Go spec: The package clause is not a declaration;
    // the package name does not appear in any scope.
    ident := p.parseIdent()
    if ident.Name == "_" {
        p.error(p.pos, "invalid package name _")
    }
    p.expectSemi()
    var decls []ast.Decl
    // Don't bother parsing the rest if we had errors already.
    // Likely not a Go source file at all.
    if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
        // import decls
        for p.tok == token.IMPORT {
            decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
        }
        if p.mode&ImportsOnly == 0 {
            // rest of package body
            for p.tok != token.EOF {
                decls = append(decls, p.parseDecl())
            }
        }
    }
    assert(p.topScope == p.pkgScope, "imbalanced scopes")
    // resolve global identifiers within the same file;
    // identifiers still unresolved afterwards are kept (compacted in place)
    // for resolution across files / against the universe scope
    i := 0
    for _, ident := range p.unresolved {
        // i <= index for current ident
        assert(ident.Obj == unresolved, "object already resolved")
        ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
        if ident.Obj == nil {
            p.unresolved[i] = ident
            i++
        }
    }
    return &ast.File{
        Doc: doc,
        Package: pos,
        Name: ident,
        Decls: decls,
        Scope: p.pkgScope,
        Imports: p.imports,
        Unresolved: p.unresolved[0:i],
        Comments: p.comments,
    }
}
go/parser: better error synchronization when commas are missing
R=rsc, bradfitz
CC=golang-dev
http://codereview.appspot.com/5756045
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
    file *token.File
    errors scanner.ErrorList
    scanner scanner.Scanner
    // Tracing/debugging
    mode Mode // parsing mode
    trace bool // == (mode & Trace != 0)
    indent uint // indentation used for tracing output
    // Comments
    comments []*ast.CommentGroup
    leadComment *ast.CommentGroup // last lead comment
    lineComment *ast.CommentGroup // last line comment
    // Next token
    pos token.Pos // token position
    tok token.Token // one token look-ahead
    lit string // token literal
    // Non-syntactic parser control
    exprLev int // < 0: in control clause, >= 0: in expression
    // Ordinary identifier scopes
    pkgScope *ast.Scope // pkgScope.Outer == nil
    topScope *ast.Scope // top-most scope; may be pkgScope
    unresolved []*ast.Ident // unresolved identifiers
    imports []*ast.ImportSpec // list of imports
    // Label scope
    // (maintained by open/close LabelScope)
    labelScope *ast.Scope // label scope for current function
    targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser for parsing src attributed to filename within
// fset, configures the scanner, and primes the one-token look-ahead.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
    p.file = fset.AddFile(filename, fset.Base(), len(src))
    var m scanner.Mode
    if mode&ParseComments != 0 {
        m = scanner.ScanComments
    }
    eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
    p.scanner.Init(p.file, src, eh, m)
    p.mode = mode
    p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
    p.next()
    // set up the pkgScope here (as opposed to in parseFile) because
    // there are other parser entry points (ParseExpr, etc.)
    p.openScope()
    p.pkgScope = p.topScope
    // for the same reason, set up a label scope
    p.openLabelScope()
}
// ----------------------------------------------------------------------------
// Scoping support
// openScope pushes a new ordinary-identifier scope nested in the current one.
func (p *parser) openScope() {
    p.topScope = ast.NewScope(p.topScope)
}
// closeScope pops the current ordinary-identifier scope.
func (p *parser) closeScope() {
    p.topScope = p.topScope.Outer
}
// openLabelScope pushes a new label scope and an empty slot on the
// stack of forward-referenced (unresolved) label targets.
func (p *parser) openLabelScope() {
    p.labelScope = ast.NewScope(p.labelScope)
    p.targetStack = append(p.targetStack, nil)
}
// closeLabelScope resolves the labels referenced in the current label
// scope (reporting undefined labels if DeclarationErrors is set) and
// pops the scope and its target slot.
func (p *parser) closeLabelScope() {
    // resolve labels
    n := len(p.targetStack) - 1
    scope := p.labelScope
    for _, ident := range p.targetStack[n] {
        ident.Obj = scope.Lookup(ident.Name)
        if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
            p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
        }
    }
    // pop label scope
    p.targetStack = p.targetStack[0:n]
    p.labelScope = p.labelScope.Outer
}
// declare declares each of idents in scope with the given kind, linking
// the identifiers to a new object carrying decl/data. The blank
// identifier "_" is never inserted; redeclarations are reported when
// DeclarationErrors is set.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
    for _, ident := range idents {
        assert(ident.Obj == nil, "identifier already declared or resolved")
        obj := ast.NewObj(kind, ident.Name)
        // remember the corresponding declaration for redeclaration
        // errors and global variable resolution/typechecking phase
        obj.Decl = decl
        obj.Data = data
        ident.Obj = obj
        if ident.Name != "_" {
            if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
                prevDecl := ""
                if pos := alt.Pos(); pos.IsValid() {
                    prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
                }
                p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
            }
        }
    }
}
// shortVarDecl declares the variables on the left-hand side of a ":="
// assignment in the current scope, allowing redeclaration of existing
// variables as long as at least one non-blank variable is new.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
    // Go spec: A short variable declaration may redeclare variables
    // provided they were originally declared in the same block with
    // the same type, and at least one of the non-blank variables is new.
    n := 0 // number of new variables
    for _, x := range list {
        if ident, isIdent := x.(*ast.Ident); isIdent {
            assert(ident.Obj == nil, "identifier already declared or resolved")
            obj := ast.NewObj(ast.Var, ident.Name)
            // remember corresponding assignment for other tools
            obj.Decl = decl
            ident.Obj = obj
            if ident.Name != "_" {
                if alt := p.topScope.Insert(obj); alt != nil {
                    ident.Obj = alt // redeclaration
                } else {
                    n++ // new declaration
                }
            }
        } else {
            p.errorExpected(x.Pos(), "identifier")
        }
    }
    if n == 0 && p.mode&DeclarationErrors != 0 {
        p.error(list[0].Pos(), "no new variables on left side of :=")
    }
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency (see resolve and parseFile).
var unresolved = new(ast.Object)
// resolve attempts to bind x (if it is a non-blank identifier) to an
// object in an enclosing scope; identifiers not found are marked with
// the unresolved sentinel and collected for later resolution.
func (p *parser) resolve(x ast.Expr) {
    // nothing to do if x is not an identifier or the blank identifier
    ident, _ := x.(*ast.Ident)
    if ident == nil {
        return
    }
    assert(ident.Obj == nil, "identifier already declared or resolved")
    if ident.Name == "_" {
        return
    }
    // try to resolve the identifier
    for s := p.topScope; s != nil; s = s.Outer {
        if obj := s.Lookup(ident.Name); obj != nil {
            ident.Obj = obj
            return
        }
    }
    // all local scopes are known, so any unresolved identifier
    // must be found either in the file scope, package scope
    // (perhaps in another file), or universe scope --- collect
    // them so that they can be resolved later
    ident.Obj = unresolved
    p.unresolved = append(p.unresolved, ident)
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints a single trace line: source position, indentation
// dots proportional to the current trace depth, then the message a.
func (p *parser) printTrace(a ...interface{}) {
    const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
        ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
    const n = uint(len(dots))
    pos := p.file.Position(p.pos)
    fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
    i := 2 * p.indent
    // repeat the full dots string for depths deeper than one chunk
    for ; i > n; i -= n {
        fmt.Print(dots)
    }
    fmt.Print(dots[0:i])
    fmt.Println(a...)
}
// trace prints an opening trace line for msg and increases the trace
// indentation; it returns p so it composes with un (see usage below).
func trace(p *parser, msg string) *parser {
    p.printTrace(msg, "(")
    p.indent++
    return p
}
// un decreases the trace indentation and prints a closing trace line.
// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
    p.indent--
    p.printTrace(")")
}
// Advance to the next token (including comments).
func (p *parser) next0() {
    // Because of one-token look-ahead, print the previous token
    // when tracing as it provides a more readable output. The
    // very first token (!p.pos.IsValid()) is not initialized
    // (it is token.ILLEGAL), so don't print it .
    if p.trace && p.pos.IsValid() {
        s := p.tok.String()
        switch {
        case p.tok.IsLiteral():
            p.printTrace(s, p.lit)
        case p.tok.IsOperator(), p.tok.IsKeyword():
            p.printTrace("\"" + s + "\"")
        default:
            p.printTrace(s)
        }
    }
    p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
    // /*-style comments may end on a different line than where they start.
    // Scan the comment for '\n' chars and adjust endline accordingly.
    endline = p.file.Line(p.pos)
    if p.lit[1] == '*' {
        // don't use range here - no need to decode Unicode code points
        for i := 0; i < len(p.lit); i++ {
            if p.lit[i] == '\n' {
                endline++
            }
        }
    }
    comment = &ast.Comment{Slash: p.pos, Text: p.lit}
    p.next0()
    return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. An empty line or non-comment
// token terminates a comment group.
//
func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
    var list []*ast.Comment
    endline = p.file.Line(p.pos)
    // comments separated by at most one newline belong to the same group
    for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
        var comment *ast.Comment
        comment, endline = p.consumeComment()
        list = append(list, comment)
    }
    // add comment group to the comments list
    comments = &ast.CommentGroup{List: list}
    p.comments = append(p.comments, comments)
    return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
    p.leadComment = nil
    p.lineComment = nil
    line := p.file.Line(p.pos) // current line
    p.next0()
    if p.tok == token.COMMENT {
        var comment *ast.CommentGroup
        var endline int
        if p.file.Line(p.pos) == line {
            // The comment is on same line as the previous token; it
            // cannot be a lead comment but may be a line comment.
            comment, endline = p.consumeCommentGroup()
            if p.file.Line(p.pos) != endline {
                // The next token is on a different line, thus
                // the last comment group is a line comment.
                p.lineComment = comment
            }
        }
        // consume successor comments, if any
        endline = -1
        for p.tok == token.COMMENT {
            comment, endline = p.consumeCommentGroup()
        }
        if endline+1 == p.file.Line(p.pos) {
            // The next token is following on the line immediately after the
            // comment group, thus the last comment group is a lead comment.
            p.leadComment = comment
        }
    }
}
// error records an error message at the given token position.
func (p *parser) error(pos token.Pos, msg string) {
    p.errors.Add(p.file.Position(pos), msg)
}
// errorExpected records an "expected ..." error at pos; if pos is the
// current token position, the message also describes what was found
// instead (including "newline" for an auto-inserted semicolon).
func (p *parser) errorExpected(pos token.Pos, msg string) {
    msg = "expected " + msg
    if pos == p.pos {
        // the error happened at the current position;
        // make the error message more specific
        if p.tok == token.SEMICOLON && p.lit == "\n" {
            // the semicolon was auto-inserted by the scanner
            msg += ", found newline"
        } else {
            msg += ", found '" + p.tok.String() + "'"
            if p.tok.IsLiteral() {
                msg += " " + p.lit
            }
        }
    }
    p.error(pos, msg)
}
// expect consumes the current token, reporting an error if it is not
// tok, and returns its position. The token is consumed in either case
// to guarantee progress.
func (p *parser) expect(tok token.Token) token.Pos {
    pos := p.pos
    if p.tok != tok {
        p.errorExpected(pos, "'"+tok.String()+"'")
    }
    p.next() // make progress
    return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
    if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
        // auto-inserted semicolon: the user likely forgot a comma
        p.error(p.pos, "missing ',' before newline in "+context)
        p.next()
    }
    return p.expect(tok)
}
// expectSemi consumes a required semicolon; a closing ")" or "}" is
// accepted in its place (semicolons are optional before them).
func (p *parser) expectSemi() {
    if p.tok != token.RPAREN && p.tok != token.RBRACE {
        p.expect(token.SEMICOLON)
    }
}
// seesComma reports whether a comma is present (or can be assumed) at
// the current position: an auto-inserted newline semicolon is treated
// as a missing comma, with a specific error, to improve error recovery.
func (p *parser) seesComma(context string) bool {
    if p.tok == token.COMMA {
        return true
    }
    if p.tok == token.SEMICOLON && p.lit == "\n" {
        p.error(p.pos, "missing ',' before newline in "+context)
        return true // "insert" the comma and continue
    }
    return false
}
// assert panics with an internal-error message when cond does not hold.
func assert(cond bool, msg string) {
    if cond {
        return
    }
    panic("go/parser internal error: " + msg)
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses an identifier; on failure it reports an error and
// returns a placeholder identifier named "_" at the current position.
func (p *parser) parseIdent() *ast.Ident {
    pos := p.pos
    name := "_"
    if p.tok == token.IDENT {
        name = p.lit
        p.next()
    } else {
        p.expect(token.IDENT) // use expect() error handling
    }
    return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated list of one or more identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
    if p.trace {
        defer un(trace(p, "IdentList"))
    }
    list = append(list, p.parseIdent())
    for p.tok == token.COMMA {
        p.next()
        list = append(list, p.parseIdent())
    }
    return
}
// ----------------------------------------------------------------------------
// Common productions
// parseExprList parses a comma-separated list of one or more expressions.
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
    if p.trace {
        defer un(trace(p, "ExpressionList"))
    }
    list = append(list, p.checkExpr(p.parseExpr(lhs)))
    for p.tok == token.COMMA {
        p.next()
        list = append(list, p.checkExpr(p.parseExpr(lhs)))
    }
    return
}
// parseLhsList parses a left-hand-side expression list. Whether the
// identifiers in the list are resolved depends on the following token:
// before ":=" or ":" resolution is deferred to the caller (or skipped).
func (p *parser) parseLhsList() []ast.Expr {
    list := p.parseExprList(true)
    switch p.tok {
    case token.DEFINE:
        // lhs of a short variable declaration
        // but doesn't enter scope until later:
        // caller must call p.shortVarDecl(p.makeIdentList(list))
        // at appropriate time.
    case token.COLON:
        // lhs of a label declaration or a communication clause of a select
        // statement (parseLhsList is not called when parsing the case clause
        // of a switch statement):
        // - labels are declared by the caller of parseLhsList
        // - for communication clauses, if there is a stand-alone identifier
        // followed by a colon, we have a syntax error; there is no need
        // to resolve the identifier in that case
    default:
        // identifiers must be declared elsewhere
        for _, x := range list {
            p.resolve(x)
        }
    }
    return list
}
// parseRhsList parses a right-hand-side expression list; identifiers
// in the list are resolved as they are parsed.
func (p *parser) parseRhsList() []ast.Expr {
    return p.parseExprList(false)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type; if no type is present it reports an error,
// consumes a token to guarantee progress, and returns an *ast.BadExpr.
func (p *parser) parseType() ast.Expr {
    if p.trace {
        defer un(trace(p, "Type"))
    }
    typ := p.tryType()
    if typ == nil {
        pos := p.pos
        p.errorExpected(pos, "type")
        p.next() // make progress
        return &ast.BadExpr{From: pos, To: p.pos}
    }
    return typ
}
// parseTypeName parses a type name, possibly qualified by a package name.
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
    if p.trace {
        defer un(trace(p, "TypeName"))
    }
    ident := p.parseIdent()
    // don't resolve ident yet - it may be a parameter or field name
    if p.tok == token.PERIOD {
        // ident is a package name
        p.next()
        p.resolve(ident)
        sel := p.parseIdent()
        return &ast.SelectorExpr{X: ident, Sel: sel}
    }
    return ident
}
// parseArrayType parses an array or slice type; if ellipsisOk is set,
// the "[...]T" form (length inferred from a composite literal) is accepted.
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "ArrayType"))
    }
    lbrack := p.expect(token.LBRACK)
    var len ast.Expr
    if ellipsisOk && p.tok == token.ELLIPSIS {
        len = &ast.Ellipsis{Ellipsis: p.pos}
        p.next()
    } else if p.tok != token.RBRACK {
        len = p.parseRhs()
    }
    p.expect(token.RBRACK)
    elt := p.parseType()
    return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
// makeIdentList converts a list of expressions into a list of
// identifiers, reporting an error and substituting "_" for any
// expression that is not an identifier.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
    idents := make([]*ast.Ident, len(list))
    for i, x := range list {
        ident, isIdent := x.(*ast.Ident)
        if !isIdent {
            pos := x.Pos()
            p.errorExpected(pos, "identifier")
            ident = &ast.Ident{NamePos: pos, Name: "_"}
        }
        idents[i] = ident
    }
    return idents
}
// parseFieldDecl parses one field declaration of a struct type: either
// an identifier list followed by a type, or a single anonymous
// (embedded) field, optionally followed by a tag.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
    if p.trace {
        defer un(trace(p, "FieldDecl"))
    }
    doc := p.leadComment
    // fields
    list, typ := p.parseVarList(false)
    // optional tag
    var tag *ast.BasicLit
    if p.tok == token.STRING {
        tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
        p.next()
    }
    // analyze case
    var idents []*ast.Ident
    if typ != nil {
        // IdentifierList Type
        idents = p.makeIdentList(list)
    } else {
        // ["*"] TypeName (AnonymousField)
        typ = list[0] // we always have at least one element
        p.resolve(typ)
        if n := len(list); n > 1 || !isTypeName(deref(typ)) {
            pos := typ.Pos()
            p.errorExpected(pos, "anonymous field")
            typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
        }
    }
    p.expectSemi() // call before accessing p.lineComment
    field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
    p.declare(field, nil, scope, ast.Var, idents...)
    return field
}
// parseStructType parses a struct type, declaring its fields in a
// fresh struct scope.
func (p *parser) parseStructType() *ast.StructType {
    if p.trace {
        defer un(trace(p, "StructType"))
    }
    pos := p.expect(token.STRUCT)
    lbrace := p.expect(token.LBRACE)
    scope := ast.NewScope(nil) // struct scope
    var list []*ast.Field
    for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
        // a field declaration cannot start with a '(' but we accept
        // it here for more robust parsing and better error messages
        // (parseFieldDecl will check and complain if necessary)
        list = append(list, p.parseFieldDecl(scope))
    }
    rbrace := p.expect(token.RBRACE)
    return &ast.StructType{
        Struct: pos,
        Fields: &ast.FieldList{
            Opening: lbrace,
            List: list,
            Closing: rbrace,
        },
    }
}
// parsePointerType parses a pointer type "*T".
func (p *parser) parsePointerType() *ast.StarExpr {
    if p.trace {
        defer un(trace(p, "PointerType"))
    }
    star := p.expect(token.MUL)
    base := p.parseType()
    return &ast.StarExpr{Star: star, X: base}
}
// tryVarType attempts to parse a type in a variable/parameter list;
// in parameter lists (isParam) it also accepts a "...T" variadic type.
// It returns nil if no type is present.
func (p *parser) tryVarType(isParam bool) ast.Expr {
    if isParam && p.tok == token.ELLIPSIS {
        pos := p.pos
        p.next()
        typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
        if typ == nil {
            p.error(pos, "'...' parameter is missing type")
            typ = &ast.BadExpr{From: pos, To: p.pos}
        }
        return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
    }
    return p.tryIdentOrType(false)
}
// parseVarType is like tryVarType but a type is required: a missing
// type is reported and replaced by an *ast.BadExpr, consuming one
// token to guarantee progress.
func (p *parser) parseVarType(isParam bool) ast.Expr {
    typ := p.tryVarType(isParam)
    if typ == nil {
        pos := p.pos
        p.errorExpected(pos, "type")
        p.next() // make progress
        typ = &ast.BadExpr{From: pos, To: p.pos}
    }
    return typ
}
// parseVarList parses a comma-separated list of "variables" (which at
// this point may be identifiers or types) and an optional trailing
// type. If typ is non-nil, list is an identifier list; otherwise list
// itself holds the type(s).
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
    if p.trace {
        defer un(trace(p, "VarList"))
    }
    // a list of identifiers looks like a list of type names
    //
    // parse/tryVarType accepts any type (including parenthesized
    // ones) even though the syntax does not permit them here: we
    // accept them all for more robust parsing and complain later
    for typ := p.parseVarType(isParam); typ != nil; {
        list = append(list, typ)
        if !p.seesComma("variable list") {
            break
        }
        p.next()
        typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
    }
    // if we had a list of identifiers, it must be followed by a type
    if typ = p.tryVarType(isParam); typ != nil {
        p.resolve(typ)
    }
    return
}
// parseParameterList parses the fields of a parameter list, handling
// both the named form ("a, b int, c string") and the anonymous form
// ("int, string"); named parameters are declared in scope.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
    if p.trace {
        defer un(trace(p, "ParameterList"))
    }
    list, typ := p.parseVarList(ellipsisOk)
    if typ != nil {
        // IdentifierList Type
        idents := p.makeIdentList(list)
        field := &ast.Field{Names: idents, Type: typ}
        params = append(params, field)
        // Go spec: The scope of an identifier denoting a function
        // parameter or result variable is the function body.
        p.declare(field, nil, scope, ast.Var, idents...)
        if p.tok == token.COMMA {
            p.next()
        }
        // remaining groups of "identifiers type" pairs
        for p.tok != token.RPAREN && p.tok != token.EOF {
            idents := p.parseIdentList()
            typ := p.parseVarType(ellipsisOk)
            field := &ast.Field{Names: idents, Type: typ}
            params = append(params, field)
            // Go spec: The scope of an identifier denoting a function
            // parameter or result variable is the function body.
            p.declare(field, nil, scope, ast.Var, idents...)
            if !p.seesComma("parameter list") {
                break
            }
            p.next()
        }
    } else {
        // Type { "," Type } (anonymous parameters)
        params = make([]*ast.Field, len(list))
        for i, x := range list {
            p.resolve(x)
            params[i] = &ast.Field{Type: x}
        }
    }
    return
}
// parseParameters parses a parenthesized (possibly empty) parameter list.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
    if p.trace {
        defer un(trace(p, "Parameters"))
    }
    var params []*ast.Field
    lparen := p.expect(token.LPAREN)
    if p.tok != token.RPAREN {
        params = p.parseParameterList(scope, ellipsisOk)
    }
    rparen := p.expect(token.RPAREN)
    return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
}
// parseResult parses a function result: either a parenthesized
// parameter list, a single unparenthesized type, or nothing (nil).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
    if p.trace {
        defer un(trace(p, "Result"))
    }
    if p.tok == token.LPAREN {
        return p.parseParameters(scope, false)
    }
    typ := p.tryType()
    if typ != nil {
        list := make([]*ast.Field, 1)
        list[0] = &ast.Field{Type: typ}
        return &ast.FieldList{List: list}
    }
    return nil
}
// parseSignature parses a function signature: parameters followed by
// an optional result; results may be nil.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
    if p.trace {
        defer un(trace(p, "Signature"))
    }
    params = p.parseParameters(scope, true)
    results = p.parseResult(scope)
    return
}
// parseFuncType parses a function type literal "func(...) ..." and
// returns it together with the function scope it created.
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
    if p.trace {
        defer un(trace(p, "FuncType"))
    }
    pos := p.expect(token.FUNC)
    scope := ast.NewScope(p.topScope) // function scope
    params, results := p.parseSignature(scope)
    return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses a single entry of an interface body: either a
// method (identifier followed by a signature) or an embedded interface
// type name. The resulting field is declared in the interface scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method: the name is followed by "(" starting the signature
		idents = []*ast.Ident{ident}
		// NOTE: this inner scope intentionally shadows the parameter;
		// method parameters live in their own scope, not the interface's.
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.lineComment
	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	// declared in the interface scope (the outer scope parameter)
	p.declare(spec, nil, scope, ast.Fun, idents...)
	return spec
}
// parseInterfaceType parses an interface type literal:
// "interface" "{" { MethodSpec } "}".
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // interface scope
	var list []*ast.Field
	// every method spec (and embedded interface) starts with an identifier
	for p.tok == token.IDENT {
		list = append(list, p.parseMethodSpec(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parseMapType parses a map type literal: "map" "[" KeyType "]" ValueType.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()
	return &ast.MapType{Map: pos, Key: key, Value: value}
}
// parseChanType parses a channel type in one of its three forms:
// "chan T" (bidirectional), "chan<- T" (send-only), or "<-chan T"
// (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV // bidirectional by default
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// "chan<-": send-only
			p.next()
			dir = ast.SEND
		}
	} else {
		// "<-chan": receive-only
		p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
}
// tryIdentOrType attempts to parse a type (or a bare identifier) at the
// current token, dispatching on the token that can start each type form.
// It returns nil if no type is found.
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType(ellipsisOk)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ, _ := p.parseFuncType() // scope discarded: type only, no body
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}
	// no type found
	return nil
}
// tryType is like tryIdentOrType (without ellipsis) but additionally
// resolves any type found; it returns nil if no type is present.
func (p *parser) tryType() ast.Expr {
	typ := p.tryIdentOrType(false)
	if typ != nil {
		p.resolve(typ)
	}
	return typ
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list: "case", "default", "}" or EOF.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}
	return
}
// parseBody parses a function body using the given (pre-populated)
// function scope, opening a label scope for the duration of the body.
// Unlike parseBlockStmt it installs scope rather than creating one, so
// parameters declared by the signature are visible inside the body.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}
	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a braced statement block in its own new scope
// (as opposed to parseBody, which reuses the function scope).
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	lbrace := p.expect(token.LBRACE)
	p.openScope()
	list := p.parseStmtList()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses either a bare function type or, if a body
// follows, a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	// function literal: body follows; bump exprLev so composite
	// literals inside the body are parsed unrestricted
	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--
	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			// lhs identifiers are resolved later (possible declarations)
			p.resolve(x)
		}
		return x
	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// literal operand
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	default:
		if typ := p.tryIdentOrType(true); typ != nil {
			// could be type for composite literal or conversion
			_, isIdent := typ.(*ast.Ident)
			assert(!isIdent, "type cannot be identifier") // idents handled above
			return typ
		}
	}
	// error recovery: no operand found
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.next() // make progress
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the selector following a "." (the "." itself has
// already been consumed by the caller) and wraps x in a SelectorExpr.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	sel := p.parseIdent()
	return &ast.SelectorExpr{X: x, Sel: sel}
}
// parseTypeAssertion parses ".(T)" or ".(type)" following x (the "."
// has already been consumed). For the type-switch form ".(type)" the
// resulting TypeAssertExpr has a nil Type.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses "[expr]" (index) or "[low? : high?]" (slice)
// following x. The presence of a ":" decides between the two forms.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var low, high ast.Expr
	isSlice := false
	if p.tok != token.COLON {
		low = p.parseRhs()
	}
	if p.tok == token.COLON {
		isSlice = true
		p.next()
		if p.tok != token.RBRACK {
			high = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)
	if isSlice {
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
	}
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses a parenthesized argument list following
// fun. A trailing "..." (recorded in Ellipsis) must be last: the loop
// stops as soon as an ellipsis has been seen.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.seesComma("argument list") {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")
	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal: either a
// nested literal value, a plain expression, or (when keyOk is set) a
// "key: value" pair. A possible map key is deliberately left unresolved
// until we know whether a ":" follows.
func (p *parser) parseElement(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	if p.tok == token.LBRACE {
		// nested composite literal with elided type
		return p.parseLiteralValue(nil)
	}
	x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key
	if keyOk {
		if p.tok == token.COLON {
			colon := p.pos
			p.next()
			return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
		}
		p.resolve(x) // not a map key
	}
	return x
}
// parseElementList parses the comma-separated elements of a composite
// literal, up to (but not including) the closing "}".
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement(true))
		if !p.seesComma("composite literal") {
			break
		}
		p.next()
	}
	return
}
// parseLiteralValue parses the braced value part of a composite literal.
// typ is the (possibly nil, for elided types) literal type already
// parsed by the caller.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type). If it is
// not, an error is reported and a BadExpr covering x is returned;
// otherwise x is returned unchanged. The empty cases enumerate the node
// kinds that are legal expressions.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		// unparen has already stripped all parentheses
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If the assertion's Type == nil we have a type assertion of
		// the form y.(type), which is only allowed in type switch
		// expressions. It's hard to exclude those but for the case
		// where we are in a type switch. Instead be lenient and test
		// this in the type checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: x.End()}
	}
	return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// Only the "..." array-length form is rejected here; everything else is
// accepted (the distinction between expression and type is left to the
// type checker).
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		// unparen has already stripped all parentheses
		panic("unreachable")
	case *ast.UnaryExpr:
	case *ast.ArrayType:
		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			// [...]T is only legal inside composite literals
			p.error(len.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: x.End()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors, type assertions, index/slice expressions, calls, and
// composite literal values. Once a suffix is consumed the expression can
// no longer be a bare declarable identifier, so lhs is cleared.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				// error recovery
				pos := p.pos
				p.next() // make progress
				p.errorExpected(pos, "selector or type assertion")
				x = &ast.BadExpr{From: pos, To: p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// "{" only continues the expression for a composite literal;
			// at exprLev < 0 (if/for/switch header) a bare type name is
			// not treated as a literal type, avoiding ambiguity with the
			// statement's block.
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// parseUnaryExpr parses a unary expression: a prefix operator applied
// recursively, a receive expression or receive-channel type after "<-",
// a pointer type or dereference after "*", or a primary expression.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
	case token.ARROW:
		// channel type or receive expression
		pos := p.pos
		p.next()
		if p.tok == token.CHAN {
			// "<-chan T": receive-only channel type
			p.next()
			value := p.parseType()
			return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
		}
		// "<-x": receive expression
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr(lhs)
}
// parseBinaryExpr parses a binary expression with operator precedence
// of at least prec1, descending from the current operator's precedence
// and recursing at prec+1 for right operands so equal-precedence
// operators associate to the left.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			if lhs {
				// about to use x as an operand: resolve it now
				p.resolve(x)
				lhs = false
			}
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
		}
	}
	return x
}
// parseExpr parses an expression at the lowest binding precedence.
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a right-hand-side expression (always resolved) and
// verifies it is an expression, not a type.
func (p *parser) parseRhs() ast.Expr {
	return p.checkExpr(p.parseExpr(false))
}
// parseRhsOrType parses a right-hand side that may be an expression or
// a type (e.g. the first argument of make/new), rejecting raw types
// such as [...]T.
func (p *parser) parseRhsOrType() ast.Expr {
	return p.checkExprOrType(p.parseExpr(false))
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
	basic   = iota // no label or range clause permitted
	labelOk        // a labeled statement is permitted
	rangeOk        // a range clause is permitted
)
// parseSimpleStmt parses a simple statement: an assignment (possibly a
// range clause), a labeled statement, a send, inc/dec, or a bare
// expression statement.
//
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// range clause: rhs is the single expression "range x"
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			// ":=" declares (some of) the lhs identifiers
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error re-
		// ported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression and requires it to be a call (or
// conversion). Returns nil (after reporting an error) if it is not,
// for use by the go/defer statement parsers.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	p.errorExpected(x.Pos(), "function/method call")
	return nil
}
// parseGoStmt parses a go statement: "go" Call. A BadStmt spanning the
// keyword is returned when the operand is not a call.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}
	return &ast.GoStmt{Go: pos, Call: call}
}
// parseDeferStmt parses a defer statement: "defer" Call. A BadStmt
// spanning the keyword is returned when the operand is not a call.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: pos, Call: call}
}
// parseReturnStmt parses a return statement with an optional result
// list (absent when the next token already terminates the statement).
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: pos, Results: x}
}
// parseBranchStmt parses break, continue, goto, or fallthrough.
// All but fallthrough accept an optional label, which is recorded for
// later resolution against the enclosing function's label scope.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a (possibly nil) simple statement into the
// expression it wraps; a non-expression statement used where a
// condition is required produces an error and a BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with an optional init statement
// before ";" and a condition. exprLev is forced negative so composite
// literals in the header are not confused with the statement's block.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	var s ast.Stmt
	var x ast.Expr
	{
		prevLev := p.exprLev
		p.exprLev = -1 // header: "{" terminates, not a composite literal
		if p.tok == token.SEMICOLON {
			// "if ; cond": empty init
			p.next()
			x = p.parseRhs()
		} else {
			s, _ = p.parseSimpleStmt(basic)
			if p.tok == token.SEMICOLON {
				// init statement followed by condition
				p.next()
				x = p.parseRhs()
			} else {
				// no init: the parsed statement was the condition
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a non-empty, comma-separated list of types
// (used for type-switch case clauses).
func (p *parser) parseTypeList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	list = append(list, p.parseType())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseType())
	}
	return
}
// parseCaseClause parses one "case ...:" or "default:" clause of a
// switch statement; typeSwitch selects between a type list and an
// expression list after "case". The clause body gets its own scope.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	} else {
		// default clause: List stays nil
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type-switch guard: either
// the bare form "x.(type)" or the single-assignment form
// "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
	switch stmt := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(stmt.X)
	case *ast.AssignStmt:
		// v := x.(type): exactly one lhs, one rhs, and ":="
		if len(stmt.Lhs) != 1 || stmt.Tok != token.DEFINE || len(stmt.Rhs) != 1 {
			return false
		}
		return isTypeSwitchAssert(stmt.Rhs[0])
	default:
		return false
	}
}
// parseSwitchStmt parses an expression switch or a type switch; which
// one is determined after the header by inspecting s2 (the tag/guard)
// with isTypeSwitchGuard. The header may carry an init statement.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // header: "{" starts the body, not a literal
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// what we parsed was the init statement; the guard follows
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t will
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one communication clause of a select
// statement: "case" followed by a send or receive statement, or
// "default". The clause (including its body) gets its own scope.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment: "v [, ok] = <-ch" or ":="
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.shortVarDecl(as, lhs)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		// default clause: comm stays nil
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement:
// "select" "{" { CommClause } "}".
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	return &ast.SelectStmt{Select: pos, Body: body}
}
// parseForStmt parses any of the three for-statement forms: condition
// only, init/cond/post, or a range clause. When parseSimpleStmt reports
// a range clause, the assignment it returned is re-shaped into a
// RangeStmt here.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // header: "{" starts the body, not a literal
		if p.tok != token.SEMICOLON {
			s2, isRange = p.parseSimpleStmt(rangeOk)
		}
		if !isRange && p.tok == token.SEMICOLON {
			// three-clause form: what we parsed first was the init
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{From: pos, To: body.End()}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current token
// to the specific statement parsers. Unknown tokens produce a BadStmt
// and the parser advances one token so progress is guaranteed.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl()}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{Semicolon: p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.next() // make progress
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// A parseSpecFunction parses one spec of a GenDecl (import, const,
// type, or var); iota is the spec's index within its declaration group.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
// isValidImport reports whether lit (a string literal as produced by
// go/scanner) denotes a legal import path: a non-empty string of
// graphic, non-space characters excluding a fixed set of punctuation.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if s == "" {
		return false
	}
	for _, r := range s {
		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
			return false
		}
	}
	return true
}
// parseImportSpec parses one import spec: an optional local name ("."
// or an identifier) followed by the quoted import path. The spec is
// also recorded in p.imports.
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		// dot import
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		// renamed import
		ident = p.parseIdent()
	}
	var path *ast.BasicLit
	if p.tok == token.STRING {
		if !isValidImport(p.lit) {
			p.error(p.pos, "invalid import path: "+p.lit)
		}
		path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.lineComment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    path,
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseConstSpec parses one const spec. The first spec of a group
// (iota == 0) must carry values; later specs may omit both type and
// values (repeating the previous spec per the Go spec).
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ConstSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ != nil || p.tok == token.ASSIGN || iota == 0 {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	p.declare(spec, iota, p.topScope, ast.Con, idents...)
	return spec
}
// parseTypeSpec parses one type spec. The name is declared before the
// type is parsed so self-referential types (e.g. linked lists) resolve.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.lineComment
	spec.Comment = p.lineComment
	return spec
}
// parseVarSpec parses one var spec: names, an optional type, and
// values. Values are required exactly when no type is given.
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "VarSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	p.declare(spec, nil, p.topScope, ast.Var, idents...)
	return spec
}
// parseGenDecl parses a general declaration (import, const, type, or
// var): either a single spec or a parenthesized group; f parses the
// individual specs and receives their index as iota.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// grouped declaration
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p, p.leadComment, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// single spec; the decl's doc comment belongs to the GenDecl
		list = append(list, f(p, nil, 0))
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseReceiver parses a method receiver and validates it: exactly one
// field whose type is an optionally-pointered unqualified identifier.
// Invalid receivers are replaced by a BadExpr field after reporting.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	par := p.parseParameters(scope, false)
	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(par.Opening, "exactly one receiver")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
		return par
	}
	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		p.errorExpected(base.Pos(), "(unqualified) identifier")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}}}
	}
	return par
}
// parseFuncDecl parses a function or method declaration (the "(" after
// "func" signals a receiver). Top-level functions other than init are
// declared in the package scope; methods and init are not.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// method declaration
		recv = p.parseReceiver(scope)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody(scope)
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses a top-level declaration, selecting the spec parser
// by keyword. On an unexpected token a BadDecl is produced and the
// parser advances one token to guarantee progress.
func (p *parser) parseDecl() ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	var f parseSpecFunction
	switch p.tok {
	case token.CONST:
		f = parseConstSpec
	case token.TYPE:
		f = parseTypeSpec
	case token.VAR:
		f = parseVarSpec
	case token.FUNC:
		return p.parseFuncDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		p.next() // make progress
		decl := &ast.BadDecl{From: pos, To: p.pos}
		return decl
	}
	return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: the package clause, import
// declarations, and the remaining top-level declarations (subject to
// the PackageClauseOnly / ImportsOnly modes). After parsing it resolves
// file-level identifiers against the package scope, keeping those still
// unresolved for the caller.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	var decls []ast.Decl
	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.
	if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl())
			}
		}
	}
	assert(p.topScope == p.pkgScope, "imbalanced scopes")
	// resolve global identifiers within the same file;
	// identifiers still unresolved are compacted to the front of
	// p.unresolved (i counts them)
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc:        doc,
		Package:    pos,
		Name:       ident,
		Decls:      decls,
		Scope:      p.pkgScope,
		Imports:    p.imports,
		Unresolved: p.unresolved[0:i],
		Comments:   p.comments,
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A parser for Go source files. Input may be provided in a variety of
// forms (see the various Parse* functions); the output is an abstract
// syntax tree (AST) representing the Go source. The parser is invoked
// through one of the Parse* functions.
//
package parser
import (
"container/vector"
"fmt"
"go/ast"
"go/scanner"
"go/token"
)
// noPos is used when there is no corresponding source position for a token.
var noPos token.Position

// The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
//
const (
	PackageClauseOnly uint = 1 << iota // parsing stops after package clause
	ImportsOnly                        // parsing stops after import declarations
	ParseComments                      // parse comments and add them to AST
	CheckSemantics                     // do semantic checks (only declarations for now)
	Trace                              // print a trace of parsed productions
)

// The parser structure holds the parser's internal state.
type parser struct {
	scanner.ErrorVector                 // collects errors reported via p.Error
	scanner             scanner.Scanner // tokenizer for the source being parsed

	// Tracing/debugging
	mode   uint // parsing mode
	check  bool // == (mode & CheckSemantics != 0)
	trace  bool // == (mode & Trace != 0)
	indent uint // indentation used for tracing output

	// Comments
	comments    *ast.CommentGroup // list of collected comments
	lastComment *ast.CommentGroup // last comment in the comments list
	leadComment *ast.CommentGroup // the last lead comment
	lineComment *ast.CommentGroup // the last line comment

	// Next token
	pos token.Position // token position
	tok token.Token    // one token look-ahead
	lit []byte         // token literal

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	// Scopes
	pkgScope  *ast.Scope // package-level declarations
	fileScope *ast.Scope // per-file declarations
	topScope  *ast.Scope // innermost open scope
}
// scannerMode translates the parser's mode bits into scanner mode bits.
// Semicolon insertion is always requested; comment scanning is enabled
// only when the parser has been asked to collect comments.
func scannerMode(mode uint) uint {
	m := uint(scanner.InsertSemis)
	if mode&ParseComments != 0 {
		m |= scanner.ScanComments
	}
	return m
}
// init prepares the parser to parse src (read from the named file) in
// the given mode and primes the one-token look-ahead. The parser itself
// serves as the scanner's error handler (via scanner.ErrorVector).
func (p *parser) init(filename string, src []byte, mode uint) {
	p.scanner.Init(filename, src, p, scannerMode(mode))
	p.mode = mode
	p.trace = mode&Trace != 0   // for convenience (p.trace is used frequently)
	p.check = mode&CheckSemantics != 0 // for convenience (p.check is used frequently)
	p.next()
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints one trace line, prefixed with the current
// line:column and indented to the parser's current nesting depth.
// (Note: pre-Go1 untyped variadic parameter syntax.)
func (p *parser) printTrace(a ...) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = uint(len(dots))
	fmt.Printf("%5d:%3d: ", p.pos.Line, p.pos.Column)
	i := 2 * p.indent // two dots per indentation level
	for ; i > n; i -= n {
		fmt.Print(dots)
	}
	fmt.Print(dots[0:i])
	fmt.Println(a)
}

// trace prints an opening trace line and increases the trace
// indentation; it returns p so it composes with un in a defer.
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// un ("unindent") closes the trace entry opened by trace.
// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}

// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (p.pos.Line == 0) is not initialized (it
	// is token.ILLEGAL), so don't print it .
	if p.trace && p.pos.Line > 0 {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, string(p.lit))
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.pos.Line
	if p.lit[1] == '*' { // p.lit starts with "//" or "/*"
		for _, b := range p.lit {
			if b == '\n' {
				endline++
			}
		}
	}
	comment = &ast.Comment{p.pos, p.lit}
	p.next0()
	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return the line on which the last comment
// in the group ends. An empty line or non-comment token terminates
// a comment group.
//
func (p *parser) consumeCommentGroup() int {
	var list vector.Vector
	endline := p.pos.Line
	// comments separated by at most one line break belong to one group
	for p.tok == token.COMMENT && endline+1 >= p.pos.Line {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list.Push(comment)
	}
	// convert list
	group := make([]*ast.Comment, len(list))
	for i, x := range list {
		group[i] = x.(*ast.Comment)
	}
	// add comment group to the comments list
	// (groups form a singly-linked list threaded through Next)
	g := &ast.CommentGroup{group, nil}
	if p.lastComment != nil {
		p.lastComment.Next = g
	} else {
		p.comments = g
	}
	p.lastComment = g
	return endline
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	line := p.pos.Line // current line
	p.next0()
	if p.tok == token.COMMENT {
		if p.pos.Line == line {
			// The comment is on same line as previous token; it
			// cannot be a lead comment but may be a line comment.
			endline := p.consumeCommentGroup()
			if p.pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = p.lastComment
			}
		}
		// consume successor comments, if any
		endline := -1
		for p.tok == token.COMMENT {
			endline = p.consumeCommentGroup()
		}
		if endline >= 0 && endline+1 == p.pos.Line {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = p.lastComment
		}
	}
}
// errorExpected reports an "expected ..." error at pos; if the error
// is at the current token, the token itself is added to the message.
func (p *parser) errorExpected(pos token.Position, msg string) {
	msg = "expected " + msg
	if pos.Offset == p.pos.Offset {
		// the error happened at the current position;
		// make the error message more specific
		msg += ", found '" + p.tok.String() + "'"
		if p.tok.IsLiteral() {
			msg += " " + string(p.lit)
		}
	}
	p.Error(pos, msg)
}

// expect consumes the current token if it is tok (reporting an error
// otherwise); it always advances and returns the token's position.
func (p *parser) expect(tok token.Token) token.Position {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress in any case
	return pos
}

// expectSemi consumes a required semicolon; a closing ')' or '}' is
// accepted in its place (where automatic semicolons are elided).
func (p *parser) expectSemi() {
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		p.expect(token.SEMICOLON)
	}
}

// ----------------------------------------------------------------------------
// Scope support

// openScope pushes a new innermost scope.
// Usage pattern: defer closeScope(openScope(p));
func openScope(p *parser) *parser {
	p.topScope = ast.NewScope(p.topScope)
	return p
}

// closeScope pops the innermost scope.
func closeScope(p *parser) { p.topScope = p.topScope.Outer }
// parseIdent parses an identifier without declaring or resolving it.
// The kind argument is accepted for symmetry with declIdent but is not
// used; the resulting object always has kind ast.Err.
func (p *parser) parseIdent(kind ast.ObjKind) *ast.Ident {
	o := ast.NewObj(ast.Err, p.pos, "")
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling
	} else {
		o.Name = string(p.lit)
		p.next()
	}
	return &ast.Ident{o.Pos, o}
}
// TODO(gri) Separate parsing from declaration since an identifier's
// scope often starts only after the type has been seen.

// declIdent parses an identifier and declares it with the given kind
// in the current (innermost) scope; redeclarations are reported only
// when semantic checking (p.check) is enabled.
func (p *parser) declIdent(kind ast.ObjKind) *ast.Ident {
	obj := ast.NewObj(kind, p.pos, "")
	if p.tok == token.IDENT {
		obj.Name = string(p.lit)
		// TODO(gri) Consider reversing the conditionals below:
		// always do the declaration but only report
		// error if enabled (may be necessary to get
		// search functionality in the presence of
		// incorrect files).
		if p.check && !p.topScope.Declare(obj) {
			// TODO(gri) Declare could return already-declared
			// object for a very good error message.
			p.Error(obj.Pos, "'"+obj.Name+"' declared already")
		}
		p.next()
	} else {
		p.expect(token.IDENT) // use expect() error handling
	}
	return &ast.Ident{obj.Pos, obj}
}

// TODO(gri) Separate parsing from declaration since an identifier's
// scope often starts only after the type has been seen.

// declIdentList parses a comma-separated list of identifiers (at least
// one), declaring each with the given kind.
func (p *parser) declIdentList(kind ast.ObjKind) []*ast.Ident {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	var list vector.Vector
	list.Push(p.declIdent(kind))
	for p.tok == token.COMMA {
		p.next()
		list.Push(p.declIdent(kind))
	}
	// convert vector
	idents := make([]*ast.Ident, len(list))
	for i, x := range list {
		idents[i] = x.(*ast.Ident)
	}
	return idents
}
// findIdent parses an identifier and resolves it against the open
// scopes; if no declaration is found (or there was no identifier), a
// fresh ast.Err object is substituted so callers always get an Ident.
func (p *parser) findIdent() *ast.Ident {
	pos, name := p.pos, ""
	var o *ast.Object
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling
	} else {
		name = string(p.lit)
		o = p.topScope.Lookup(name)
		p.next()
	}
	if o == nil {
		o = ast.NewObj(ast.Err, pos, name)
	}
	return &ast.Ident{o.Pos, o}
}
// ----------------------------------------------------------------------------
// Common productions
// makeExprList converts a vector of expressions into an []ast.Expr.
func makeExprList(list *vector.Vector) []ast.Expr {
	n := len(*list)
	exprs := make([]ast.Expr, n)
	for i := 0; i < n; i++ {
		exprs[i] = list.At(i).(ast.Expr)
	}
	return exprs
}
// parseExprList parses a comma-separated list of expressions
// (at least one).
func (p *parser) parseExprList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}
	var list vector.Vector
	list.Push(p.parseExpr())
	for p.tok == token.COMMA {
		p.next()
		list.Push(p.parseExpr())
	}
	return makeExprList(&list)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a required type; if none is present it reports an
// error, advances one token, and returns a BadExpr.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	typ := p.tryType()
	if typ == nil {
		p.errorExpected(p.pos, "type")
		p.next() // make progress
		return &ast.BadExpr{p.pos}
	}
	return typ
}

// parseQualifiedIdent parses an identifier optionally qualified by a
// package name (pkg.Name), returning either an Ident or a SelectorExpr.
func (p *parser) parseQualifiedIdent() ast.Expr {
	if p.trace {
		defer un(trace(p, "QualifiedIdent"))
	}
	var x ast.Expr = p.findIdent()
	if p.tok == token.PERIOD {
		// first identifier is a package identifier
		p.next()
		sel := p.findIdent()
		x = &ast.SelectorExpr{x, sel}
	}
	return x
}

// parseTypeName parses a (possibly qualified) type name.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	return p.parseQualifiedIdent()
}
// parseArrayType parses an array or slice type "[len]T"/"[]T"; when
// ellipsisOk is set, "[...]T" is also accepted (composite literals).
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}
	lbrack := p.expect(token.LBRACK)
	var len ast.Expr // nil length => slice type
	if ellipsisOk && p.tok == token.ELLIPSIS {
		len = &ast.Ellipsis{p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		len = p.parseExpr()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{lbrack, len, elt}
}
// makeIdentList converts a vector of expressions (expected to be
// identifiers) into an []*ast.Ident. Non-identifier entries are
// reported and replaced by a fresh error Ident.
//
// Bug fix: the original assigned the error placeholder to idents[i]
// and then unconditionally overwrote it with the nil ident from the
// failed type assertion, leaving nil entries in the result. The
// placeholder is now assigned to ident so the single store keeps it.
func (p *parser) makeIdentList(list *vector.Vector) []*ast.Ident {
	idents := make([]*ast.Ident, len(*list))
	for i, x := range *list {
		ident, isIdent := x.(*ast.Ident)
		if !isIdent {
			pos := x.(ast.Expr).Pos()
			p.errorExpected(pos, "identifier")
			ident = &ast.Ident{pos, ast.NewObj(ast.Err, pos, "")}
		}
		idents[i] = ident
	}
	return idents
}
// parseFieldDecl parses one field declaration inside a struct: either
// "IdentList Type [tag]" or an anonymous field "Type [tag]". Which
// case applies is only known after the fact (see "analyze case").
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}
	doc := p.leadComment
	// a list of identifiers looks like a list of type names
	var list vector.Vector
	for {
		// TODO(gri): do not allow ()'s here
		list.Push(p.parseType())
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	// if we had a list of identifiers, it must be followed by a type
	typ := p.tryType()
	// optional tag
	var tag []*ast.BasicLit
	if p.tok == token.STRING {
		x := &ast.BasicLit{p.pos, p.tok, p.lit}
		p.next()
		tag = []*ast.BasicLit{x}
	}
	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(&list)
	} else {
		// Type (anonymous field)
		if len(list) == 1 {
			// TODO(gri): check that this looks like a type
			typ = list.At(0).(ast.Expr)
		} else {
			p.errorExpected(p.pos, "anonymous field")
			typ = &ast.BadExpr{p.pos}
		}
	}
	p.expectSemi()
	return &ast.Field{doc, idents, typ, tag, p.lineComment}
}

// parseStructType parses "struct { ...fields... }".
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}
	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	var list vector.Vector
	// fields begin with an identifier or '*' (anonymous pointer field)
	for p.tok == token.IDENT || p.tok == token.MUL {
		list.Push(p.parseFieldDecl())
	}
	rbrace := p.expect(token.RBRACE)
	// convert vector
	fields := make([]*ast.Field, len(list))
	for i, x := range list {
		fields[i] = x.(*ast.Field)
	}
	return &ast.StructType{pos, lbrace, fields, rbrace, false}
}
// parsePointerType parses a pointer type "*T".
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	pos := p.expect(token.MUL)
	return &ast.StarExpr{pos, p.parseType()}
}
// tryParameterType is tryType extended to accept a trailing "..."
// (variadic parameter) when ellipsisOk is set; returns nil if no type
// is present.
func (p *parser) tryParameterType(ellipsisOk bool) ast.Expr {
	if ellipsisOk && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		if p.tok != token.RPAREN {
			// "..." always must be at the very end of a parameter list
			p.Error(pos, "expected type, found '...'")
		}
		return &ast.Ellipsis{pos}
	}
	return p.tryType()
}

// parseParameterType is the required-type variant of tryParameterType;
// a missing type is reported and replaced by a BadExpr.
func (p *parser) parseParameterType(ellipsisOk bool) ast.Expr {
	typ := p.tryParameterType(ellipsisOk)
	if typ == nil {
		p.errorExpected(p.pos, "type")
		p.next() // make progress
		typ = &ast.BadExpr{p.pos}
	}
	return typ
}

// parseParameterDecl parses one parameter group. Since "a, b c" and
// "T1, T2, T3" look alike up front, it collects a type list and then
// tries one more type: a non-nil result means the list was identifiers.
func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ParameterDecl"))
	}
	// a list of identifiers looks like a list of type names
	var list vector.Vector
	for {
		// TODO(gri): do not allow ()'s here
		list.Push(p.parseParameterType(ellipsisOk))
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	// if we had a list of identifiers, it must be followed by a type
	typ := p.tryParameterType(ellipsisOk)
	return &list, typ
}

// parseParameterList parses the contents of a parameter list, handling
// both the named form ("a, b T, c U") and the anonymous form
// ("T, U, V"); the two cannot be mixed.
func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}
	list, typ := p.parseParameterDecl(ellipsisOk)
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		list.Resize(0, 0) // reuse the vector for fields
		list.Push(&ast.Field{nil, idents, typ, nil, nil})
		if p.tok == token.COMMA {
			p.next()
		}
		// remaining groups are "IdentList Type" as well
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.declIdentList(ast.Var)
			typ := p.parseParameterType(ellipsisOk)
			list.Push(&ast.Field{nil, idents, typ, nil, nil})
			if p.tok != token.COMMA {
				break
			}
			p.next()
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		// convert list of types into list of *Param
		for i, x := range *list {
			list.Set(i, &ast.Field{Type: x.(ast.Expr)})
		}
	}
	// convert list
	params := make([]*ast.Field, len(*list))
	for i, x := range *list {
		params[i] = x.(*ast.Field)
	}
	return params
}

// parseParameters parses a parenthesized (possibly empty) parameter
// list, opening a scope for the parameter declarations.
func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}
	var params []*ast.Field
	p.expect(token.LPAREN)
	openScope(p)
	if p.tok != token.RPAREN {
		params = p.parseParameterList(ellipsisOk)
	}
	closeScope(p)
	p.expect(token.RPAREN)
	return params
}

// parseResult parses an optional function result: either a
// parenthesized parameter list or a single unparenthesized type.
func (p *parser) parseResult() []*ast.Field {
	if p.trace {
		defer un(trace(p, "Result"))
	}
	var results []*ast.Field
	if p.tok == token.LPAREN {
		results = p.parseParameters(false)
	} else if p.tok != token.FUNC {
		typ := p.tryType()
		if typ != nil {
			results = make([]*ast.Field, 1)
			results[0] = &ast.Field{Type: typ}
		}
	}
	return results
}

// parseSignature parses a function signature: parameters (variadic
// allowed) followed by an optional result.
func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}
	params = p.parseParameters(true)
	results = p.parseResult()
	return
}

// parseFuncType parses a function type "func(...) ...".
func (p *parser) parseFuncType() *ast.FuncType {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}
	pos := p.expect(token.FUNC)
	params, results := p.parseSignature()
	return &ast.FuncType{pos, params, results}
}
// parseMethodSpec parses one entry of an interface body: either a
// method "Name(sig)" or an embedded (possibly qualified) interface
// name. The distinction is made by the token after the identifier.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseQualifiedIdent()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		params, results := p.parseSignature()
		typ = &ast.FuncType{noPos, params, results}
	} else {
		// embedded interface
		typ = x
	}
	p.expectSemi()
	return &ast.Field{doc, idents, typ, nil, p.lineComment}
}

// parseInterfaceType parses "interface { ...method specs... }".
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	var list vector.Vector
	for p.tok == token.IDENT {
		list.Push(p.parseMethodSpec())
	}
	rbrace := p.expect(token.RBRACE)
	// convert vector
	methods := make([]*ast.Field, len(list))
	for i, x := range list {
		methods[i] = x.(*ast.Field)
	}
	return &ast.InterfaceType{pos, lbrace, methods, rbrace, false}
}
// parseMapType parses a map type "map[K]V".
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	keyType := p.parseType()
	p.expect(token.RBRACK)
	valueType := p.parseType()
	return &ast.MapType{pos, keyType, valueType}
}
// parseChanType parses a channel type: "chan T", "chan<- T" (send
// only), or "<-chan T" (receive only).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV // bidirectional by default
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			p.next()
			dir = ast.SEND
		}
	} else {
		p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{pos, dir, value}
}

// tryRawType attempts to parse a type, dispatching on the current
// token; it returns nil (without consuming anything) if the token
// cannot start a type. ellipsisOk is threaded into array types to
// permit "[...]T" where composite literals are possible.
func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType(ellipsisOk)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		return p.parseFuncType()
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{lparen, typ, rparen}
	}
	// no type found
	return nil
}

// tryType is tryRawType without the "[...]T" form.
func (p *parser) tryType() ast.Expr { return p.tryRawType(false) }
// ----------------------------------------------------------------------------
// Blocks
// makeStmtList converts a vector of statements into an []ast.Stmt.
func makeStmtList(list *vector.Vector) []ast.Stmt {
	n := len(*list)
	stmts := make([]ast.Stmt, n)
	for i := 0; i < n; i++ {
		stmts[i] = list.At(i).(ast.Stmt)
	}
	return stmts
}
// parseStmtList parses statements until a token that terminates a
// statement list (case/default label, '}', or EOF).
func (p *parser) parseStmtList() []ast.Stmt {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	var list vector.Vector
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list.Push(p.parseStmt())
	}
	return makeStmtList(&list)
}

// parseBlockStmt parses a braced statement block in its own scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	defer closeScope(openScope(p))
	lbrace := p.expect(token.LBRACE)
	list := p.parseStmtList()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{lbrace, list, rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a "func" type, and if a body follows,
// turns it into a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	// function literal: the body is an expression context
	p.exprLev++
	body := p.parseBlockStmt()
	p.exprLev--
	return &ast.FuncLit{typ, body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		return p.findIdent()
	case token.INT, token.FLOAT, token.CHAR, token.STRING:
		x := &ast.BasicLit{p.pos, p.tok, p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++ // inside parens, composite literals are unrestricted
		x := p.parseExpr()
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{lparen, x, rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	default:
		t := p.tryRawType(true) // could be type for composite literal or conversion
		if t != nil {
			return t
		}
	}
	p.errorExpected(p.pos, "operand")
	p.next() // make progress
	return &ast.BadExpr{p.pos}
}
// parseSelectorOrTypeAssertion parses the suffix after "x.": either a
// selector "x.f", a type assertion "x.(T)", or the type-switch form
// "x.(type)" (represented by a TypeAssertExpr with nil Type).
func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "SelectorOrTypeAssertion"))
	}
	p.expect(token.PERIOD)
	if p.tok == token.IDENT {
		// selector
		sel := p.findIdent()
		return &ast.SelectorExpr{x, sel}
	}
	// type assertion
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{x, typ}
}

// parseIndexOrSlice parses "x[i]" or "x[i : j]" (j optional).
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	p.expect(token.LBRACK)
	p.exprLev++ // indices are full expression contexts
	index := p.parseExpr()
	if p.tok == token.COLON {
		p.next()
		var end ast.Expr
		if p.tok != token.RBRACK {
			end = p.parseExpr()
		}
		x = &ast.SliceExpr{x, index, end}
	} else {
		x = &ast.IndexExpr{x, index}
	}
	p.exprLev--
	p.expect(token.RBRACK)
	return x
}

// parseCallOrConversion parses an argument list "fun(a, b, ...)";
// whether it is a call or a conversion is decided later.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list vector.Vector
	for p.tok != token.RPAREN && p.tok != token.EOF {
		list.Push(p.parseExpr())
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expect(token.RPAREN)
	return &ast.CallExpr{fun, lparen, makeExprList(&list), rparen}
}

// parseElement parses one composite-literal element, possibly of the
// key:value form.
func (p *parser) parseElement() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	x := p.parseExpr()
	if p.tok == token.COLON {
		colon := p.pos
		p.next()
		x = &ast.KeyValueExpr{x, colon, p.parseExpr()}
	}
	return x
}

// parseElementList parses a comma-separated list of composite-literal
// elements up to the closing '}'.
func (p *parser) parseElementList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	var list vector.Vector
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list.Push(p.parseElement())
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	return makeExprList(&list)
}

// parseCompositeLit parses "{ elements }" for the given literal type.
func (p *parser) parseCompositeLit(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "CompositeLit"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.CompositeLit{typ, lbrace, elts, rbrace}
}
// TODO(gri): Consider different approach to checking syntax after parsing:
//            Provide a arguments (set of flags) to parsing functions
//            restricting what they are supposed to accept depending
//            on context.

// checkExpr checks that x is an expression (and not a type);
// offending nodes are reported and replaced by a BadExpr.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	// TODO(gri): should provide predicate in AST nodes
	switch t := x.(type) {
	// the empty cases below are all legal expression nodes
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.StringList:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		if t.Type == nil {
			// the form X.(type) is only allowed in type switch expressions
			p.errorExpected(x.Pos(), "expression")
			x = &ast.BadExpr{x.Pos()}
		}
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
		if t.Op == token.RANGE {
			// the range operator is only allowed at the top of a for statement
			p.errorExpected(x.Pos(), "expression")
			x = &ast.BadExpr{x.Pos()}
		}
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{x.Pos()}
	}
	return x
}
// isTypeName reports whether x has the form of a (possibly qualified
// or parenthesized) type name.
func isTypeName(x ast.Expr) bool {
	switch t := x.(type) {
	case *ast.ParenExpr:
		return isTypeName(t.X) // TODO(gri): should (TypeName) be illegal?
	case *ast.SelectorExpr:
		return isTypeName(t.X)
	case *ast.BadExpr, *ast.Ident:
		return true
	}
	return false // all other nodes are not type names
}
// isCompositeLitType reports whether x is a legal composite literal
// type: a (possibly parenthesized) type name, qualified name, array,
// struct, or map type.
func isCompositeLitType(x ast.Expr) bool {
	switch t := x.(type) {
	case *ast.ParenExpr:
		return isCompositeLitType(t.X)
	case *ast.SelectorExpr:
		return isTypeName(t.X)
	case *ast.BadExpr, *ast.Ident, *ast.ArrayType, *ast.StructType, *ast.MapType:
		return true
	}
	return false // all other nodes are not legal composite literal types
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	// TODO(gri): should provide predicate in AST nodes
	switch t := x.(type) {
	case *ast.UnaryExpr:
		if t.Op == token.RANGE {
			// the range operator is only allowed at the top of a for statement
			p.errorExpected(x.Pos(), "expression")
			x = &ast.BadExpr{x.Pos()}
		}
	case *ast.ArrayType:
		// "[...]T" is only valid as a composite literal type
		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			p.Error(len.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{x.Pos()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of
// suffixes: selectors/type assertions, index/slice, call/conversion,
// and composite literals (the latter only where a '{' cannot be the
// start of a block, see exprLev).
func (p *parser) parsePrimaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand()
L:
	for {
		switch p.tok {
		case token.PERIOD:
			x = p.parseSelectorOrTypeAssertion(p.checkExpr(x))
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// composite literal only if the type is legal and we are not
			// in a control clause (where T{...} would be ambiguous)
			if isCompositeLitType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				x = p.parseCompositeLit(x)
			} else {
				break L
			}
		default:
			break L
		}
	}
	return x
}

// parseUnaryExpr parses a prefix operator applied to a unary
// expression, or falls through to a primary expression.
func (p *parser) parseUnaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.ARROW, token.AND, token.RANGE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
	case token.MUL:
		// unary "*" expression or pointer type
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{pos, p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr()
}

// parseBinaryExpr parses a binary expression using operator-precedence
// parsing; only operators with precedence >= prec1 are consumed.
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr()
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			y := p.parseBinaryExpr(prec + 1)
			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
		}
	}
	return x
}

// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
//            should reject when a type/raw type is obviously not allowed

// parseExpr parses an expression at the lowest operator precedence.
func (p *parser) parseExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(token.LowestPrec + 1)
}
// ----------------------------------------------------------------------------
// Statements
// parseSimpleStmt parses a simple statement: a labeled statement (only
// when labelOk is set), an assignment or short declaration, an inc/dec
// statement, or a bare expression statement.
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseExprList()
	switch p.tok {
	case token.COLON:
		// labeled statement
		p.next()
		if labelOk && len(x) == 1 {
			if label, isIdent := x[0].(*ast.Ident); isIdent {
				return &ast.LabeledStmt{label, p.parseStmt()}
			}
		}
		p.Error(x[0].Pos(), "illegal label declaration")
		return &ast.BadStmt{x[0].Pos()}
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement
		pos, tok := p.pos, p.tok
		p.next()
		y := p.parseExprList()
		return &ast.AssignStmt{x, pos, tok, y}
	}
	if len(x) > 1 {
		p.Error(x[0].Pos(), "only one expression allowed")
		// continue with first expression
	}
	if p.tok == token.INC || p.tok == token.DEC {
		// increment or decrement
		s := &ast.IncDecStmt{x[0], p.tok}
		p.next() // consume "++" or "--"
		return s
	}
	// expression
	return &ast.ExprStmt{x[0]}
}
// parseCallExpr parses an expression that must be a function or method
// call (for go/defer statements); on failure it reports an error and
// returns nil.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseExpr()
	call, isCall := x.(*ast.CallExpr)
	if !isCall {
		p.errorExpected(x.Pos(), "function/method call")
		return nil
	}
	return call
}
// parseGoStmt parses "go call;"; a non-call operand yields a BadStmt.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call != nil {
		return &ast.GoStmt{pos, call}
	}
	return &ast.BadStmt{pos}
}

// parseDeferStmt parses "defer call;"; a non-call operand yields a
// BadStmt.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call != nil {
		return &ast.DeferStmt{pos, call}
	}
	return &ast.BadStmt{pos}
}
// parseReturnStmt parses "return [expr-list];".
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseExprList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{pos, x}
}

// parseBranchStmt parses break/continue/goto/fallthrough, with an
// optional label for all but fallthrough.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	s := &ast.BranchStmt{p.pos, tok, nil}
	p.expect(tok)
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		s.Label = p.findIdent()
	}
	p.expectSemi()
	return s
}
// makeExpr extracts the condition expression from a parsed control
// clause statement; nil maps to nil, and a non-expression statement is
// reported and turned into a BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	es, isExpr := s.(*ast.ExprStmt)
	if isExpr {
		return p.checkExpr(es.X)
	}
	p.Error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{s.Pos()}
}
// parseControlClause parses the header of an if/switch/for statement:
// up to two (three for for-loops, when isForStmt is set) semicolon-
// separated simple statements before the opening '{'. Parsing is done
// with exprLev < 0 so "T{...}" is not consumed as a composite literal.
func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) {
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s1 = p.parseSimpleStmt(false)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			if p.tok != token.LBRACE && p.tok != token.SEMICOLON {
				s2 = p.parseSimpleStmt(false)
			}
			if isForStmt {
				// for statements have a 3rd section
				p.expectSemi()
				if p.tok != token.LBRACE {
					s3 = p.parseSimpleStmt(false)
				}
			}
		} else {
			// no semicolon: the single statement parsed into s1
			// is really the condition/expression, i.e. s2
			s1, s2 = nil, s1
		}
		p.exprLev = prevLev
	}
	return s1, s2, s3
}
// parseIfStmt parses "if [init;] cond { ... } [else ...]".
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	// IfStmt block
	defer closeScope(openScope(p))
	pos := p.expect(token.IF)
	s1, s2, _ := p.parseControlClause(false)
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{pos, s1, p.makeExpr(s2), body, else_}
}

// parseCaseClause parses one "case expr-list:" or "default:" clause of
// an expression switch, including its statement list.
func (p *parser) parseCaseClause() *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	// CaseClause block
	defer closeScope(openScope(p))
	// SwitchCase
	pos := p.pos
	var x []ast.Expr // nil for "default"
	if p.tok == token.CASE {
		p.next()
		x = p.parseExprList()
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	return &ast.CaseClause{pos, x, colon, body}
}
// parseTypeList parses a comma-separated list of types (at least one).
func (p *parser) parseTypeList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	var types vector.Vector
	for {
		types.Push(p.parseType())
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}
	return makeExprList(&types)
}
// parseTypeCaseClause parses one "case type-list:" or "default:"
// clause of a type switch, including its statement list.
func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause {
	if p.trace {
		defer un(trace(p, "TypeCaseClause"))
	}
	// TypeCaseClause block
	defer closeScope(openScope(p))
	// TypeSwitchCase
	pos := p.pos
	var types []ast.Expr // nil for "default"
	if p.tok == token.CASE {
		p.next()
		types = p.parseTypeList()
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	return &ast.TypeCaseClause{pos, types, colon, body}
}
// isExprSwitch reports whether the switch header statement s selects
// an expression switch (as opposed to a type switch). A nil statement,
// a plain expression, or a regular type assertion x.(T) all mean an
// expression switch; x.(type) (nil assertion type) means type switch.
func isExprSwitch(s ast.Stmt) bool {
	if s == nil {
		return true
	}
	es, isExpr := s.(*ast.ExprStmt)
	if !isExpr {
		return false
	}
	ta, isAssert := es.X.(*ast.TypeAssertExpr)
	if !isAssert {
		return true
	}
	return ta.Type != nil // regular type assertion
}
// parseSwitchStmt parses either an expression switch or a type switch;
// which one is decided by inspecting the parsed header statement s2.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	// SwitchStmt block
	defer closeScope(openScope(p))
	pos := p.expect(token.SWITCH)
	s1, s2, _ := p.parseControlClause(false)
	if isExprSwitch(s2) {
		lbrace := p.expect(token.LBRACE)
		var cases vector.Vector
		for p.tok == token.CASE || p.tok == token.DEFAULT {
			cases.Push(p.parseCaseClause())
		}
		rbrace := p.expect(token.RBRACE)
		body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
		p.expectSemi()
		return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
	}
	// type switch
	// TODO(gri): do all the checks!
	lbrace := p.expect(token.LBRACE)
	var cases vector.Vector
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		cases.Push(p.parseTypeCaseClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
	return &ast.TypeSwitchStmt{pos, s1, s2, body}
}
// parseCommClause parses one communication clause of a select statement:
// "case <-ch:", "case x = <-ch:", "case x := <-ch:", "case ch <- v:", or
// "default:", each followed by a statement list, in its own scope.
func (p *parser) parseCommClause() *ast.CommClause {
    if p.trace {
        defer un(trace(p, "CommClause"))
    }
    // CommClause block
    defer closeScope(openScope(p))
    // CommCase
    pos := p.pos
    var tok token.Token       // ASSIGN or DEFINE for receive-with-assignment; zero otherwise
    var lhs, rhs ast.Expr
    if p.tok == token.CASE {
        p.next()
        if p.tok == token.ARROW {
            // RecvExpr without assignment
            rhs = p.parseExpr()
        } else {
            // SendExpr or RecvExpr
            rhs = p.parseExpr()
            if p.tok == token.ASSIGN || p.tok == token.DEFINE {
                // RecvExpr with assignment
                tok = p.tok
                p.next()
                lhs = rhs // the expression parsed first was the assignment target
                if p.tok == token.ARROW {
                    rhs = p.parseExpr()
                } else {
                    p.expect(token.ARROW) // use expect() error handling
                }
            }
            // else SendExpr
        }
    } else {
        p.expect(token.DEFAULT) // "default" clause: tok/lhs/rhs stay zero
    }
    colon := p.expect(token.COLON)
    body := p.parseStmtList()
    return &ast.CommClause{pos, tok, lhs, rhs, colon, body}
}
// parseSelectStmt parses a select statement: the SELECT keyword followed
// by a brace-delimited list of communication clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
    if p.trace {
        defer un(trace(p, "SelectStmt"))
    }
    pos := p.expect(token.SELECT)
    lbrace := p.expect(token.LBRACE)
    var cases vector.Vector
    for p.tok == token.CASE || p.tok == token.DEFAULT {
        cases.Push(p.parseCommClause())
    }
    rbrace := p.expect(token.RBRACE)
    p.expectSemi()
    body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
    return &ast.SelectStmt{pos, body}
}
// parseForStmt parses a "for" statement. Depending on the control clause it
// returns an *ast.RangeStmt (when the init statement is an assignment whose
// single rhs is a range expression), an ordinary *ast.ForStmt, or an
// *ast.BadStmt after reporting a syntax error.
func (p *parser) parseForStmt() ast.Stmt {
    if p.trace {
        defer un(trace(p, "ForStmt"))
    }
    // ForStmt block
    defer closeScope(openScope(p))
    pos := p.expect(token.FOR)
    s1, s2, s3 := p.parseControlClause(true)
    body := p.parseBlockStmt()
    p.expectSemi()
    if as, isAssign := s2.(*ast.AssignStmt); isAssign {
        // possibly a for statement with a range clause; check assignment operator
        if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
            p.errorExpected(as.TokPos, "'=' or ':='")
            return &ast.BadStmt{pos}
        }
        // check lhs: at most two expressions (key and optional value)
        var key, value ast.Expr
        switch len(as.Lhs) {
        case 2:
            value = as.Lhs[1]
            fallthrough
        case 1:
            key = as.Lhs[0]
        default:
            // NOTE(review): assumes len(as.Lhs) > 0 here (the assignment
            // parser produces at least one lhs expression) — confirm.
            p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
            return &ast.BadStmt{pos}
        }
        // check rhs: a range clause has exactly one rhs expression
        if len(as.Rhs) != 1 {
            p.errorExpected(as.Rhs[0].Pos(), "1 expression") // fixed: was "1 expressions"
            return &ast.BadStmt{pos}
        }
        if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
            // rhs is range expression; check lhs
            return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
        } else {
            p.errorExpected(s2.Pos(), "range clause")
            return &ast.BadStmt{pos}
        }
    } else {
        // regular for statement
        return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
    }
    // removed: unreachable panic()/return nil after the exhaustive if/else
    // above (every path already returns)
}
// parseStmt parses a single statement, dispatching on the current token.
// On an unexpected token it reports an error, consumes the token to make
// progress, and returns an *ast.BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
    if p.trace {
        defer un(trace(p, "Statement"))
    }
    switch p.tok {
    case token.CONST, token.TYPE, token.VAR:
        s = &ast.DeclStmt{p.parseDecl()}
    case
        // tokens that may start a top-level expression
        token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
        token.LBRACK, token.STRUCT, // composite type
        token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
        s = p.parseSimpleStmt(true)
        // because of the required look-ahead, labeled statements are
        // parsed by parseSimpleStmt - don't expect a semicolon after
        // them
        if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
            p.expectSemi()
        }
    case token.GO:
        s = p.parseGoStmt()
    case token.DEFER:
        s = p.parseDeferStmt()
    case token.RETURN:
        s = p.parseReturnStmt()
    case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
        s = p.parseBranchStmt(p.tok)
    case token.LBRACE:
        s = p.parseBlockStmt()
        p.expectSemi()
    case token.IF:
        s = p.parseIfStmt()
    case token.SWITCH:
        s = p.parseSwitchStmt()
    case token.SELECT:
        s = p.parseSelectStmt()
    case token.FOR:
        s = p.parseForStmt()
    case token.SEMICOLON:
        p.next()
        fallthrough
    case token.RBRACE:
        // a semicolon may be omitted before a closing "}"
        s = &ast.EmptyStmt{p.pos}
    default:
        // no statement found
        p.errorExpected(p.pos, "statement")
        p.next() // make progress
        s = &ast.BadStmt{p.pos}
    }
    return
}
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup) ast.Spec
// parseImportSpec parses one import spec: an optional "." or local package
// name, followed by the import path string. doc is the spec's lead comment.
func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
    if p.trace {
        defer un(trace(p, "ImportSpec"))
    }
    var ident *ast.Ident
    if p.tok == token.PERIOD {
        // dot import: synthesize an identifier named "."
        ident = &ast.Ident{p.pos, ast.NewObj(ast.Pkg, p.pos, ".")}
        p.next()
    } else if p.tok == token.IDENT {
        ident = p.declIdent(ast.Pkg) // local package name
    }
    var path []*ast.BasicLit
    if p.tok == token.STRING {
        x := &ast.BasicLit{p.pos, p.tok, p.lit}
        p.next()
        path = []*ast.BasicLit{x}
    } else {
        p.expect(token.STRING) // use expect() error handling
    }
    p.expectSemi()
    return &ast.ImportSpec{doc, ident, path, p.lineComment}
}
// parseConstSpec parses one const spec: an identifier list, an optional
// type, and an optional "= values" part. If a type is present the "="
// is required; with neither type nor "=", values stay nil (iota repetition).
func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
    if p.trace {
        defer un(trace(p, "ConstSpec"))
    }
    idents := p.declIdentList(ast.Con)
    typ := p.tryType()
    var values []ast.Expr
    if typ != nil || p.tok == token.ASSIGN {
        p.expect(token.ASSIGN)
        values = p.parseExprList()
    }
    p.expectSemi()
    return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
}
// parseTypeSpec parses one type spec: a type name followed by its type.
func parseTypeSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
    if p.trace {
        defer un(trace(p, "TypeSpec"))
    }
    ident := p.declIdent(ast.Typ)
    typ := p.parseType()
    p.expectSemi()
    return &ast.TypeSpec{doc, ident, typ, p.lineComment}
}
// parseVarSpec parses one var spec: an identifier list, an optional type,
// and an optional "= values" part. Unlike const specs, the "=" (with
// values) is required exactly when no type is given.
func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
    if p.trace {
        defer un(trace(p, "VarSpec"))
    }
    idents := p.declIdentList(ast.Var)
    typ := p.tryType()
    var values []ast.Expr
    if typ == nil || p.tok == token.ASSIGN {
        p.expect(token.ASSIGN)
        values = p.parseExprList()
    }
    p.expectSemi()
    return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
}
// parseGenDecl parses a general declaration for the given keyword (IMPORT,
// CONST, TYPE, or VAR): either a single spec, or a parenthesized list of
// specs, each parsed by f.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
    if p.trace {
        defer un(trace(p, keyword.String()+"Decl"))
    }
    doc := p.leadComment
    pos := p.expect(keyword)
    var lparen, rparen token.Position // zero if the declaration is unparenthesized
    var list vector.Vector
    if p.tok == token.LPAREN {
        // parenthesized group of specs
        lparen = p.pos
        p.next()
        for p.tok != token.RPAREN && p.tok != token.EOF {
            list.Push(f(p, p.leadComment))
        }
        rparen = p.expect(token.RPAREN)
        p.expectSemi()
    } else {
        // single spec without parentheses
        list.Push(f(p, nil))
    }
    // convert vector
    specs := make([]ast.Spec, len(list))
    for i, x := range list {
        specs[i] = x.(ast.Spec)
    }
    return &ast.GenDecl{doc, pos, keyword, lparen, specs, rparen}
}
// parseReceiver parses a method receiver and checks that it is exactly one
// parameter whose type is a type name or a pointer to a type name. On an
// invalid receiver it reports an error and returns a field with a BadExpr type.
func (p *parser) parseReceiver() *ast.Field {
    if p.trace {
        defer un(trace(p, "Receiver"))
    }
    pos := p.pos
    par := p.parseParameters(false)
    // must have exactly one receiver
    if len(par) != 1 || len(par) == 1 && len(par[0].Names) > 1 {
        p.errorExpected(pos, "exactly one receiver")
        return &ast.Field{Type: &ast.BadExpr{noPos}}
    }
    recv := par[0]
    // recv type must be TypeName or *TypeName
    base := recv.Type
    if ptr, isPtr := base.(*ast.StarExpr); isPtr {
        base = ptr.X // look through the pointer
    }
    if !isTypeName(base) {
        p.errorExpected(base.Pos(), "type name")
    }
    return recv
}
// parseFunctionDecl parses a function or method declaration: "func"
// [receiver] name signature [body]. The body is optional (forward/assembly
// declarations have none).
func (p *parser) parseFunctionDecl() *ast.FuncDecl {
    if p.trace {
        defer un(trace(p, "FunctionDecl"))
    }
    doc := p.leadComment
    pos := p.expect(token.FUNC)
    var recv *ast.Field
    if p.tok == token.LPAREN {
        recv = p.parseReceiver() // method declaration
    }
    ident := p.declIdent(ast.Fun)
    params, results := p.parseSignature()
    var body *ast.BlockStmt
    if p.tok == token.LBRACE {
        body = p.parseBlockStmt()
    }
    p.expectSemi()
    return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
}
// parseDecl parses a top-level declaration (const, type, var, or func).
// On any other token it reports an error, consumes the token, and returns
// an *ast.BadDecl.
func (p *parser) parseDecl() ast.Decl {
    if p.trace {
        defer un(trace(p, "Declaration"))
    }
    var f parseSpecFunction
    switch p.tok {
    case token.CONST:
        f = parseConstSpec
    case token.TYPE:
        f = parseTypeSpec
    case token.VAR:
        f = parseVarSpec
    case token.FUNC:
        return p.parseFunctionDecl()
    default:
        pos := p.pos
        p.errorExpected(pos, "declaration")
        decl := &ast.BadDecl{pos}
        p.next() // make progress in any case
        return decl
    }
    return p.parseGenDecl(p.tok, f)
}
// parseDeclList parses declarations until EOF and returns them as a slice.
func (p *parser) parseDeclList() []ast.Decl {
    if p.trace {
        defer un(trace(p, "DeclList"))
    }
    var list vector.Vector
    for p.tok != token.EOF {
        list.Push(p.parseDecl())
    }
    // convert vector
    decls := make([]ast.Decl, len(list))
    for i, x := range list {
        decls[i] = x.(ast.Decl)
    }
    return decls
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: the package clause, then —
// unless errors occurred or the mode restricts parsing — the import
// declarations followed by the remaining top-level declarations.
func (p *parser) parseFile() *ast.File {
    if p.trace {
        defer un(trace(p, "File"))
    }
    // package clause
    doc := p.leadComment
    pos := p.expect(token.PACKAGE)
    ident := p.parseIdent(ast.Pkg) // package name is in no scope
    p.expectSemi()
    // file block
    defer closeScope(openScope(p))
    var decls []ast.Decl
    // Don't bother parsing the rest if we had errors already.
    // Likely not a Go source file at all.
    if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
        // import decls
        var list vector.Vector
        for p.tok == token.IMPORT {
            list.Push(p.parseGenDecl(token.IMPORT, parseImportSpec))
        }
        if p.mode&ImportsOnly == 0 {
            // rest of package body
            for p.tok != token.EOF {
                list.Push(p.parseDecl())
            }
        }
        // convert declaration list
        decls = make([]ast.Decl, len(list))
        for i, x := range list {
            decls[i] = x.(ast.Decl)
        }
    }
    return &ast.File{doc, pos, ident, decls, p.comments}
}
Urgent parser/gofmt fix.
Wrong position information for identifiers
could lead to destructive reformatting of
source files via hg gofmt.
R=rsc
CC=golang-dev
http://codereview.appspot.com/189100
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A parser for Go source files. Input may be provided in a variety of
// forms (see the various Parse* functions); the output is an abstract
// syntax tree (AST) representing the Go source. The parser is invoked
// through one of the Parse* functions.
//
package parser
import (
"container/vector"
"fmt"
"go/ast"
"go/scanner"
"go/token"
)
// noPos is used when there is no corresponding source position for a token
// (it is the zero token.Position value and is never mutated).
var noPos token.Position
// The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality. The flags are bits and may be or'ed together.
//
const (
    PackageClauseOnly uint = 1 << iota // parsing stops after package clause
    ImportsOnly                        // parsing stops after import declarations
    ParseComments                      // parse comments and add them to AST
    CheckSemantics                     // do semantic checks (only declarations for now)
    Trace                              // print a trace of parsed productions
)
// The parser structure holds the parser's internal state. A parser is
// initialized via init and drives an embedded scanner with one token of
// look-ahead (pos/tok/lit).
type parser struct {
    scanner.ErrorVector // collects scan/parse errors
    scanner scanner.Scanner
    // Tracing/debugging
    mode   uint // parsing mode
    check  bool // == (mode & CheckSemantics != 0)
    trace  bool // == (mode & Trace != 0)
    indent uint // indentation used for tracing output
    // Comments
    comments    *ast.CommentGroup // list of collected comments
    lastComment *ast.CommentGroup // last comment in the comments list
    leadComment *ast.CommentGroup // the last lead comment
    lineComment *ast.CommentGroup // the last line comment
    // Next token
    pos token.Position // token position
    tok token.Token    // one token look-ahead
    lit []byte         // token literal
    // Non-syntactic parser control
    exprLev int // < 0: in control clause, >= 0: in expression
    // Scopes
    pkgScope  *ast.Scope
    fileScope *ast.Scope
    topScope  *ast.Scope // innermost open scope; see openScope/closeScope
}
// scannerMode returns the scanner mode bits given the parser's mode bits:
// semicolon insertion is always on, and comment scanning is enabled only
// when the parser is asked to collect comments.
func scannerMode(mode uint) uint {
    var m uint = scanner.InsertSemis
    if mode&ParseComments != 0 {
        m |= scanner.ScanComments
    }
    return m
}
// init prepares the parser to parse src (attributed to filename) in the
// given mode, sets up the scanner (with the parser as error handler),
// and primes the one-token look-ahead.
func (p *parser) init(filename string, src []byte, mode uint) {
    p.scanner.Init(filename, src, p, scannerMode(mode))
    p.mode = mode
    p.trace = mode&Trace != 0          // for convenience (p.trace is used frequently)
    p.check = mode&CheckSemantics != 0 // for convenience (p.check is used frequently)
    p.next()
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints one trace line: the current source position, dots
// proportional to the current indentation level, and the arguments.
func (p *parser) printTrace(a ...) {
    const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
        ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
    const n = uint(len(dots))
    fmt.Printf("%5d:%3d: ", p.pos.Line, p.pos.Column)
    i := 2 * p.indent
    for ; i > n; i -= n {
        fmt.Print(dots) // repeat full dot runs for deep indentation
    }
    fmt.Print(dots[0:i]) // remainder
    fmt.Println(a)
}
// trace prints an opening trace line for msg and increments the trace
// indentation; it returns p so it composes with un:
// defer un(trace(p, "...")).
func trace(p *parser, msg string) *parser {
    p.printTrace(msg, "(")
    p.indent++
    return p
}
// un is the counterpart of trace: it decrements the trace indentation and
// prints the closing trace line.
// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
    p.indent--
    p.printTrace(")")
}
// Advance to the next token. next0 scans the raw token stream (comments
// included); next (below) is the comment-skipping variant used by the
// grammar productions.
func (p *parser) next0() {
    // Because of one-token look-ahead, print the previous token
    // when tracing as it provides a more readable output. The
    // very first token (p.pos.Line == 0) is not initialized (it
    // is token.ILLEGAL), so don't print it .
    if p.trace && p.pos.Line > 0 {
        s := p.tok.String()
        switch {
        case p.tok.IsLiteral():
            p.printTrace(s, string(p.lit))
        case p.tok.IsOperator(), p.tok.IsKeyword():
            p.printTrace("\"" + s + "\"")
        default:
            p.printTrace(s)
        }
    }
    p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
    // /*-style comments may end on a different line than where they start.
    // Scan the comment for '\n' chars and adjust endline accordingly.
    endline = p.pos.Line
    if p.lit[1] == '*' { // lit starts with "/*" or "//"; [1] distinguishes them
        for _, b := range p.lit {
            if b == '\n' {
                endline++
            }
        }
    }
    comment = &ast.Comment{p.pos, p.lit}
    p.next0()
    return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return the line of which the last comment
// in the group ends. An empty line or non-comment token terminates
// a comment group.
//
func (p *parser) consumeCommentGroup() int {
    var list vector.Vector
    endline := p.pos.Line
    // adjacent means: next comment starts no later than one line below
    for p.tok == token.COMMENT && endline+1 >= p.pos.Line {
        var comment *ast.Comment
        comment, endline = p.consumeComment()
        list.Push(comment)
    }
    // convert list
    group := make([]*ast.Comment, len(list))
    for i, x := range list {
        group[i] = x.(*ast.Comment)
    }
    // add comment group to the comments list (linked via Next)
    g := &ast.CommentGroup{group, nil}
    if p.lastComment != nil {
        p.lastComment.Next = g
    } else {
        p.comments = g
    }
    p.lastComment = g
    return endline
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
    p.leadComment = nil
    p.lineComment = nil
    line := p.pos.Line // current line
    p.next0()
    if p.tok == token.COMMENT {
        if p.pos.Line == line {
            // The comment is on same line as previous token; it
            // cannot be a lead comment but may be a line comment.
            endline := p.consumeCommentGroup()
            if p.pos.Line != endline {
                // The next token is on a different line, thus
                // the last comment group is a line comment.
                p.lineComment = p.lastComment
            }
        }
        // consume successor comments, if any
        endline := -1
        for p.tok == token.COMMENT {
            endline = p.consumeCommentGroup()
        }
        if endline >= 0 && endline+1 == p.pos.Line {
            // The next token is following on the line immediately after the
            // comment group, thus the last comment group is a lead comment.
            p.leadComment = p.lastComment
        }
    }
}
// errorExpected reports an "expected ..." error at pos. When pos is the
// current token's position, the message also names the token actually found.
func (p *parser) errorExpected(pos token.Position, msg string) {
    msg = "expected " + msg
    if pos.Offset == p.pos.Offset {
        // the error happened at the current position;
        // make the error message more specific
        msg += ", found '" + p.tok.String() + "'"
        if p.tok.IsLiteral() {
            msg += " " + string(p.lit)
        }
    }
    p.Error(pos, msg)
}
// expect consumes the current token and returns its position. If the token
// is not tok, an error is reported first; the token is consumed either way
// so the parser always makes progress.
func (p *parser) expect(tok token.Token) token.Position {
    pos := p.pos
    if p.tok != tok {
        p.errorExpected(pos, "'"+tok.String()+"'")
    }
    p.next() // make progress in any case
    return pos
}
// expectSemi expects a semicolon, which may be elided before a closing
// ")" or "}".
func (p *parser) expectSemi() {
    if p.tok != token.RPAREN && p.tok != token.RBRACE {
        p.expect(token.SEMICOLON)
    }
}
// ----------------------------------------------------------------------------
// Scope support
// openScope pushes a new innermost scope and returns p so it composes
// with closeScope.
// Usage pattern: defer closeScope(openScope(p));
func openScope(p *parser) *parser {
    p.topScope = ast.NewScope(p.topScope)
    return p
}
func closeScope(p *parser) { p.topScope = p.topScope.Outer }
// parseIdent parses an identifier without declaring it in any scope.
// NOTE(review): the kind parameter is currently unused — the object is
// always created with ast.Err; confirm whether kind should be passed to
// ast.NewObj instead.
func (p *parser) parseIdent(kind ast.ObjKind) *ast.Ident {
    obj := ast.NewObj(ast.Err, p.pos, "")
    if p.tok == token.IDENT {
        obj.Name = string(p.lit)
        p.next()
    } else {
        p.expect(token.IDENT) // use expect() error handling
    }
    return &ast.Ident{obj.Pos, obj}
}
// declIdent parses an identifier and, when semantic checking is enabled,
// declares it with the given kind in the current scope, reporting
// redeclarations.
// TODO(gri) Separate parsing from declaration since an identifier's
// scope often starts only after the type has been seen.
func (p *parser) declIdent(kind ast.ObjKind) *ast.Ident {
    obj := ast.NewObj(kind, p.pos, "")
    if p.tok == token.IDENT {
        obj.Name = string(p.lit)
        // TODO(gri) Consider reversing the conditionals below:
        // always do the declaration but only report
        // error if enabled (may be necessary to get
        // search functionality in the presence of
        // incorrect files).
        if p.check && !p.topScope.Declare(obj) {
            // TODO(gri) Declare could return already-declared
            // object for a very good error message.
            p.Error(obj.Pos, "'"+obj.Name+"' declared already")
        }
        p.next()
    } else {
        p.expect(token.IDENT) // use expect() error handling
    }
    return &ast.Ident{obj.Pos, obj}
}
// declIdentList parses a comma-separated, non-empty list of identifiers,
// declaring each via declIdent with the given kind.
// TODO(gri) Separate parsing from declaration since an identifier's
// scope often starts only after the type has been seen.
func (p *parser) declIdentList(kind ast.ObjKind) []*ast.Ident {
    if p.trace {
        defer un(trace(p, "IdentList"))
    }
    var list vector.Vector
    list.Push(p.declIdent(kind))
    for p.tok == token.COMMA {
        p.next()
        list.Push(p.declIdent(kind))
    }
    // convert vector
    idents := make([]*ast.Ident, len(list))
    for i, x := range list {
        idents[i] = x.(*ast.Ident)
    }
    return idents
}
// findIdent parses an identifier in a use (not declaration) position and
// resolves it in the current scope chain; if unresolved, a fresh Err
// object is created. The returned Ident carries the use position (pos),
// not the resolved object's declaration position.
func (p *parser) findIdent() *ast.Ident {
    pos := p.pos
    name := ""
    var obj *ast.Object
    if p.tok == token.IDENT {
        name = string(p.lit)
        obj = p.topScope.Lookup(name)
        p.next()
    } else {
        p.expect(token.IDENT) // use expect() error handling
    }
    if obj == nil {
        obj = ast.NewObj(ast.Err, pos, name)
    }
    return &ast.Ident{pos, obj}
}
// ----------------------------------------------------------------------------
// Common productions
// makeExprList converts a vector of parsed expressions into an []ast.Expr
// of the same length and order.
func makeExprList(list *vector.Vector) []ast.Expr {
    n := list.Len()
    exprs := make([]ast.Expr, n)
    for i := 0; i < n; i++ {
        exprs[i] = list.At(i).(ast.Expr)
    }
    return exprs
}
// parseExprList parses a non-empty, comma-separated list of expressions.
func (p *parser) parseExprList() []ast.Expr {
    if p.trace {
        defer un(trace(p, "ExpressionList"))
    }
    var list vector.Vector
    list.Push(p.parseExpr())
    for p.tok == token.COMMA {
        p.next()
        list.Push(p.parseExpr())
    }
    return makeExprList(&list)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type. Unlike tryType it never returns nil: if no type
// is present it reports an error, consumes a token, and returns a BadExpr.
func (p *parser) parseType() ast.Expr {
    if p.trace {
        defer un(trace(p, "Type"))
    }
    typ := p.tryType()
    if typ == nil {
        p.errorExpected(p.pos, "type")
        p.next() // make progress
        return &ast.BadExpr{p.pos}
    }
    return typ
}
// parseQualifiedIdent parses an identifier, optionally qualified by a
// package name ("pkg.Name"), and returns an Ident or SelectorExpr.
func (p *parser) parseQualifiedIdent() ast.Expr {
    if p.trace {
        defer un(trace(p, "QualifiedIdent"))
    }
    var x ast.Expr = p.findIdent()
    if p.tok == token.PERIOD {
        // first identifier is a package identifier
        p.next()
        sel := p.findIdent()
        x = &ast.SelectorExpr{x, sel}
    }
    return x
}
// parseTypeName parses a type name, which is a (possibly qualified)
// identifier.
func (p *parser) parseTypeName() ast.Expr {
    if p.trace {
        defer un(trace(p, "TypeName"))
    }
    return p.parseQualifiedIdent()
}
// parseArrayType parses an array or slice type: "[len]T", "[]T", or —
// when ellipsisOk — "[...]T" (for composite literals). A nil Len means
// a slice type.
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
    if p.trace {
        defer un(trace(p, "ArrayType"))
    }
    lbrack := p.expect(token.LBRACK)
    var len ast.Expr
    if ellipsisOk && p.tok == token.ELLIPSIS {
        len = &ast.Ellipsis{p.pos} // [...]T form
        p.next()
    } else if p.tok != token.RBRACK {
        len = p.parseExpr() // explicit array length
    }
    p.expect(token.RBRACK)
    elt := p.parseType()
    return &ast.ArrayType{lbrack, len, elt}
}
// makeIdentList converts a vector of expressions that should all be
// identifiers into an []*ast.Ident. A non-identifier entry is reported via
// errorExpected and replaced by a placeholder Err identifier at the same
// position, so the result never contains nil entries.
func (p *parser) makeIdentList(list *vector.Vector) []*ast.Ident {
    idents := make([]*ast.Ident, len(*list))
    for i, x := range *list {
        ident, isIdent := x.(*ast.Ident)
        if !isIdent {
            pos := x.(ast.Expr).Pos()
            p.errorExpected(pos, "identifier")
            // bug fix: assign the placeholder to ident rather than directly
            // to idents[i]; previously the placeholder was immediately
            // overwritten by the nil ident below, leaving a nil slot.
            ident = &ast.Ident{pos, ast.NewObj(ast.Err, pos, "")}
        }
        idents[i] = ident
    }
    return idents
}
// parseFieldDecl parses one struct field declaration: either an identifier
// list followed by a type, or a single anonymous-field type; an optional
// string tag may follow.
func (p *parser) parseFieldDecl() *ast.Field {
    if p.trace {
        defer un(trace(p, "FieldDecl"))
    }
    doc := p.leadComment
    // a list of identifiers looks like a list of type names
    var list vector.Vector
    for {
        // TODO(gri): do not allow ()'s here
        list.Push(p.parseType())
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    // if we had a list of identifiers, it must be followed by a type
    typ := p.tryType()
    // optional tag
    var tag []*ast.BasicLit
    if p.tok == token.STRING {
        x := &ast.BasicLit{p.pos, p.tok, p.lit}
        p.next()
        tag = []*ast.BasicLit{x}
    }
    // analyze case
    var idents []*ast.Ident
    if typ != nil {
        // IdentifierList Type
        idents = p.makeIdentList(&list)
    } else {
        // Type (anonymous field)
        if len(list) == 1 {
            // TODO(gri): check that this looks like a type
            typ = list.At(0).(ast.Expr)
        } else {
            // more than one entry but no trailing type: not a valid field
            p.errorExpected(p.pos, "anonymous field")
            typ = &ast.BadExpr{p.pos}
        }
    }
    p.expectSemi()
    return &ast.Field{doc, idents, typ, tag, p.lineComment}
}
// parseStructType parses a struct type: "struct { fields }". Field
// declarations start with an identifier or "*" (anonymous pointer field).
func (p *parser) parseStructType() *ast.StructType {
    if p.trace {
        defer un(trace(p, "StructType"))
    }
    pos := p.expect(token.STRUCT)
    lbrace := p.expect(token.LBRACE)
    var list vector.Vector
    for p.tok == token.IDENT || p.tok == token.MUL {
        list.Push(p.parseFieldDecl())
    }
    rbrace := p.expect(token.RBRACE)
    // convert vector
    fields := make([]*ast.Field, len(list))
    for i, x := range list {
        fields[i] = x.(*ast.Field)
    }
    return &ast.StructType{pos, lbrace, fields, rbrace, false}
}
// parsePointerType parses a pointer type "*T".
func (p *parser) parsePointerType() *ast.StarExpr {
    if p.trace {
        defer un(trace(p, "PointerType"))
    }
    star := p.expect(token.MUL)
    base := p.parseType()
    return &ast.StarExpr{star, base}
}
// tryParameterType is like tryType but, when ellipsisOk, additionally
// accepts "..." as a parameter type; it complains if the ellipsis is not
// the last element of the parameter list.
func (p *parser) tryParameterType(ellipsisOk bool) ast.Expr {
    if ellipsisOk && p.tok == token.ELLIPSIS {
        pos := p.pos
        p.next()
        if p.tok != token.RPAREN {
            // "..." always must be at the very end of a parameter list
            p.Error(pos, "expected type, found '...'")
        }
        return &ast.Ellipsis{pos}
    }
    return p.tryType()
}
// parseParameterType is the non-nil variant of tryParameterType: absence
// of a type is an error and yields a BadExpr.
func (p *parser) parseParameterType(ellipsisOk bool) ast.Expr {
    typ := p.tryParameterType(ellipsisOk)
    if typ == nil {
        p.errorExpected(p.pos, "type")
        p.next() // make progress
        typ = &ast.BadExpr{p.pos}
    }
    return typ
}
// parseParameterDecl parses a comma-separated list of parameter "types"
// plus an optional trailing type. If the trailing type is non-nil, the
// list was actually an identifier list; the caller disambiguates.
func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) {
    if p.trace {
        defer un(trace(p, "ParameterDecl"))
    }
    // a list of identifiers looks like a list of type names
    var list vector.Vector
    for {
        // TODO(gri): do not allow ()'s here
        list.Push(p.parseParameterType(ellipsisOk))
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    // if we had a list of identifiers, it must be followed by a type
    typ := p.tryParameterType(ellipsisOk)
    return &list, typ
}
// parseParameterList parses the contents of a parameter list and returns
// it as fields. It distinguishes named parameters ("a, b int, c string")
// from anonymous ones ("int, string") based on whether the first
// parameter declaration ended in a type.
func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field {
    if p.trace {
        defer un(trace(p, "ParameterList"))
    }
    list, typ := p.parseParameterDecl(ellipsisOk)
    if typ != nil {
        // IdentifierList Type
        idents := p.makeIdentList(list)
        list.Resize(0, 0) // reuse the vector for the field list
        list.Push(&ast.Field{nil, idents, typ, nil, nil})
        if p.tok == token.COMMA {
            p.next()
        }
        // remaining groups are straightforward "idents type" declarations
        for p.tok != token.RPAREN && p.tok != token.EOF {
            idents := p.declIdentList(ast.Var)
            typ := p.parseParameterType(ellipsisOk)
            list.Push(&ast.Field{nil, idents, typ, nil, nil})
            if p.tok != token.COMMA {
                break
            }
            p.next()
        }
    } else {
        // Type { "," Type } (anonymous parameters)
        // convert list of types into list of *Param
        for i, x := range *list {
            list.Set(i, &ast.Field{Type: x.(ast.Expr)})
        }
    }
    // convert list
    params := make([]*ast.Field, len(*list))
    for i, x := range *list {
        params[i] = x.(*ast.Field)
    }
    return params
}
// parseParameters parses a parenthesized (possibly empty) parameter list.
// Parameters are declared in a scope that spans only the list itself.
func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field {
    if p.trace {
        defer un(trace(p, "Parameters"))
    }
    var params []*ast.Field
    p.expect(token.LPAREN)
    openScope(p)
    if p.tok != token.RPAREN {
        params = p.parseParameterList(ellipsisOk)
    }
    closeScope(p)
    p.expect(token.RPAREN)
    return params
}
// parseResult parses an (optional) function result: either a parenthesized
// parameter list or a single bare type. Returns nil if no result is present.
func (p *parser) parseResult() []*ast.Field {
    if p.trace {
        defer un(trace(p, "Result"))
    }
    var results []*ast.Field
    if p.tok == token.LPAREN {
        results = p.parseParameters(false)
    } else if p.tok != token.FUNC { // bare "func" would start the body of a FuncLit
        typ := p.tryType()
        if typ != nil {
            results = make([]*ast.Field, 1)
            results[0] = &ast.Field{Type: typ}
        }
    }
    return results
}
// parseSignature parses a function signature: parameters (ellipsis
// allowed) followed by an optional result.
func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) {
    if p.trace {
        defer un(trace(p, "Signature"))
    }
    params = p.parseParameters(true)
    results = p.parseResult()
    return
}
// parseFuncType parses a function type: "func" followed by a signature.
func (p *parser) parseFuncType() *ast.FuncType {
    if p.trace {
        defer un(trace(p, "FuncType"))
    }
    pos := p.expect(token.FUNC)
    params, results := p.parseSignature()
    return &ast.FuncType{pos, params, results}
}
// parseMethodSpec parses one entry of an interface body: either a method
// (identifier followed by a signature) or an embedded interface type name.
func (p *parser) parseMethodSpec() *ast.Field {
    if p.trace {
        defer un(trace(p, "MethodSpec"))
    }
    doc := p.leadComment
    var idents []*ast.Ident
    var typ ast.Expr
    x := p.parseQualifiedIdent()
    if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
        // method
        idents = []*ast.Ident{ident}
        params, results := p.parseSignature()
        typ = &ast.FuncType{noPos, params, results} // no "func" keyword position
    } else {
        // embedded interface
        typ = x
    }
    p.expectSemi()
    return &ast.Field{doc, idents, typ, nil, p.lineComment}
}
// parseInterfaceType parses an interface type: "interface { methods }".
func (p *parser) parseInterfaceType() *ast.InterfaceType {
    if p.trace {
        defer un(trace(p, "InterfaceType"))
    }
    pos := p.expect(token.INTERFACE)
    lbrace := p.expect(token.LBRACE)
    var list vector.Vector
    for p.tok == token.IDENT {
        list.Push(p.parseMethodSpec())
    }
    rbrace := p.expect(token.RBRACE)
    // convert vector
    methods := make([]*ast.Field, len(list))
    for i, x := range list {
        methods[i] = x.(*ast.Field)
    }
    return &ast.InterfaceType{pos, lbrace, methods, rbrace, false}
}
// parseMapType parses a map type: "map[K]V".
func (p *parser) parseMapType() *ast.MapType {
    if p.trace {
        defer un(trace(p, "MapType"))
    }
    pos := p.expect(token.MAP)
    p.expect(token.LBRACK)
    key := p.parseType()
    p.expect(token.RBRACK)
    value := p.parseType()
    return &ast.MapType{pos, key, value}
}
// parseChanType parses a channel type: "chan T" (bidirectional),
// "chan<- T" (send-only), or "<-chan T" (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
    if p.trace {
        defer un(trace(p, "ChanType"))
    }
    pos := p.pos
    dir := ast.SEND | ast.RECV // bidirectional unless an arrow narrows it
    if p.tok == token.CHAN {
        p.next()
        if p.tok == token.ARROW {
            p.next()
            dir = ast.SEND // chan<- T
        }
    } else {
        // <-chan T
        p.expect(token.ARROW)
        p.expect(token.CHAN)
        dir = ast.RECV
    }
    value := p.parseType()
    return &ast.ChanType{pos, dir, value}
}
// tryRawType attempts to parse a type, dispatching on the current token.
// It returns nil (without reporting an error) if no type is present.
// ellipsisOk is forwarded to array types to permit the [...]T form.
func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
    switch p.tok {
    case token.IDENT:
        return p.parseTypeName()
    case token.LBRACK:
        return p.parseArrayType(ellipsisOk)
    case token.STRUCT:
        return p.parseStructType()
    case token.MUL:
        return p.parsePointerType()
    case token.FUNC:
        return p.parseFuncType()
    case token.INTERFACE:
        return p.parseInterfaceType()
    case token.MAP:
        return p.parseMapType()
    case token.CHAN, token.ARROW:
        return p.parseChanType()
    case token.LPAREN:
        // parenthesized type
        lparen := p.pos
        p.next()
        typ := p.parseType()
        rparen := p.expect(token.RPAREN)
        return &ast.ParenExpr{lparen, typ, rparen}
    }
    // no type found
    return nil
}
func (p *parser) tryType() ast.Expr { return p.tryRawType(false) }
// ----------------------------------------------------------------------------
// Blocks
// makeStmtList converts a vector of parsed statements into an []ast.Stmt
// of the same length and order.
func makeStmtList(list *vector.Vector) []ast.Stmt {
    n := list.Len()
    stmts := make([]ast.Stmt, n)
    for i := 0; i < n; i++ {
        stmts[i] = list.At(i).(ast.Stmt)
    }
    return stmts
}
// parseStmtList parses statements until a token that ends a statement list
// (case, default, "}", or EOF).
func (p *parser) parseStmtList() []ast.Stmt {
    if p.trace {
        defer un(trace(p, "StatementList"))
    }
    var list vector.Vector
    for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
        list.Push(p.parseStmt())
    }
    return makeStmtList(&list)
}
// parseBlockStmt parses a brace-delimited statement block in its own scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
    if p.trace {
        defer un(trace(p, "BlockStmt"))
    }
    defer closeScope(openScope(p))
    lbrace := p.expect(token.LBRACE)
    list := p.parseStmtList()
    rbrace := p.expect(token.RBRACE)
    return &ast.BlockStmt{lbrace, list, rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type and, if a body follows,
// extends it into a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
    if p.trace {
        defer un(trace(p, "FuncTypeOrLit"))
    }
    typ := p.parseFuncType()
    if p.tok != token.LBRACE {
        // function type only
        return typ
    }
    p.exprLev++ // the literal body is expression context again
    body := p.parseBlockStmt()
    p.exprLev--
    return &ast.FuncLit{typ, body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
    if p.trace {
        defer un(trace(p, "Operand"))
    }
    switch p.tok {
    case token.IDENT:
        return p.findIdent()
    case token.INT, token.FLOAT, token.CHAR, token.STRING:
        x := &ast.BasicLit{p.pos, p.tok, p.lit}
        p.next()
        return x
    case token.LPAREN:
        // parenthesized expression
        lparen := p.pos
        p.next()
        p.exprLev++
        x := p.parseExpr()
        p.exprLev--
        rparen := p.expect(token.RPAREN)
        return &ast.ParenExpr{lparen, x, rparen}
    case token.FUNC:
        return p.parseFuncTypeOrLit()
    default:
        t := p.tryRawType(true) // could be type for composite literal or conversion
        if t != nil {
            return t
        }
    }
    // nothing recognizable: report, consume, and recover with a BadExpr
    p.errorExpected(p.pos, "operand")
    p.next() // make progress
    return &ast.BadExpr{p.pos}
}
// parseSelectorOrTypeAssertion parses, after operand x, either a selector
// "x.f" or a type assertion "x.(T)" / type-switch guard "x.(type)"
// (represented by a TypeAssertExpr with nil Type).
func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "SelectorOrTypeAssertion"))
    }
    p.expect(token.PERIOD)
    if p.tok == token.IDENT {
        // selector
        sel := p.findIdent()
        return &ast.SelectorExpr{x, sel}
    }
    // type assertion
    p.expect(token.LPAREN)
    var typ ast.Expr
    if p.tok == token.TYPE {
        // type switch: typ == nil
        p.next()
    } else {
        typ = p.parseType()
    }
    p.expect(token.RPAREN)
    return &ast.TypeAssertExpr{x, typ}
}
// parseIndexOrSlice parses, after operand x, either an index expression
// "x[i]" or a slice expression "x[i:j]" (the upper bound may be omitted).
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "IndexOrSlice"))
    }
    p.expect(token.LBRACK)
    p.exprLev++ // inside brackets we are in expression context
    index := p.parseExpr()
    if p.tok == token.COLON {
        p.next()
        var end ast.Expr
        if p.tok != token.RBRACK {
            end = p.parseExpr() // upper slice bound; nil means "to the end"
        }
        x = &ast.SliceExpr{x, index, end}
    } else {
        x = &ast.IndexExpr{x, index}
    }
    p.exprLev--
    p.expect(token.RBRACK)
    return x
}
// parseCallOrConversion parses a parenthesized, comma-separated argument
// list following fun, producing a CallExpr (which also represents a
// conversion T(x); the caller decides).
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
    if p.trace {
        defer un(trace(p, "CallOrConversion"))
    }
    lparen := p.expect(token.LPAREN)
    p.exprLev++ // arguments are expression context
    var list vector.Vector
    for p.tok != token.RPAREN && p.tok != token.EOF {
        list.Push(p.parseExpr())
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    p.exprLev--
    rparen := p.expect(token.RPAREN)
    return &ast.CallExpr{fun, lparen, makeExprList(&list), rparen}
}
// parseElement parses one composite-literal element: either a plain
// expression or a "key: value" pair.
func (p *parser) parseElement() ast.Expr {
    if p.trace {
        defer un(trace(p, "Element"))
    }
    x := p.parseExpr()
    if p.tok == token.COLON {
        colon := p.pos
        p.next()
        x = &ast.KeyValueExpr{x, colon, p.parseExpr()}
    }
    return x
}
// parseElementList parses a comma-separated list of composite-literal
// elements up to (but not including) the closing "}".
func (p *parser) parseElementList() []ast.Expr {
    if p.trace {
        defer un(trace(p, "ElementList"))
    }
    var list vector.Vector
    for p.tok != token.RBRACE && p.tok != token.EOF {
        list.Push(p.parseElement())
        if p.tok != token.COMMA {
            break
        }
        p.next()
    }
    return makeExprList(&list)
}
// parseCompositeLit parses the braced element list of a composite literal
// whose type typ has already been parsed.
func (p *parser) parseCompositeLit(typ ast.Expr) ast.Expr {
    if p.trace {
        defer un(trace(p, "CompositeLit"))
    }
    lbrace := p.expect(token.LBRACE)
    var elts []ast.Expr
    if p.tok != token.RBRACE {
        elts = p.parseElementList()
    }
    rbrace := p.expect(token.RBRACE)
    return &ast.CompositeLit{typ, lbrace, elts, rbrace}
}
// TODO(gri): Consider different approach to checking syntax after parsing:
//            Provide a arguments (set of flags) to parsing functions
//            restricting what they are supposed to accept depending
//            on context.
// checkExpr checks that x is an expression (and not a type). If it is not,
// an error is reported and a BadExpr at the same position is returned;
// otherwise x is returned unchanged.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
    // TODO(gri): should provide predicate in AST nodes
    switch t := x.(type) {
    case *ast.BadExpr:
    case *ast.Ident:
    case *ast.BasicLit:
    case *ast.StringList:
    case *ast.FuncLit:
    case *ast.CompositeLit:
    case *ast.ParenExpr:
    case *ast.SelectorExpr:
    case *ast.IndexExpr:
    case *ast.SliceExpr:
    case *ast.TypeAssertExpr:
        if t.Type == nil {
            // the form X.(type) is only allowed in type switch expressions
            p.errorExpected(x.Pos(), "expression")
            x = &ast.BadExpr{x.Pos()}
        }
    case *ast.CallExpr:
    case *ast.StarExpr:
    case *ast.UnaryExpr:
        if t.Op == token.RANGE {
            // the range operator is only allowed at the top of a for statement
            p.errorExpected(x.Pos(), "expression")
            x = &ast.BadExpr{x.Pos()}
        }
    case *ast.BinaryExpr:
    default:
        // all other nodes are not proper expressions
        p.errorExpected(x.Pos(), "expression")
        x = &ast.BadExpr{x.Pos()}
    }
    return x
}
// isTypeName returns true iff x is type name.
func isTypeName(x ast.Expr) bool {
// TODO(gri): should provide predicate in AST nodes
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.ParenExpr:
return isTypeName(t.X) // TODO(gri): should (TypeName) be illegal?
case *ast.SelectorExpr:
return isTypeName(t.X)
default:
return false // all other nodes are not type names
}
return true
}
// isCompositeLitType reports whether x is a legal composite literal
// type: a (possibly parenthesized) type name, or an array, struct, or
// map type; BadExpr is accepted to avoid follow-on errors.
func isCompositeLitType(x ast.Expr) bool {
	// TODO(gri): should provide predicate in AST nodes
	switch n := x.(type) {
	case *ast.BadExpr, *ast.Ident, *ast.ArrayType, *ast.StructType, *ast.MapType:
		return true
	case *ast.ParenExpr:
		return isCompositeLitType(n.X)
	case *ast.SelectorExpr:
		// qualified name: judged by its package part, like isTypeName
		return isTypeName(n.X)
	}
	return false // all other nodes are not legal composite literal types
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// checkExprOrType verifies that x is acceptable where either an
// expression or a (non-raw) type may appear, replacing offending
// nodes with a BadExpr after reporting an error.
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	// TODO(gri): should provide predicate in AST nodes
	switch t := x.(type) {
	case *ast.UnaryExpr:
		if t.Op == token.RANGE {
			// the range operator is only allowed at the top of a for statement
			p.errorExpected(x.Pos(), "expression")
			x = &ast.BadExpr{x.Pos()}
		}
	case *ast.ArrayType:
		// [...]T is only legal as a composite-literal type, not here
		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			p.Error(len.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{x.Pos()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of
// selector/type-assertion, index/slice, call, or composite-literal
// suffixes.
func (p *parser) parsePrimaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand()
L:	for {
		switch p.tok {
		case token.PERIOD:
			x = p.parseSelectorOrTypeAssertion(p.checkExpr(x))
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// Only treat '{' as a composite literal if x looks like a
			// literal type; inside a control clause (exprLev < 0) a bare
			// type name is excluded so '{' can open the statement block.
			if isCompositeLitType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				x = p.parseCompositeLit(x)
			} else {
				break L
			}
		default:
			break L
		}
	}
	return x
}

// parseUnaryExpr parses a unary expression ('+', '-', '!', '^', '<-',
// '&', 'range'), a '*' (deref or pointer type), or falls through to a
// primary expression.
func (p *parser) parseUnaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.ARROW, token.AND, token.RANGE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
	case token.MUL:
		// unary "*" expression or pointer type
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{pos, p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr()
}

// parseBinaryExpr parses a binary expression whose operators all have
// precedence >= prec1, using precedence climbing.
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr()
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			// right operand binds operators of strictly higher precedence
			y := p.parseBinaryExpr(prec + 1)
			x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
		}
	}
	return x
}

// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
//            should reject when a type/raw type is obviously not allowed

// parseExpr parses a full expression starting at the lowest binary
// precedence.
func (p *parser) parseExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(token.LowestPrec + 1)
}
// ----------------------------------------------------------------------------
// Statements
// parseSimpleStmt parses a simple statement: a labeled statement
// (only when labelOk is set), an assignment, an inc/dec statement, or
// a bare expression statement.
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseExprList()
	switch p.tok {
	case token.COLON:
		// labeled statement
		p.next()
		if labelOk && len(x) == 1 {
			if label, isIdent := x[0].(*ast.Ident); isIdent {
				return &ast.LabeledStmt{label, p.parseStmt()}
			}
		}
		p.Error(x[0].Pos(), "illegal label declaration")
		return &ast.BadStmt{x[0].Pos()}
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement
		pos, tok := p.pos, p.tok
		p.next()
		y := p.parseExprList()
		return &ast.AssignStmt{x, pos, tok, y}
	}
	if len(x) > 1 {
		p.Error(x[0].Pos(), "only one expression allowed")
		// continue with first expression
	}
	if p.tok == token.INC || p.tok == token.DEC {
		// increment or decrement
		s := &ast.IncDecStmt{x[0], p.tok}
		p.next() // consume "++" or "--"
		return s
	}
	// expression
	return &ast.ExprStmt{x[0]}
}

// parseCallExpr parses an expression and requires it to be a call;
// returns nil (after reporting an error) otherwise.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseExpr()
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	p.errorExpected(x.Pos(), "function/method call")
	return nil
}

// parseGoStmt parses a "go call" statement; a non-call operand yields
// a BadStmt.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{pos}
	}
	return &ast.GoStmt{pos, call}
}

// parseDeferStmt parses a "defer call" statement; a non-call operand
// yields a BadStmt.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{pos}
	}
	return &ast.DeferStmt{pos, call}
}

// parseReturnStmt parses a return statement with an optional result
// expression list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseExprList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{pos, x}
}

// parseBranchStmt parses break/continue/goto/fallthrough; all but
// fallthrough accept an optional label.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	s := &ast.BranchStmt{p.pos, tok, nil}
	p.expect(tok)
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		s.Label = p.findIdent()
	}
	p.expectSemi()
	return s
}
// makeExpr converts a simple statement into the condition expression
// it wraps; anything other than an ExprStmt (or nil) is an error.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.Error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{s.Pos()}
}

// parseControlClause parses the header of an if/switch/for statement:
// up to an init statement (s1), condition/expression (s2), and - for
// "for" only - a post statement (s3). Expression parsing happens with
// exprLev < 0 so '{' is taken as the statement block, not a literal.
func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) {
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s1 = p.parseSimpleStmt(false)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			if p.tok != token.LBRACE && p.tok != token.SEMICOLON {
				s2 = p.parseSimpleStmt(false)
			}
			if isForStmt {
				// for statements have a 3rd section
				p.expectSemi()
				if p.tok != token.LBRACE {
					s3 = p.parseSimpleStmt(false)
				}
			}
		} else {
			// no ';' after the first statement: it was the condition
			s1, s2 = nil, s1
		}
		p.exprLev = prevLev
	}
	return s1, s2, s3
}

// parseIfStmt parses an if statement including an optional else
// branch; the whole statement gets its own scope.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	// IfStmt block
	defer closeScope(openScope(p))
	pos := p.expect(token.IF)
	s1, s2, _ := p.parseControlClause(false)
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{pos, s1, p.makeExpr(s2), body, else_}
}
// parseCaseClause parses one "case x, y:" or "default:" clause of an
// expression switch, including its statement list.
func (p *parser) parseCaseClause() *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	// CaseClause block
	defer closeScope(openScope(p))
	// SwitchCase
	pos := p.pos
	var x []ast.Expr
	if p.tok == token.CASE {
		p.next()
		x = p.parseExprList()
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	return &ast.CaseClause{pos, x, colon, body}
}

// parseTypeList parses a comma-separated list of types (used by type
// switch cases).
func (p *parser) parseTypeList() []ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	var list vector.Vector
	list.Push(p.parseType())
	for p.tok == token.COMMA {
		p.next()
		list.Push(p.parseType())
	}
	return makeExprList(&list)
}

// parseTypeCaseClause parses one "case T1, T2:" or "default:" clause
// of a type switch, including its statement list.
func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause {
	if p.trace {
		defer un(trace(p, "TypeCaseClause"))
	}
	// TypeCaseClause block
	defer closeScope(openScope(p))
	// TypeSwitchCase
	pos := p.pos
	var types []ast.Expr
	if p.tok == token.CASE {
		p.next()
		types = p.parseTypeList()
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	return &ast.TypeCaseClause{pos, types, colon, body}
}
func isExprSwitch(s ast.Stmt) bool {
if s == nil {
return true
}
if e, ok := s.(*ast.ExprStmt); ok {
if a, ok := e.X.(*ast.TypeAssertExpr); ok {
return a.Type != nil // regular type assertion
}
return true
}
return false
}
// parseSwitchStmt parses a switch statement, producing either a
// SwitchStmt (expression switch) or a TypeSwitchStmt depending on the
// shape of the header (see isExprSwitch).
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	// SwitchStmt block
	defer closeScope(openScope(p))
	pos := p.expect(token.SWITCH)
	s1, s2, _ := p.parseControlClause(false)
	if isExprSwitch(s2) {
		lbrace := p.expect(token.LBRACE)
		var cases vector.Vector
		for p.tok == token.CASE || p.tok == token.DEFAULT {
			cases.Push(p.parseCaseClause())
		}
		rbrace := p.expect(token.RBRACE)
		body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
		p.expectSemi()
		return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
	}
	// type switch
	// TODO(gri): do all the checks!
	lbrace := p.expect(token.LBRACE)
	var cases vector.Vector
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		cases.Push(p.parseTypeCaseClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
	return &ast.TypeSwitchStmt{pos, s1, s2, body}
}

// parseCommClause parses one communication clause of a select
// statement: "case <-ch:", "case x = <-ch:" / "case x := <-ch:",
// "case ch <- x:", or "default:".
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	// CommClause block
	defer closeScope(openScope(p))
	// CommCase
	pos := p.pos
	var tok token.Token
	var lhs, rhs ast.Expr
	if p.tok == token.CASE {
		p.next()
		if p.tok == token.ARROW {
			// RecvExpr without assignment
			rhs = p.parseExpr()
		} else {
			// SendExpr or RecvExpr
			rhs = p.parseExpr()
			if p.tok == token.ASSIGN || p.tok == token.DEFINE {
				// RecvExpr with assignment
				tok = p.tok
				p.next()
				lhs = rhs
				if p.tok == token.ARROW {
					rhs = p.parseExpr()
				} else {
					p.expect(token.ARROW) // use expect() error handling
				}
			}
			// else SendExpr
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	return &ast.CommClause{pos, tok, lhs, rhs, colon, body}
}

// parseSelectStmt parses a select statement with its communication
// clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var cases vector.Vector
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		cases.Push(p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{lbrace, makeStmtList(&cases), rbrace}
	return &ast.SelectStmt{pos, body}
}
// parseForStmt parses a for statement, distinguishing a regular for
// (init; cond; post) from a range-based for, which is detected after
// the fact from an assignment whose single RHS is a unary range
// expression. Malformed headers yield a BadStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	// ForStmt block
	defer closeScope(openScope(p))
	pos := p.expect(token.FOR)
	s1, s2, s3 := p.parseControlClause(true)
	body := p.parseBlockStmt()
	p.expectSemi()
	if as, isAssign := s2.(*ast.AssignStmt); isAssign {
		// possibly a for statement with a range clause; check assignment operator
		if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
			p.errorExpected(as.TokPos, "'=' or ':='")
			return &ast.BadStmt{pos}
		}
		// check lhs: at most "key, value" may appear on the left
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			value = as.Lhs[1]
			fallthrough
		case 1:
			key = as.Lhs[0]
		default:
			// NOTE(review): indexing as.Lhs[0] here assumes len(as.Lhs) > 0;
			// parseSimpleStmt builds Lhs from parseExprList, which always
			// yields at least one expression
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{pos}
		}
		// check rhs: a range clause has exactly one range expression
		if len(as.Rhs) != 1 {
			p.errorExpected(as.Rhs[0].Pos(), "1 expression")
			return &ast.BadStmt{pos}
		}
		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
			// rhs is range expression; check lhs
			return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
		} else {
			p.errorExpected(s2.Pos(), "range clause")
			return &ast.BadStmt{pos}
		}
	} else {
		// regular for statement
		return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
	}
	panic() // unreachable; retained for this code's era, whose compiler required a trailing terminating statement
	return nil
}
// parseStmt parses a single statement, dispatching on the current
// token; unknown tokens produce a BadStmt and consume one token so
// the parser makes progress.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{p.parseDecl()}
	case
		// tokens that may start a top-level expression
		token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
		token.LBRACK, token.STRUCT, // composite type
		token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
		s = p.parseSimpleStmt(true)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		p.next()
		fallthrough
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{p.pos}
	default:
		// no statement found
		p.errorExpected(p.pos, "statement")
		p.next() // make progress
		s = &ast.BadStmt{p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// parseSpecFunction is the type of the per-keyword spec parsers
// (import/const/type/var) passed to parseGenDecl.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup) ast.Spec

// parseImportSpec parses one import spec: an optional '.' or package
// alias identifier followed by the import path string.
func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	if p.tok == token.PERIOD {
		// dot import
		ident = &ast.Ident{p.pos, ast.NewObj(ast.Pkg, p.pos, ".")}
		p.next()
	} else if p.tok == token.IDENT {
		ident = p.declIdent(ast.Pkg)
	}
	var path []*ast.BasicLit
	if p.tok == token.STRING {
		x := &ast.BasicLit{p.pos, p.tok, p.lit}
		p.next()
		path = []*ast.BasicLit{x}
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi()
	return &ast.ImportSpec{doc, ident, path, p.lineComment}
}

// parseConstSpec parses one const spec: identifiers, optional type,
// and optional '=' value list (values are required when a type is
// present; both may be absent for iota repetition).
func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
	if p.trace {
		defer un(trace(p, "ConstSpec"))
	}
	idents := p.declIdentList(ast.Con)
	typ := p.tryType()
	var values []ast.Expr
	if typ != nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseExprList()
	}
	p.expectSemi()
	return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
}

// parseTypeSpec parses one type spec: a type name followed by its
// definition.
func parseTypeSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.declIdent(ast.Typ)
	typ := p.parseType()
	p.expectSemi()
	return &ast.TypeSpec{doc, ident, typ, p.lineComment}
}

// parseVarSpec parses one var spec: identifiers, optional type, and a
// value list that is required exactly when no type is given.
func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
	if p.trace {
		defer un(trace(p, "VarSpec"))
	}
	idents := p.declIdentList(ast.Var)
	typ := p.tryType()
	var values []ast.Expr
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseExprList()
	}
	p.expectSemi()
	return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
}
// parseGenDecl parses a general declaration introduced by keyword
// (import/const/type/var): either a single spec or a parenthesized
// spec group, with each spec parsed by f.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, keyword.String()+"Decl"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Position
	var list vector.Vector
	if p.tok == token.LPAREN {
		// grouped form: keyword ( spec; spec; ... )
		lparen = p.pos
		p.next()
		for p.tok != token.RPAREN && p.tok != token.EOF {
			list.Push(f(p, p.leadComment))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		list.Push(f(p, nil))
	}
	// convert vector
	specs := make([]ast.Spec, len(list))
	for i, x := range list {
		specs[i] = x.(ast.Spec)
	}
	return &ast.GenDecl{doc, pos, keyword, lparen, specs, rparen}
}

// parseReceiver parses a method receiver and checks that there is
// exactly one, unnamed or singly-named, of type TypeName or *TypeName.
func (p *parser) parseReceiver() *ast.Field {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	pos := p.pos
	par := p.parseParameters(false)
	// must have exactly one receiver
	if len(par) != 1 || len(par) == 1 && len(par[0].Names) > 1 {
		p.errorExpected(pos, "exactly one receiver")
		return &ast.Field{Type: &ast.BadExpr{noPos}}
	}
	recv := par[0]
	// recv type must be TypeName or *TypeName
	base := recv.Type
	if ptr, isPtr := base.(*ast.StarExpr); isPtr {
		base = ptr.X
	}
	if !isTypeName(base) {
		p.errorExpected(base.Pos(), "type name")
	}
	return recv
}

// parseFunctionDecl parses a function or method declaration; the body
// is optional (forward/external declarations have none).
func (p *parser) parseFunctionDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	var recv *ast.Field
	if p.tok == token.LPAREN {
		recv = p.parseReceiver()
	}
	ident := p.declIdent(ast.Fun)
	params, results := p.parseSignature()
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBlockStmt()
	}
	p.expectSemi()
	return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
}
// parseDecl parses a single top-level declaration; an unexpected
// token yields a BadDecl and consumes one token so the parser makes
// progress.
func (p *parser) parseDecl() ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	var f parseSpecFunction
	switch p.tok {
	case token.CONST:
		f = parseConstSpec
	case token.TYPE:
		f = parseTypeSpec
	case token.VAR:
		f = parseVarSpec
	case token.FUNC:
		return p.parseFunctionDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		decl := &ast.BadDecl{pos}
		p.next() // make progress in any case
		return decl
	}
	return p.parseGenDecl(p.tok, f)
}

// parseDeclList parses declarations until EOF.
func (p *parser) parseDeclList() []ast.Decl {
	if p.trace {
		defer un(trace(p, "DeclList"))
	}
	var list vector.Vector
	for p.tok != token.EOF {
		list.Push(p.parseDecl())
	}
	// convert vector
	decls := make([]ast.Decl, len(list))
	for i, x := range list {
		decls[i] = x.(ast.Decl)
	}
	return decls
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, then the remaining declarations; the amount parsed is
// limited by the PackageClauseOnly/ImportsOnly mode flags.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	ident := p.parseIdent(ast.Pkg) // package name is in no scope
	p.expectSemi()
	// file block
	defer closeScope(openScope(p))
	var decls []ast.Decl
	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.
	if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
		// import decls
		var list vector.Vector
		for p.tok == token.IMPORT {
			list.Push(p.parseGenDecl(token.IMPORT, parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				list.Push(p.parseDecl())
			}
		}
		// convert declaration list
		decls = make([]ast.Decl, len(list))
		for i, x := range list {
			decls[i] = x.(ast.Decl)
		}
	}
	return &ast.File{doc, pos, ident, decls, p.comments}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
// A parser processes exactly one file; it is not safe for concurrent use.
type parser struct {
	file    *token.File       // source file being parsed
	errors  scanner.ErrorList // accumulated parse errors
	scanner scanner.Scanner   // tokenizer for this file
	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode & Trace != 0)
	indent uint // indentation used for tracing output
	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal
	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to syncXXX without progress
	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression
	// Ordinary identifier scopes
	pkgScope   *ast.Scope        // pkgScope.Outer == nil
	topScope   *ast.Scope        // top-most scope; may be pkgScope
	unresolved []*ast.Ident      // unresolved identifiers
	imports    []*ast.ImportSpec // list of imports
	// Label scope
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser to parse src as file filename in fset,
// initializing the scanner, reading the first token, and opening the
// package and label scopes.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)
	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
	// set up the pkgScope here (as opposed to in parseFile) because
	// there are other parser entry points (ParseExpr, etc.)
	p.openScope()
	p.pkgScope = p.topScope
	// for the same reason, set up a label scope
	p.openLabelScope()
}
// ----------------------------------------------------------------------------
// Scoping support

// openScope pushes a new ordinary-identifier scope.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}

// closeScope pops the current ordinary-identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}

// openLabelScope pushes a new label scope and a fresh slot on the
// stack of unresolved label targets.
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}

// closeLabelScope resolves the labels collected for the current
// function against the current label scope, then pops both.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare declares each of idents in scope with the given kind,
// attaching decl/data to the new object; redeclarations (except of
// the blank identifier) are reported when DeclarationErrors is set.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		assert(ident.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		ident.Obj = obj
		if ident.Name != "_" {
			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
				}
				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}

// shortVarDecl declares the identifiers on the left of a ':='.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			p.errorExpected(x.Pos(), "identifier")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)

// resolve looks up identifier x in the currently open scopes; if not
// found, x is recorded in p.unresolved for later resolution against
// the file, package, or universe scope.
func (p *parser) resolve(x ast.Expr) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	ident.Obj = unresolved
	p.unresolved = append(p.unresolved, ident)
}
// ----------------------------------------------------------------------------
// Parsing support

// printTrace prints one line of trace output, prefixed by the current
// source position and indented by 2*p.indent dots.
func (p *parser) printTrace(a ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = uint(len(dots))
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent
	for ; i > n; i -= n {
		fmt.Print(dots)
	}
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

// trace prints an opening trace line for msg and increases the
// indentation; it returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}

// Advance to the next token.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}
	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()
	return
}

// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
//
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}
	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)
	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	line := p.file.Line(p.pos) // current line
	p.next0()
	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int
		if p.file.Line(p.pos) == line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}
		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}
		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// error records a parse error at pos.
func (p *parser) error(pos token.Pos, msg string) {
	p.errors.Add(p.file.Position(pos), msg)
}

// errorExpected reports an "expected ..." error at pos; when the
// error is at the current token, the message also names what was
// found instead.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += ", found newline"
		} else {
			msg += ", found '" + p.tok.String() + "'"
			if p.tok.IsLiteral() {
				msg += " " + p.lit
			}
		}
	}
	p.error(pos, msg)
}

// expect consumes the current token, reporting an error if it is not
// tok; it always advances and returns the token's position.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}

// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}

// expectSemi consumes a required semicolon, synchronizing to the next
// statement on failure.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		if p.tok == token.SEMICOLON {
			p.next()
		} else {
			p.errorExpected(p.pos, "';'")
			syncStmt(p)
		}
	}
}

// atComma reports whether a comma is (or should be treated as)
// present; an automatically-inserted newline semicolon is reported as
// a missing comma and accepted.
func (p *parser) atComma(context string) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		return true // "insert" the comma and continue
	}
	return false
}
// assert panics with an internal-error message when cond is false.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
// syncStmt advances to the next statement.
// Used for synchronization after an error.
//
func syncStmt(p *parser) {
	for {
		switch p.tok {
		case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
			token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
			token.IF, token.RETURN, token.SELECT, token.SWITCH,
			token.TYPE, token.VAR:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}

// syncDecl advances to the next declaration.
// Used for synchronization after an error.
//
func syncDecl(p *parser) {
	for {
		switch p.tok {
		case token.CONST, token.TYPE, token.VAR:
			// see comments in syncStmt
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
		case token.EOF:
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Identifiers

// parseIdent parses an identifier; on failure it reports an error and
// returns a placeholder "_" identifier.
func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	name := "_"
	if p.tok == token.IDENT {
		name = p.lit
		p.next()
	} else {
		p.expect(token.IDENT) // use expect() error handling
	}
	return &ast.Ident{NamePos: pos, Name: name}
}

// parseIdentList parses a comma-separated list of identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	list = append(list, p.parseIdent())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseIdent())
	}
	return
}
// ----------------------------------------------------------------------------
// Common productions
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ExpressionList"))
}
list = append(list, p.checkExpr(p.parseExpr(lhs)))
for p.tok == token.COMMA {
p.next()
list = append(list, p.checkExpr(p.parseExpr(lhs)))
}
return
}
// parseLhsList parses an expression list that appears on the left-hand side
// of an assignment, ':=', label, or select communication clause. Identifiers
// are parsed unresolved; whether they are resolved here depends on the token
// that follows the list (see the switch below).
func (p *parser) parseLhsList() []ast.Expr {
	list := p.parseExprList(true)
	switch p.tok {
	case token.DEFINE:
		// lhs of a short variable declaration
		// but doesn't enter scope until later:
		// caller must call p.shortVarDecl(p.makeIdentList(list))
		// at appropriate time.
	case token.COLON:
		// lhs of a label declaration or a communication clause of a select
		// statement (parseLhsList is not called when parsing the case clause
		// of a switch statement):
		// - labels are declared by the caller of parseLhsList
		// - for communication clauses, if there is a stand-alone identifier
		//   followed by a colon, we have a syntax error; there is no need
		//   to resolve the identifier in that case
	default:
		// identifiers must be declared elsewhere
		for _, x := range list {
			p.resolve(x)
		}
	}
	return list
}
// parseRhsList parses a right-hand-side expression list; identifiers in
// the result are resolved immediately (lhs == false).
func (p *parser) parseRhsList() []ast.Expr {
	list := p.parseExprList(false)
	return list
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type. If no type is present, it reports an error,
// consumes one token to guarantee progress, and returns a BadExpr covering
// the skipped position.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	typ := p.tryType()
	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.next() // make progress
		return &ast.BadExpr{From: pos, To: p.pos}
	}
	return typ
}
// parseTypeName parses a (possibly qualified) type name:
//
//	TypeName = identifier | PackageName "." identifier .
//
// If the result is a plain identifier, it is not resolved — it may turn
// out to be a parameter or field name. For a qualified name, only the
// package identifier is resolved.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	ident := p.parseIdent()
	// don't resolve ident yet - it may be a parameter or field name
	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		p.resolve(ident)
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}
	return ident
}
// parseArrayType parses an array or slice type. If ellipsisOk is set,
// the "[...]T" form (array length inferred from a composite literal)
// is accepted; an empty "[]" yields a slice type (Len == nil).
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}
	lbrack := p.expect(token.LBRACK)
	var len ast.Expr // nil => slice type
	if ellipsisOk && p.tok == token.ELLIPSIS {
		len = &ast.Ellipsis{Ellipsis: p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		len = p.parseRhs()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
// makeIdentList converts a list of expressions to a list of identifiers.
// Non-identifier elements are reported (unless they are already BadExprs,
// to avoid duplicate errors) and replaced by a blank identifier so the
// result always has the same length as the input.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
	idents := make([]*ast.Ident, len(list))
	for i, x := range list {
		ident, isIdent := x.(*ast.Ident)
		if !isIdent {
			if _, isBad := x.(*ast.BadExpr); !isBad {
				// only report error if it's a new one
				p.errorExpected(x.Pos(), "identifier")
			}
			ident = &ast.Ident{NamePos: x.Pos(), Name: "_"}
		}
		idents[i] = ident
	}
	return idents
}
// parseFieldDecl parses a single field declaration inside a struct type:
// either "IdentifierList Type" or an anonymous (embedded) field, optionally
// followed by a string tag. Declared field names are entered into scope.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}
	doc := p.leadComment
	// fields
	list, typ := p.parseVarList(false)
	// optional tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}
	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list)
	} else {
		// ["*"] TypeName (AnonymousField)
		typ = list[0] // we always have at least one element
		p.resolve(typ)
		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
			// more than one element, or not a (possibly pointer-to)
			// type name: cannot be a legal anonymous field
			pos := typ.Pos()
			p.errorExpected(pos, "anonymous field")
			typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
		}
	}
	p.expectSemi() // call before accessing p.linecomment
	field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
	p.declare(field, nil, scope, ast.Var, idents...)
	return field
}
// parseStructType parses "struct { ... }" and returns the StructType node.
// Field names are declared in a fresh struct scope.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}
	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // struct scope
	var list []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// a field declaration cannot start with a '(' but we accept
		// it here for more robust parsing and better error messages
		// (parseFieldDecl will check and complain if necessary)
		list = append(list, p.parseFieldDecl(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.StructType{
		Struct: pos,
		Fields: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parsePointerType parses "*" BaseType and returns the StarExpr node.
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	star := p.expect(token.MUL)
	return &ast.StarExpr{Star: star, X: p.parseType()}
}
// tryVarType attempts to parse a variable type; it returns nil if no type
// is present. In parameter lists (isParam), a "...T" variadic type is
// accepted; "..." without a following type yields a BadExpr element.
func (p *parser) tryVarType(isParam bool) ast.Expr {
	if isParam && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
		if typ == nil {
			p.error(pos, "'...' parameter is missing type")
			typ = &ast.BadExpr{From: pos, To: p.pos}
		}
		return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
	}
	return p.tryIdentOrType(false)
}
// parseVarType is like tryVarType but a type is required: if none is
// found, it reports an error, consumes one token to make progress, and
// returns a BadExpr.
func (p *parser) parseVarType(isParam bool) ast.Expr {
	typ := p.tryVarType(isParam)
	if typ == nil {
		pos := p.pos
		p.errorExpected(pos, "type")
		p.next() // make progress
		typ = &ast.BadExpr{From: pos, To: p.pos}
	}
	return typ
}
// parseVarList parses a comma-separated list of "types" that may in fact
// be an identifier list followed by a single type (as in field and
// parameter declarations). It returns the list and, if present, the
// trailing type (typ != nil means list holds the identifiers).
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
	if p.trace {
		defer un(trace(p, "VarList"))
	}
	// a list of identifiers looks like a list of type names
	//
	// parse/tryVarType accepts any type (including parenthesized
	// ones) even though the syntax does not permit them here: we
	// accept them all for more robust parsing and complain later
	//
	// NOTE: the loop variable typ below shadows the result parameter typ;
	// the result is only set by the tryVarType call after the loop.
	for typ := p.parseVarType(isParam); typ != nil; {
		list = append(list, typ)
		if p.tok != token.COMMA {
			break
		}
		p.next()
		typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
	}
	// if we had a list of identifiers, it must be followed by a type
	if typ = p.tryVarType(isParam); typ != nil {
		p.resolve(typ)
	}
	return
}
// parseParameterList parses the fields of a parameter list (without the
// surrounding parentheses). It distinguishes named parameter groups
// ("a, b int, c string") from anonymous parameters ("int, string") based
// on whether parseVarList found a trailing type. Named parameters are
// declared in the given (function) scope.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}
	list, typ := p.parseVarList(ellipsisOk)
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		field := &ast.Field{Names: idents, Type: typ}
		params = append(params, field)
		// Go spec: The scope of an identifier denoting a function
		// parameter or result variable is the function body.
		p.declare(field, nil, scope, ast.Var, idents...)
		if p.tok == token.COMMA {
			p.next()
		}
		// remaining groups must all be of the form IdentifierList Type
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.parseIdentList()
			typ := p.parseVarType(ellipsisOk)
			field := &ast.Field{Names: idents, Type: typ}
			params = append(params, field)
			// Go spec: The scope of an identifier denoting a function
			// parameter or result variable is the function body.
			p.declare(field, nil, scope, ast.Var, idents...)
			if !p.atComma("parameter list") {
				break
			}
			p.next()
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		params = make([]*ast.Field, len(list))
		for i, x := range list {
			p.resolve(x)
			params[i] = &ast.Field{Type: x}
		}
	}
	return
}
// parseParameters parses a parenthesized, possibly empty parameter list
// and returns it as a FieldList.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}
	var params []*ast.Field
	lparen := p.expect(token.LPAREN)
	if p.tok != token.RPAREN {
		params = p.parseParameterList(scope, ellipsisOk)
	}
	rparen := p.expect(token.RPAREN)
	return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
}
// parseResult parses an (optional) function result: either a parenthesized
// parameter list (variadic not allowed) or a single bare type. It returns
// nil if no result is present.
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}
	if p.tok == token.LPAREN {
		return p.parseParameters(scope, false)
	}
	typ := p.tryType()
	if typ != nil {
		// single unnamed result type
		list := make([]*ast.Field, 1)
		list[0] = &ast.Field{Type: typ}
		return &ast.FieldList{List: list}
	}
	return nil
}
// parseSignature parses a function signature: the parameter list
// (variadic allowed) followed by an optional result.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}
	params = p.parseParameters(scope, true)
	results = p.parseResult(scope)
	return
}
// parseFuncType parses "func" Signature and returns the FuncType together
// with the new function scope in which the parameters were declared
// (needed by callers that go on to parse a function body).
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	params, results := p.parseSignature(scope)
	return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses a single entry of an interface body: either a
// method signature "name(params) results" or an embedded interface type
// name. Method names are declared in the interface scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.linecomment
	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	p.declare(spec, nil, scope, ast.Fun, idents...)
	return spec
}
// parseInterfaceType parses "interface { ... }" and returns the
// InterfaceType node. Method names are declared in a fresh interface scope.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // interface scope
	var list []*ast.Field
	for p.tok == token.IDENT {
		list = append(list, p.parseMethodSpec(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parseMapType parses "map" "[" KeyType "]" ValueType and returns the
// MapType node.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	value := p.parseType()
	return &ast.MapType{Map: pos, Key: key, Value: value}
}
// parseChanType parses a channel type in one of its three forms:
// "chan T" (bidirectional), "chan<- T" (send-only), or "<-chan T"
// (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV // bidirectional unless restricted below
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// chan<- T
			p.next()
			dir = ast.SEND
		}
	} else {
		// <-chan T
		p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
}
// tryIdentOrType attempts to parse a type, dispatching on the current
// token. It returns nil (without reporting an error) if no type is
// present. If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType(ellipsisOk)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ, _ := p.parseFuncType() // scope not needed: no body follows
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}
	// no type found
	return nil
}
// tryType is like tryIdentOrType but resolves the result (so a returned
// identifier is looked up as a type name). Returns nil if no type is found.
func (p *parser) tryType() ast.Expr {
	typ := p.tryIdentOrType(false)
	if typ != nil {
		p.resolve(typ)
	}
	return typ
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until it reaches a token that ends a
// statement list: "case", "default", "}" or EOF.
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for {
		switch p.tok {
		case token.CASE, token.DEFAULT, token.RBRACE, token.EOF:
			return
		}
		list = append(list, p.parseStmt())
	}
}
// parseBody parses a function body. Unlike parseBlockStmt it installs the
// given (function) scope — which already contains the parameters — as the
// current scope, and also opens/closes a label scope.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}
	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses "{ StatementList }" inside a fresh block scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	lbrace := p.expect(token.LBRACE)
	p.openScope()
	list := p.parseStmtList()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type, and — if a body follows —
// extends it into a function literal. The body is parsed at increased
// expression nesting level (exprLev) since it appears inside an expression.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--
	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
// On error it reports, resynchronizes via syncStmt, and returns a BadExpr.
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			p.resolve(x)
		}
		return x
	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// literal operand
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}
	if typ := p.tryIdentOrType(true); typ != nil {
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier") // idents handled above
		return typ
	}
	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncStmt(p)
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the identifier following a '.' and wraps x in a
// SelectorExpr. The selector identifier itself is never resolved here.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	return &ast.SelectorExpr{X: x, Sel: p.parseIdent()}
}
// parseTypeAssertion parses ".(T)" or the type-switch form ".(type)"
// (the leading '.' has already been consumed). For the type-switch form
// the resulting TypeAssertExpr has Type == nil.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses "[expr]" (index) or "[low : high]" (slice,
// either bound optional) applied to x. The presence of a ':' decides
// which node is produced.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var low, high ast.Expr
	isSlice := false
	if p.tok != token.COLON {
		low = p.parseRhs()
	}
	if p.tok == token.COLON {
		isSlice = true
		p.next()
		if p.tok != token.RBRACK {
			high = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)
	if isSlice {
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
	}
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses the parenthesized argument list of a call
// (or conversion) applied to fun, including an optional trailing "..."
// for passing a slice to a variadic function.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	// "..." must follow the last argument, hence the !ellipsis.IsValid() guard
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list") {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")
	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal: a nested
// literal value, a plain value, or (when keyOk is set) a "key: value"
// pair. Map keys are deliberately left unresolved.
func (p *parser) parseElement(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	if p.tok == token.LBRACE {
		// nested composite literal with elided type
		return p.parseLiteralValue(nil)
	}
	x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key
	if keyOk {
		if p.tok == token.COLON {
			colon := p.pos
			p.next()
			return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
		}
		p.resolve(x) // not a map key
	}
	return x
}
// parseElementList parses the comma-separated elements of a composite
// literal, up to the closing "}" or EOF.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement(true))
		if !p.atComma("composite literal") {
			break
		}
		p.next()
	}
	return
}
// parseLiteralValue parses the brace-delimited value of a composite
// literal; typ is the (possibly nil, for nested elided types) literal type
// parsed by the caller.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type). If it is not,
// the error is reported and a BadExpr covering x is returned; otherwise x
// is returned unchanged. The empty cases enumerate all node kinds that are
// valid expressions.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		// unparen strips all ParenExprs, so this case cannot be reached
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: x.End()}
	}
	return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// Only the raw-array-length form is rejected; everything else passes
// through unchanged.
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		// unparen strips all ParenExprs, so this case cannot be reached
		panic("unreachable")
	case *ast.UnaryExpr:
	case *ast.ArrayType:
		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			// [...]T is only legal as a composite literal type
			p.error(len.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: x.End()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors/type assertions (.), index/slice ([), calls ((), and composite
// literal values ({). If lhs is set and the result is an identifier, it is
// not resolved; once any suffix is applied, the operand is resolved and
// lhs is cleared.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				p.next() // make progress
				x = &ast.BadExpr{From: pos, To: p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// composite literal only if x can be a literal type and we are
			// not in a control-clause context (exprLev < 0) with a bare
			// type name (which would be the start of the statement body)
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// parseUnaryExpr parses a unary expression: a prefix operator followed by
// a unary expression, a receive operation or receive-only channel type
// (both start with "<-"), a pointer type or dereference ("*"), or a
// primary expression. If lhs is set and the result is an identifier, it
// is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
	case token.ARROW:
		// channel type or receive expression
		pos := p.pos
		p.next()
		if p.tok == token.CHAN {
			// <-chan T: receive-only channel type
			p.next()
			value := p.parseType()
			return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
		}
		// <-x: receive operation
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr(lhs)
}
// parseBinaryExpr parses a binary expression with operators of precedence
// >= prec1, using operator-precedence climbing: the outer loop walks down
// from the highest precedence found, the inner loop consumes consecutive
// operators of equal precedence (left-associative). If lhs is set and the
// result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			if lhs {
				// x is used as an operand: it must be resolved now
				p.resolve(x)
				lhs = false
			}
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
		}
	}
	return x
}
// parseExpr parses an expression at the lowest binding precedence.
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a right-hand-side expression (identifiers resolved)
// and verifies that it is an expression, not a type.
func (p *parser) parseRhs() ast.Expr {
	x := p.parseExpr(false)
	return p.checkExpr(x)
}
// parseRhsOrType parses a right-hand-side expression that may also be a
// type (e.g. the argument of make/new), rejecting only raw types like [...]T.
func (p *parser) parseRhsOrType() ast.Expr {
	x := p.parseExpr(false)
	return p.checkExprOrType(x)
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
	basic   = iota // plain simple statement
	labelOk        // a label declaration ("name:") is allowed
	rangeOk        // a range clause ("x := range y") is allowed
)
// parseSimpleStmt parses a simple statement: an assignment or short
// variable declaration, a labeled statement (mode == labelOk), a send
// statement, an increment/decrement, or a bare expression statement.
//
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// "x := range y" / "x = range y": wrap rhs as a unary RANGE expr
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			// declare the newly introduced variables
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error re-
		// ported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression that must be a function or method
// call (the target of a go/defer statement). It returns nil — after
// reporting an error unless one was already reported — if the parsed
// expression is not a call.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	if _, isBad := x.(*ast.BadExpr); !isBad {
		// only report error if it's a new one
		p.errorExpected(x.Pos(), "function/method call")
	}
	return nil
}
// parseGoStmt parses "go" CallExpr ";". If the operand is not a call,
// a BadStmt covering the "go" keyword is returned.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}
	return &ast.GoStmt{Go: pos, Call: call}
}
// parseDeferStmt parses "defer" CallExpr ";". If the operand is not a
// call, a BadStmt covering the "defer" keyword is returned.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: pos, Call: call}
}
// parseReturnStmt parses "return" [ExpressionList] ";". The result list
// is empty when the return is immediately followed by ';' or '}'.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: pos, Results: x}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough statement
// (tok selects which). An optional label is allowed for all but
// fallthrough; label targets are recorded for later resolution.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a statement into the expression it wraps (used for
// if/switch/for conditions, which are parsed as simple statements).
// A nil statement yields nil; a non-expression statement is reported and
// replaced by a BadExpr.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with optional init statement and
// optional else branch:
//
//	"if" [ SimpleStmt ";" ] Expression Block [ "else" Statement ] .
//
// The condition is parsed at exprLev == -1 so a '{' is taken as the
// start of the body, not of a composite literal.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	var s ast.Stmt
	var x ast.Expr
	{
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok == token.SEMICOLON {
			// "if ; cond": empty init statement
			p.next()
			x = p.parseRhs()
		} else {
			s, _ = p.parseSimpleStmt(basic)
			if p.tok == token.SEMICOLON {
				// init statement followed by condition
				p.next()
				x = p.parseRhs()
			} else {
				// no init statement: s was really the condition
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a non-empty, comma-separated list of types
// (used for the case lists of type switches).
func (p *parser) parseTypeList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	for {
		list = append(list, p.parseType())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// parseCaseClause parses one "case x, y:" or "default:" clause of a
// switch statement. For type switches (typeSwitch) the case list holds
// types rather than expressions. The clause body gets its own scope.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	pos := p.pos
	var list []ast.Expr // nil for the default clause
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type switch guard: either a
// bare "x.(type)" expression statement or a single-variable short
// declaration "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
	if e, ok := s.(*ast.ExprStmt); ok {
		// x.(nil)
		return isTypeSwitchAssert(e.X)
	}
	if a, ok := s.(*ast.AssignStmt); ok {
		// v := x.(nil)
		return len(a.Lhs) == 1 && a.Tok == token.DEFINE && len(a.Rhs) == 1 && isTypeSwitchAssert(a.Rhs[0])
	}
	return false
}
// parseSwitchStmt parses an expression switch or a type switch, with
// optional init statement and tag/guard. Which AST node is produced
// depends on whether the parsed guard has the type-switch form.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt // s1 = init statement, s2 = tag/guard
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // '{' now starts the switch body, not a composite literal
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// what we parsed was the init statement; the tag/guard follows
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one communication clause of a select statement:
// "case" followed by a send statement, a receive statement (with optional
// assignment), or a bare receive expression — or "default". The clause
// body gets its own scope.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt // nil for the default clause
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment: "v = <-ch" or "v, ok := <-ch"
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.shortVarDecl(as, lhs)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses "select { CommClause... }".
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	return &ast.SelectStmt{Select: pos, Body: body}
}
// parseForStmt parses all for-statement forms: "for {}", "for cond {}",
// "for init; cond; post {}", and "for k, v := range x {}". It returns a
// ForStmt or, for the range form, a RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt // init, condition (or range assignment), post
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // '{' now starts the loop body, not a composite literal
		if p.tok != token.SEMICOLON {
			s2, isRange = p.parseSimpleStmt(rangeOk)
		}
		if !isRange && p.tok == token.SEMICOLON {
			// three-clause form: what we parsed was the init statement
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{From: pos, To: body.End()}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current token.
// On an unexpected token it reports an error, resynchronizes via
// syncStmt, and returns a BadStmt.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{Semicolon: p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		syncStmt(p)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// parseSpecFunction is the type of the per-keyword spec parsers
// (parseImportSpec, parseConstSpec, parseTypeSpec, parseVarSpec)
// passed to parseGenDecl; iota is the spec index within the decl group.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
// isValidImport reports whether lit — a string literal as produced by
// go/scanner — denotes a legal, non-empty import path: every rune must
// be graphic, non-space, and not one of the characters the spec forbids.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	path, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if path == "" {
		return false
	}
	for _, r := range path {
		switch {
		case !unicode.IsGraphic(r), unicode.IsSpace(r), strings.ContainsRune(illegalChars, r):
			return false
		}
	}
	return true
}
// parseImportSpec parses one import spec: an optional local name
// ("." or an identifier) followed by a string literal path. The spec is
// appended to p.imports as a side effect. The iota argument is unused.
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		// dot import: names are merged into the importing file's scope
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}
	var path *ast.BasicLit
	if p.tok == token.STRING {
		if !isValidImport(p.lit) {
			p.error(p.pos, "invalid import path: "+p.lit)
		}
		// keep the (possibly invalid) literal so the AST stays faithful
		path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.linecomment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    path,
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseConstSpec parses one const spec (IdentifierList [Type] ["=" ExprList])
// and declares the names in the current scope. The first spec of a group
// (iota == 0) must carry values; later specs may repeat the previous ones.
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ConstSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	// a type or "=" forces an initializer; so does being the first spec
	if typ != nil || p.tok == token.ASSIGN || iota == 0 {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.linecomment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	// iota is stored as the object's Data for constant evaluation by clients
	p.declare(spec, iota, p.topScope, ast.Con, idents...)
	return spec
}
// parseTypeSpec parses one type spec (identifier followed by a type)
// and declares the type name in the current scope. The name is declared
// before the type is parsed so recursive types can refer to it.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment
	return spec
}
// parseVarSpec parses one var spec (IdentifierList [Type] ["=" ExprList])
// and declares the names in the current scope. Unlike constants, an
// initializer is required exactly when no type is given.
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "VarSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	// no type means values are mandatory; "=" means they are present
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.linecomment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	p.declare(spec, nil, p.topScope, ast.Var, idents...)
	return spec
}
// parseGenDecl parses a general declaration introduced by keyword
// (import, const, type, or var): either a single spec, or a
// parenthesized group of specs parsed with f. For grouped const
// declarations the running spec index is passed to f as iota.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		// parenthesized group: keyword ( spec; spec; ... )
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p, p.leadComment, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// single, ungrouped spec
		list = append(list, f(p, nil, 0))
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseReceiver parses a method receiver and validates it: there must
// be exactly one receiver whose type is an optionally-dereferenced
// (unqualified) identifier. Invalid receivers are replaced by fields
// with *ast.BadExpr types so parsing can continue.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	par := p.parseParameters(scope, false)
	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(par.Opening, "exactly one receiver")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
		return par
	}
	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		if _, isBad := base.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(base.Pos(), "(unqualified) identifier")
		}
		par.List = []*ast.Field{
			{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}},
		}
	}
	return par
}
// parseFuncDecl parses a function or method declaration. Top-level
// functions (but not methods, and not init functions) are declared in
// the package scope; the body is optional (forward declarations for
// functions implemented externally).
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// '(' after func introduces a method receiver
		recv = p.parseReceiver(scope)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		// body is optional (e.g. assembly-implemented functions)
		body = p.parseBody(scope)
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses any top-level declaration. If the current token does
// not start a declaration, an error is reported, sync is used to
// advance past the bad input, and an *ast.BadDecl is returned.
func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	switch tok := p.tok; tok {
	case token.FUNC:
		return p.parseFuncDecl()
	case token.CONST:
		return p.parseGenDecl(tok, parseConstSpec)
	case token.TYPE:
		return p.parseGenDecl(tok, parseTypeSpec)
	case token.VAR:
		return p.parseGenDecl(tok, parseVarSpec)
	}
	pos := p.pos
	p.errorExpected(pos, "declaration")
	sync(p)
	return &ast.BadDecl{From: pos, To: p.pos}
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, and the remaining top-level declarations (subject to
// p.mode's PackageClauseOnly/ImportsOnly flags). Identifiers that could
// not be resolved against the package scope are collected in the
// returned File's Unresolved list for a later resolution phase.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	var decls []ast.Decl
	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.
	if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(syncDecl))
			}
		}
	}
	assert(p.topScope == p.pkgScope, "imbalanced scopes")
	// resolve global identifiers within the same file;
	// compact the still-unresolved ones to the front of p.unresolved
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc:        doc,
		Package:    pos,
		Name:       ident,
		Decls:      decls,
		Scope:      p.pkgScope,
		Imports:    p.imports,
		Unresolved: p.unresolved[0:i],
		Comments:   p.comments,
	}
}
go/parser: minor cleanup
- there is no label scope at package level
- open/close all scopes symmetrically now that
  there is only one parse entry point (parseFile)
R=golang-dev, r
CC=golang-dev
http://codereview.appspot.com/6230047
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
package parser
import (
"fmt"
"go/ast"
"go/scanner"
"go/token"
"strconv"
"strings"
"unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File        // source file being parsed
	errors  scanner.ErrorList  // errors encountered so far
	scanner scanner.Scanner    // token scanner for this file
	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode & Trace != 0)
	indent uint // indentation used for tracing output
	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal
	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to syncXXX without progress
	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression
	// Ordinary identifier scopes
	pkgScope   *ast.Scope        // pkgScope.Outer == nil
	topScope   *ast.Scope        // top-most scope; may be pkgScope
	unresolved []*ast.Ident      // unresolved identifiers
	imports    []*ast.ImportSpec // list of imports
	// Label scopes
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser to parse src, registering the file with fset
// and configuring the scanner per mode. It also primes the one-token
// look-ahead by calling p.next once.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, fset.Base(), len(src))
	var m scanner.Mode
	if mode&ParseComments != 0 {
		m = scanner.ScanComments
	}
	// route scanner errors into the parser's error list
	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, eh, m)
	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}
// ----------------------------------------------------------------------------
// Scoping support
// openScope pushes a new ordinary identifier scope onto the scope chain.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}
// closeScope pops the innermost ordinary identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}
// openLabelScope pushes a new label scope and an empty list of
// forward-referenced (not yet resolved) label targets.
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}
// closeLabelScope resolves all labels referenced in the scope being
// closed (reporting undefined ones if DeclarationErrors is set) and
// pops the label scope and its target list.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare creates objects of the given kind for idents and inserts them
// into scope, recording decl/data on each object. Redeclarations of a
// non-blank name are reported when DeclarationErrors is set; the blank
// identifier is never inserted into a scope.
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, ident := range idents {
		assert(ident.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, ident.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		ident.Obj = obj
		if ident.Name != "_" {
			if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
				prevDecl := ""
				if pos := alt.Pos(); pos.IsValid() {
					prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
				}
				p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
			}
		}
	}
}
// shortVarDecl declares the left-hand-side identifiers of a ":="
// assignment in the current scope. Already-declared names are treated
// as redeclarations (their Obj is shared); if no non-blank name is new,
// an error is reported when DeclarationErrors is set.
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			// lhs of ":=" must consist of identifiers only
			p.errorExpected(x.Pos(), "identifier")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object) // compared by pointer identity, never inspected
// resolve attempts to bind x (if it is a non-blank identifier) to an
// object in one of the currently open scopes. If no scope declares the
// name, the identifier is marked with the unresolved sentinel and
// queued on p.unresolved for the package-level resolution pass.
func (p *parser) resolve(x ast.Expr) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	ident.Obj = unresolved
	p.unresolved = append(p.unresolved, ident)
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace prints one line of trace output: the current source
// position, dots proportional to the trace nesting depth, and the given
// arguments. The dots string is repeated when the indent exceeds its
// length.
func (p *parser) printTrace(a ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
		". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = uint(len(dots))
	pos := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
	i := 2 * p.indent // two dots per indent level
	for ; i > n; i -= n {
		fmt.Print(dots)
	}
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
// trace prints an opening trace line for msg, increments the indent,
// and returns p so it can be combined with un in a defer statement.
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
// un undoes one level of tracing indentation and prints the closing
// paren. Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// next0 advances to the next token (including comments), updating
// p.pos, p.tok, and p.lit. It is the raw advance; use p.next for the
// comment-collecting variant.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it .
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// consumeComment consumes the comment at the current token and returns
// it together with the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}
	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()
	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
//
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	// comments within n lines of each other belong to the same group
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}
	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)
	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	line := p.file.Line(p.pos) // current line
	p.next0()
	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int
		if p.file.Line(p.pos) == line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}
		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}
		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// error records an error message at the given token position.
func (p *parser) error(pos token.Pos, msg string) {
	p.errors.Add(p.file.Position(pos), msg)
}
// errorExpected records an "expected ..." error at pos; when pos is the
// current token position the message is extended with a description of
// what was actually found.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			// automatically-inserted semicolon
			msg += ", found newline"
		} else {
			msg += ", found '" + p.tok.String() + "'"
			if p.tok.IsLiteral() {
				msg += " " + p.lit
			}
		}
	}
	p.error(pos, msg)
}
// expect consumes the current token, reporting an error if it is not
// tok, and returns its position. The parser always advances so that it
// makes progress even on errors.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		// a newline-inserted semicolon here means a ',' was forgotten
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
// expectSemi consumes a terminating semicolon if one is required at the
// current position; on error it synchronizes to the next statement.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		if p.tok == token.SEMICOLON {
			p.next()
		} else {
			p.errorExpected(p.pos, "';'")
			syncStmt(p)
		}
	}
}
// atComma reports whether the current token may be treated as a comma
// in the given context. A semicolon inserted by the scanner at a
// newline is accepted in place of a comma (after reporting an error) so
// that parsing can continue.
func (p *parser) atComma(context string) bool {
	switch {
	case p.tok == token.COMMA:
		return true
	case p.tok == token.SEMICOLON && p.lit == "\n":
		p.error(p.pos, "missing ',' before newline in "+context)
		return true // "insert" the comma and continue
	}
	return false
}
// assert panics with an internal-error message when cond is false; it
// is used to check parser invariants.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
// syncStmt advances to the next statement.
// Used for synchronization after an error.
//
func syncStmt(p *parser) {
	for {
		switch p.tok {
		case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
			token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
			token.IF, token.RETURN, token.SELECT, token.SWITCH,
			token.TYPE, token.VAR:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		p.next()
	}
}
// syncDecl advances to the next declaration.
// Used for synchronization after an error.
//
func syncDecl(p *parser) {
	for {
		switch p.tok {
		case token.CONST, token.TYPE, token.VAR:
			// see comments in syncStmt for the progress/limit logic
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
		case token.EOF:
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses the identifier at the current token. If the token
// is not an identifier, an error is reported (via expect) and an ident
// named "_" is returned so that parsing can continue.
func (p *parser) parseIdent() *ast.Ident {
	pos, name := p.pos, "_"
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling
	} else {
		name = p.lit
		p.next()
	}
	return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated, non-empty list of
// identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	for {
		list = append(list, p.parseIdent())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Common productions
// parseExprList parses a comma-separated, non-empty expression list.
// If lhs is set, result list elements which are identifiers are not
// resolved (they may be declaration targets).
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}
	for {
		list = append(list, p.checkExpr(p.parseExpr(lhs)))
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// parseLhsList parses a left-hand-side expression list. Identifier
// resolution is deferred or skipped depending on what follows the list
// (":=", ":", or anything else).
func (p *parser) parseLhsList() []ast.Expr {
	list := p.parseExprList(true)
	switch p.tok {
	case token.DEFINE:
		// lhs of a short variable declaration
		// but doesn't enter scope until later:
		// caller must call p.shortVarDecl(p.makeIdentList(list))
		// at appropriate time.
	case token.COLON:
		// lhs of a label declaration or a communication clause of a select
		// statement (parseLhsList is not called when parsing the case clause
		// of a switch statement):
		// - labels are declared by the caller of parseLhsList
		// - for communication clauses, if there is a stand-alone identifier
		//   followed by a colon, we have a syntax error; there is no need
		//   to resolve the identifier in that case
	default:
		// identifiers must be declared elsewhere
		for _, x := range list {
			p.resolve(x)
		}
	}
	return list
}
// parseRhsList parses a right-hand-side expression list; all
// identifiers in it are resolved.
func (p *parser) parseRhsList() []ast.Expr {
	return p.parseExprList(false)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type. If no type is present at the current token,
// an error is reported, the parser advances one token, and an
// *ast.BadExpr covering the consumed source is returned.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	if typ := p.tryType(); typ != nil {
		return typ
	}
	pos := p.pos
	p.errorExpected(pos, "type")
	p.next() // make progress
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseTypeName parses a (possibly package-qualified) type name.
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	ident := p.parseIdent()
	// don't resolve ident yet - it may be a parameter or field name
	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		p.resolve(ident)
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}
	return ident
}
// parseArrayType parses an array or slice type starting at '['. If
// ellipsisOk is set, a "..." length (as in composite literals of the
// form [...]T{...}) is accepted; an absent length denotes a slice type.
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}
	lbrack := p.expect(token.LBRACK)
	// named "length" rather than "len" so the builtin is not shadowed
	var length ast.Expr
	if ellipsisOk && p.tok == token.ELLIPSIS {
		length = &ast.Ellipsis{Ellipsis: p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		length = p.parseRhs()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: length, Elt: elt}
}
// makeIdentList converts a list of expressions into a list of
// identifiers of equal length. Non-identifier entries are reported as
// errors (unless they are already BadExprs) and replaced by "_" idents.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
	idents := make([]*ast.Ident, len(list))
	for i, x := range list {
		if ident, ok := x.(*ast.Ident); ok {
			idents[i] = ident
			continue
		}
		if _, isBad := x.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(x.Pos(), "identifier")
		}
		idents[i] = &ast.Ident{NamePos: x.Pos(), Name: "_"}
	}
	return idents
}
// parseFieldDecl parses one struct field declaration: either an
// identifier list followed by a type, or a single anonymous
// (embedded) field, each with an optional tag. The field names are
// declared in the given struct scope.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}
	doc := p.leadComment
	// fields
	list, typ := p.parseVarList(false)
	// optional tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}
	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list)
	} else {
		// ["*"] TypeName (AnonymousField)
		typ = list[0] // we always have at least one element
		p.resolve(typ)
		if n := len(list); n > 1 || !isTypeName(deref(typ)) {
			pos := typ.Pos()
			p.errorExpected(pos, "anonymous field")
			typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
		}
	}
	p.expectSemi() // call before accessing p.linecomment
	field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
	p.declare(field, nil, scope, ast.Var, idents...)
	return field
}
// parseStructType parses a struct type ("struct { ... }"), collecting
// its field declarations in a fresh struct scope.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}
	pos := p.expect(token.STRUCT)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // struct scope
	var list []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// a field declaration cannot start with a '(' but we accept
		// it here for more robust parsing and better error messages
		// (parseFieldDecl will check and complain if necessary)
		list = append(list, p.parseFieldDecl(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.StructType{
		Struct: pos,
		Fields: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parsePointerType parses a pointer type ("*" followed by a base type).
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	pos := p.expect(token.MUL)
	return &ast.StarExpr{Star: pos, X: p.parseType()}
}
// tryVarType tries to parse a type in a variable/parameter position;
// it returns nil if none is present. If isParam is set, a "..." prefix
// (variadic parameter) is accepted; a "..." without a following type is
// an error and yields an Ellipsis wrapping a BadExpr.
func (p *parser) tryVarType(isParam bool) ast.Expr {
	if isParam && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
		if typ == nil {
			p.error(pos, "'...' parameter is missing type")
			typ = &ast.BadExpr{From: pos, To: p.pos}
		}
		return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
	}
	return p.tryIdentOrType(false)
}
// parseVarType is like tryVarType but a type must be present: if none
// is found, an error is reported, the parser advances one token, and a
// BadExpr is returned.
func (p *parser) parseVarType(isParam bool) ast.Expr {
	if typ := p.tryVarType(isParam); typ != nil {
		return typ
	}
	pos := p.pos
	p.errorExpected(pos, "type")
	p.next() // make progress
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseVarList parses a comma-separated list of "var types". If the
// list turns out to be a list of identifiers, the following type is
// returned as typ (non-nil); otherwise typ is nil and list holds the
// types themselves. Note the loop variable typ shadows the result typ
// on purpose: only the final tryVarType call sets the result.
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
	if p.trace {
		defer un(trace(p, "VarList"))
	}
	// a list of identifiers looks like a list of type names
	//
	// parse/tryVarType accepts any type (including parenthesized
	// ones) even though the syntax does not permit them here: we
	// accept them all for more robust parsing and complain later
	for typ := p.parseVarType(isParam); typ != nil; {
		list = append(list, typ)
		if p.tok != token.COMMA {
			break
		}
		p.next()
		typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
	}
	// if we had a list of identifiers, it must be followed by a type
	if typ = p.tryVarType(isParam); typ != nil {
		p.resolve(typ)
	}
	return
}
// parseParameterList parses the contents of a parameter list, handling
// both the named form (ident lists followed by types) and the anonymous
// form (types only). Named parameters are declared in the given
// function scope.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}
	// the first group decides which form the whole list takes
	list, typ := p.parseVarList(ellipsisOk)
	if typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		field := &ast.Field{Names: idents, Type: typ}
		params = append(params, field)
		// Go spec: The scope of an identifier denoting a function
		// parameter or result variable is the function body.
		p.declare(field, nil, scope, ast.Var, idents...)
		if p.tok == token.COMMA {
			p.next()
		}
		// remaining groups are parsed directly as ident-list + type
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.parseIdentList()
			typ := p.parseVarType(ellipsisOk)
			field := &ast.Field{Names: idents, Type: typ}
			params = append(params, field)
			// Go spec: The scope of an identifier denoting a function
			// parameter or result variable is the function body.
			p.declare(field, nil, scope, ast.Var, idents...)
			if !p.atComma("parameter list") {
				break
			}
			p.next()
		}
	} else {
		// Type { "," Type } (anonymous parameters)
		params = make([]*ast.Field, len(list))
		for i, x := range list {
			p.resolve(x)
			params[i] = &ast.Field{Type: x}
		}
	}
	return
}
// parseParameters parses a parenthesized (possibly empty) parameter
// list and returns it as an *ast.FieldList.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}
	lparen := p.expect(token.LPAREN)
	var list []*ast.Field
	if p.tok != token.RPAREN {
		list = p.parseParameterList(scope, ellipsisOk)
	}
	rparen := p.expect(token.RPAREN)
	return &ast.FieldList{Opening: lparen, List: list, Closing: rparen}
}
// parseResult parses a function result: either a parenthesized
// parameter list, a single bare type, or nothing (nil is returned).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}
	if p.tok == token.LPAREN {
		return p.parseParameters(scope, false)
	}
	if typ := p.tryType(); typ != nil {
		return &ast.FieldList{List: []*ast.Field{{Type: typ}}}
	}
	return nil
}
// parseSignature parses a function signature: a parameter list
// (ellipsis allowed) followed by an optional result.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}
	params = p.parseParameters(scope, true)
	results = p.parseResult(scope)
	return
}
// parseFuncType parses a function type ("func" Signature) and returns
// it together with the newly created function scope, which the caller
// uses when a function literal body follows.
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	params, results := p.parseSignature(scope)
	return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses one interface element: either a method
// (identifier followed by a signature) or an embedded interface type
// name. Method names are declared in the given interface scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}
	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.linecomment
	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	p.declare(spec, nil, scope, ast.Fun, idents...)
	return spec
}
// parseInterfaceType parses an interface type ("interface { ... }"),
// collecting its method specs in a fresh interface scope.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}
	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	scope := ast.NewScope(nil) // interface scope
	var list []*ast.Field
	for p.tok == token.IDENT {
		list = append(list, p.parseMethodSpec(scope))
	}
	rbrace := p.expect(token.RBRACE)
	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
// parseMapType parses a map type ("map" "[" KeyType "]" ValueType).
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}
	pos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	key := p.parseType()
	p.expect(token.RBRACK)
	return &ast.MapType{Map: pos, Key: key, Value: p.parseType()}
}
// parseChanType parses a channel type in one of its three forms:
// "chan T" (bidirectional), "chan<- T" (send-only), or
// "<-chan T" (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}
	pos := p.pos
	dir := ast.SEND | ast.RECV
	if p.tok == token.CHAN {
		p.next()
		if p.tok == token.ARROW {
			// chan<- T
			p.next()
			dir = ast.SEND
		}
	} else {
		// <-chan T
		p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()
	return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
}
// tryIdentOrType tries to parse a type starting at the current token
// and returns nil if none is present. ellipsisOk is forwarded to array
// type parsing (for [...]T forms).
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType(ellipsisOk)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ, _ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}
	// no type found
	return nil
}
// tryType tries to parse a type and resolves it on success; it returns
// nil if no type is present at the current token.
func (p *parser) tryType() ast.Expr {
	if typ := p.tryIdentOrType(false); typ != nil {
		p.resolve(typ)
		return typ
	}
	return nil
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list (case/default label, closing brace, or EOF).
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}
	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseStmt())
	}
	return
}
// parseBody parses a function body using the given (pre-created)
// function scope. Unlike parseBlockStmt it installs scope directly
// rather than opening a new one, so that parameters declared by the
// signature are visible; it also brackets a label scope.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}
	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a braced statement block, opening a fresh
// scope for declarations inside it.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}
	lbrace := p.expect(token.LBRACE)
	p.openScope()
	list := p.parseStmtList()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)
	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a "func" type; if a body follows, the
// result is a function literal instead of a bare function type.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}
	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		// function type only
		return typ
	}
	// Function literal: the body may contain expressions regardless of
	// the surrounding expression level (e.g. inside an if header).
	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--
	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}
	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			// Left-hand sides are resolved later (they may be declarations).
			p.resolve(x)
		}
		return x
	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x
	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}
	// Not a simple operand: try the remaining type forms (composite
	// literal types or conversion targets).
	if typ := p.tryIdentOrType(true); typ != nil {
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier") // IDENT handled above
		return typ
	}
	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncStmt(p) // advance to a plausible statement start to limit error cascades
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the identifier following a "." and wraps x in a
// selector expression x.Sel. The "." has already been consumed.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	sel := p.parseIdent()
	return &ast.SelectorExpr{X: x, Sel: sel}
}
// parseTypeAssertion parses "(T)" or "(type)" after "x." and returns
// the corresponding type assertion. A nil Type marks the special
// x.(type) form used in type switches. The "." has been consumed.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}
	p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	p.expect(token.RPAREN)
	return &ast.TypeAssertExpr{X: x, Type: typ}
}
// parseIndexOrSlice parses "[i]" or "[low:high]" following x and
// returns an IndexExpr or SliceExpr accordingly. The presence of a
// colon decides between the two forms; both bounds of a slice are
// optional.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var low, high ast.Expr
	isSlice := false
	if p.tok != token.COLON {
		low = p.parseRhs()
	}
	if p.tok == token.COLON {
		isSlice = true
		p.next()
		if p.tok != token.RBRACK {
			high = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)
	if isSlice {
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
	}
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
// parseCallOrConversion parses the parenthesized argument list of a
// call (or conversion) applied to fun, including an optional trailing
// "..." for variadic calls. At this stage calls and conversions are
// syntactically indistinguishable.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	// An "..." must follow the final argument, hence the !ellipsis.IsValid()
	// loop condition.
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list") {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")
	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseElement parses one element of a composite literal: either a
// nested literal value, a plain value, or (when keyOk is set) a
// key:value pair. Keys are deliberately left unresolved since map keys
// need not be identifiers in scope.
func (p *parser) parseElement(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}
	if p.tok == token.LBRACE {
		// Nested composite literal with elided type.
		return p.parseLiteralValue(nil)
	}
	x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key
	if keyOk {
		if p.tok == token.COLON {
			colon := p.pos
			p.next()
			return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
		}
		p.resolve(x) // not a map key
	}
	return x
}
// parseElementList parses the comma-separated elements of a composite
// literal, up to (but not including) the closing brace.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}
	for p.tok != token.RBRACE && p.tok != token.EOF {
		list = append(list, p.parseElement(true))
		if !p.atComma("composite literal") {
			break
		}
		p.next()
	}
	return
}
// parseLiteralValue parses the braced element list of a composite
// literal with the given (possibly nil, for elided) type.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}
	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type).
// Nodes that are legal expressions pass through unchanged; anything
// else is reported and replaced by a BadExpr covering the same span.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	// Parens are stripped first, so a ParenExpr can never reach the switch.
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: x.End()}
	}
	return x
}
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
}
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
}
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
// Array types whose length is "..." are only valid as composite
// literal types; anywhere else they are reported and replaced by a
// BadExpr spanning x.
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		panic("unreachable") // unparen removed all parens
	case *ast.UnaryExpr:
	case *ast.ArrayType:
		// Note: named "length" (not "len") to avoid shadowing the
		// predeclared len builtin.
		if length, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
			p.error(length.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: x.End()}
		}
	}
	// all other nodes are expressions or types
	return x
}
// parsePrimaryExpr parses an operand followed by any number of
// suffixes: selectors/type assertions (.), index/slice ([), calls (()
// and composite literal values ({). A pending lhs identifier is
// resolved as soon as a suffix proves it is being used, not declared.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExpr(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				p.next() // make progress
				x = &ast.BadExpr{From: pos, To: p.pos}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// Only treat "{" as a composite literal when x can be a
			// literal type; at exprLev < 0 (control-clause headers) a
			// bare type name is taken as the start of the block instead.
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// parseUnaryExpr parses a unary expression, handling the prefix
// operators + - ! ^ &, the receive/channel-type ambiguity of "<-",
// and the pointer-type/dereference ambiguity of "*".
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
	case token.ARROW:
		// channel type or receive expression
		pos := p.pos
		p.next()
		if p.tok == token.CHAN {
			// "<-chan T": a receive-only channel type.
			p.next()
			value := p.parseType()
			return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
		}
		x := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}
	return p.parsePrimaryExpr(lhs)
}
// parseBinaryExpr parses a binary expression via precedence climbing:
// operands are combined left-associatively, tighter precedences first,
// for all operators with precedence >= prec1.
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for prec := p.tok.Precedence(); prec >= prec1; prec-- {
		for p.tok.Precedence() == prec {
			pos, op := p.pos, p.tok
			p.next()
			if lhs {
				// x is part of a larger expression, hence a use, not a declaration.
				p.resolve(x)
				lhs = false
			}
			y := p.parseBinaryExpr(false, prec+1)
			x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
		}
	}
	return x
}
// parseExpr parses a full expression starting at the lowest binary
// operator precedence.
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a right-hand-side expression (resolved, and required
// to be an expression rather than a type).
func (p *parser) parseRhs() ast.Expr {
	return p.checkExpr(p.parseExpr(false))
}
// parseRhsOrType is like parseRhs but also admits types (needed for
// conversions and builtins such as make/new).
func (p *parser) parseRhsOrType() ast.Expr {
	return p.checkExprOrType(p.parseExpr(false))
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
	basic   = iota // plain simple statement
	labelOk        // a labeled statement is additionally allowed
	rangeOk        // a range clause is additionally allowed (for statements)
)
// parseSimpleStmt parses a simple statement: assignment/short variable
// declaration (possibly a range clause), labeled statement, send
// statement, inc/dec statement, or expression statement.
//
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		// A range clause only follows "=" or ":=" and only when the caller allows it.
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			pos := p.pos
			p.next()
			// Encode "range x" as a unary expression; parseForStmt unwraps it.
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	// Not an assignment: all remaining forms take a single expression.
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error re-
		// ported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses an expression that must be a call (the operand
// of go/defer). It returns nil — after reporting an error unless one
// was already reported — when the expression is not a call.
func (p *parser) parseCallExpr() *ast.CallExpr {
	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
	if call, isCall := x.(*ast.CallExpr); isCall {
		return call
	}
	if _, isBad := x.(*ast.BadExpr); !isBad {
		// only report error if it's a new one
		p.errorExpected(x.Pos(), "function/method call")
	}
	return nil
}
// parseGoStmt parses a "go" statement; a missing call expression
// yields a BadStmt spanning the keyword.
func (p *parser) parseGoStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "GoStmt"))
	}
	pos := p.expect(token.GO)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
	}
	return &ast.GoStmt{Go: pos, Call: call}
}
// parseDeferStmt parses a "defer" statement; a missing call expression
// yields a BadStmt spanning the keyword.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	pos := p.expect(token.DEFER)
	call := p.parseCallExpr()
	p.expectSemi()
	if call == nil {
		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: pos, Call: call}
}
// parseReturnStmt parses a return statement with an optional result
// expression list (absent before ";" or "}").
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	pos := p.pos
	p.expect(token.RETURN)
	var x []ast.Expr
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		x = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: pos, Results: x}
}
// parseBranchStmt parses a break, continue, goto, or fallthrough
// statement. An optional label (not allowed after fallthrough) is
// recorded for later resolution against the enclosing label scope.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		n := len(p.targetStack) - 1
		p.targetStack[n] = append(p.targetStack[n], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a simple statement into the condition/tag
// expression of an if/switch/for clause: nil stays nil, an expression
// statement yields its expression, and anything else is an error.
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
	if s == nil {
		return nil
	}
	if es, isExpr := s.(*ast.ExprStmt); isExpr {
		return p.checkExpr(es.X)
	}
	p.error(s.Pos(), "expected condition, found simple statement")
	return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfStmt parses an if statement with an optional init statement
// and an optional else branch. The header is parsed at exprLev == -1
// so that a composite literal "{" is taken as the start of the body.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	pos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	var s ast.Stmt
	var x ast.Expr
	{
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok == token.SEMICOLON {
			// "if ; cond": empty init statement.
			p.next()
			x = p.parseRhs()
		} else {
			s, _ = p.parseSimpleStmt(basic)
			if p.tok == token.SEMICOLON {
				p.next()
				x = p.parseRhs()
			} else {
				// No init statement after all: what was parsed is the condition.
				x = p.makeExpr(s)
				s = nil
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		else_ = p.parseStmt()
	} else {
		p.expectSemi()
	}
	return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
// parseTypeList parses a comma-separated list of types (as used in
// type switch case clauses).
func (p *parser) parseTypeList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	list = append(list, p.parseType())
	for p.tok == token.COMMA {
		p.next()
		list = append(list, p.parseType())
	}
	return
}
// parseCaseClause parses one "case ...:" or "default:" clause of a
// switch statement. In a type switch the case list holds types rather
// than expressions; the clause body gets its own scope.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	} else {
		p.expect(token.DEFAULT) // default clause: list stays nil
	}
	colon := p.expect(token.COLON)
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
func isTypeSwitchAssert(x ast.Expr) bool {
a, ok := x.(*ast.TypeAssertExpr)
return ok && a.Type == nil
}
// isTypeSwitchGuard reports whether s is a type switch guard: either
// the bare form "x.(type)" or the assignment form "v := x.(type)".
func isTypeSwitchGuard(s ast.Stmt) bool {
	switch stmt := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(stmt.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(stmt.Lhs) != 1 || stmt.Tok != token.DEFINE || len(stmt.Rhs) != 1 {
			return false
		}
		return isTypeSwitchAssert(stmt.Rhs[0])
	default:
		return false
	}
}
// parseSwitchStmt parses an expression switch or a type switch; which
// one is decided after the header by inspecting s2 for a type switch
// guard. Up to two simple statements (init; tag/guard) may precede
// the body.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // "{" in the header starts the body, not a composite literal
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// What was parsed so far is the init statement; the tag/guard follows.
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t will
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
// parseCommClause parses one communication clause of a select
// statement: "case send-stmt:", "case recv-stmt:", or "default:".
// A receive may stand alone or be assigned to one or two variables.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					p.shortVarDecl(as, lhs)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT) // default clause: comm stays nil
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseSelectStmt parses a select statement: a brace-delimited list
// of communication clauses.
func (p *parser) parseSelectStmt() *ast.SelectStmt {
	if p.trace {
		defer un(trace(p, "SelectStmt"))
	}
	pos := p.expect(token.SELECT)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCommClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	return &ast.SelectStmt{Select: pos, Body: body}
}
// parseForStmt parses a for statement in any of its forms: condition
// only, init/cond/post clauses, or a range clause. A range clause is
// detected via the isRange result of parseSimpleStmt and converted
// into a RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // "{" in the header starts the body, not a composite literal
		if p.tok != token.SEMICOLON {
			s2, isRange = p.parseSimpleStmt(rangeOk)
		}
		if !isRange && p.tok == token.SEMICOLON {
			// Three-clause form: what was parsed is the init statement.
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		case 1:
			key = as.Lhs[0]
		default:
			p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
			return &ast.BadStmt{From: pos, To: body.End()}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current
// token. Unknown tokens produce a BadStmt and resynchronize at the
// next plausible statement start.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		s = &ast.EmptyStmt{Semicolon: p.pos}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		syncStmt(p)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// A parseSpecFunction parses one declaration specification (import,
// const, type, or var spec). iota is the spec's index within a
// parenthesized declaration group, 0 for a single unparenthesized spec.
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
// isValidImport reports whether lit — a quoted string literal as
// produced by go/scanner — unquotes to a legal import path: non-empty
// and made solely of graphic, non-space runes outside a small set of
// forbidden punctuation characters.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if s == "" {
		return false
	}
	for _, r := range s {
		switch {
		case !unicode.IsGraphic(r), unicode.IsSpace(r), strings.ContainsRune(illegalChars, r):
			return false
		}
	}
	return true
}
// parseImportSpec parses one import spec: an optional local name
// ("." or an identifier) followed by a quoted import path. The spec
// is appended to p.imports. The iota argument is unused for imports.
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var ident *ast.Ident
	switch p.tok {
	case token.PERIOD:
		// Dot import: names are merged into the importing file's scope.
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		ident = p.parseIdent()
	}
	var path *ast.BasicLit
	if p.tok == token.STRING {
		if !isValidImport(p.lit) {
			p.error(p.pos, "invalid import path: "+p.lit)
		}
		path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	} else {
		p.expect(token.STRING) // use expect() error handling
	}
	p.expectSemi() // call before accessing p.lineComment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    path,
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseConstSpec parses one const spec: identifiers, an optional type,
// and an optional "= values" list. Values are mandatory for the first
// spec of a group (iota == 0) or when a type is given; later specs may
// repeat the previous values implicitly.
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ConstSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ != nil || p.tok == token.ASSIGN || iota == 0 {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	p.declare(spec, iota, p.topScope, ast.Con, idents...) // iota becomes the object's Data
	return spec
}
// parseTypeSpec parses one type spec: an identifier followed by a
// type. The name is declared before the type is parsed so that
// self-referential types (e.g. linked lists) resolve correctly.
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.lineComment
	spec.Comment = p.lineComment
	return spec
}
// parseVarSpec parses one var spec: identifiers, then a type and/or
// "= values" — at least one of the two must be present.
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "VarSpec"))
	}
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	if typ == nil || p.tok == token.ASSIGN {
		p.expect(token.ASSIGN)
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.lineComment
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	p.declare(spec, nil, p.topScope, ast.Var, idents...)
	return spec
}
// parseGenDecl parses a general declaration (import, const, type, or
// var): either a single spec or a parenthesized group of specs, each
// parsed by f with its group index as iota.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	pos := p.expect(keyword)
	var lparen, rparen token.Pos
	var list []ast.Spec
	if p.tok == token.LPAREN {
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			list = append(list, f(p, p.leadComment, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	} else {
		// Single unparenthesized spec; its iota is 0.
		list = append(list, f(p, nil, 0))
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: pos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  list,
		Rparen: rparen,
	}
}
// parseReceiver parses a method receiver, declaring it in the given
// function scope. It enforces exactly one receiver whose base type is
// an (unqualified) identifier, substituting a BadExpr field otherwise.
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Receiver"))
	}
	par := p.parseParameters(scope, false)
	// must have exactly one receiver
	if par.NumFields() != 1 {
		p.errorExpected(par.Opening, "exactly one receiver")
		par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
		return par
	}
	// recv type must be of the form ["*"] identifier
	recv := par.List[0]
	base := deref(recv.Type)
	if _, isIdent := base.(*ast.Ident); !isIdent {
		if _, isBad := base.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(base.Pos(), "(unqualified) identifier")
		}
		par.List = []*ast.Field{
			{Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}},
		}
	}
	return par
}
// parseFuncDecl parses a function or method declaration. Functions
// (but not methods) are declared in the package scope, with the
// exception of init functions, which are intentionally left
// undeclared since they cannot be referred to.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// A "(" before the name means this is a method with a receiver.
		recv = p.parseReceiver(scope)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody(scope) // absent for forward/assembly declarations
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:    pos,
			Params:  params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses a top-level declaration. The sync function is used
// to resynchronize the token stream after an unexpected token.
func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	var f parseSpecFunction
	switch p.tok {
	case token.CONST:
		f = parseConstSpec
	case token.TYPE:
		f = parseTypeSpec
	case token.VAR:
		f = parseVarSpec
	case token.FUNC:
		return p.parseFuncDecl()
	default:
		pos := p.pos
		p.errorExpected(pos, "declaration")
		sync(p)
		return &ast.BadDecl{From: pos, To: p.pos}
	}
	return p.parseGenDecl(p.tok, f)
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: the package clause,
// imports, and declarations (subject to the PackageClauseOnly and
// ImportsOnly parser modes). After parsing, identifiers that did not
// resolve locally are matched against the package scope; those still
// unresolved are returned in File.Unresolved for a later phase.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	// Don't bother parsing the rest if we had errors already.
	// Likely not a Go source file at all.
	p.openScope()
	p.pkgScope = p.topScope
	var decls []ast.Decl
	if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(syncDecl))
			}
		}
	}
	p.closeScope()
	assert(p.topScope == nil, "unbalanced scopes")
	assert(p.labelScope == nil, "unbalanced label scopes")
	// resolve global identifiers within the same file
	// (p.unresolved is compacted in place; i is the write index)
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			// Still unresolved: keep it for the caller.
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc:        doc,
		Package:    pos,
		Name:       ident,
		Decls:      decls,
		Scope:      p.pkgScope,
		Imports:    p.imports,
		Unresolved: p.unresolved[0:i],
		Comments:   p.comments,
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package png implements a PNG image decoder and encoder.
//
// The PNG specification is at http://www.w3.org/TR/PNG/.
package png
import (
"compress/zlib"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"image"
"image/color"
"io"
)
// Color type, as per the PNG spec (IHDR "Colour type" field).
const (
	ctGrayscale      = 0 // each pixel is a grayscale sample
	ctTrueColor      = 2 // each pixel is an R,G,B triple
	ctPaletted       = 3 // each pixel is a palette index
	ctGrayscaleAlpha = 4 // grayscale sample plus alpha
	ctTrueColorAlpha = 6 // R,G,B triple plus alpha
)
// A cb is a combination of color type and bit depth
// (e.g. cbG1 = 1-bit grayscale, cbTCA16 = 16-bit truecolor+alpha).
const (
	cbInvalid = iota // unsupported or not-yet-determined combination
	cbG1             // grayscale, 1 bit
	cbG2             // grayscale, 2 bits
	cbG4             // grayscale, 4 bits
	cbG8             // grayscale, 8 bits
	cbGA8            // grayscale+alpha, 8 bits
	cbTC8            // truecolor, 8 bits
	cbP1             // paletted, 1 bit
	cbP2             // paletted, 2 bits
	cbP4             // paletted, 4 bits
	cbP8             // paletted, 8 bits
	cbTCA8           // truecolor+alpha, 8 bits
	cbG16            // grayscale, 16 bits
	cbGA16           // grayscale+alpha, 16 bits
	cbTC16           // truecolor, 16 bits
	cbTCA16          // truecolor+alpha, 16 bits
)
// Filter type, as per the PNG spec (one filter byte precedes each scanline).
const (
	ftNone    = 0 // no filtering
	ftSub     = 1 // predict from the byte to the left
	ftUp      = 2 // predict from the byte above
	ftAverage = 3 // predict from the average of left and above
	ftPaeth   = 4 // predict with the Paeth predictor
	nFilter   = 5 // number of filter types (for validation)
)
// Decoding stage.
// The PNG specification says that the IHDR, PLTE (if present), IDAT and IEND
// chunks must appear in that order. There may be multiple IDAT chunks, and
// IDAT chunks must be sequential (i.e. they may not have any other chunks
// between them).
// http://www.w3.org/TR/PNG/#5ChunkOrdering
const (
	dsStart    = iota // nothing decoded yet
	dsSeenIHDR        // IHDR has been processed
	dsSeenPLTE        // PLTE has been processed
	dsSeenIDAT        // at least one IDAT has been processed
	dsSeenIEND        // IEND has been processed; decoding is complete
)
// pngHeader is the 8-byte signature that every PNG file must begin with.
const pngHeader = "\x89PNG\r\n\x1a\n"
// decoder holds the state of an in-progress PNG decode.
type decoder struct {
	r             io.Reader     // source of the PNG stream
	img           image.Image   // the image being built
	crc           hash.Hash32   // running CRC-32 of the current chunk
	width, height int           // dimensions from IHDR
	depth         int           // bit depth from IHDR
	palette       color.Palette // palette from PLTE (paletted images only)
	cb            int           // cbXxx constant: color type + bit depth combination
	stage         int           // dsXxx constant: chunk-ordering state
	idatLength    uint32        // remaining bytes in the current IDAT chunk
	tmp           [3 * 256]byte // scratch buffer (large enough for a full PLTE)
}
// A FormatError reports that the input is not a valid PNG.
type FormatError string

// Error implements the error interface, prefixing the message with
// the package name and failure category.
func (e FormatError) Error() string {
	const prefix = "png: invalid format: "
	return prefix + string(e)
}
// chunkOrderError is returned when chunks violate the ordering
// mandated by the PNG spec (see the dsXxx stage constants above).
var chunkOrderError = FormatError("chunk out of order")
// An UnsupportedError reports that the input uses a valid but unimplemented PNG feature.
type UnsupportedError string

// Error implements the error interface, prefixing the message with
// the package name and failure category.
func (e UnsupportedError) Error() string {
	const prefix = "png: unsupported feature: "
	return prefix + string(e)
}
// min returns the smaller of a and b.
func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// parseIHDR reads and validates the 13-byte IHDR payload, recording the
// image dimensions, bit depth and combined color/depth code (d.cb), then
// verifies the chunk's CRC.
func (d *decoder) parseIHDR(length uint32) error {
    if length != 13 {
        return FormatError("bad IHDR length")
    }
    if _, err := io.ReadFull(d.r, d.tmp[:13]); err != nil {
        return err
    }
    d.crc.Write(d.tmp[:13])
    // Bytes 10..12 are compression, filter and interlace method; only the
    // zero value of each is implemented here.
    if d.tmp[10] != 0 || d.tmp[11] != 0 || d.tmp[12] != 0 {
        return UnsupportedError("compression, filter or interlace method")
    }
    w := int32(binary.BigEndian.Uint32(d.tmp[0:4]))
    h := int32(binary.BigEndian.Uint32(d.tmp[4:8]))
    if w < 0 || h < 0 {
        return FormatError("negative dimension")
    }
    // Reject dimensions whose pixel count does not fit in int (32-bit hosts).
    nPixels := int64(w) * int64(h)
    if nPixels != int64(int(nPixels)) {
        return UnsupportedError("dimension overflow")
    }
    d.cb = cbInvalid
    d.depth = int(d.tmp[8])
    // Map (bit depth, color type) to a cb* code; combinations not listed
    // here are invalid or unsupported and leave d.cb as cbInvalid.
    switch d.depth {
    case 1:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG1
        case ctPaletted:
            d.cb = cbP1
        }
    case 2:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG2
        case ctPaletted:
            d.cb = cbP2
        }
    case 4:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG4
        case ctPaletted:
            d.cb = cbP4
        }
    case 8:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG8
        case ctTrueColor:
            d.cb = cbTC8
        case ctPaletted:
            d.cb = cbP8
        case ctGrayscaleAlpha:
            d.cb = cbGA8
        case ctTrueColorAlpha:
            d.cb = cbTCA8
        }
    case 16:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG16
        case ctTrueColor:
            d.cb = cbTC16
        case ctGrayscaleAlpha:
            d.cb = cbGA16
        case ctTrueColorAlpha:
            d.cb = cbTCA16
        }
    }
    if d.cb == cbInvalid {
        return UnsupportedError(fmt.Sprintf("bit depth %d, color type %d", d.tmp[8], d.tmp[9]))
    }
    d.width, d.height = int(w), int(h)
    return d.verifyChecksum()
}
// parsePLTE reads the palette chunk: up to 256 RGB triples, all fully
// opaque until a later tRNS chunk supplies alpha. Verifies the chunk CRC.
func (d *decoder) parsePLTE(length uint32) error {
    np := int(length / 3) // The number of palette entries.
    // A palette may not have more entries than 2^depth allows.
    if length%3 != 0 || np <= 0 || np > 256 || np > 1<<uint(d.depth) {
        return FormatError("bad PLTE length")
    }
    n, err := io.ReadFull(d.r, d.tmp[:3*np])
    if err != nil {
        return err
    }
    d.crc.Write(d.tmp[:n])
    switch d.cb {
    case cbP1, cbP2, cbP4, cbP8:
        d.palette = color.Palette(make([]color.Color, np))
        for i := 0; i < np; i++ {
            d.palette[i] = color.RGBA{d.tmp[3*i+0], d.tmp[3*i+1], d.tmp[3*i+2], 0xff}
        }
    case cbTC8, cbTCA8, cbTC16, cbTCA16:
        // As per the PNG spec, a PLTE chunk is optional (and for practical purposes,
        // ignorable) for the ctTrueColor and ctTrueColorAlpha color types (section 4.1.2).
    default:
        return FormatError("PLTE, color type mismatch")
    }
    return d.verifyChecksum()
}
// parsetRNS reads the transparency chunk. For paletted images each byte is
// an alpha value for the corresponding palette entry; other color types are
// either unsupported or invalid here. Verifies the chunk CRC.
func (d *decoder) parsetRNS(length uint32) error {
    if length > 256 {
        return FormatError("bad tRNS length")
    }
    n, err := io.ReadFull(d.r, d.tmp[:length])
    if err != nil {
        return err
    }
    d.crc.Write(d.tmp[:n])
    switch d.cb {
    case cbG8, cbG16:
        return UnsupportedError("grayscale transparency")
    case cbTC8, cbTC16:
        return UnsupportedError("truecolor transparency")
    case cbP1, cbP2, cbP4, cbP8:
        // There may not be more alpha entries than palette entries.
        if n > len(d.palette) {
            return FormatError("bad tRNS length")
        }
        // Rewrite affected entries from opaque RGBA to non-premultiplied NRGBA.
        for i := 0; i < n; i++ {
            rgba := d.palette[i].(color.RGBA)
            d.palette[i] = color.NRGBA{rgba.R, rgba.G, rgba.B, d.tmp[i]}
        }
    case cbGA8, cbGA16, cbTCA8, cbTCA16:
        return FormatError("tRNS, color type mismatch")
    }
    return d.verifyChecksum()
}
// Read presents one or more IDAT chunks as one continuous stream (minus the
// intermediate chunk headers and footers). If the PNG data looked like:
// ... len0 IDAT xxx crc0 len1 IDAT yy crc1 len2 IEND crc2
// then this reader presents xxxyy. For well-formed PNG data, the decoder state
// immediately before the first Read call is that d.r is positioned between the
// first IDAT and xxx, and the decoder state immediately after the last Read
// call is that d.r is positioned between yy and crc1.
//
// This is the io.Reader handed to zlib.NewReader by decode.
func (d *decoder) Read(p []byte) (int, error) {
    if len(p) == 0 {
        return 0, nil
    }
    // Loop (rather than if) to skip any zero-length IDAT chunks.
    for d.idatLength == 0 {
        // We have exhausted an IDAT chunk. Verify the checksum of that chunk.
        if err := d.verifyChecksum(); err != nil {
            return 0, err
        }
        // Read the length and chunk type of the next chunk, and check that
        // it is an IDAT chunk.
        if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil {
            return 0, err
        }
        d.idatLength = binary.BigEndian.Uint32(d.tmp[:4])
        if string(d.tmp[4:8]) != "IDAT" {
            return 0, FormatError("not enough pixel data")
        }
        d.crc.Reset()
        d.crc.Write(d.tmp[4:8])
    }
    if int(d.idatLength) < 0 {
        return 0, UnsupportedError("IDAT chunk length overflow")
    }
    // Never read past the end of the current IDAT chunk.
    n, err := d.r.Read(p[:min(len(p), int(d.idatLength))])
    d.crc.Write(p[:n])
    d.idatLength -= uint32(n)
    return n, err
}
// decode decodes the IDAT data into an image.
//
// It wraps d (the IDAT-concatenating reader) in a zlib reader, then for each
// row: reads the filtered bytes, undoes the per-row filter, and converts
// bytes to pixels according to d.cb. Finally it checks that the zlib stream
// and the IDAT chunks end exactly together.
func (d *decoder) decode() (image.Image, error) {
    r, err := zlib.NewReader(d)
    if err != nil {
        return nil, err
    }
    defer r.Close()
    bitsPerPixel := 0
    maxPalette := uint8(0) // highest valid palette index (paletted types only)
    pixOffset := 0         // byte offset of the current row in the image's Pix slice
    // Exactly one of these typed image pointers is set below; img aliases it.
    var (
        gray     *image.Gray
        rgba     *image.RGBA
        paletted *image.Paletted
        nrgba    *image.NRGBA
        gray16   *image.Gray16
        rgba64   *image.RGBA64
        nrgba64  *image.NRGBA64
        img      image.Image
    )
    switch d.cb {
    case cbG1, cbG2, cbG4, cbG8:
        bitsPerPixel = d.depth
        gray = image.NewGray(image.Rect(0, 0, d.width, d.height))
        img = gray
    case cbGA8:
        bitsPerPixel = 16
        nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height))
        img = nrgba
    case cbTC8:
        bitsPerPixel = 24
        rgba = image.NewRGBA(image.Rect(0, 0, d.width, d.height))
        img = rgba
    case cbP1, cbP2, cbP4, cbP8:
        bitsPerPixel = d.depth
        paletted = image.NewPaletted(image.Rect(0, 0, d.width, d.height), d.palette)
        img = paletted
        // NOTE(review): if no PLTE chunk was seen, d.palette is nil and this
        // underflows to 255, accepting any index — confirm parseChunk rejects
        // that case for all paletted color types, not just cbP8.
        maxPalette = uint8(len(d.palette) - 1)
    case cbTCA8:
        bitsPerPixel = 32
        nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height))
        img = nrgba
    case cbG16:
        bitsPerPixel = 16
        gray16 = image.NewGray16(image.Rect(0, 0, d.width, d.height))
        img = gray16
    case cbGA16:
        bitsPerPixel = 32
        nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height))
        img = nrgba64
    case cbTC16:
        bitsPerPixel = 48
        rgba64 = image.NewRGBA64(image.Rect(0, 0, d.width, d.height))
        img = rgba64
    case cbTCA16:
        bitsPerPixel = 64
        nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height))
        img = nrgba64
    }
    bytesPerPixel := (bitsPerPixel + 7) / 8
    // cr and pr are the bytes for the current and previous row.
    // The +1 is for the per-row filter type, which is at cr[0].
    cr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8)
    pr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8)
    for y := 0; y < d.height; y++ {
        // Read the decompressed bytes.
        _, err := io.ReadFull(r, cr)
        if err != nil {
            return nil, err
        }
        // Apply the filter.
        cdat := cr[1:]
        pdat := pr[1:]
        switch cr[0] {
        case ftNone:
            // No-op.
        case ftSub:
            for i := bytesPerPixel; i < len(cdat); i++ {
                cdat[i] += cdat[i-bytesPerPixel]
            }
        case ftUp:
            for i, p := range pdat {
                cdat[i] += p
            }
        case ftAverage:
            // The first pixel has no left neighbor; average with zero.
            for i := 0; i < bytesPerPixel; i++ {
                cdat[i] += pdat[i] / 2
            }
            for i := bytesPerPixel; i < len(cdat); i++ {
                cdat[i] += uint8((int(cdat[i-bytesPerPixel]) + int(pdat[i])) / 2)
            }
        case ftPaeth:
            filterPaeth(cdat, pdat, bytesPerPixel)
        default:
            return nil, FormatError("bad filter type")
        }
        // Convert from bytes to colors.
        switch d.cb {
        case cbG1:
            for x := 0; x < d.width; x += 8 {
                b := cdat[x/8]
                for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 7) * 0xff})
                    b <<= 1
                }
            }
        case cbG2:
            for x := 0; x < d.width; x += 4 {
                b := cdat[x/4]
                for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 6) * 0x55})
                    b <<= 2
                }
            }
        case cbG4:
            for x := 0; x < d.width; x += 2 {
                b := cdat[x/2]
                for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 4) * 0x11})
                    b <<= 4
                }
            }
        case cbG8:
            copy(gray.Pix[pixOffset:], cdat)
            pixOffset += gray.Stride
        case cbGA8:
            for x := 0; x < d.width; x++ {
                ycol := cdat[2*x+0]
                nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, cdat[2*x+1]})
            }
        case cbTC8:
            // Expand 3-byte RGB to the image's 4-byte RGBA layout.
            pix, i, j := rgba.Pix, pixOffset, 0
            for x := 0; x < d.width; x++ {
                pix[i+0] = cdat[j+0]
                pix[i+1] = cdat[j+1]
                pix[i+2] = cdat[j+2]
                pix[i+3] = 0xff
                i += 4
                j += 3
            }
            pixOffset += rgba.Stride
        case cbP1:
            for x := 0; x < d.width; x += 8 {
                b := cdat[x/8]
                for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
                    idx := b >> 7
                    if idx > maxPalette {
                        return nil, FormatError("palette index out of range")
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 1
                }
            }
        case cbP2:
            for x := 0; x < d.width; x += 4 {
                b := cdat[x/4]
                for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
                    idx := b >> 6
                    if idx > maxPalette {
                        return nil, FormatError("palette index out of range")
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 2
                }
            }
        case cbP4:
            for x := 0; x < d.width; x += 2 {
                b := cdat[x/2]
                for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
                    idx := b >> 4
                    if idx > maxPalette {
                        return nil, FormatError("palette index out of range")
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 4
                }
            }
        case cbP8:
            // A full 256-entry palette cannot be exceeded by a byte index,
            // so the range check is skipped in that case.
            if maxPalette != 255 {
                for x := 0; x < d.width; x++ {
                    if cdat[x] > maxPalette {
                        return nil, FormatError("palette index out of range")
                    }
                }
            }
            copy(paletted.Pix[pixOffset:], cdat)
            pixOffset += paletted.Stride
        case cbTCA8:
            copy(nrgba.Pix[pixOffset:], cdat)
            pixOffset += nrgba.Stride
        case cbG16:
            for x := 0; x < d.width; x++ {
                ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
                gray16.SetGray16(x, y, color.Gray16{ycol})
            }
        case cbGA16:
            for x := 0; x < d.width; x++ {
                ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1])
                acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3])
                nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol})
            }
        case cbTC16:
            for x := 0; x < d.width; x++ {
                rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
                gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
                bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
                rgba64.SetRGBA64(x, y, color.RGBA64{rcol, gcol, bcol, 0xffff})
            }
        case cbTCA16:
            for x := 0; x < d.width; x++ {
                rcol := uint16(cdat[8*x+0])<<8 | uint16(cdat[8*x+1])
                gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3])
                bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5])
                acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7])
                nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol})
            }
        }
        // The current row for y is the previous row for y+1.
        pr, cr = cr, pr
    }
    // Check for EOF, to verify the zlib checksum.
    n, err := r.Read(pr[:1])
    if err != io.EOF {
        return nil, FormatError(err.Error())
    }
    if n != 0 || d.idatLength != 0 {
        return nil, FormatError("too much pixel data")
    }
    return img, nil
}
// parseIDAT decodes all pixel data starting at the current IDAT chunk and
// then verifies the checksum of the final IDAT chunk.
func (d *decoder) parseIDAT(length uint32) (err error) {
    d.idatLength = length
    if d.img, err = d.decode(); err != nil {
        return err
    }
    return d.verifyChecksum()
}
// parseIEND validates the terminating chunk, which must carry no data.
func (d *decoder) parseIEND(length uint32) error {
    if length == 0 {
        return d.verifyChecksum()
    }
    return FormatError("bad IEND length")
}
// parseChunk reads one chunk header, enforces the IHDR/PLTE/IDAT/IEND
// ordering rules via d.stage, and dispatches to the per-chunk parser.
// Unknown chunk types are consumed and CRC-checked but otherwise ignored.
func (d *decoder) parseChunk() error {
    // Read the length and chunk type.
    n, err := io.ReadFull(d.r, d.tmp[:8])
    if err != nil {
        return err
    }
    length := binary.BigEndian.Uint32(d.tmp[:4])
    d.crc.Reset()
    d.crc.Write(d.tmp[4:8])
    // Read the chunk data.
    switch string(d.tmp[4:8]) {
    case "IHDR":
        if d.stage != dsStart {
            return chunkOrderError
        }
        d.stage = dsSeenIHDR
        return d.parseIHDR(length)
    case "PLTE":
        if d.stage != dsSeenIHDR {
            return chunkOrderError
        }
        d.stage = dsSeenPLTE
        return d.parsePLTE(length)
    case "tRNS":
        if d.stage != dsSeenPLTE {
            return chunkOrderError
        }
        return d.parsetRNS(length)
    case "IDAT":
        // NOTE(review): only cbP8 is forced to have seen a PLTE before IDAT;
        // cbP1/cbP2/cbP4 without a PLTE pass this check — confirm intended.
        if d.stage < dsSeenIHDR || d.stage > dsSeenIDAT || (d.cb == cbP8 && d.stage == dsSeenIHDR) {
            return chunkOrderError
        }
        d.stage = dsSeenIDAT
        return d.parseIDAT(length)
    case "IEND":
        if d.stage != dsSeenIDAT {
            return chunkOrderError
        }
        d.stage = dsSeenIEND
        return d.parseIEND(length)
    }
    // Ignore this chunk (of a known length).
    var ignored [4096]byte
    for length > 0 {
        n, err = io.ReadFull(d.r, ignored[:min(len(ignored), int(length))])
        if err != nil {
            return err
        }
        d.crc.Write(ignored[:n])
        length -= uint32(n)
    }
    return d.verifyChecksum()
}
// verifyChecksum reads the 4-byte CRC that ends the current chunk and
// compares it against the CRC accumulated over the chunk's type and data.
func (d *decoder) verifyChecksum() error {
    if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil {
        return err
    }
    want := binary.BigEndian.Uint32(d.tmp[:4])
    if want != d.crc.Sum32() {
        return FormatError("invalid checksum")
    }
    return nil
}
// checkHeader consumes the 8-byte PNG signature and rejects non-PNG input.
func (d *decoder) checkHeader() error {
    if _, err := io.ReadFull(d.r, d.tmp[:len(pngHeader)]); err != nil {
        return err
    }
    if string(d.tmp[:len(pngHeader)]) == pngHeader {
        return nil
    }
    return FormatError("not a PNG file")
}
// Decode reads a PNG image from r and returns it as an image.Image.
// The type of Image returned depends on the PNG contents.
func Decode(r io.Reader) (image.Image, error) {
    d := &decoder{
        r:   r,
        crc: crc32.NewIEEE(),
    }
    err := d.checkHeader()
    for err == nil && d.stage != dsSeenIEND {
        err = d.parseChunk()
    }
    if err != nil {
        // A short read anywhere inside a PNG stream is a truncation error.
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        return nil, err
    }
    return d.img, nil
}
// DecodeConfig returns the color model and dimensions of a PNG image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
    d := &decoder{
        r:   r,
        crc: crc32.NewIEEE(),
    }
    if err := d.checkHeader(); err != nil {
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        return image.Config{}, err
    }
    // Parse chunks only until the color model is known: after IHDR for most
    // color types, after PLTE for 8-bit paletted images.
    for {
        if err := d.parseChunk(); err != nil {
            if err == io.EOF {
                err = io.ErrUnexpectedEOF
            }
            return image.Config{}, err
        }
        if d.stage == dsSeenIHDR && d.cb != cbP8 {
            break
        }
        if d.stage == dsSeenPLTE && d.cb == cbP8 {
            break
        }
    }
    var cm color.Model
    switch d.cb {
    case cbG1, cbG2, cbG4, cbG8:
        cm = color.GrayModel
    case cbGA8:
        cm = color.NRGBAModel
    case cbTC8:
        cm = color.RGBAModel
    case cbP1, cbP2, cbP4, cbP8:
        cm = d.palette
    case cbTCA8:
        cm = color.NRGBAModel
    case cbG16:
        cm = color.Gray16Model
    case cbGA16:
        cm = color.NRGBA64Model
    case cbTC16:
        cm = color.RGBA64Model
    case cbTCA16:
        cm = color.NRGBA64Model
    }
    return image.Config{
        ColorModel: cm,
        Width:      d.width,
        Height:     d.height,
    }, nil
}
// init registers PNG with the image package so image.Decode and
// image.DecodeConfig can sniff PNG streams by their 8-byte magic header.
func init() {
    image.RegisterFormat("png", pngHeader, Decode, DecodeConfig)
}
image/png: degrade gracefully for palette index values that aren't
defined by the PLTE chunk. Such pixels decode to opaque black,
which matches what libpng does.
Fixes issue 4319.
On my reading, the PNG spec isn't clear whether palette index values
outside of those defined by the PLTE chunk are an error, and if not,
what to do about them.
Libpng 1.5.3 falls back to opaque black. png_set_PLTE says:
/* Changed in libpng-1.2.1 to allocate PNG_MAX_PALETTE_LENGTH instead
* of num_palette entries, in case of an invalid PNG file that has
* too-large sample values.
*/
png_ptr->palette = (png_colorp)png_calloc(png_ptr,
PNG_MAX_PALETTE_LENGTH * png_sizeof(png_color));
ImageMagick 6.5.7 returns an error:
$ convert -version
Version: ImageMagick 6.5.7-8 2012-08-17 Q16 http://www.imagemagick.org
Copyright: Copyright (C) 1999-2009 ImageMagick Studio LLC
Features: OpenMP
$ convert packetloss.png x.bmp
convert: Invalid colormap index `packetloss.png' @ image.c/SyncImage/3849.
R=r
CC=golang-dev
http://codereview.appspot.com/6822065
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package png implements a PNG image decoder and encoder.
//
// The PNG specification is at http://www.w3.org/TR/PNG/.
package png
import (
"compress/zlib"
"encoding/binary"
"fmt"
"hash"
"hash/crc32"
"image"
"image/color"
"io"
)
// Color type, as per the PNG spec.
const (
    ctGrayscale      = 0
    ctTrueColor      = 2
    ctPaletted       = 3
    ctGrayscaleAlpha = 4
    ctTrueColorAlpha = 6
)

// A cb is a combination of color type and bit depth.
const (
    cbInvalid = iota
    cbG1
    cbG2
    cbG4
    cbG8
    cbGA8
    cbTC8
    cbP1
    cbP2
    cbP4
    cbP8
    cbTCA8
    cbG16
    cbGA16
    cbTC16
    cbTCA16
)

// Filter type, as per the PNG spec.
const (
    ftNone    = 0
    ftSub     = 1
    ftUp      = 2
    ftAverage = 3
    ftPaeth   = 4
    nFilter   = 5
)

// Decoding stage.
// The PNG specification says that the IHDR, PLTE (if present), IDAT and IEND
// chunks must appear in that order. There may be multiple IDAT chunks, and
// IDAT chunks must be sequential (i.e. they may not have any other chunks
// between them).
// http://www.w3.org/TR/PNG/#5ChunkOrdering
const (
    dsStart = iota
    dsSeenIHDR
    dsSeenPLTE
    dsSeenIDAT
    dsSeenIEND
)

// pngHeader is the 8-byte magic sequence that begins every PNG stream.
const pngHeader = "\x89PNG\r\n\x1a\n"

// decoder holds all state for decoding one PNG stream.
type decoder struct {
    r             io.Reader     // source of compressed PNG data
    img           image.Image   // the decoded image, set by parseIDAT
    crc           hash.Hash32   // running CRC-32 of the current chunk's type + data
    width, height int           // image dimensions from IHDR
    depth         int           // bit depth from IHDR
    palette       color.Palette // palette from PLTE; len may grow within its 256 cap
    cb            int           // combined color type / bit depth, one of the cb* constants
    stage         int           // chunk-ordering state, one of the ds* constants
    idatLength    uint32        // bytes remaining in the current IDAT chunk
    tmp           [3 * 256]byte // scratch buffer, large enough for a full PLTE chunk
}
// A FormatError reports that the input is not a valid PNG.
type FormatError string

// Error implements the error interface. The string(e) conversion is
// deliberate: formatting e itself would recurse back into Error.
func (e FormatError) Error() string { return "png: invalid format: " + string(e) }

// chunkOrderError is returned when chunks appear in an order forbidden by the spec.
var chunkOrderError = FormatError("chunk out of order")

// An UnsupportedError reports that the input uses a valid but unimplemented PNG feature.
type UnsupportedError string

func (e UnsupportedError) Error() string { return "png: unsupported feature: " + string(e) }
// min returns the smaller of a and b.
func min(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// parseIHDR reads and validates the 13-byte IHDR payload, recording the
// image dimensions, bit depth and combined color/depth code (d.cb), then
// verifies the chunk's CRC.
func (d *decoder) parseIHDR(length uint32) error {
    if length != 13 {
        return FormatError("bad IHDR length")
    }
    if _, err := io.ReadFull(d.r, d.tmp[:13]); err != nil {
        return err
    }
    d.crc.Write(d.tmp[:13])
    // Bytes 10..12 are compression, filter and interlace method; only the
    // zero value of each is implemented here.
    if d.tmp[10] != 0 || d.tmp[11] != 0 || d.tmp[12] != 0 {
        return UnsupportedError("compression, filter or interlace method")
    }
    w := int32(binary.BigEndian.Uint32(d.tmp[0:4]))
    h := int32(binary.BigEndian.Uint32(d.tmp[4:8]))
    if w < 0 || h < 0 {
        return FormatError("negative dimension")
    }
    // Reject dimensions whose pixel count does not fit in int (32-bit hosts).
    nPixels := int64(w) * int64(h)
    if nPixels != int64(int(nPixels)) {
        return UnsupportedError("dimension overflow")
    }
    d.cb = cbInvalid
    d.depth = int(d.tmp[8])
    // Map (bit depth, color type) to a cb* code; combinations not listed
    // here are invalid or unsupported and leave d.cb as cbInvalid.
    switch d.depth {
    case 1:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG1
        case ctPaletted:
            d.cb = cbP1
        }
    case 2:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG2
        case ctPaletted:
            d.cb = cbP2
        }
    case 4:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG4
        case ctPaletted:
            d.cb = cbP4
        }
    case 8:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG8
        case ctTrueColor:
            d.cb = cbTC8
        case ctPaletted:
            d.cb = cbP8
        case ctGrayscaleAlpha:
            d.cb = cbGA8
        case ctTrueColorAlpha:
            d.cb = cbTCA8
        }
    case 16:
        switch d.tmp[9] {
        case ctGrayscale:
            d.cb = cbG16
        case ctTrueColor:
            d.cb = cbTC16
        case ctGrayscaleAlpha:
            d.cb = cbGA16
        case ctTrueColorAlpha:
            d.cb = cbTCA16
        }
    }
    if d.cb == cbInvalid {
        return UnsupportedError(fmt.Sprintf("bit depth %d, color type %d", d.tmp[8], d.tmp[9]))
    }
    d.width, d.height = int(w), int(h)
    return d.verifyChecksum()
}
// parsePLTE reads the palette chunk. The palette slice is allocated with a
// full 256-entry backing array, padded with opaque black beyond the declared
// entries, and then re-sliced down to the declared length: later code
// (parsetRNS, decode's cbP* cases) can re-slice it back up within that
// capacity to degrade gracefully when pixels reference undeclared indices.
// Verifies the chunk CRC.
func (d *decoder) parsePLTE(length uint32) error {
    np := int(length / 3) // The number of palette entries.
    // A palette may not have more entries than 2^depth allows.
    if length%3 != 0 || np <= 0 || np > 256 || np > 1<<uint(d.depth) {
        return FormatError("bad PLTE length")
    }
    n, err := io.ReadFull(d.r, d.tmp[:3*np])
    if err != nil {
        return err
    }
    d.crc.Write(d.tmp[:n])
    switch d.cb {
    case cbP1, cbP2, cbP4, cbP8:
        d.palette = make(color.Palette, 256)
        for i := 0; i < np; i++ {
            d.palette[i] = color.RGBA{d.tmp[3*i+0], d.tmp[3*i+1], d.tmp[3*i+2], 0xff}
        }
        for i := np; i < 256; i++ {
            // Initialize the rest of the palette to opaque black. The spec isn't
            // clear whether palette index values outside of those defined by the PLTE
            // chunk is an error: libpng 1.5.13 falls back to opaque black, the
            // same as we do here, ImageMagick 6.5.7 returns an error.
            d.palette[i] = color.RGBA{0x00, 0x00, 0x00, 0xff}
        }
        d.palette = d.palette[:np]
    case cbTC8, cbTCA8, cbTC16, cbTCA16:
        // As per the PNG spec, a PLTE chunk is optional (and for practical purposes,
        // ignorable) for the ctTrueColor and ctTrueColorAlpha color types (section 4.1.2).
    default:
        return FormatError("PLTE, color type mismatch")
    }
    return d.verifyChecksum()
}
// parsetRNS reads the transparency chunk. For paletted images each byte is
// an alpha value for the corresponding palette entry. Verifies the chunk CRC.
func (d *decoder) parsetRNS(length uint32) error {
    if length > 256 {
        return FormatError("bad tRNS length")
    }
    n, err := io.ReadFull(d.r, d.tmp[:length])
    if err != nil {
        return err
    }
    d.crc.Write(d.tmp[:n])
    switch d.cb {
    case cbG8, cbG16:
        return UnsupportedError("grayscale transparency")
    case cbTC8, cbTC16:
        return UnsupportedError("truecolor transparency")
    case cbP1, cbP2, cbP4, cbP8:
        if len(d.palette) < n {
            // Grow within the 256-entry capacity allocated by parsePLTE; the
            // newly exposed entries are the opaque black written there.
            d.palette = d.palette[:n]
        }
        // Rewrite affected entries from opaque RGBA to non-premultiplied NRGBA.
        for i := 0; i < n; i++ {
            rgba := d.palette[i].(color.RGBA)
            d.palette[i] = color.NRGBA{rgba.R, rgba.G, rgba.B, d.tmp[i]}
        }
    case cbGA8, cbGA16, cbTCA8, cbTCA16:
        return FormatError("tRNS, color type mismatch")
    }
    return d.verifyChecksum()
}
// Read presents one or more IDAT chunks as one continuous stream (minus the
// intermediate chunk headers and footers). If the PNG data looked like:
// ... len0 IDAT xxx crc0 len1 IDAT yy crc1 len2 IEND crc2
// then this reader presents xxxyy. For well-formed PNG data, the decoder state
// immediately before the first Read call is that d.r is positioned between the
// first IDAT and xxx, and the decoder state immediately after the last Read
// call is that d.r is positioned between yy and crc1.
//
// This is the io.Reader handed to zlib.NewReader by decode.
func (d *decoder) Read(p []byte) (int, error) {
    if len(p) == 0 {
        return 0, nil
    }
    // Loop (rather than if) to skip any zero-length IDAT chunks.
    for d.idatLength == 0 {
        // We have exhausted an IDAT chunk. Verify the checksum of that chunk.
        if err := d.verifyChecksum(); err != nil {
            return 0, err
        }
        // Read the length and chunk type of the next chunk, and check that
        // it is an IDAT chunk.
        if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil {
            return 0, err
        }
        d.idatLength = binary.BigEndian.Uint32(d.tmp[:4])
        if string(d.tmp[4:8]) != "IDAT" {
            return 0, FormatError("not enough pixel data")
        }
        d.crc.Reset()
        d.crc.Write(d.tmp[4:8])
    }
    if int(d.idatLength) < 0 {
        return 0, UnsupportedError("IDAT chunk length overflow")
    }
    // Never read past the end of the current IDAT chunk.
    n, err := d.r.Read(p[:min(len(p), int(d.idatLength))])
    d.crc.Write(p[:n])
    d.idatLength -= uint32(n)
    return n, err
}
// decode decodes the IDAT data into an image.
//
// It wraps d (the IDAT-concatenating reader) in a zlib reader, then for each
// row: reads the filtered bytes, undoes the per-row filter, and converts
// bytes to pixels according to d.cb. Out-of-range palette indices are
// handled by growing paletted.Palette within the 256-entry capacity
// allocated by parsePLTE (the extra entries are opaque black), matching
// libpng's behavior. Finally it checks that the zlib stream and the IDAT
// chunks end exactly together.
//
// Fix: the cbP8 fast path previously skipped the palette-growing loop when
// len(paletted.Palette) != 255 was false, i.e. exactly when the palette had
// 255 entries — leaving a pixel index of 255 out of range and causing later
// palette lookups to index past the slice. The loop may only be skipped when
// the palette is already full (256 entries), which no byte index can exceed.
func (d *decoder) decode() (image.Image, error) {
    r, err := zlib.NewReader(d)
    if err != nil {
        return nil, err
    }
    defer r.Close()
    bitsPerPixel := 0
    pixOffset := 0 // byte offset of the current row in the image's Pix slice
    // Exactly one of these typed image pointers is set below; img aliases it.
    var (
        gray     *image.Gray
        rgba     *image.RGBA
        paletted *image.Paletted
        nrgba    *image.NRGBA
        gray16   *image.Gray16
        rgba64   *image.RGBA64
        nrgba64  *image.NRGBA64
        img      image.Image
    )
    switch d.cb {
    case cbG1, cbG2, cbG4, cbG8:
        bitsPerPixel = d.depth
        gray = image.NewGray(image.Rect(0, 0, d.width, d.height))
        img = gray
    case cbGA8:
        bitsPerPixel = 16
        nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height))
        img = nrgba
    case cbTC8:
        bitsPerPixel = 24
        rgba = image.NewRGBA(image.Rect(0, 0, d.width, d.height))
        img = rgba
    case cbP1, cbP2, cbP4, cbP8:
        bitsPerPixel = d.depth
        paletted = image.NewPaletted(image.Rect(0, 0, d.width, d.height), d.palette)
        img = paletted
    case cbTCA8:
        bitsPerPixel = 32
        nrgba = image.NewNRGBA(image.Rect(0, 0, d.width, d.height))
        img = nrgba
    case cbG16:
        bitsPerPixel = 16
        gray16 = image.NewGray16(image.Rect(0, 0, d.width, d.height))
        img = gray16
    case cbGA16:
        bitsPerPixel = 32
        nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height))
        img = nrgba64
    case cbTC16:
        bitsPerPixel = 48
        rgba64 = image.NewRGBA64(image.Rect(0, 0, d.width, d.height))
        img = rgba64
    case cbTCA16:
        bitsPerPixel = 64
        nrgba64 = image.NewNRGBA64(image.Rect(0, 0, d.width, d.height))
        img = nrgba64
    }
    bytesPerPixel := (bitsPerPixel + 7) / 8
    // cr and pr are the bytes for the current and previous row.
    // The +1 is for the per-row filter type, which is at cr[0].
    cr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8)
    pr := make([]uint8, 1+(bitsPerPixel*d.width+7)/8)
    for y := 0; y < d.height; y++ {
        // Read the decompressed bytes.
        _, err := io.ReadFull(r, cr)
        if err != nil {
            return nil, err
        }
        // Apply the filter.
        cdat := cr[1:]
        pdat := pr[1:]
        switch cr[0] {
        case ftNone:
            // No-op.
        case ftSub:
            for i := bytesPerPixel; i < len(cdat); i++ {
                cdat[i] += cdat[i-bytesPerPixel]
            }
        case ftUp:
            for i, p := range pdat {
                cdat[i] += p
            }
        case ftAverage:
            // The first pixel has no left neighbor; average with zero.
            for i := 0; i < bytesPerPixel; i++ {
                cdat[i] += pdat[i] / 2
            }
            for i := bytesPerPixel; i < len(cdat); i++ {
                cdat[i] += uint8((int(cdat[i-bytesPerPixel]) + int(pdat[i])) / 2)
            }
        case ftPaeth:
            filterPaeth(cdat, pdat, bytesPerPixel)
        default:
            return nil, FormatError("bad filter type")
        }
        // Convert from bytes to colors.
        switch d.cb {
        case cbG1:
            for x := 0; x < d.width; x += 8 {
                b := cdat[x/8]
                for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 7) * 0xff})
                    b <<= 1
                }
            }
        case cbG2:
            for x := 0; x < d.width; x += 4 {
                b := cdat[x/4]
                for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 6) * 0x55})
                    b <<= 2
                }
            }
        case cbG4:
            for x := 0; x < d.width; x += 2 {
                b := cdat[x/2]
                for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
                    gray.SetGray(x+x2, y, color.Gray{(b >> 4) * 0x11})
                    b <<= 4
                }
            }
        case cbG8:
            copy(gray.Pix[pixOffset:], cdat)
            pixOffset += gray.Stride
        case cbGA8:
            for x := 0; x < d.width; x++ {
                ycol := cdat[2*x+0]
                nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, cdat[2*x+1]})
            }
        case cbTC8:
            // Expand 3-byte RGB to the image's 4-byte RGBA layout.
            pix, i, j := rgba.Pix, pixOffset, 0
            for x := 0; x < d.width; x++ {
                pix[i+0] = cdat[j+0]
                pix[i+1] = cdat[j+1]
                pix[i+2] = cdat[j+2]
                pix[i+3] = 0xff
                i += 4
                j += 3
            }
            pixOffset += rgba.Stride
        case cbP1:
            for x := 0; x < d.width; x += 8 {
                b := cdat[x/8]
                for x2 := 0; x2 < 8 && x+x2 < d.width; x2++ {
                    idx := b >> 7
                    // Grow the palette (within its 256 cap) to cover this index.
                    if len(paletted.Palette) <= int(idx) {
                        paletted.Palette = paletted.Palette[:int(idx)+1]
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 1
                }
            }
        case cbP2:
            for x := 0; x < d.width; x += 4 {
                b := cdat[x/4]
                for x2 := 0; x2 < 4 && x+x2 < d.width; x2++ {
                    idx := b >> 6
                    if len(paletted.Palette) <= int(idx) {
                        paletted.Palette = paletted.Palette[:int(idx)+1]
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 2
                }
            }
        case cbP4:
            for x := 0; x < d.width; x += 2 {
                b := cdat[x/2]
                for x2 := 0; x2 < 2 && x+x2 < d.width; x2++ {
                    idx := b >> 4
                    if len(paletted.Palette) <= int(idx) {
                        paletted.Palette = paletted.Palette[:int(idx)+1]
                    }
                    paletted.SetColorIndex(x+x2, y, idx)
                    b <<= 4
                }
            }
        case cbP8:
            // Only a full 256-entry palette can never be exceeded by a byte
            // index; anything shorter (including 255) must still be checked
            // and grown. Was `!= 255`, which skipped growth for 255-entry
            // palettes and left index 255 out of range.
            if len(paletted.Palette) != 256 {
                for x := 0; x < d.width; x++ {
                    if len(paletted.Palette) <= int(cdat[x]) {
                        paletted.Palette = paletted.Palette[:int(cdat[x])+1]
                    }
                }
            }
            copy(paletted.Pix[pixOffset:], cdat)
            pixOffset += paletted.Stride
        case cbTCA8:
            copy(nrgba.Pix[pixOffset:], cdat)
            pixOffset += nrgba.Stride
        case cbG16:
            for x := 0; x < d.width; x++ {
                ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1])
                gray16.SetGray16(x, y, color.Gray16{ycol})
            }
        case cbGA16:
            for x := 0; x < d.width; x++ {
                ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1])
                acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3])
                nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol})
            }
        case cbTC16:
            for x := 0; x < d.width; x++ {
                rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1])
                gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3])
                bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5])
                rgba64.SetRGBA64(x, y, color.RGBA64{rcol, gcol, bcol, 0xffff})
            }
        case cbTCA16:
            for x := 0; x < d.width; x++ {
                rcol := uint16(cdat[8*x+0])<<8 | uint16(cdat[8*x+1])
                gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3])
                bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5])
                acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7])
                nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol})
            }
        }
        // The current row for y is the previous row for y+1.
        pr, cr = cr, pr
    }
    // Check for EOF, to verify the zlib checksum.
    n, err := r.Read(pr[:1])
    if err != io.EOF {
        return nil, FormatError(err.Error())
    }
    if n != 0 || d.idatLength != 0 {
        return nil, FormatError("too much pixel data")
    }
    return img, nil
}
// parseIDAT decodes all pixel data starting at the current IDAT chunk and
// then verifies the checksum of the final IDAT chunk.
func (d *decoder) parseIDAT(length uint32) (err error) {
    d.idatLength = length
    if d.img, err = d.decode(); err != nil {
        return err
    }
    return d.verifyChecksum()
}
// parseIEND validates the terminating chunk, which must carry no data.
func (d *decoder) parseIEND(length uint32) error {
    if length == 0 {
        return d.verifyChecksum()
    }
    return FormatError("bad IEND length")
}
// parseChunk reads one chunk header, enforces the IHDR/PLTE/IDAT/IEND
// ordering rules via d.stage, and dispatches to the per-chunk parser.
// Unknown chunk types are consumed and CRC-checked but otherwise ignored.
func (d *decoder) parseChunk() error {
    // Read the length and chunk type.
    n, err := io.ReadFull(d.r, d.tmp[:8])
    if err != nil {
        return err
    }
    length := binary.BigEndian.Uint32(d.tmp[:4])
    d.crc.Reset()
    d.crc.Write(d.tmp[4:8])
    // Read the chunk data.
    switch string(d.tmp[4:8]) {
    case "IHDR":
        if d.stage != dsStart {
            return chunkOrderError
        }
        d.stage = dsSeenIHDR
        return d.parseIHDR(length)
    case "PLTE":
        if d.stage != dsSeenIHDR {
            return chunkOrderError
        }
        d.stage = dsSeenPLTE
        return d.parsePLTE(length)
    case "tRNS":
        if d.stage != dsSeenPLTE {
            return chunkOrderError
        }
        return d.parsetRNS(length)
    case "IDAT":
        // NOTE(review): only cbP8 is forced to have seen a PLTE before IDAT;
        // cbP1/cbP2/cbP4 without a PLTE pass this check — confirm intended.
        if d.stage < dsSeenIHDR || d.stage > dsSeenIDAT || (d.cb == cbP8 && d.stage == dsSeenIHDR) {
            return chunkOrderError
        }
        d.stage = dsSeenIDAT
        return d.parseIDAT(length)
    case "IEND":
        if d.stage != dsSeenIDAT {
            return chunkOrderError
        }
        d.stage = dsSeenIEND
        return d.parseIEND(length)
    }
    // Ignore this chunk (of a known length).
    var ignored [4096]byte
    for length > 0 {
        n, err = io.ReadFull(d.r, ignored[:min(len(ignored), int(length))])
        if err != nil {
            return err
        }
        d.crc.Write(ignored[:n])
        length -= uint32(n)
    }
    return d.verifyChecksum()
}
// verifyChecksum reads the 4-byte CRC that ends the current chunk and
// compares it against the CRC accumulated over the chunk's type and data.
func (d *decoder) verifyChecksum() error {
    if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil {
        return err
    }
    want := binary.BigEndian.Uint32(d.tmp[:4])
    if want != d.crc.Sum32() {
        return FormatError("invalid checksum")
    }
    return nil
}
// checkHeader consumes the 8-byte PNG signature and rejects non-PNG input.
func (d *decoder) checkHeader() error {
    if _, err := io.ReadFull(d.r, d.tmp[:len(pngHeader)]); err != nil {
        return err
    }
    if string(d.tmp[:len(pngHeader)]) == pngHeader {
        return nil
    }
    return FormatError("not a PNG file")
}
// Decode reads a PNG image from r and returns it as an image.Image.
// The type of Image returned depends on the PNG contents.
func Decode(r io.Reader) (image.Image, error) {
    d := &decoder{
        r:   r,
        crc: crc32.NewIEEE(),
    }
    err := d.checkHeader()
    for err == nil && d.stage != dsSeenIEND {
        err = d.parseChunk()
    }
    if err != nil {
        // A short read anywhere inside a PNG stream is a truncation error.
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        return nil, err
    }
    return d.img, nil
}
// DecodeConfig returns the color model and dimensions of a PNG image without
// decoding the entire image.
func DecodeConfig(r io.Reader) (image.Config, error) {
    d := &decoder{
        r:   r,
        crc: crc32.NewIEEE(),
    }
    if err := d.checkHeader(); err != nil {
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
        }
        return image.Config{}, err
    }
    // Parse chunks only until the color model is known: after IHDR for most
    // color types, after PLTE for 8-bit paletted images.
    for {
        if err := d.parseChunk(); err != nil {
            if err == io.EOF {
                err = io.ErrUnexpectedEOF
            }
            return image.Config{}, err
        }
        if d.stage == dsSeenIHDR && d.cb != cbP8 {
            break
        }
        if d.stage == dsSeenPLTE && d.cb == cbP8 {
            break
        }
    }
    var cm color.Model
    switch d.cb {
    case cbG1, cbG2, cbG4, cbG8:
        cm = color.GrayModel
    case cbGA8:
        cm = color.NRGBAModel
    case cbTC8:
        cm = color.RGBAModel
    case cbP1, cbP2, cbP4, cbP8:
        cm = d.palette
    case cbTCA8:
        cm = color.NRGBAModel
    case cbG16:
        cm = color.Gray16Model
    case cbGA16:
        cm = color.NRGBA64Model
    case cbTC16:
        cm = color.RGBA64Model
    case cbTCA16:
        cm = color.NRGBA64Model
    }
    return image.Config{
        ColorModel: cm,
        Width:      d.width,
        Height:     d.height,
    }, nil
}
// init registers PNG with the image package so image.Decode and
// image.DecodeConfig can sniff PNG streams by their 8-byte magic header.
func init() {
    image.RegisterFormat("png", pngHeader, Decode, DecodeConfig)
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ioutil implements some I/O utility functions.
package ioutil
import (
"bytes"
"io"
"os"
"sort"
)
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
//
// bytes.Buffer.ReadFrom panics with bytes.ErrTooLarge when the buffer
// cannot grow; the deferred recover converts exactly that panic into an
// ordinary error and re-raises anything else.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
    buf := bytes.NewBuffer(make([]byte, 0, capacity))
    // If the buffer overflows, we will get bytes.ErrTooLarge.
    // Return that as an error. Any other panic remains.
    defer func() {
        e := recover()
        if e == nil {
            return
        }
        if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
            err = panicErr // assigned to the named result so callers see it
        } else {
            panic(e)
        }
    }()
    _, err = buf.ReadFrom(r)
    return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
    // bytes.MinRead of initial capacity avoids an immediate reallocation
    // for small inputs; the buffer grows as needed beyond that.
    return readAll(r, bytes.MinRead)
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func ReadFile(filename string) ([]byte, error) {
    f, err := os.Open(filename)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    // It's a good but not certain bet that FileInfo will tell us exactly how much to
    // read, so let's try it but be prepared for the answer to be wrong.
    var n int64
    // Fix: check Stat's error BEFORE calling fi.Size(). The previous code
    // evaluated fi.Size() first, which panics on the nil FileInfo that Stat
    // returns when it fails (e.g. the file vanished between Open and Stat).
    if fi, err := f.Stat(); err == nil {
        if size := fi.Size(); size < 2e9 { // Don't preallocate a huge buffer, just in case.
            n = size
        }
    }
    // As initial capacity for readAll, use n + a little extra in case Size is zero,
    // and to avoid another allocation after Read has filled the buffer. The readAll
    // call will read into its allocated internal buffer cheaply. If the size was
    // wrong, we'll either waste some space off the end or reallocate as needed, but
    // in the overwhelmingly common case we'll get it just right.
    return readAll(f, n+bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
f.Close()
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
return err
}
// byName implements sort.Interface, ordering a []os.FileInfo
// lexicographically by file name. Used by ReadDir.
type byName []os.FileInfo

// Len returns the number of entries.
func (f byName) Len() int { return len(f) }

// Less reports whether entry i's name sorts before entry j's.
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }

// Swap exchanges entries i and j.
func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func ReadDir(dirname string) ([]os.FileInfo, error) {
	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	// Readdir(-1) reads the entire directory in a single slice.
	list, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil, err
	}
	// Sort by name so callers get a deterministic listing.
	sort.Sort(byName(list))
	return list, nil
}
// nopCloser adds a no-op Close method to an embedded io.Reader.
type nopCloser struct {
	io.Reader
}

// Close implements io.Closer; it does nothing and always succeeds.
func (nopCloser) Close() error { return nil }

// NopCloser returns a ReadCloser with a no-op Close method wrapping
// the provided Reader r.
func NopCloser(r io.Reader) io.ReadCloser {
	return nopCloser{r}
}
type devNull int
// devNull implements ReaderFrom as an optimization so io.Copy to
// ioutil.Discard can avoid doing unnecessary work.
var _ io.ReaderFrom = devNull(0)
func (devNull) Write(p []byte) (int, error) {
return len(p), nil
}
var blackHole = make([]byte, 8192)
func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
readSize := 0
for {
readSize, err = r.Read(blackHole)
n += int64(readSize)
if err != nil {
if err == io.EOF {
return n, nil
}
return
}
}
panic("unreachable")
}
// Discard is an io.Writer on which all Write calls succeed
// without doing anything.
var Discard io.Writer = devNull(0)
io/ioutil: fix crash when Stat fails
Fixes issue 3320.
R=golang-dev, gri
CC=golang-dev
http://codereview.appspot.com/5824051
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ioutil implements some I/O utility functions.
package ioutil
import (
"bytes"
"io"
"os"
"sort"
)
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
buf := bytes.NewBuffer(make([]byte, 0, capacity))
// If the buffer overflows, we will get bytes.ErrTooLarge.
// Return that as an error. Any other panic remains.
defer func() {
e := recover()
if e == nil {
return
}
if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
err = panicErr
} else {
panic(e)
}
}()
_, err = buf.ReadFrom(r)
return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
return readAll(r, bytes.MinRead)
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func ReadFile(filename string) ([]byte, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// It's a good but not certain bet that FileInfo will tell us exactly how much to
	// read, so let's try it but be prepared for the answer to be wrong.
	var n int64
	// The Stat error is checked before fi is used: a failed Stat returns a
	// nil FileInfo, and calling fi.Size() on it would panic (issue 3320).
	// On Stat failure n simply stays 0 and readAll grows the buffer itself.
	if fi, err := f.Stat(); err == nil {
		// Don't preallocate a huge buffer, just in case.
		if size := fi.Size(); size < 1e9 {
			n = size
		}
	}
	// As initial capacity for readAll, use n + a little extra in case Size is zero,
	// and to avoid another allocation after Read has filled the buffer. The readAll
	// call will read into its allocated internal buffer cheaply. If the size was
	// wrong, we'll either waste some space off the end or reallocate as needed, but
	// in the overwhelmingly common case we'll get it just right.
	return readAll(f, n+bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func WriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
f.Close()
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
return err
}
// byName implements sort.Interface.
type byName []os.FileInfo
func (f byName) Len() int { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func ReadDir(dirname string) ([]os.FileInfo, error) {
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Sort(byName(list))
return list, nil
}
type nopCloser struct {
io.Reader
}
func (nopCloser) Close() error { return nil }
// NopCloser returns a ReadCloser with a no-op Close method wrapping
// the provided Reader r.
func NopCloser(r io.Reader) io.ReadCloser {
return nopCloser{r}
}
type devNull int
// devNull implements ReaderFrom as an optimization so io.Copy to
// ioutil.Discard can avoid doing unnecessary work.
var _ io.ReaderFrom = devNull(0)
func (devNull) Write(p []byte) (int, error) {
return len(p), nil
}
var blackHole = make([]byte, 8192)
func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
readSize := 0
for {
readSize, err = r.Read(blackHole)
n += int64(readSize)
if err != nil {
if err == io.EOF {
return n, nil
}
return
}
}
panic("unreachable")
}
// Discard is an io.Writer on which all Write calls succeed
// without doing anything.
var Discard io.Writer = devNull(0)
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
}
func TestServeFile(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Logf("skipping test; no /etc/hosts file")
return
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
if r.FormValue("override") == "1" {
w.Header().Set("Content-Type", ctype)
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override, want string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
get("0", "text/plain; charset=utf-8")
get("1", ctype)
}
func TestServeFileMimeType(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
func TestServeFileWithContentEncoding(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
// fakeFileInfo is an in-memory os.FileInfo used to build a fake
// filesystem for the FileServer tests.
type fakeFileInfo struct {
	dir      bool            // true if this entry is a directory
	basename string          // name returned by Name()
	modtime  time.Time       // time returned by ModTime()
	ents     []*fakeFileInfo // child entries (meaningful for directories)
	contents string          // file data; its length is the reported Size
}

func (f *fakeFileInfo) Name() string       { return f.basename }
func (f *fakeFileInfo) Sys() interface{}   { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool        { return f.dir }
func (f *fakeFileInfo) Size() int64        { return int64(len(f.contents)) }

// Mode reports 0755|ModeDir for directories and 0644 for regular files.
func (f *fakeFileInfo) Mode() os.FileMode {
	if f.dir {
		return 0755 | os.ModeDir
	}
	return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
for _, fi := range f.fi.ents {
fis = append(fis, fi)
}
return fis, nil
}
// fakeFS maps cleaned paths to fake file metadata, implementing the
// FileSystem interface for the directory-serving tests.
type fakeFS map[string]*fakeFileInfo

// Open normalizes name with path.Clean and looks it up in the map.
// Unknown names yield os.ErrNotExist.
func (fs fakeFS) Open(name string) (File, error) {
	name = path.Clean(name)
	f, ok := fs[name]
	if !ok {
		// Debug aid for test failures; println because no *testing.T
		// is available at this call site.
		println("fake filesystem didn't find file", name)
		return nil, os.ErrNotExist
	}
	return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
file string
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
}
for testName, tt := range tests {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
servec <- serveParam{
name: filepath.Base(tt.file),
content: f,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
}
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
if runtime.GOOS != "linux" {
t.Logf("skipping; linux-only test")
return
}
_, err := exec.LookPath("strace")
if err != nil {
t.Logf("skipping; strace not found in path")
return
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
var buf bytes.Buffer
child := exec.Command("strace", "-f", os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
err = child.Start()
if err != nil {
t.Logf("skipping; failed to start straced child: %v", err)
return
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
out := buf.String()
if !rx.MatchString(out) && !rxResume.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
// getBody sends req via DefaultClient and returns the response together
// with its fully-read body. Any transport or read failure aborts the test
// with t.Fatalf, so callers never see an error.
// NOTE(review): r.Body is not closed here; the test server teardown is
// presumably relied on to release the connection — confirm if reused.
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
	r, err := DefaultClient.Do(&req)
	if err != nil {
		t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
	}
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
	}
	return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
net/http: fix data race in test
The issue is that the server is still sending the body
when the client closes the fd.
Fixes issue 4329.
R=golang-dev, dave, rsc
CC=golang-dev
http://codereview.appspot.com/6822072
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
}
func TestServeFile(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
// TestDirJoin checks how Dir resolves open names against its root:
// leading slashes, missing slashes, and ".." traversal must all resolve to
// the same file inside the root, and a plain file used as a Dir root must
// resolve to itself. Skipped when /etc/hosts is absent (e.g. non-Unix).
func TestDirJoin(t *testing.T) {
	wfi, err := os.Stat("/etc/hosts")
	if err != nil {
		t.Logf("skipping test; no /etc/hosts file")
		return
	}
	// test opens name under d and requires it to be the very same file
	// as /etc/hosts (identity compared via os.SameFile, not metadata).
	test := func(d Dir, name string) {
		f, err := d.Open(name)
		if err != nil {
			t.Fatalf("open of %s: %v", name, err)
		}
		defer f.Close()
		gfi, err := f.Stat()
		if err != nil {
			t.Fatalf("stat of %s: %v", name, err)
		}
		if !os.SameFile(gfi, wfi) {
			t.Errorf("%s got different file", name)
		}
	}
	test(Dir("/etc/"), "/hosts")
	test(Dir("/etc/"), "hosts")
	test(Dir("/etc/"), "../../../../hosts")
	test(Dir("/etc"), "/hosts")
	test(Dir("/etc"), "hosts")
	test(Dir("/etc"), "../../../../hosts")
	// Not really directories, but since we use this trick in
	// ServeFile, test it:
	test(Dir("/etc/hosts"), "")
	test(Dir("/etc/hosts"), "/")
	test(Dir("/etc/hosts"), "../")
}
// TestEmptyDirOpenCWD verifies that Dir(""), Dir(".") and Dir("./") all
// resolve open names relative to the current working directory.
func TestEmptyDirOpenCWD(t *testing.T) {
	const name = "fs_test.go"
	for _, d := range []Dir{Dir(""), Dir("."), Dir("./")} {
		f, err := d.Open(name)
		if err != nil {
			t.Fatalf("open of %s: %v", name, err)
		}
		f.Close()
	}
}
// TestServeFileContentType verifies that ServeFile supplies a Content-Type
// when the handler has not set one, but leaves a handler-provided
// Content-Type untouched (chosen here via the "override" query parameter).
func TestServeFileContentType(t *testing.T) {
	const ctype = "icecream/chocolate"
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		if r.FormValue("override") == "1" {
			w.Header().Set("Content-Type", ctype)
		}
		ServeFile(w, r, "testdata/file")
	}))
	defer ts.Close()
	// get fetches with ?override=<override> and checks the response's
	// Content-Type header against want.
	get := func(override, want string) {
		resp, err := Get(ts.URL + "?override=" + override)
		if err != nil {
			t.Fatal(err)
		}
		if h := resp.Header.Get("Content-Type"); h != want {
			t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
		}
	}
	// No override: ServeFile's own type for the plain-text test file.
	get("0", "text/plain; charset=utf-8")
	// Override set: the explicitly-set type must win.
	get("1", ctype)
}
// TestServeFileMimeType verifies that ServeFile derives the Content-Type
// from the file extension (.css -> text/css).
func TestServeFileMimeType(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		ServeFile(w, r, "testdata/style.css")
	}))
	defer ts.Close()
	res, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	const want = "text/css; charset=utf-8"
	if h := res.Header.Get("Content-Type"); h != want {
		t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
	}
}
// TestServeFileFromCWD verifies that ServeFile can serve a path relative
// to the current working directory.
func TestServeFileFromCWD(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		ServeFile(w, r, "fs_test.go")
	}))
	defer ts.Close()
	res, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	if res.StatusCode != 200 {
		t.Fatalf("expected 200 OK, got %s", res.Status)
	}
}
// TestServeFileWithContentEncoding verifies that when a handler sets a
// Content-Encoding before calling ServeFile, no Content-Length is sent
// (the response's ContentLength is -1).
func TestServeFileWithContentEncoding(t *testing.T) {
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		w.Header().Set("Content-Encoding", "foo")
		ServeFile(w, r, "testdata/file")
	}))
	defer ts.Close()
	res, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	want := int64(-1)
	if res.ContentLength != want {
		t.Errorf("Content-Length mismatch: got %d, want %d", res.ContentLength, want)
	}
}
// TestServeIndexHtml verifies that FileServer serves testdata/index.html
// both when addressed directly and when its parent directory is requested
// (the implicit index.html behavior).
func TestServeIndexHtml(t *testing.T) {
	const want = "index.html says hello\n"
	ts := httptest.NewServer(FileServer(Dir(".")))
	defer ts.Close()
	for _, path := range []string{"/testdata/", "/testdata/index.html"} {
		res, err := Get(ts.URL + path)
		if err != nil {
			t.Fatal(err)
		}
		b, err := ioutil.ReadAll(res.Body)
		if err != nil {
			t.Fatal("reading Body:", err)
		}
		// Both URLs must yield the file's exact contents.
		if s := string(b); s != want {
			t.Errorf("for path %q got %q, want %q", path, s, want)
		}
		res.Body.Close()
	}
}
// TestFileServerZeroByte verifies that a request path containing a NUL
// byte is rejected rather than served.
func TestFileServerZeroByte(t *testing.T) {
	ts := httptest.NewServer(FileServer(Dir(".")))
	defer ts.Close()
	res, err := Get(ts.URL + "/..\x00")
	if err != nil {
		t.Fatal(err)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal("reading Body:", err)
	}
	// Any non-200 status is acceptable; serving the path is the bug.
	if res.StatusCode == 200 {
		t.Errorf("got status 200; want an error. Body is:\n%s", string(body))
	}
}
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
// fakeFile is an in-memory File backed by a fakeFileInfo; reads come from
// the embedded io.ReadSeeker.
type fakeFile struct {
	io.ReadSeeker
	fi   *fakeFileInfo
	path string // as opened
}

func (f *fakeFile) Close() error               { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }

// Readdir returns the directory's entries, or os.ErrInvalid when the
// underlying entry is not a directory. count is ignored: all entries are
// always returned.
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
	if !f.fi.dir {
		return nil, os.ErrInvalid
	}
	var entries []os.FileInfo
	for _, ent := range f.fi.ents {
		entries = append(entries, ent)
	}
	return entries, nil
}
// fakeFS is an in-memory FileSystem: a map from cleaned path to entry.
type fakeFS map[string]*fakeFileInfo

// Open looks up the cleaned name in the map, returning os.ErrNotExist
// (after a debug print) when absent, or a fakeFile over the contents.
func (fs fakeFS) Open(name string) (File, error) {
	name = path.Clean(name)
	fi, ok := fs[name]
	if !ok {
		println("fake filesystem didn't find file", name)
		return nil, os.ErrNotExist
	}
	return &fakeFile{ReadSeeker: strings.NewReader(fi.contents), fi: fi, path: name}, nil
}
// TestDirectoryIfNotModified verifies Last-Modified / If-Modified-Since
// handling when a directory request is served via its index.html: the
// Last-Modified must come from the index file (not the directory), a
// matching If-Modified-Since must yield 304, and bumping the file's
// modtime (directory unchanged) must yield 200 again.
func TestDirectoryIfNotModified(t *testing.T) {
	const indexContents = "I am a fake index.html file"
	fileMod := time.Unix(1000000000, 0).UTC()
	fileModStr := fileMod.Format(TimeFormat)
	// Deliberately different from fileMod so we can tell which one the
	// server uses for Last-Modified.
	dirMod := time.Unix(123, 0).UTC()
	indexFile := &fakeFileInfo{
		basename: "index.html",
		modtime:  fileMod,
		contents: indexContents,
	}
	fs := fakeFS{
		"/": &fakeFileInfo{
			dir:     true,
			modtime: dirMod,
			ents:    []*fakeFileInfo{indexFile},
		},
		"/index.html": indexFile,
	}
	ts := httptest.NewServer(FileServer(fs))
	defer ts.Close()
	// First fetch: directory request must serve the index contents.
	res, err := Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	b, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	if string(b) != indexContents {
		t.Fatalf("Got body %q; want %q", b, indexContents)
	}
	res.Body.Close()
	// Last-Modified must reflect the index file's modtime, not the dir's.
	lastMod := res.Header.Get("Last-Modified")
	if lastMod != fileModStr {
		t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
	}
	// Conditional request with the served Last-Modified: expect 304.
	req, _ := NewRequest("GET", ts.URL, nil)
	req.Header.Set("If-Modified-Since", lastMod)
	res, err = DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	if res.StatusCode != 304 {
		t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
	}
	res.Body.Close()
	// Advance the index.html file's modtime, but not the directory's.
	indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
	res, err = DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	if res.StatusCode != 200 {
		t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
	}
	res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
// TestServeContent is a table-driven test of ServeContent: content-type
// sniffing vs. explicit Content-Type, Last-Modified emission,
// If-Modified-Since and If-None-Match (304), byte-range requests (206),
// and If-Range mismatch (range ignored, full 200 response).
func TestServeContent(t *testing.T) {
	// serveParam describes one ServeContent invocation; the handler pulls
	// the next one off servec for each incoming request.
	type serveParam struct {
		name        string
		modtime     time.Time
		content     io.ReadSeeker
		contentType string
		etag        string
	}
	servec := make(chan serveParam, 1)
	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
		p := <-servec
		if p.etag != "" {
			w.Header().Set("ETag", p.etag)
		}
		if p.contentType != "" {
			w.Header().Set("Content-Type", p.contentType)
		}
		ServeContent(w, r, p.name, p.modtime, p.content)
	}))
	defer ts.Close()
	type testCase struct {
		file             string
		modtime          time.Time
		serveETag        string // optional
		serveContentType string // optional
		reqHeader        map[string]string
		wantLastMod      string
		wantContentType  string
		wantStatus       int
	}
	htmlModTime := mustStat(t, "testdata/index.html").ModTime()
	tests := map[string]testCase{
		"no_last_modified": {
			file:            "testdata/style.css",
			wantContentType: "text/css; charset=utf-8",
			wantStatus:      200,
		},
		"with_last_modified": {
			file:            "testdata/index.html",
			wantContentType: "text/html; charset=utf-8",
			modtime:         htmlModTime,
			wantLastMod:     htmlModTime.UTC().Format(TimeFormat),
			wantStatus:      200,
		},
		"not_modified_modtime": {
			file:    "testdata/style.css",
			modtime: htmlModTime,
			reqHeader: map[string]string{
				"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
			},
			wantStatus: 304,
		},
		"not_modified_modtime_with_contenttype": {
			file:             "testdata/style.css",
			serveContentType: "text/css", // explicit content type
			modtime:          htmlModTime,
			reqHeader: map[string]string{
				"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
			},
			wantStatus: 304,
		},
		"not_modified_etag": {
			file:      "testdata/style.css",
			serveETag: `"foo"`,
			reqHeader: map[string]string{
				"If-None-Match": `"foo"`,
			},
			wantStatus: 304,
		},
		"range_good": {
			file:      "testdata/style.css",
			serveETag: `"A"`,
			reqHeader: map[string]string{
				"Range": "bytes=0-4",
			},
			wantStatus:      StatusPartialContent,
			wantContentType: "text/css; charset=utf-8",
		},
		// An If-Range resource for entity "A", but entity "B" is now current.
		// The Range request should be ignored.
		"range_no_match": {
			file:      "testdata/style.css",
			serveETag: `"A"`,
			reqHeader: map[string]string{
				"Range":    "bytes=0-4",
				"If-Range": `"B"`,
			},
			wantStatus:      200,
			wantContentType: "text/css; charset=utf-8",
		},
	}
	for testName, tt := range tests {
		f, err := os.Open(tt.file)
		if err != nil {
			t.Fatalf("test %q: %v", testName, err)
		}
		defer f.Close()
		// Hand the handler its parameters for this request.
		servec <- serveParam{
			name:        filepath.Base(tt.file),
			content:     f,
			modtime:     tt.modtime,
			etag:        tt.serveETag,
			contentType: tt.serveContentType,
		}
		req, err := NewRequest("GET", ts.URL, nil)
		if err != nil {
			t.Fatal(err)
		}
		for k, v := range tt.reqHeader {
			req.Header.Set(k, v)
		}
		res, err := DefaultClient.Do(req)
		if err != nil {
			t.Fatal(err)
		}
		// Drain the body so the connection can be reused.
		io.Copy(ioutil.Discard, res.Body)
		res.Body.Close()
		if res.StatusCode != tt.wantStatus {
			t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
		}
		if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
			t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
		}
		if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
			t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
		}
	}
}
// TestLinuxSendfile verifies that sendfile is being used on Linux: it
// re-executes the test binary under strace as a child HTTP file server
// (see TestLinuxSendfileChild), makes a request against it, and scans the
// strace output for a sendfile(2)/sendfile64(2) call. Skips (via Logf)
// when not on Linux or when strace is unavailable.
func TestLinuxSendfile(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Logf("skipping; linux-only test")
		return
	}
	_, err := exec.LookPath("strace")
	if err != nil {
		t.Logf("skipping; strace not found in path")
		return
	}
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	// Hand the listening socket to the child as fd 3 via ExtraFiles.
	lnf, err := ln.(*net.TCPListener).File()
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	var buf bytes.Buffer
	child := exec.Command("strace", "-f", os.Args[0], "-test.run=TestLinuxSendfileChild")
	child.ExtraFiles = append(child.ExtraFiles, lnf)
	// GO_WANT_HELPER_PROCESS tells the child it is the helper, not a test.
	child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
	child.Stdout = &buf
	child.Stderr = &buf
	err = child.Start()
	if err != nil {
		t.Logf("skipping; failed to start straced child: %v", err)
		return
	}
	res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
	if err != nil {
		t.Fatalf("http client error: %v", err)
	}
	_, err = io.Copy(ioutil.Discard, res.Body)
	if err != nil {
		t.Fatalf("client body read error: %v", err)
	}
	res.Body.Close()
	// Force child to exit cleanly.
	Get(fmt.Sprintf("http://%s/quit", ln.Addr()))
	child.Wait()
	// Accept either a complete sendfile line or a strace "resumed" line
	// (the call may be split across trace lines under -f).
	rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
	rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
	out := buf.String()
	if !rx.MatchString(out) && !rxResume.MatchString(out) {
		t.Errorf("no sendfile system call found in:\n%s", out)
	}
}
// getBody performs req via DefaultClient and returns the response along
// with its fully-read body, failing the test on any transport or read
// error. testName is used only to label failure messages.
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
	res, err := DefaultClient.Do(&req)
	if err != nil {
		t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
	}
	return res, body
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
	// Run only when explicitly launched as the helper process.
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	defer os.Exit(0)
	// fd 3 is the listener the parent handed down via ExtraFiles.
	fd3 := os.NewFile(3, "ephemeral-port-listener")
	ln, err := net.FileListener(fd3)
	if err != nil {
		panic(err)
	}
	mux := NewServeMux()
	mux.Handle("/", FileServer(Dir("testdata")))
	// "/quit" lets the parent shut this process down cleanly.
	mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
		os.Exit(0)
	})
	s := &Server{Handler: mux}
	err = s.Serve(ln)
	if err != nil {
		panic(err)
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"fmt"
"io"
"io/ioutil"
"runtime"
"testing"
"time"
)
// isTimeout reports whether err is a net Error whose Timeout flag is set.
func isTimeout(err error) bool {
	ne, ok := err.(Error)
	if !ok {
		return false
	}
	return ne.Timeout()
}
// copyRes carries the result of an io.Copy performed in a helper
// goroutine: bytes copied, the terminating error, and elapsed time.
type copyRes struct {
	n   int64
	err error
	d   time.Duration
}
// TestAcceptTimeout verifies listener deadlines: Accept must fail with a
// timeout once the deadline is past, keep failing on repeated calls, and
// block again after the deadline is cleared — then fail with errClosing
// (not a timeout) when the listener is closed.
func TestAcceptTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t).(*TCPListener)
	defer ln.Close()
	// A deadline already in the past must make Accept time out at once.
	ln.SetDeadline(time.Now().Add(-1 * time.Second))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// The deadline is persistent, not one-shot.
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline must also produce timeouts (twice again).
	ln.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// Clearing the deadline: Accept should block until Close.
	ln.SetDeadline(noDeadline)
	errc := make(chan error)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Accept() to not return, but it returned with %v\n", err)
	default:
	}
	ln.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Accept: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// TestReadTimeout verifies read deadlines on a TCP connection: a past
// deadline makes Read time out repeatedly, a short future deadline does
// the same, and after clearing the read deadline Read blocks until Close,
// then fails with errClosing. Note the write deadline set in the past
// must not affect the blocking Read.
func TestReadTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t)
	defer ln.Close()
	c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
	if err != nil {
		t.Fatalf("Connect: %v", err)
	}
	defer c.Close()
	// Overall deadline far in the future; the read deadline (in the past)
	// must take precedence for reads.
	c.SetDeadline(time.Now().Add(time.Hour))
	c.SetReadDeadline(time.Now().Add(-1 * time.Second))
	buf := make([]byte, 1)
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// Timeouts persist across calls.
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline must also time out, twice.
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// Clear the read deadline; a past *write* deadline must not make the
	// Read return early.
	c.SetReadDeadline(noDeadline)
	c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
	errc := make(chan error)
	go func() {
		_, err := c.Read(buf)
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Read() to not return, but it returned with %v\n", err)
	default:
	}
	c.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Read: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// TestWriteTimeout verifies write deadlines on a TCP connection. Because
// writes succeed until kernel socket buffers fill, each phase writes in a
// loop until the timeout error surfaces. After clearing the write
// deadline, writes continue until Close, which must yield errClosing; a
// past *read* deadline must not affect the writes.
func TestWriteTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t)
	defer ln.Close()
	c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
	if err != nil {
		t.Fatalf("Connect: %v", err)
	}
	defer c.Close()
	// Overall deadline far in the future; write deadline in the past.
	c.SetDeadline(time.Now().Add(time.Hour))
	c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
	buf := make([]byte, 4096)
	// writeUntilTimeout writes until a timeout error occurs; any other
	// error fails the test.
	writeUntilTimeout := func() {
		for {
			_, err := c.Write(buf)
			if err != nil {
				if isTimeout(err) {
					return
				}
				t.Fatalf("Write: expected err %v, got %v", errTimeout, err)
			}
		}
	}
	writeUntilTimeout()
	// A short future deadline must also time out; the timeout persists
	// across calls (hence two invocations).
	c.SetDeadline(time.Now().Add(10 * time.Millisecond))
	writeUntilTimeout()
	writeUntilTimeout()
	// Clear the write deadline; a past read deadline must not stop writes.
	c.SetWriteDeadline(noDeadline)
	c.SetReadDeadline(time.Now().Add(-1 * time.Second))
	errc := make(chan error)
	go func() {
		for {
			_, err := c.Write(buf)
			if err != nil {
				errc <- err
			}
		}
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Write() to not return, but it returned with %v\n", err)
	default:
	}
	c.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Write: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// testTimeout dials net/addr (the parameter `net` shadows the package
// name), sets a 100ms read deadline, and requires the Read (or ReadFrom,
// when readFrom is true) to time out within roughly 50-250ms; a 1s outer
// guard catches a hung read.
func testTimeout(t *testing.T, net, addr string, readFrom bool) {
	c, err := Dial(net, addr)
	if err != nil {
		t.Errorf("Dial(%q, %q) failed: %v", net, addr, err)
		return
	}
	defer c.Close()
	what := "Read"
	if readFrom {
		what = "ReadFrom"
	}
	errc := make(chan error, 1)
	go func() {
		t0 := time.Now()
		c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
		var b [100]byte
		var n int
		var err error
		if readFrom {
			n, _, err = c.(PacketConn).ReadFrom(b[0:])
		} else {
			n, err = c.Read(b[0:])
		}
		t1 := time.Now()
		// Must read nothing and fail with a timeout.
		if n != 0 || err == nil || !err.(Error).Timeout() {
			errc <- fmt.Errorf("%s(%q, %q) did not return 0, timeout: %v, %v", what, net, addr, n, err)
			return
		}
		// Elapsed time must be near the 100ms deadline (upper bound
		// only enforced in non-short mode).
		if dt := t1.Sub(t0); dt < 50*time.Millisecond || !testing.Short() && dt > 250*time.Millisecond {
			errc <- fmt.Errorf("%s(%q, %q) took %s, expected 0.1s", what, net, addr, dt)
			return
		}
		errc <- nil
	}()
	select {
	case err := <-errc:
		if err != nil {
			t.Error(err)
		}
	case <-time.After(1 * time.Second):
		t.Errorf("%s(%q, %q) took over 1 second, expected 0.1s", what, net, addr)
	}
}
// TestTimeoutUDP checks read deadlines on UDP against a datagram server
// that never replies, exercising both Read and ReadFrom.
func TestTimeoutUDP(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	// set up a listener that won't talk back
	listening := make(chan string)
	done := make(chan int)
	go runDatagramPacketConnServer(t, "udp", "127.0.0.1:0", listening, done)
	addr := <-listening
	for _, readFrom := range []bool{false, true} {
		testTimeout(t, "udp", addr, readFrom)
	}
	<-done
}
// TestTimeoutTCP checks read deadlines on TCP against a stream server
// that never talks back.
func TestTimeoutTCP(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	// set up a listener that won't talk back
	ready := make(chan string)
	finished := make(chan int)
	go runStreamConnServer(t, "tcp", "127.0.0.1:0", ready, finished)
	addr := <-ready
	testTimeout(t, "tcp", addr, false)
	<-finished
}
// TestDeadlineReset verifies that clearing a listener deadline (setting
// noDeadline) cancels a previously armed one: Accept must block rather
// than fire the old 1-minute timer.
func TestDeadlineReset(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	tl := ln.(*TCPListener)
	tl.SetDeadline(time.Now().Add(1 * time.Minute))
	tl.SetDeadline(noDeadline) // reset it
	errc := make(chan error, 1)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	select {
	case <-time.After(50 * time.Millisecond):
		// Pass.
	case err := <-errc:
		// Accept should never return; we never
		// connected to it.
		t.Errorf("unexpected return from Accept; err=%v", err)
	}
}
// TestTimeoutAccept verifies that a 100ms listener deadline actually
// makes Accept return: waiting more than 1 second is a failure.
func TestTimeoutAccept(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	tl := ln.(*TCPListener)
	tl.SetDeadline(time.Now().Add(100 * time.Millisecond))
	errc := make(chan error, 1)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	select {
	case <-time.After(1 * time.Second):
		// Accept shouldn't block indefinitely
		t.Errorf("Accept didn't return in an expected time")
	case <-errc:
		// Pass.
	}
}
// TestReadWriteDeadline verifies that independent read (50ms) and write
// (250ms) deadlines on the same connection each fire close to their own
// time: a reader and a writer goroutine run concurrently and check their
// observed elapsed times against the respective deadlines.
func TestReadWriteDeadline(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	if !canCancelIO {
		t.Logf("skipping test on this system")
		return
	}
	const (
		readTimeout  = 50 * time.Millisecond
		writeTimeout = 250 * time.Millisecond
	)
	// checkTimeout requires the elapsed time since start to be within
	// [-30ms, +150ms] of should (upper bound only in non-short mode).
	checkTimeout := func(command string, start time.Time, should time.Duration) {
		is := time.Now().Sub(start)
		d := is - should
		if d < -30*time.Millisecond || !testing.Short() && 150*time.Millisecond < d {
			t.Errorf("%s timeout test failed: is=%v should=%v\n", command, is, should)
		}
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("ListenTCP on :0: %v", err)
	}
	lnquit := make(chan bool)
	// Server side: accept one connection and hold it open (the deferred
	// Close runs when this goroutine exits after lnquit is received).
	go func() {
		c, err := ln.Accept()
		if err != nil {
			t.Fatalf("Accept: %v", err)
		}
		defer c.Close()
		lnquit <- true
	}()
	c, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	start := time.Now()
	err = c.SetReadDeadline(start.Add(readTimeout))
	if err != nil {
		t.Fatalf("SetReadDeadline: %v", err)
	}
	err = c.SetWriteDeadline(start.Add(writeTimeout))
	if err != nil {
		t.Fatalf("SetWriteDeadline: %v", err)
	}
	quit := make(chan bool)
	// Reader: nothing is ever sent, so Read must fail at ~readTimeout.
	go func() {
		var buf [10]byte
		_, err := c.Read(buf[:])
		if err == nil {
			t.Errorf("Read should not succeed")
		}
		checkTimeout("Read", start, readTimeout)
		quit <- true
	}()
	// Writer: fill buffers until the write deadline fires at
	// ~writeTimeout.
	go func() {
		var buf [10000]byte
		for {
			_, err := c.Write(buf[:])
			if err != nil {
				break
			}
		}
		checkTimeout("Write", start, writeTimeout)
		quit <- true
	}()
	<-quit
	<-quit
	<-lnquit
}
// neverEnding is an io.Reader producing an endless stream of one byte
// value.
type neverEnding byte

// Read fills p entirely with the receiver's byte; it never fails and
// never reports EOF.
func (b neverEnding) Read(p []byte) (n int, err error) {
	for i := 0; i < len(p); i++ {
		p[i] = byte(b)
	}
	return len(p), nil
}
// TestVariousDeadlines1Proc runs the deadline matrix with GOMAXPROCS=1.
func TestVariousDeadlines1Proc(t *testing.T) {
	testVariousDeadlines(t, 1)
}
// TestVariousDeadlines4Proc runs the deadline matrix with GOMAXPROCS=4.
func TestVariousDeadlines4Proc(t *testing.T) {
	testVariousDeadlines(t, 4)
}
// testVariousDeadlines exercises client read deadlines from 1ns up to 1s
// against a server that writes as fast as it can, under the given
// GOMAXPROCS. Each client run sets a deadline, copies until it fires, and
// must observe a timeout within a 2s wall-clock guard.
//
// Fix: testsDone's select previously had no default case, so it blocked
// until donec was closed; an Accept failure in mid-test would hang the
// server goroutine instead of reporting the error via t.Fatalf.
func testVariousDeadlines(t *testing.T, maxProcs int) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
	ln := newLocalListener(t)
	defer ln.Close()
	donec := make(chan struct{})
	defer close(donec)
	// testsDone polls (non-blocking) whether the test body has finished.
	testsDone := func() bool {
		select {
		case <-donec:
			return true
		default:
			return false
		}
	}
	// The server, with no timeouts of its own, sending bytes to clients
	// as fast as it can.
	servec := make(chan copyRes)
	go func() {
		for {
			c, err := ln.Accept()
			if err != nil {
				// Expected once the deferred close runs; any
				// earlier failure is a real error.
				if !testsDone() {
					t.Fatalf("Accept: %v", err)
				}
				return
			}
			go func() {
				t0 := time.Now()
				n, err := io.Copy(c, neverEnding('a'))
				d := time.Since(t0)
				c.Close()
				servec <- copyRes{n, err, d}
			}()
		}
	}()
	for _, timeout := range []time.Duration{
		1 * time.Nanosecond,
		2 * time.Nanosecond,
		5 * time.Nanosecond,
		50 * time.Nanosecond,
		100 * time.Nanosecond,
		200 * time.Nanosecond,
		500 * time.Nanosecond,
		750 * time.Nanosecond,
		1 * time.Microsecond,
		5 * time.Microsecond,
		25 * time.Microsecond,
		250 * time.Microsecond,
		500 * time.Microsecond,
		1 * time.Millisecond,
		5 * time.Millisecond,
		100 * time.Millisecond,
		250 * time.Millisecond,
		500 * time.Millisecond,
		1 * time.Second,
	} {
		numRuns := 3
		if testing.Short() {
			numRuns = 1
			if timeout > 500*time.Microsecond {
				continue
			}
		}
		for run := 0; run < numRuns; run++ {
			name := fmt.Sprintf("%v run %d/%d", timeout, run+1, numRuns)
			t.Log(name)
			c, err := Dial("tcp", ln.Addr().String())
			if err != nil {
				t.Fatalf("Dial: %v", err)
			}
			// Client: read with the deadline armed, then report
			// what happened.
			clientc := make(chan copyRes)
			go func() {
				t0 := time.Now()
				c.SetDeadline(t0.Add(timeout))
				n, err := io.Copy(ioutil.Discard, c)
				d := time.Since(t0)
				c.Close()
				clientc <- copyRes{n, err, d}
			}()
			const tooLong = 2000 * time.Millisecond
			select {
			case res := <-clientc:
				if isTimeout(res.err) {
					t.Logf("for %v, good client timeout after %v, reading %d bytes", name, res.d, res.n)
				} else {
					t.Fatalf("for %v: client Copy = %d, %v (want timeout)", name, res.n, res.err)
				}
			case <-time.After(tooLong):
				t.Fatalf("for %v: timeout (%v) waiting for client to timeout (%v) reading", name, tooLong, timeout)
			}
			select {
			case res := <-servec:
				t.Logf("for %v: server in %v wrote %d, %v", name, res.d, res.n, res.err)
			case <-time.After(tooLong):
				t.Fatalf("for %v, timeout waiting for server to finish writing", name)
			}
		}
	}
}
// TestReadDeadlineDataAvailable tests that read deadlines work, even
// if there's data ready to be read.
func TestReadDeadlineDataAvailable(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	servec := make(chan copyRes)
	const msg = "data client shouldn't read, even though it it'll be waiting"
	// Server: write msg and report how much was written.
	go func() {
		c, err := ln.Accept()
		if err != nil {
			t.Fatalf("Accept: %v", err)
		}
		defer c.Close()
		n, err := c.Write([]byte(msg))
		servec <- copyRes{n: int64(n), err: err}
	}()
	c, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	// Wait until the server has written msg, so data is pending locally.
	if res := <-servec; res.err != nil || res.n != int64(len(msg)) {
		t.Fatalf("unexpected server Write: n=%d, err=%d; want n=%d, err=nil", res.n, res.err, len(msg))
	}
	c.SetReadDeadline(time.Now().Add(-5 * time.Second)) // in the past
	buf := make([]byte, len(msg)/2)
	// Despite the buffered data, the expired deadline must win.
	n, err := c.Read(buf)
	if n > 0 || !isTimeout(err) {
		t.Fatalf("client read = %d (%q) err=%v; want 0, timeout", n, buf[:n], err)
	}
}
// TestWriteDeadlineBufferAvailable tests that write deadlines work, even
// if there's buffer space available to write.
func TestWriteDeadlineBufferAvailable(t *testing.T) {
	ln := newLocalListener(t)
	defer ln.Close()
	servec := make(chan copyRes)
	// Server: with an already-expired write deadline, even a one-byte
	// write into an empty buffer must fail with a timeout.
	go func() {
		c, err := ln.Accept()
		if err != nil {
			t.Fatalf("Accept: %v", err)
		}
		defer c.Close()
		c.SetWriteDeadline(time.Now().Add(-5 * time.Second)) // in the past
		n, err := c.Write([]byte{'x'})
		servec <- copyRes{n: int64(n), err: err}
	}()
	c, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	res := <-servec
	if res.n != 0 {
		t.Errorf("Write = %d; want 0", res.n)
	}
	if !isTimeout(res.err) {
		t.Errorf("Write error = %v; want timeout", res.err)
	}
}
// TestProlongTimeout tests concurrent deadline modification.
// Known to cause data races in the past.
func TestProlongTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t)
	defer ln.Close()
	connected := make(chan bool)
	// Server: a writer goroutine and a reader loop both repeatedly push
	// the deadline forward on the same connection, racing deliberately.
	go func() {
		s, err := ln.Accept()
		connected <- true
		if err != nil {
			t.Fatalf("ln.Accept: %v", err)
		}
		defer s.Close()
		s.SetDeadline(time.Now().Add(time.Hour))
		go func() {
			var buf [4096]byte
			for {
				_, err := s.Write(buf[:])
				if err != nil {
					break
				}
				s.SetDeadline(time.Now().Add(time.Hour))
			}
		}()
		buf := make([]byte, 1)
		for {
			_, err := s.Read(buf)
			if err != nil {
				break
			}
			s.SetDeadline(time.Now().Add(time.Hour))
		}
	}()
	c, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("DialTCP: %v", err)
	}
	defer c.Close()
	<-connected
	// Feed the server's read loop; the test passes if no race/crash
	// occurs while deadlines are being prolonged concurrently.
	for i := 0; i < 1024; i++ {
		var buf [1]byte
		c.Write(buf[:])
	}
}
net: report Accept error during TestVariousDeadlines1Proc test
R=golang-dev
CC=golang-dev
https://codereview.appspot.com/6868057
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"fmt"
"io"
"io/ioutil"
"runtime"
"testing"
"time"
)
// isTimeout reports whether err is a net Error whose Timeout flag is set.
func isTimeout(err error) bool {
	ne, ok := err.(Error)
	if !ok {
		return false
	}
	return ne.Timeout()
}
// copyRes carries the result of an io.Copy performed in a helper
// goroutine: bytes copied, the terminating error, and elapsed time.
type copyRes struct {
	n   int64
	err error
	d   time.Duration
}
// TestAcceptTimeout verifies listener deadlines: Accept must fail with a
// timeout once the deadline is past, keep failing on repeated calls, and
// block again after the deadline is cleared — then fail with errClosing
// (not a timeout) when the listener is closed.
func TestAcceptTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t).(*TCPListener)
	defer ln.Close()
	// A deadline already in the past must make Accept time out at once.
	ln.SetDeadline(time.Now().Add(-1 * time.Second))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// The deadline is persistent, not one-shot.
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline must also produce timeouts (twice again).
	ln.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// Clearing the deadline: Accept should block until Close.
	ln.SetDeadline(noDeadline)
	errc := make(chan error)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Accept() to not return, but it returned with %v\n", err)
	default:
	}
	ln.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Accept: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// TestReadTimeout verifies read deadlines on a TCP connection: a past
// deadline makes Read time out repeatedly, a short future deadline does
// the same, and after clearing the read deadline Read blocks until Close,
// then fails with errClosing. Note the write deadline set in the past
// must not affect the blocking Read.
func TestReadTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t)
	defer ln.Close()
	c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
	if err != nil {
		t.Fatalf("Connect: %v", err)
	}
	defer c.Close()
	// Overall deadline far in the future; the read deadline (in the past)
	// must take precedence for reads.
	c.SetDeadline(time.Now().Add(time.Hour))
	c.SetReadDeadline(time.Now().Add(-1 * time.Second))
	buf := make([]byte, 1)
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// Timeouts persist across calls.
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline must also time out, twice.
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// Clear the read deadline; a past *write* deadline must not make the
	// Read return early.
	c.SetReadDeadline(noDeadline)
	c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
	errc := make(chan error)
	go func() {
		_, err := c.Read(buf)
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Read() to not return, but it returned with %v\n", err)
	default:
	}
	c.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Read: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// TestWriteTimeout verifies write deadlines on a TCP connection. Because
// writes succeed until kernel socket buffers fill, each phase writes in a
// loop until the timeout error surfaces. After clearing the write
// deadline, writes continue until Close, which must yield errClosing; a
// past *read* deadline must not affect the writes.
func TestWriteTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln := newLocalListener(t)
	defer ln.Close()
	c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
	if err != nil {
		t.Fatalf("Connect: %v", err)
	}
	defer c.Close()
	// Overall deadline far in the future; write deadline in the past.
	c.SetDeadline(time.Now().Add(time.Hour))
	c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
	buf := make([]byte, 4096)
	// writeUntilTimeout writes until a timeout error occurs; any other
	// error fails the test.
	writeUntilTimeout := func() {
		for {
			_, err := c.Write(buf)
			if err != nil {
				if isTimeout(err) {
					return
				}
				t.Fatalf("Write: expected err %v, got %v", errTimeout, err)
			}
		}
	}
	writeUntilTimeout()
	// A short future deadline must also time out; the timeout persists
	// across calls (hence two invocations).
	c.SetDeadline(time.Now().Add(10 * time.Millisecond))
	writeUntilTimeout()
	writeUntilTimeout()
	// Clear the write deadline; a past read deadline must not stop writes.
	c.SetWriteDeadline(noDeadline)
	c.SetReadDeadline(time.Now().Add(-1 * time.Second))
	errc := make(chan error)
	go func() {
		for {
			_, err := c.Write(buf)
			if err != nil {
				errc <- err
			}
		}
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Write() to not return, but it returned with %v\n", err)
	default:
	}
	c.Close()
	if err := <-errc; err.(*OpError).Err != errClosing {
		t.Fatalf("Write: expected err %v, got %v", errClosing, err.(*OpError).Err)
	}
}
// testTimeout dials net/addr (the parameter `net` shadows the package
// name), sets a 100ms read deadline, and requires the Read (or ReadFrom,
// when readFrom is true) to time out within roughly 50-250ms; a 1s outer
// guard catches a hung read.
func testTimeout(t *testing.T, net, addr string, readFrom bool) {
	c, err := Dial(net, addr)
	if err != nil {
		t.Errorf("Dial(%q, %q) failed: %v", net, addr, err)
		return
	}
	defer c.Close()
	what := "Read"
	if readFrom {
		what = "ReadFrom"
	}
	errc := make(chan error, 1)
	go func() {
		t0 := time.Now()
		c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
		var b [100]byte
		var n int
		var err error
		if readFrom {
			n, _, err = c.(PacketConn).ReadFrom(b[0:])
		} else {
			n, err = c.Read(b[0:])
		}
		t1 := time.Now()
		// Must read nothing and fail with a timeout.
		if n != 0 || err == nil || !err.(Error).Timeout() {
			errc <- fmt.Errorf("%s(%q, %q) did not return 0, timeout: %v, %v", what, net, addr, n, err)
			return
		}
		// Elapsed time must be near the 100ms deadline (upper bound
		// only enforced in non-short mode).
		if dt := t1.Sub(t0); dt < 50*time.Millisecond || !testing.Short() && dt > 250*time.Millisecond {
			errc <- fmt.Errorf("%s(%q, %q) took %s, expected 0.1s", what, net, addr, dt)
			return
		}
		errc <- nil
	}()
	select {
	case err := <-errc:
		if err != nil {
			t.Error(err)
		}
	case <-time.After(1 * time.Second):
		t.Errorf("%s(%q, %q) took over 1 second, expected 0.1s", what, net, addr)
	}
}
// TestTimeoutUDP checks read deadlines on UDP against a datagram server
// that never replies, exercising both Read and ReadFrom.
func TestTimeoutUDP(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	// set up a listener that won't talk back
	listening := make(chan string)
	done := make(chan int)
	go runDatagramPacketConnServer(t, "udp", "127.0.0.1:0", listening, done)
	addr := <-listening
	for _, readFrom := range []bool{false, true} {
		testTimeout(t, "udp", addr, readFrom)
	}
	<-done
}
// TestTimeoutTCP checks read deadlines on TCP against a stream server
// that never talks back.
func TestTimeoutTCP(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	// set up a listener that won't talk back
	ready := make(chan string)
	finished := make(chan int)
	go runStreamConnServer(t, "tcp", "127.0.0.1:0", ready, finished)
	addr := <-ready
	testTimeout(t, "tcp", addr, false)
	<-finished
}
// TestDeadlineReset verifies that clearing a listener deadline (setting
// noDeadline) cancels a previously armed one: Accept must block rather
// than fire the old 1-minute timer.
func TestDeadlineReset(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	tl := ln.(*TCPListener)
	tl.SetDeadline(time.Now().Add(1 * time.Minute))
	tl.SetDeadline(noDeadline) // reset it
	errc := make(chan error, 1)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	select {
	case <-time.After(50 * time.Millisecond):
		// Pass.
	case err := <-errc:
		// Accept should never return; we never
		// connected to it.
		t.Errorf("unexpected return from Accept; err=%v", err)
	}
}
// TestTimeoutAccept verifies that a 100ms listener deadline actually
// makes Accept return: waiting more than 1 second is a failure.
func TestTimeoutAccept(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	tl := ln.(*TCPListener)
	tl.SetDeadline(time.Now().Add(100 * time.Millisecond))
	errc := make(chan error, 1)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	select {
	case <-time.After(1 * time.Second):
		// Accept shouldn't block indefinitely
		t.Errorf("Accept didn't return in an expected time")
	case <-errc:
		// Pass.
	}
}
// TestReadWriteDeadline verifies that independent read (50ms) and write
// (250ms) deadlines on the same connection each fire close to their own
// time: a reader and a writer goroutine run concurrently and check their
// observed elapsed times against the respective deadlines.
func TestReadWriteDeadline(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Logf("skipping test on %q", runtime.GOOS)
		return
	}
	if !canCancelIO {
		t.Logf("skipping test on this system")
		return
	}
	const (
		readTimeout  = 50 * time.Millisecond
		writeTimeout = 250 * time.Millisecond
	)
	// checkTimeout requires the elapsed time since start to be within
	// [-30ms, +150ms] of should (upper bound only in non-short mode).
	checkTimeout := func(command string, start time.Time, should time.Duration) {
		is := time.Now().Sub(start)
		d := is - should
		if d < -30*time.Millisecond || !testing.Short() && 150*time.Millisecond < d {
			t.Errorf("%s timeout test failed: is=%v should=%v\n", command, is, should)
		}
	}
	ln, err := Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("ListenTCP on :0: %v", err)
	}
	lnquit := make(chan bool)
	// Server side: accept one connection and hold it open (the deferred
	// Close runs when this goroutine exits after lnquit is received).
	go func() {
		c, err := ln.Accept()
		if err != nil {
			t.Fatalf("Accept: %v", err)
		}
		defer c.Close()
		lnquit <- true
	}()
	c, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer c.Close()
	start := time.Now()
	err = c.SetReadDeadline(start.Add(readTimeout))
	if err != nil {
		t.Fatalf("SetReadDeadline: %v", err)
	}
	err = c.SetWriteDeadline(start.Add(writeTimeout))
	if err != nil {
		t.Fatalf("SetWriteDeadline: %v", err)
	}
	quit := make(chan bool)
	// Reader: nothing is ever sent, so Read must fail at ~readTimeout.
	go func() {
		var buf [10]byte
		_, err := c.Read(buf[:])
		if err == nil {
			t.Errorf("Read should not succeed")
		}
		checkTimeout("Read", start, readTimeout)
		quit <- true
	}()
	// Writer: fill buffers until the write deadline fires at
	// ~writeTimeout.
	go func() {
		var buf [10000]byte
		for {
			_, err := c.Write(buf[:])
			if err != nil {
				break
			}
		}
		checkTimeout("Write", start, writeTimeout)
		quit <- true
	}()
	<-quit
	<-quit
	<-lnquit
}
// neverEnding is a reader that fills every buffer with copies of a single
// byte and never reports an error or EOF.
type neverEnding byte

// Read fills p entirely with the byte value of b and reports len(p) bytes
// read with a nil error.
func (b neverEnding) Read(p []byte) (n int, err error) {
	fill := byte(b)
	for i := 0; i < len(p); i++ {
		p[i] = fill
	}
	return len(p), nil
}
// TestVariousDeadlines1Proc runs the deadline sweep with GOMAXPROCS=1.
func TestVariousDeadlines1Proc(t *testing.T) {
testVariousDeadlines(t, 1)
}
// TestVariousDeadlines4Proc runs the deadline sweep with GOMAXPROCS=4.
func TestVariousDeadlines4Proc(t *testing.T) {
testVariousDeadlines(t, 4)
}
// testVariousDeadlines dials a local server that writes as fast as it
// can, sets client deadlines ranging from 1ns to 1s, and checks that
// every client Copy ends with a timeout error within tooLong.
func testVariousDeadlines(t *testing.T, maxProcs int) {
// Restore the previous GOMAXPROCS when the test finishes.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
ln := newLocalListener(t)
defer ln.Close()
acceptc := make(chan error, 1)
// The server, with no timeouts of its own, sending bytes to clients
// as fast as it can.
servec := make(chan copyRes)
go func() {
for {
c, err := ln.Accept()
if err != nil {
// Report via channel; the test goroutine turns this into a failure.
acceptc <- err
return
}
go func() {
t0 := time.Now()
n, err := io.Copy(c, neverEnding('a'))
d := time.Since(t0)
c.Close()
servec <- copyRes{n, err, d}
}()
}
}()
for _, timeout := range []time.Duration{
1 * time.Nanosecond,
2 * time.Nanosecond,
5 * time.Nanosecond,
50 * time.Nanosecond,
100 * time.Nanosecond,
200 * time.Nanosecond,
500 * time.Nanosecond,
750 * time.Nanosecond,
1 * time.Microsecond,
5 * time.Microsecond,
25 * time.Microsecond,
250 * time.Microsecond,
500 * time.Microsecond,
1 * time.Millisecond,
5 * time.Millisecond,
100 * time.Millisecond,
250 * time.Millisecond,
500 * time.Millisecond,
1 * time.Second,
} {
numRuns := 3
if testing.Short() {
// In -short mode do one run per timeout and skip the slow tail.
numRuns = 1
if timeout > 500*time.Microsecond {
continue
}
}
for run := 0; run < numRuns; run++ {
name := fmt.Sprintf("%v run %d/%d", timeout, run+1, numRuns)
t.Log(name)
c, err := Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
clientc := make(chan copyRes)
go func() {
t0 := time.Now()
c.SetDeadline(t0.Add(timeout))
n, err := io.Copy(ioutil.Discard, c)
d := time.Since(t0)
c.Close()
clientc <- copyRes{n, err, d}
}()
const tooLong = 2000 * time.Millisecond
// First wait for the client side to time out...
select {
case res := <-clientc:
if isTimeout(res.err) {
t.Logf("for %v, good client timeout after %v, reading %d bytes", name, res.d, res.n)
} else {
t.Fatalf("for %v: client Copy = %d, %v (want timeout)", name, res.n, res.err)
}
case <-time.After(tooLong):
t.Fatalf("for %v: timeout (%v) waiting for client to timeout (%v) reading", name, tooLong, timeout)
}
// ...then for the server's per-connection writer to finish.
select {
case res := <-servec:
t.Logf("for %v: server in %v wrote %d, %v", name, res.d, res.n, res.err)
case err := <-acceptc:
t.Fatalf("for %v: server Accept = %v", name, err)
case <-time.After(tooLong):
t.Fatalf("for %v, timeout waiting for server to finish writing", name)
}
}
}
}
// TestReadDeadlineDataAvailable tests that read deadlines work, even
// if there's data ready to be read.
func TestReadDeadlineDataAvailable(t *testing.T) {
ln := newLocalListener(t)
defer ln.Close()
servec := make(chan copyRes)
const msg = "data client shouldn't read, even though it it'll be waiting"
go func() {
c, err := ln.Accept()
if err != nil {
// NOTE(review): t.Fatalf from a non-test goroutine; per the
// testing package docs it must be called from the test
// goroutine — confirm and consider Errorf+return.
t.Fatalf("Accept: %v", err)
}
defer c.Close()
n, err := c.Write([]byte(msg))
servec <- copyRes{n: int64(n), err: err}
}()
c, err := Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer c.Close()
// Wait until the server has written msg in full, so data is queued
// on the client socket before the deadline is set.
if res := <-servec; res.err != nil || res.n != int64(len(msg)) {
t.Fatalf("unexpected server Write: n=%d, err=%d; want n=%d, err=nil", res.n, res.err, len(msg))
}
c.SetReadDeadline(time.Now().Add(-5 * time.Second)) // in the past
buf := make([]byte, len(msg)/2)
// Even with data buffered, a Read after an already-expired deadline
// must return a timeout and read nothing.
n, err := c.Read(buf)
if n > 0 || !isTimeout(err) {
t.Fatalf("client read = %d (%q) err=%v; want 0, timeout", n, buf[:n], err)
}
}
// TestWriteDeadlineBufferAvailable tests that write deadlines work, even
// if there's buffer space available to write.
func TestWriteDeadlineBufferAvailable(t *testing.T) {
ln := newLocalListener(t)
defer ln.Close()
servec := make(chan copyRes)
go func() {
c, err := ln.Accept()
if err != nil {
// NOTE(review): t.Fatalf from a non-test goroutine; the testing
// package requires FailNow/Fatalf on the test goroutine —
// confirm and consider Errorf+return.
t.Fatalf("Accept: %v", err)
}
defer c.Close()
c.SetWriteDeadline(time.Now().Add(-5 * time.Second)) // in the past
// A single byte would trivially fit in the socket buffer, yet the
// expired deadline must make the Write fail with a timeout.
n, err := c.Write([]byte{'x'})
servec <- copyRes{n: int64(n), err: err}
}()
c, err := Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer c.Close()
res := <-servec
if res.n != 0 {
t.Errorf("Write = %d; want 0", res.n)
}
if !isTimeout(res.err) {
t.Errorf("Write error = %v; want timeout", res.err)
}
}
// TestProlongTimeout tests concurrent deadline modification.
// Known to cause data races in the past.
func TestProlongTimeout(t *testing.T) {
switch runtime.GOOS {
case "plan9":
t.Logf("skipping test on %q", runtime.GOOS)
return
}
ln := newLocalListener(t)
defer ln.Close()
connected := make(chan bool)
go func() {
s, err := ln.Accept()
connected <- true
if err != nil {
// NOTE(review): t.Fatalf from a non-test goroutine — the testing
// package requires it on the test goroutine; confirm.
t.Fatalf("ln.Accept: %v", err)
}
defer s.Close()
s.SetDeadline(time.Now().Add(time.Hour))
// Writer goroutine: repeatedly extends the deadline while writing,
// racing against the reader below doing the same.
go func() {
var buf [4096]byte
for {
_, err := s.Write(buf[:])
if err != nil {
break
}
s.SetDeadline(time.Now().Add(time.Hour))
}
}()
buf := make([]byte, 1)
for {
_, err := s.Read(buf)
if err != nil {
break
}
s.SetDeadline(time.Now().Add(time.Hour))
}
}()
c, err := Dial("tcp", ln.Addr().String())
if err != nil {
t.Fatalf("DialTCP: %v", err)
}
defer c.Close()
<-connected
// Feed the server reader so both server goroutines keep resetting
// the deadline concurrently; the race detector is the real check.
for i := 0; i < 1024; i++ {
var buf [1]byte
c.Write(buf[:])
}
}
|
package brewerydb
import (
"fmt"
"net/http"
)
// StyleService provides access to the BreweryDB Style API. Use Client.Style.
//
// See: http://www.brewerydb.com/developers/docs-endpoint/style_index
type StyleService struct {
c *Client
}
// Style represents a style of Beer.
// The min/max range fields are strings, mirroring how they arrive in the
// API response (presumably decimal values — confirm against the API docs).
type Style struct {
ID int
Name string
ShortName string
Description string
CategoryID int
Category Category
IbuMin string
IbuMax string
SrmMin string
SrmMax string
OgMin string
OgMax string
FgMin string
FgMax string
AbvMin string
AbvMax string
CreateDate string
UpdateDate string
}
// StyleList represents a single "page" containing a slice of Styles.
type StyleList struct {
CurrentPage int
NumberOfPages int
TotalResults int
Styles []Style `json:"data"`
}
// List returns all Styles on the given page.
//
// See: http://www.brewerydb.com/developers/docs-endpoint/style_index#1
func (ss *StyleService) List(page int) (sl StyleList, err error) {
// GET: /styles
var req *http.Request
req, err = ss.c.NewRequest("GET", "/styles", &Page{page})
if err != nil {
// Named returns: sl is zero, err carries the request error.
return
}
err = ss.c.Do(req, &sl)
return
}
// Get obtains the Style with the given Style ID.
// The API wraps the payload in a status envelope; only the Data field is
// returned to the caller, together with any transport/decode error.
func (ss *StyleService) Get(id int) (s Style, err error) {
	// GET: /style/:styleID
	var envelope struct {
		Status  string
		Data    Style
		Message string
	}
	var req *http.Request
	if req, err = ss.c.NewRequest("GET", fmt.Sprintf("/style/%d", id), nil); err != nil {
		return s, err
	}
	err = ss.c.Do(req, &envelope)
	return envelope.Data, err
}
Add API documentation links to style.go
package brewerydb
import (
"fmt"
"net/http"
)
// StyleService provides access to the BreweryDB Style API. Use Client.Style.
//
// See: http://www.brewerydb.com/developers/docs-endpoint/style_index
type StyleService struct {
c *Client
}
// Style represents a style of Beer.
// The min/max range fields are strings, mirroring how they arrive in the
// API response (presumably decimal values — confirm against the API docs).
type Style struct {
ID int
Name string
ShortName string
Description string
CategoryID int
Category Category
IbuMin string
IbuMax string
SrmMin string
SrmMax string
OgMin string
OgMax string
FgMin string
FgMax string
AbvMin string
AbvMax string
CreateDate string
UpdateDate string
}
// StyleList represents a single "page" containing a slice of Styles.
type StyleList struct {
CurrentPage int
NumberOfPages int
TotalResults int
Styles []Style `json:"data"`
}
// List returns all Styles on the given page.
//
// See: http://www.brewerydb.com/developers/docs-endpoint/style_index#1
func (ss *StyleService) List(page int) (sl StyleList, err error) {
// GET: /styles
var req *http.Request
req, err = ss.c.NewRequest("GET", "/styles", &Page{page})
if err != nil {
// Named returns: sl is zero, err carries the request error.
return
}
err = ss.c.Do(req, &sl)
return
}
// Get obtains the Style with the given Style ID.
//
// See: http://www.brewerydb.com/developers/docs-endpoint/style_index#2
func (ss *StyleService) Get(id int) (s Style, err error) {
// GET: /style/:styleID
var req *http.Request
req, err = ss.c.NewRequest("GET", fmt.Sprintf("/style/%d", id), nil)
if err != nil {
return
}
// The API wraps the Style in a status envelope; only Data is returned.
resp := struct {
Status string
Data Style
Message string
}{}
err = ss.c.Do(req, &resp)
return resp.Data, err
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"runtime"
"testing"
)
// unicastTests enumerates the listener configurations exercised by
// TestUnicastTCPAndUDP. laddr "previous" reuses the address chosen by
// the preceding case of the same family; packet selects ListenPacket
// instead of Listen.
var unicastTests = []struct {
net string
laddr string
ipv6 bool
packet bool
}{
{net: "tcp4", laddr: "127.0.0.1:0"},
{net: "tcp4", laddr: "previous"},
{net: "tcp6", laddr: "[::1]:0", ipv6: true},
{net: "tcp6", laddr: "previous", ipv6: true},
{net: "udp4", laddr: "127.0.0.1:0", packet: true},
{net: "udp6", laddr: "[::1]:0", ipv6: true, packet: true},
}
// TestUnicastTCPAndUDP exercises per-socket unicast options (IPv4
// TOS/TTL, IPv6 traffic class/hop limit) over both stream and packet
// listeners from the unicastTests table.
func TestUnicastTCPAndUDP(t *testing.T) {
	if runtime.GOOS == "plan9" || runtime.GOOS == "windows" {
		return
	}
	prevladdr := ""
	for _, tt := range unicastTests {
		if tt.ipv6 && !supportsIPv6 {
			continue
		}
		var (
			fd *netFD
			// Closed explicitly at the end of each iteration. The
			// previous code used defer inside the loop, which kept
			// every socket open until the whole test returned.
			closer interface {
				Close() error
			}
		)
		if !tt.packet {
			if tt.laddr == "previous" {
				tt.laddr = prevladdr
			}
			l, err := Listen(tt.net, tt.laddr)
			if err != nil {
				t.Fatalf("Listen failed: %v", err)
			}
			prevladdr = l.Addr().String()
			closer = l
			fd = l.(*TCPListener).fd
		} else {
			c, err := ListenPacket(tt.net, tt.laddr)
			if err != nil {
				t.Fatalf("ListenPacket failed: %v", err)
			}
			closer = c
			fd = c.(*UDPConn).fd
		}
		if !tt.ipv6 {
			testIPv4UnicastSocketOptions(t, fd)
		} else {
			testIPv6UnicastSocketOptions(t, fd)
		}
		closer.Close()
	}
}
// testIPv4UnicastSocketOptions reads and writes the IPv4 TOS and TTL
// options on fd, failing the test on any getter/setter error.
func testIPv4UnicastSocketOptions(t *testing.T, fd *netFD) {
tos, err := ipv4TOS(fd)
if err != nil {
t.Fatalf("ipv4TOS failed: %v", err)
}
t.Logf("IPv4 TOS: %v", tos)
err = setIPv4TOS(fd, 1)
if err != nil {
t.Fatalf("setIPv4TOS failed: %v", err)
}
ttl, err := ipv4TTL(fd)
if err != nil {
t.Fatalf("ipv4TTL failed: %v", err)
}
t.Logf("IPv4 TTL: %v", ttl)
err = setIPv4TTL(fd, 1)
if err != nil {
t.Fatalf("setIPv4TTL failed: %v", err)
}
}
// testIPv6UnicastSocketOptions reads and writes the IPv6 traffic class
// and hop limit options on fd, failing the test on any error.
func testIPv6UnicastSocketOptions(t *testing.T, fd *netFD) {
tos, err := ipv6TrafficClass(fd)
if err != nil {
t.Fatalf("ipv6TrafficClass failed: %v", err)
}
t.Logf("IPv6 TrafficClass: %v", tos)
err = setIPv6TrafficClass(fd, 1)
if err != nil {
t.Fatalf("setIPv6TrafficClass failed: %v", err)
}
hoplim, err := ipv6HopLimit(fd)
if err != nil {
t.Fatalf("ipv6HopLimit failed: %v", err)
}
t.Logf("IPv6 HopLimit: %v", hoplim)
err = setIPv6HopLimit(fd, 1)
if err != nil {
t.Fatalf("setIPv6HopLimit failed: %v", err)
}
}
net: fix linux build
R=golang-dev, bradfitz
CC=golang-dev
http://codereview.appspot.com/5558056
Committer: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package net
import (
"io"
"runtime"
"testing"
)
// unicastTests enumerates the listener configurations exercised by
// TestUnicastTCPAndUDP. laddr "previous" reuses the address chosen by
// the preceding case of the same family; packet selects ListenPacket
// instead of Listen.
var unicastTests = []struct {
net string
laddr string
ipv6 bool
packet bool
}{
{net: "tcp4", laddr: "127.0.0.1:0"},
{net: "tcp4", laddr: "previous"},
{net: "tcp6", laddr: "[::1]:0", ipv6: true},
{net: "tcp6", laddr: "previous", ipv6: true},
{net: "udp4", laddr: "127.0.0.1:0", packet: true},
{net: "udp6", laddr: "[::1]:0", ipv6: true, packet: true},
}
// TestUnicastTCPAndUDP exercises per-socket unicast options (IPv4
// TOS/TTL, IPv6 traffic class/hop limit) over both stream and packet
// listeners from the unicastTests table. Each socket is closed at the
// end of its iteration via closer (not defer, which would leak sockets
// until the test returns).
func TestUnicastTCPAndUDP(t *testing.T) {
if runtime.GOOS == "plan9" || runtime.GOOS == "windows" {
return
}
prevladdr := ""
for _, tt := range unicastTests {
if tt.ipv6 && !supportsIPv6 {
continue
}
var (
fd *netFD
closer io.Closer
)
if !tt.packet {
if tt.laddr == "previous" {
tt.laddr = prevladdr
}
l, err := Listen(tt.net, tt.laddr)
if err != nil {
t.Fatalf("Listen failed: %v", err)
}
prevladdr = l.Addr().String()
closer = l
fd = l.(*TCPListener).fd
} else {
c, err := ListenPacket(tt.net, tt.laddr)
if err != nil {
t.Fatalf("ListenPacket failed: %v", err)
}
closer = c
fd = c.(*UDPConn).fd
}
if !tt.ipv6 {
testIPv4UnicastSocketOptions(t, fd)
} else {
testIPv6UnicastSocketOptions(t, fd)
}
closer.Close()
}
}
// testIPv4UnicastSocketOptions reads and writes the IPv4 TOS and TTL
// options on fd, failing the test on any getter/setter error.
func testIPv4UnicastSocketOptions(t *testing.T, fd *netFD) {
tos, err := ipv4TOS(fd)
if err != nil {
t.Fatalf("ipv4TOS failed: %v", err)
}
t.Logf("IPv4 TOS: %v", tos)
err = setIPv4TOS(fd, 1)
if err != nil {
t.Fatalf("setIPv4TOS failed: %v", err)
}
ttl, err := ipv4TTL(fd)
if err != nil {
t.Fatalf("ipv4TTL failed: %v", err)
}
t.Logf("IPv4 TTL: %v", ttl)
err = setIPv4TTL(fd, 1)
if err != nil {
t.Fatalf("setIPv4TTL failed: %v", err)
}
}
// testIPv6UnicastSocketOptions reads and writes the IPv6 traffic class
// and hop limit options on fd, failing the test on any error.
func testIPv6UnicastSocketOptions(t *testing.T, fd *netFD) {
tos, err := ipv6TrafficClass(fd)
if err != nil {
t.Fatalf("ipv6TrafficClass failed: %v", err)
}
t.Logf("IPv6 TrafficClass: %v", tos)
err = setIPv6TrafficClass(fd, 1)
if err != nil {
t.Fatalf("setIPv6TrafficClass failed: %v", err)
}
hoplim, err := ipv6HopLimit(fd)
if err != nil {
t.Fatalf("ipv6HopLimit failed: %v", err)
}
t.Logf("IPv6 HopLimit: %v", hoplim)
err = setIPv6HopLimit(fd, 1)
if err != nil {
t.Fatalf("setIPv6HopLimit failed: %v", err)
}
}
|
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package juju
import (
"bytes"
"errors"
"github.com/globocom/commandmocker"
"github.com/globocom/config"
etesting "github.com/globocom/tsuru/exec/testing"
"github.com/globocom/tsuru/provision"
"github.com/globocom/tsuru/repository"
"github.com/globocom/tsuru/testing"
"labix.org/v2/mgo/bson"
"launchpad.net/gocheck"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
// TestShouldBeRegistered checks that the "juju" provisioner is
// registered with the provision registry and has the expected type.
func (s *S) TestShouldBeRegistered(c *gocheck.C) {
p, err := provision.Get("juju")
c.Assert(err, gocheck.IsNil)
c.Assert(p, gocheck.FitsTypeOf, &JujuProvisioner{})
}
// TestELBSupport checks that elbSupport reads the "juju:use-elb" config
// key lazily and caches the result for the provisioner's lifetime.
func (s *S) TestELBSupport(c *gocheck.C) {
defer config.Unset("juju:use-elb")
config.Set("juju:use-elb", true)
p := JujuProvisioner{}
c.Assert(p.elbSupport(), gocheck.Equals, true)
config.Set("juju:use-elb", false)
c.Assert(p.elbSupport(), gocheck.Equals, true) // Read config only once.
p = JujuProvisioner{}
c.Assert(p.elbSupport(), gocheck.Equals, false)
config.Unset("juju:use-elb")
p = JujuProvisioner{}
c.Assert(p.elbSupport(), gocheck.Equals, false)
}
// TestUnitsCollection checks that unitsCollection opens the collection
// configured for the suite.
func (s *S) TestUnitsCollection(c *gocheck.C) {
p := JujuProvisioner{}
conn, collection := p.unitsCollection()
defer conn.Close()
c.Assert(collection.Name, gocheck.Equals, s.collName)
}
// TestProvision checks that Provision runs "juju deploy" with the
// configured charms path and then "juju set" with the app's read-only
// repository URL.
func (s *S) TestProvision(c *gocheck.C) {
fexec := &etesting.FakeExecutor{}
// Swap in the fake executor for the package-level execut; restored
// (to nil) on exit so other tests are unaffected.
execut = fexec
defer func() {
execut = nil
}()
config.Set("juju:charms-path", "/etc/juju/charms")
defer config.Unset("juju:charms-path")
config.Set("host", "somehost")
defer config.Unset("host")
app := testing.NewFakeApp("trace", "python", 0)
p := JujuProvisioner{}
err := p.Provision(app)
c.Assert(err, gocheck.IsNil)
args := []string{
"deploy", "--repository", "/etc/juju/charms", "local:python", "trace",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
args = []string{
"set", "trace", "app-repo=" + repository.GetReadOnlyUrl("trace"),
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}
// TestProvisionUndefinedCharmsPath checks that Provision fails with a
// clear message when "juju:charms-path" is not configured.
func (s *S) TestProvisionUndefinedCharmsPath(c *gocheck.C) {
config.Unset("juju:charms-path")
p := JujuProvisioner{}
err := p.Provision(testing.NewFakeApp("eternity", "sandman", 0))
c.Assert(err, gocheck.NotNil)
c.Assert(err.Error(), gocheck.Equals, `Setting "juju:charms-path" is not defined.`)
}
// TestProvisionFailure checks that a failing juju command is surfaced as
// a *provision.Error carrying the command output and exit status.
func (s *S) TestProvisionFailure(c *gocheck.C) {
config.Set("juju:charms-path", "/home/charms")
defer config.Unset("juju:charms-path")
tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("trace", "python", 0)
p := JujuProvisioner{}
err = p.Provision(app)
c.Assert(err, gocheck.NotNil)
pErr, ok := err.(*provision.Error)
c.Assert(ok, gocheck.Equals, true)
c.Assert(pErr.Reason, gocheck.Equals, "juju failed")
c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 1")
}
// TestRestart checks that Restart sshes into the app's unit and runs the
// restart hook.
func (s *S) TestRestart(c *gocheck.C) {
fexec := &etesting.FakeExecutor{}
execut = fexec
defer func() {
execut = nil
}()
app := testing.NewFakeApp("cribcaged", "python", 1)
p := JujuProvisioner{}
err := p.Restart(app)
c.Assert(err, gocheck.IsNil)
args := []string{
"ssh", "-o", "StrictHostKeyChecking no", "-q", "1", "/var/lib/tsuru/hooks/restart",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}
// TestRestartFailure checks that a failing restart surfaces as a
// *provision.Error with the command output (newline included) and the
// exit status.
func (s *S) TestRestartFailure(c *gocheck.C) {
tmpdir, err := commandmocker.Error("juju", "juju failed to run command", 25)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("cribcaged", "python", 1)
p := JujuProvisioner{}
err = p.Restart(app)
c.Assert(err, gocheck.NotNil)
pErr, ok := err.(*provision.Error)
c.Assert(ok, gocheck.Equals, true)
c.Assert(pErr.Reason, gocheck.Equals, "juju failed to run command\n")
c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 25")
}
// TestDeploy checks that Deploy runs clone, dependency install and
// restart on the app's units, in that order.
func (s *S) TestDeploy(c *gocheck.C) {
config.Set("git:unit-repo", "test/dir")
defer func() {
config.Unset("git:unit-repo")
}()
app := testing.NewFakeApp("cribcaged", "python", 1)
w := &bytes.Buffer{}
p := JujuProvisioner{}
err := p.Deploy(app, w)
c.Assert(err, gocheck.IsNil)
expected := make([]string, 3)
// also ensures execution order
expected[0] = "git clone git://tsuruhost.com/cribcaged.git test/dir --depth 1" // the command expected to run on the units
expected[1] = "install deps"
expected[2] = "restart"
c.Assert(app.Commands, gocheck.DeepEquals, expected)
}
// TestDeployLogsActions checks that Deploy writes one progress line per
// step to the provided writer, in order.
func (s *S) TestDeployLogsActions(c *gocheck.C) {
config.Set("git:unit-repo", "test/dir")
defer func() {
config.Unset("git:unit-repo")
}()
app := testing.NewFakeApp("cribcaged", "python", 1)
w := &bytes.Buffer{}
p := JujuProvisioner{}
err := p.Deploy(app, w)
c.Assert(err, gocheck.IsNil)
logs := w.String()
expected := `
 ---> Tsuru receiving push
 ---> Replicating the application repository across units
 ---> Installing dependencies
 ---> Restarting application
 ---> Deploy done!
`
c.Assert(logs, gocheck.Equals, expected)
}
// TestDestroy checks that destroying an app runs "juju destroy-service"
// plus one "juju terminate-machine" per unit, and removes the units'
// documents from the units collection.
func (s *S) TestDestroy(c *gocheck.C) {
	fexec := &etesting.FakeExecutor{}
	execut = fexec
	defer func() {
		execut = nil
	}()
	app := testing.NewFakeApp("cribcaged", "python", 3)
	p := JujuProvisioner{}
	conn, collection := p.unitsCollection()
	defer conn.Close()
	err := collection.Insert(
		instance{UnitName: "cribcaged/0"},
		instance{UnitName: "cribcaged/1"},
		instance{UnitName: "cribcaged/2"},
	)
	c.Assert(err, gocheck.IsNil)
	err = p.Destroy(app)
	c.Assert(err, gocheck.IsNil)
	args := []string{"destroy-service", "cribcaged"}
	c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
	args = []string{"terminate-machine", "1"}
	c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
	args = []string{"terminate-machine", "2"}
	c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
	args = []string{"terminate-machine", "3"}
	c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
	// All three unit documents must be gone once Destroy returns.
	// (A redundant duplicate of the "terminate-machine 3" assertion that
	// used to trail this check has been removed.)
	n, err := collection.Find(bson.M{
		"_id": bson.M{
			"$in": []string{"cribcaged/0", "cribcaged/1", "cribcaged/2"},
		},
	}).Count()
	c.Assert(err, gocheck.IsNil)
	c.Assert(n, gocheck.Equals, 0)
}
// TestDestroyFailure checks that a failing destroy surfaces as a
// *provision.Error with the juju output and exit status.
func (s *S) TestDestroyFailure(c *gocheck.C) {
tmpdir, err := commandmocker.Error("juju", "juju failed to destroy the machine", 25)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("idioglossia", "static", 1)
p := JujuProvisioner{}
err = p.Destroy(app)
c.Assert(err, gocheck.NotNil)
pErr, ok := err.(*provision.Error)
c.Assert(ok, gocheck.Equals, true)
c.Assert(pErr.Reason, gocheck.Equals, "juju failed to destroy the machine")
c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 25")
}
// TestAddUnits checks that AddUnits runs "juju add-unit --num-units n"
// and returns the new unit names parsed from the mocked output.
func (s *S) TestAddUnits(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", addUnitsOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("resist", "rush", 0)
p := JujuProvisioner{}
units, err := p.AddUnits(app, 4)
c.Assert(err, gocheck.IsNil)
c.Assert(units, gocheck.HasLen, 4)
names := make([]string, len(units))
for i, unit := range units {
names[i] = unit.Name
}
expected := []string{"resist/3", "resist/4", "resist/5", "resist/6"}
c.Assert(names, gocheck.DeepEquals, expected)
args := []string{
"add-unit", "resist", "--num-units", "4",
}
c.Assert(commandmocker.Parameters(tmpdir), gocheck.DeepEquals, args)
// The queue must be empty: getting a message should time out.
_, err = getQueue(queueName).Get(1e6)
c.Assert(err, gocheck.NotNil)
}
// TestAddZeroUnits checks that adding zero units is rejected.
func (s *S) TestAddZeroUnits(c *gocheck.C) {
p := JujuProvisioner{}
units, err := p.AddUnits(nil, 0)
c.Assert(units, gocheck.IsNil)
c.Assert(err, gocheck.NotNil)
c.Assert(err.Error(), gocheck.Equals, "Cannot add zero units.")
}
// TestAddUnitsFailure checks that a failing add-unit surfaces as a
// *provision.Error with the juju output and exit status.
func (s *S) TestAddUnitsFailure(c *gocheck.C) {
tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("headlong", "rush", 1)
p := JujuProvisioner{}
units, err := p.AddUnits(app, 1)
c.Assert(units, gocheck.IsNil)
c.Assert(err, gocheck.NotNil)
e, ok := err.(*provision.Error)
c.Assert(ok, gocheck.Equals, true)
c.Assert(e.Reason, gocheck.Equals, "juju failed")
c.Assert(e.Err.Error(), gocheck.Equals, "exit status 1")
}
// TestRemoveUnit checks that RemoveUnit triggers (asynchronously) both
// "juju remove-unit" and "juju terminate-machine", and deletes the unit
// document from the units collection.
func (s *S) TestRemoveUnit(c *gocheck.C) {
fexec := &etesting.FakeExecutor{}
execut = fexec
defer func() {
execut = nil
}()
app := testing.NewFakeApp("two", "rush", 3)
p := JujuProvisioner{}
conn, collection := p.unitsCollection()
defer conn.Close()
err := collection.Insert(instance{UnitName: "two/2", InstanceId: "i-00000439"})
c.Assert(err, gocheck.IsNil)
err = p.RemoveUnit(app, "two/2")
c.Assert(err, gocheck.IsNil)
// The commands run in the background; poll the fake executor until
// both have been observed or the 2s timeout below fires.
ran := make(chan bool, 1)
go func() {
for {
args1 := []string{"remove-unit", "two/2"}
args2 := []string{"terminate-machine", "3"}
if fexec.ExecutedCmd("juju", args1) && fexec.ExecutedCmd("juju", args2) {
ran <- true
}
runtime.Gosched()
}
}()
select {
case <-ran:
case <-time.After(2e9):
c.Errorf("Did not run terminate-machine command after 2 seconds.")
}
n, err := collection.Find(bson.M{"_id": "two/2"}).Count()
c.Assert(err, gocheck.IsNil)
c.Assert(n, gocheck.Equals, 0)
}
// TestRemoveUnitUnknownByJuju checks that RemoveUnit tolerates juju not
// knowing the unit ("was not found" in the output) and reports success.
func (s *S) TestRemoveUnitUnknownByJuju(c *gocheck.C) {
output := `013-01-11 20:02:07,883 INFO Connecting to environment...
2013-01-11 20:02:10,147 INFO Connected to environment.
2013-01-11 20:02:10,160 ERROR Service unit 'two/2' was not found`
tmpdir, err := commandmocker.Error("juju", output, 1)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("two", "rush", 3)
p := JujuProvisioner{}
err = p.RemoveUnit(app, "two/2")
c.Assert(err, gocheck.IsNil)
c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
}
// TestRemoveUnknownUnit checks that removing a unit the app does not own
// fails with a descriptive error.
func (s *S) TestRemoveUnknownUnit(c *gocheck.C) {
app := testing.NewFakeApp("tears", "rush", 2)
p := JujuProvisioner{}
err := p.RemoveUnit(app, "tears/2")
c.Assert(err, gocheck.NotNil)
c.Assert(err.Error(), gocheck.Equals, `App "tears" does not have a unit named "tears/2".`)
}
// TestRemoveUnitFailure checks that a failing remove-unit surfaces as a
// *provision.Error with the juju output and exit status.
func (s *S) TestRemoveUnitFailure(c *gocheck.C) {
tmpdir, err := commandmocker.Error("juju", "juju failed", 66)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("something", "rush", 1)
p := JujuProvisioner{}
err = p.RemoveUnit(app, "something/0")
c.Assert(err, gocheck.NotNil)
e, ok := err.(*provision.Error)
c.Assert(ok, gocheck.Equals, true)
c.Assert(e.Reason, gocheck.Equals, "juju failed")
c.Assert(e.Err.Error(), gocheck.Equals, "exit status 66")
}
// TestInstallDepsRunRelatedHook checks that InstallDeps runs the
// dependencies hook on the app.
func (s *S) TestInstallDepsRunRelatedHook(c *gocheck.C) {
p := &JujuProvisioner{}
app := testing.NewFakeApp("myapp", "python", 0)
w := &bytes.Buffer{}
err := p.InstallDeps(app, w)
c.Assert(err, gocheck.IsNil)
expected := []string{"ran /var/lib/tsuru/hooks/dependencies"}
c.Assert(app.Commands, gocheck.DeepEquals, expected)
}
// TestExecutedCmd checks that ExecuteCommand sshes into every unit of
// the app and labels each unit's output in the writer.
func (s *S) TestExecutedCmd(c *gocheck.C) {
var buf bytes.Buffer
fexec := &etesting.FakeExecutor{}
execut = fexec
defer func() {
execut = nil
}()
app := testing.NewFakeApp("almah", "static", 2)
p := JujuProvisioner{}
err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lh")
c.Assert(err, gocheck.IsNil)
bufOutput := `Output from unit "almah/0":
Output from unit "almah/1":
`
args := []string{
"ssh",
"-o",
"StrictHostKeyChecking no",
"-q",
"1",
"ls",
"-lh",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
args = []string{
"ssh",
"-o",
"StrictHostKeyChecking no",
"-q",
"2",
"ls",
"-lh",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
c.Assert(buf.String(), gocheck.Equals, bufOutput)
}
// TestExecutedCmdFailure checks that a failing remote command surfaces
// the exit status as the error and the command output in the writer.
func (s *S) TestExecutedCmdFailure(c *gocheck.C) {
var buf bytes.Buffer
tmpdir, err := commandmocker.Error("juju", "failed", 2)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
app := testing.NewFakeApp("frases", "static", 1)
p := JujuProvisioner{}
err = p.ExecuteCommand(&buf, &buf, app, "ls", "-l")
c.Assert(err, gocheck.NotNil)
c.Assert(err.Error(), gocheck.Equals, "exit status 2")
c.Assert(buf.String(), gocheck.Equals, "failed\n")
}
// TestExecutedCmdOneUnit checks the single-unit case: the command runs
// via ssh against the one unit.
func (s *S) TestExecutedCmdOneUnit(c *gocheck.C) {
var buf bytes.Buffer
fexec := &etesting.FakeExecutor{}
execut = fexec
defer func() {
execut = nil
}()
app := testing.NewFakeApp("almah", "static", 1)
p := JujuProvisioner{}
err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lh")
c.Assert(err, gocheck.IsNil)
args := []string{
"ssh",
"-o",
"StrictHostKeyChecking no",
"-q",
"1",
"ls",
"-lh",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}
// TestExecutedCmdUnitDown checks that units in "down" state are skipped
// (with an explanatory line in the output) while the remaining units
// still run the command.
func (s *S) TestExecutedCmdUnitDown(c *gocheck.C) {
var buf bytes.Buffer
fexec := &etesting.FakeExecutor{}
execut = fexec
defer func() {
execut = nil
}()
app := testing.NewFakeApp("almah", "static", 3)
app.SetUnitStatus(provision.StatusDown, 1)
p := JujuProvisioner{}
err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lha")
c.Assert(err, gocheck.IsNil)
args := []string{
"ssh",
"-o",
"StrictHostKeyChecking no",
"-q",
"1",
"ls",
"-lha",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
args = []string{
"ssh",
"-o",
"StrictHostKeyChecking no",
"-q",
"3",
"ls",
"-lha",
}
c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
bufOutput := `Output from unit "almah/0":
Output from unit "almah/1":
Unit state is "down", it must be "started" for running commands.
Output from unit "almah/2":
`
c.Assert(buf.String(), gocheck.Equals, bufOutput)
}
// TestSaveBootstrapMachine checks that saveBootstrapMachine persists the
// machine document in the bootstrap collection.
func (s *S) TestSaveBootstrapMachine(c *gocheck.C) {
p := JujuProvisioner{}
m := machine{
AgentState: "state",
IpAddress: "ip",
InstanceId: "id",
InstanceState: "state",
}
p.saveBootstrapMachine(m)
conn, collection := p.bootstrapCollection()
defer conn.Close()
defer collection.Remove(m)
var mach machine
collection.Find(nil).One(&mach)
c.Assert(mach, gocheck.DeepEquals, m)
}
// TestCollectStatusShouldNotAddBootstraTwice checks that running
// CollectStatus twice stores a single bootstrap machine document.
// (The method name has a typo — "Bootstra" — kept as-is since the
// gocheck suite discovers tests by name.)
func (s *S) TestCollectStatusShouldNotAddBootstraTwice(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", collectOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
p := JujuProvisioner{}
_, err = p.CollectStatus()
c.Assert(err, gocheck.IsNil)
_, err = p.CollectStatus()
c.Assert(err, gocheck.IsNil)
conn, collection := p.bootstrapCollection()
defer conn.Close()
l, err := collection.Find(nil).Count()
c.Assert(err, gocheck.IsNil)
c.Assert(l, gocheck.Equals, 1)
}
// TestCollectStatus checks that CollectStatus parses the mocked "juju
// status" output into provision.Unit values and asynchronously persists
// the unit instance documents.
func (s *S) TestCollectStatus(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", collectOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
p := JujuProvisioner{}
conn, collection := p.unitsCollection()
defer conn.Close()
err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "i-00000439"})
c.Assert(err, gocheck.IsNil)
defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
expected := []provision.Unit{
{
Name: "as_i_rise/0",
AppName: "as_i_rise",
Type: "django",
Machine: 105,
InstanceId: "i-00000439",
Ip: "10.10.10.163",
Status: provision.StatusStarted,
},
{
Name: "the_infanta/0",
AppName: "the_infanta",
Type: "gunicorn",
Machine: 107,
InstanceId: "i-0000043e",
Ip: "10.10.10.168",
Status: provision.StatusInstalling,
},
}
units, err := p.CollectStatus()
c.Assert(err, gocheck.IsNil)
// Order of returned units is not deterministic; normalize before
// comparing against expected.
cp := make([]provision.Unit, len(units))
copy(cp, units)
if cp[0].Type == "gunicorn" {
cp[0], cp[1] = cp[1], cp[0]
}
c.Assert(cp, gocheck.DeepEquals, expected)
c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
// Persistence is asynchronous: poll until both documents appear.
// NOTE(review): this loop spins without runtime.Gosched, unlike the
// sibling tests below — consider adding one.
done := make(chan int8)
go func() {
for {
ct, err := collection.Find(nil).Count()
c.Assert(err, gocheck.IsNil)
if ct == 2 {
done <- 1
return
}
}
}()
select {
case <-done:
case <-time.After(5e9):
c.Fatal("Did not save the unit after 5 seconds.")
}
var instances []instance
err = collection.Find(nil).Sort("_id").All(&instances)
c.Assert(err, gocheck.IsNil)
c.Assert(instances, gocheck.HasLen, 2)
c.Assert(instances[0].UnitName, gocheck.Equals, "as_i_rise/0")
c.Assert(instances[0].InstanceId, gocheck.Equals, "i-00000439")
c.Assert(instances[1].UnitName, gocheck.Equals, "the_infanta/0")
c.Assert(instances[1].InstanceId, gocheck.Equals, "i-0000043e")
// NOTE(review): this final query decodes a unit document into a
// machine-typed value — looks like leftover bootstrap-check code;
// confirm intent.
var b machine
err = collection.Find(nil).One(&b)
c.Assert(err, gocheck.IsNil)
}
// TestCollectStatusDirtyOutput checks that CollectStatus parses "juju
// status" output that is polluted with extra noise, still yielding the
// expected units, and that the unit documents get persisted.
func (s *S) TestCollectStatusDirtyOutput(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", dirtyCollectOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
expected := []provision.Unit{
{
Name: "as_i_rise/0",
AppName: "as_i_rise",
Type: "django",
Machine: 105,
InstanceId: "i-00000439",
Ip: "10.10.10.163",
Status: provision.StatusStarted,
},
{
Name: "the_infanta/1",
AppName: "the_infanta",
Type: "gunicorn",
Machine: 107,
InstanceId: "i-0000043e",
Ip: "10.10.10.168",
Status: provision.StatusInstalling,
},
}
p := JujuProvisioner{}
units, err := p.CollectStatus()
c.Assert(err, gocheck.IsNil)
// Order of returned units is not deterministic; normalize first.
cp := make([]provision.Unit, len(units))
copy(cp, units)
if cp[0].Type == "gunicorn" {
cp[0], cp[1] = cp[1], cp[0]
}
c.Assert(cp, gocheck.DeepEquals, expected)
c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
// Wait for the async persistence, then clean up the documents.
var wg sync.WaitGroup
wg.Add(1)
conn, collection := p.unitsCollection()
defer conn.Close()
go func() {
q := bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/1"}}}
for {
if n, _ := collection.Find(q).Count(); n == 2 {
break
}
runtime.Gosched()
}
collection.Remove(q)
wg.Done()
}()
wg.Wait()
}
// TestCollectStatusIDChangeDisabledELB checks that when a unit's
// instance ID changes (and ELB is disabled), CollectStatus eventually
// updates the stored document with the new ID.
func (s *S) TestCollectStatusIDChangeDisabledELB(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", collectOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
p := JujuProvisioner{}
conn, collection := p.unitsCollection()
defer conn.Close()
// Stored ID differs from the one in collectOutput (i-00000439).
err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "i-00000239"})
c.Assert(err, gocheck.IsNil)
defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
_, err = p.CollectStatus()
c.Assert(err, gocheck.IsNil)
// The update is asynchronous; poll for the new instance ID.
done := make(chan int8)
go func() {
for {
q := bson.M{"_id": "as_i_rise/0", "instanceid": "i-00000439"}
ct, err := collection.Find(q).Count()
c.Assert(err, gocheck.IsNil)
if ct == 1 {
done <- 1
return
}
runtime.Gosched()
}
}()
select {
case <-done:
case <-time.After(5e9):
c.Fatal("Did not update the unit after 5 seconds.")
}
}
// TestCollectStatusIDChangeFromPending checks that a unit stored with
// the placeholder "pending" instance ID is eventually updated with the
// real ID reported by juju status.
func (s *S) TestCollectStatusIDChangeFromPending(c *gocheck.C) {
tmpdir, err := commandmocker.Add("juju", collectOutput)
c.Assert(err, gocheck.IsNil)
defer commandmocker.Remove(tmpdir)
p := JujuProvisioner{}
conn, collection := p.unitsCollection()
defer conn.Close()
err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "pending"})
c.Assert(err, gocheck.IsNil)
defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
_, err = p.CollectStatus()
c.Assert(err, gocheck.IsNil)
// The update is asynchronous; poll for the new instance ID.
done := make(chan int8)
go func() {
for {
q := bson.M{"_id": "as_i_rise/0", "instanceid": "i-00000439"}
ct, err := collection.Find(q).Count()
c.Assert(err, gocheck.IsNil)
if ct == 1 {
done <- 1
return
}
runtime.Gosched()
}
}()
select {
case <-done:
case <-time.After(5e9):
c.Fatal("Did not update the unit after 5 seconds.")
}
}
// TestCollectStatusFailure verifies that a failing juju invocation surfaces
// as a *provision.Error carrying juju's output and the exit status.
func (s *S) TestCollectStatusFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, "juju failed")
    c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 1")
    c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
}

// TestCollectStatusInvalidYAML verifies that malformed "juju status" output
// is reported as a *provision.Error wrapping a YAML parse error.
func (s *S) TestCollectStatusInvalidYAML(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", "local: somewhere::")
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, `"juju status" returned invalid data: local: somewhere::`)
    c.Assert(pErr.Err, gocheck.ErrorMatches, `^YAML error:.*$`)
}
// TestLoadBalancerEnabledElb: LoadBalancer returns a non-nil manager when
// the provisioner's cached elb flag is true.
func (s *S) TestLoadBalancerEnabledElb(c *gocheck.C) {
    p := JujuProvisioner{}
    p.elb = new(bool)
    *p.elb = true
    lb := p.LoadBalancer()
    c.Assert(lb, gocheck.NotNil)
}

// TestLoadBalancerDisabledElb: LoadBalancer returns nil when the cached
// elb flag is false (new(bool) zero value).
func (s *S) TestLoadBalancerDisabledElb(c *gocheck.C) {
    p := JujuProvisioner{}
    p.elb = new(bool)
    lb := p.LoadBalancer()
    c.Assert(lb, gocheck.IsNil)
}
// TestExecWithTimeout exercises execWithTimeout with three table-driven
// scenarios: a command that exceeds its timeout, one that finishes in time,
// and one that exits non-zero.
func (s *S) TestExecWithTimeout(c *gocheck.C) {
    var data = []struct {
        cmd     []string
        timeout time.Duration
        out     string
        err     error
    }{
        {
            cmd:     []string{"sleep", "2"},
            timeout: 1e6, // 1ms, forces the timeout path
            out:     "",
            err:     errors.New(`"sleep 2" ran for more than 1ms.`),
        },
        {
            cmd:     []string{"python", "-c", "import time; time.sleep(1); print('hello world!')"},
            timeout: 5e9,
            out:     "hello world!\n",
            err:     nil,
        },
        {
            cmd:     []string{"python", "-c", "import sys; print('hello world!'); exit(1)"},
            timeout: 5e9,
            out:     "hello world!\n",
            err:     errors.New("exit status 1"),
        },
    }
    for _, d := range data {
        out, err := execWithTimeout(d.timeout, d.cmd[0], d.cmd[1:]...)
        if string(out) != d.out {
            c.Errorf("Output. Want %q. Got %q.", d.out, out)
        }
        if d.err == nil && err != nil {
            c.Errorf("Error. Want %v. Got %v.", d.err, err)
        } else if d.err != nil && err == nil {
            // Bug fix: the original chained err.Error() without checking err
            // for nil, panicking when an error was expected but none occurred.
            c.Errorf("Error. Want %q. Got <nil>.", d.err.Error())
        } else if d.err != nil && err.Error() != d.err.Error() {
            c.Errorf("Error message. Want %q. Got %q.", d.err.Error(), err.Error())
        }
    }
}
// TestUnitStatus drives unitStatus through a table of (instance state,
// agent state, machine agent state) triples and checks the derived
// provision.Status for each combination.
func (s *S) TestUnitStatus(c *gocheck.C) {
    var tests = []struct {
        instance     string
        agent        string
        machineAgent string
        expected     provision.Status
    }{
        {"something", "nothing", "wut", provision.StatusPending},
        {"", "", "", provision.StatusCreating},
        {"", "", "pending", provision.StatusCreating},
        {"", "", "not-started", provision.StatusCreating},
        {"pending", "", "", provision.StatusCreating},
        {"", "not-started", "running", provision.StatusCreating},
        {"error", "install-error", "start-error", provision.StatusError},
        {"started", "start-error", "running", provision.StatusError},
        {"started", "charm-upgrade-error", "running", provision.StatusError},
        {"running", "pending", "running", provision.StatusInstalling},
        {"running", "started", "running", provision.StatusStarted},
        {"running", "down", "running", provision.StatusDown},
    }
    for _, t := range tests {
        got := unitStatus(t.instance, t.agent, t.machineAgent)
        if got != t.expected {
            c.Errorf("unitStatus(%q, %q, %q): Want %q. Got %q.", t.instance, t.agent, t.machineAgent, t.expected, got)
        }
    }
}
// TestAddr: without ELB, Addr falls back to the IP of the app's first unit.
func (s *S) TestAddr(c *gocheck.C) {
    app := testing.NewFakeApp("blue", "who", 1)
    p := JujuProvisioner{}
    addr, err := p.Addr(app)
    c.Assert(err, gocheck.IsNil)
    c.Assert(addr, gocheck.Equals, app.ProvisionUnits()[0].GetIp())
}

// TestAddrWithoutUnits: Addr fails with a descriptive error for an app
// that has no units.
func (s *S) TestAddrWithoutUnits(c *gocheck.C) {
    app := testing.NewFakeApp("squeeze", "who", 0)
    p := JujuProvisioner{}
    addr, err := p.Addr(app)
    c.Assert(addr, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, `App "squeeze" has no units.`)
}
// TestProvisionWithELB checks that provisioning an app with ELB enabled
// creates a load balancer with a non-empty address and enqueues a message
// to register units on it.
func (s *ELBSuite) TestProvisionWithELB(c *gocheck.C) {
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    config.Set("juju:charms-path", "/home/charms")
    defer config.Unset("juju:charms-path")
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    err := p.Provision(app)
    c.Assert(err, gocheck.IsNil)
    lb := p.LoadBalancer()
    defer lb.Destroy(app)
    addr, err := lb.Addr(app)
    c.Assert(err, gocheck.IsNil)
    c.Assert(addr, gocheck.Not(gocheck.Equals), "")
    msg, err := getQueue(queueName).Get(1e9) // wait up to 1s for the message
    c.Assert(err, gocheck.IsNil)
    defer msg.Delete()
    c.Assert(msg.Action, gocheck.Equals, addUnitToLoadBalancer)
    c.Assert(msg.Args, gocheck.DeepEquals, []string{"jimmy"})
}

// TestDestroyWithELB checks that destroying an app tears down its load
// balancer; any pending registration message for the app is consumed,
// otherwise the message is released back to the queue.
func (s *ELBSuite) TestDestroyWithELB(c *gocheck.C) {
    config.Set("juju:charms-path", "/home/charms")
    defer config.Unset("juju:charms-path")
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    err := p.Provision(app)
    c.Assert(err, gocheck.IsNil)
    err = p.Destroy(app)
    c.Assert(err, gocheck.IsNil)
    lb := p.LoadBalancer()
    defer lb.Destroy(app) // sanity
    addr, err := lb.Addr(app)
    c.Assert(addr, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "not found")
    q := getQueue(queueName)
    msg, err := q.Get(1e9)
    c.Assert(err, gocheck.IsNil)
    if msg.Action == addUnitToLoadBalancer && msg.Args[0] == "jimmy" {
        msg.Delete()
    } else {
        q.Release(msg, 0)
    }
}

// TestAddUnitsWithELB checks that adding units enqueues a single message
// naming the app followed by each new unit.
func (s *ELBSuite) TestAddUnitsWithELB(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", addUnitsOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("resist", "rush", 0)
    p := JujuProvisioner{}
    _, err = p.AddUnits(app, 4)
    c.Assert(err, gocheck.IsNil)
    expected := []string{
        "resist", "resist/3", "resist/4",
        "resist/5", "resist/6",
    }
    msg, err := getQueue(queueName).Get(1e9)
    c.Assert(err, gocheck.IsNil)
    defer msg.Delete()
    c.Assert(msg.Action, gocheck.Equals, addUnitToLoadBalancer)
    c.Assert(msg.Args, gocheck.DeepEquals, expected)
}
// TestRemoveUnitWithELB registers four fake EC2 instances on a load
// balancer, removes one unit via the provisioner, and verifies the
// corresponding instance is deregistered from the balancer.
func (s *ELBSuite) TestRemoveUnitWithELB(c *gocheck.C) {
    instIds := make([]string, 4)
    units := make([]provision.Unit, len(instIds))
    for i := 0; i < len(instIds); i++ {
        id := s.server.NewInstance()
        defer s.server.RemoveInstance(id)
        instIds[i] = id
        units[i] = provision.Unit{
            Name:       "radio/" + strconv.Itoa(i),
            InstanceId: id,
        }
    }
    // execut is shared state; guard the swap with execMut.
    fexec := &etesting.FakeExecutor{}
    execMut.Lock()
    execut = fexec
    execMut.Unlock()
    defer func() {
        execMut.Lock()
        defer execMut.Unlock()
        execut = nil
    }()
    app := testing.NewFakeApp("radio", "rush", 4)
    manager := ELBManager{}
    manager.e = s.client
    err := manager.Create(app)
    c.Assert(err, gocheck.IsNil)
    defer manager.Destroy(app)
    err = manager.Register(app, units...)
    c.Assert(err, gocheck.IsNil)
    p := JujuProvisioner{}
    fUnit := testing.FakeUnit{Name: units[0].Name, InstanceId: units[0].InstanceId}
    err = p.removeUnit(app, &fUnit)
    c.Assert(err, gocheck.IsNil)
    resp, err := s.client.DescribeLoadBalancers(app.GetName())
    c.Assert(err, gocheck.IsNil)
    c.Assert(resp.LoadBalancerDescriptions, gocheck.HasLen, 1)
    // One unit was removed, so the balancer keeps len(units)-1 instances.
    c.Assert(resp.LoadBalancerDescriptions[0].Instances, gocheck.HasLen, len(units)-1)
    instance := resp.LoadBalancerDescriptions[0].Instances[0]
    c.Assert(instance.InstanceId, gocheck.Equals, instIds[1])
}
// TestCollectStatusWithELBAndIDChange checks that when a unit's instance id
// changes, CollectStatus updates the stored id and swaps the registration
// on the load balancer (old id out, new id in).
func (s *ELBSuite) TestCollectStatusWithELBAndIDChange(c *gocheck.C) {
    a := testing.NewFakeApp("symfonia", "symfonia", 0)
    p := JujuProvisioner{}
    lb := p.LoadBalancer()
    err := lb.Create(a)
    c.Assert(err, gocheck.IsNil)
    defer lb.Destroy(a)
    id1 := s.server.NewInstance()
    defer s.server.RemoveInstance(id1)
    id2 := s.server.NewInstance()
    defer s.server.RemoveInstance(id2)
    id3 := s.server.NewInstance()
    defer s.server.RemoveInstance(id3)
    conn, collection := p.unitsCollection()
    defer conn.Close()
    err = collection.Insert(instance{UnitName: "symfonia/0", InstanceId: id3})
    c.Assert(err, gocheck.IsNil)
    err = lb.Register(a, provision.Unit{InstanceId: id3}, provision.Unit{InstanceId: id2})
    // Bug fix: this error was previously discarded (overwritten by the next
    // assignment), hiding registration failures from the test.
    c.Assert(err, gocheck.IsNil)
    q := bson.M{"_id": bson.M{"$in": []string{"symfonia/0", "symfonia/1", "symfonia/2", "raise/0"}}}
    defer collection.Remove(q)
    output := strings.Replace(simpleCollectOutput, "i-00004444", id1, 1)
    output = strings.Replace(output, "i-00004445", id2, 1)
    tmpdir, err := commandmocker.Add("juju", output)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    done := make(chan int8)
    go func() {
        // Poll until the background updater stores the new instance id.
        for {
            q := bson.M{"_id": "symfonia/0", "instanceid": id1}
            ct, err := collection.Find(q).Count()
            c.Assert(err, gocheck.IsNil)
            if ct == 1 {
                done <- 1
                return
            }
            runtime.Gosched()
        }
    }()
    select {
    case <-done:
    case <-time.After(5e9): // 5s deadline
        c.Fatal("Did not save the unit after 5 seconds.")
    }
    resp, err := s.client.DescribeLoadBalancers(a.GetName())
    c.Assert(err, gocheck.IsNil)
    c.Assert(resp.LoadBalancerDescriptions, gocheck.HasLen, 1)
    instances := resp.LoadBalancerDescriptions[0].Instances
    c.Assert(instances, gocheck.HasLen, 2)
    c.Assert(instances[0].InstanceId, gocheck.Equals, id2)
    c.Assert(instances[1].InstanceId, gocheck.Equals, id1)
}
// TestAddrWithELB: with a load balancer present, Addr returns the
// balancer's address rather than a unit IP.
func (s *ELBSuite) TestAddrWithELB(c *gocheck.C) {
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    lb := p.LoadBalancer()
    err := lb.Create(app)
    c.Assert(err, gocheck.IsNil)
    defer lb.Destroy(app)
    addr, err := p.Addr(app)
    c.Assert(err, gocheck.IsNil)
    lAddr, err := lb.Addr(app)
    c.Assert(err, gocheck.IsNil)
    c.Assert(addr, gocheck.Equals, lAddr)
}

// TestAddrWithUnknownELB: Addr propagates "not found" when the app has no
// load balancer.
func (s *ELBSuite) TestAddrWithUnknownELB(c *gocheck.C) {
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    addr, err := p.Addr(app)
    c.Assert(addr, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "not found")
}
// provision/juju: fix deploy related test
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package juju
import (
"bytes"
"errors"
"github.com/globocom/commandmocker"
"github.com/globocom/config"
etesting "github.com/globocom/tsuru/exec/testing"
"github.com/globocom/tsuru/provision"
"github.com/globocom/tsuru/repository"
"github.com/globocom/tsuru/testing"
"labix.org/v2/mgo/bson"
"launchpad.net/gocheck"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
// TestShouldBeRegistered checks that the "juju" provisioner is registered
// in the provision registry.
func (s *S) TestShouldBeRegistered(c *gocheck.C) {
    p, err := provision.Get("juju")
    c.Assert(err, gocheck.IsNil)
    c.Assert(p, gocheck.FitsTypeOf, &JujuProvisioner{})
}

// TestELBSupport verifies that elbSupport caches the "juju:use-elb" config
// value per provisioner instance (read once, then memoized).
func (s *S) TestELBSupport(c *gocheck.C) {
    defer config.Unset("juju:use-elb")
    config.Set("juju:use-elb", true)
    p := JujuProvisioner{}
    c.Assert(p.elbSupport(), gocheck.Equals, true)
    config.Set("juju:use-elb", false)
    c.Assert(p.elbSupport(), gocheck.Equals, true) // Read config only once.
    p = JujuProvisioner{}
    c.Assert(p.elbSupport(), gocheck.Equals, false)
    config.Unset("juju:use-elb")
    p = JujuProvisioner{}
    c.Assert(p.elbSupport(), gocheck.Equals, false)
}

// TestUnitsCollection checks that unitsCollection returns the collection
// configured for the suite.
func (s *S) TestUnitsCollection(c *gocheck.C) {
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    c.Assert(collection.Name, gocheck.Equals, s.collName)
}
// TestProvision checks that Provision runs "juju deploy" with the
// configured charms path and then sets the app's repository URL.
func (s *S) TestProvision(c *gocheck.C) {
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    config.Set("juju:charms-path", "/etc/juju/charms")
    defer config.Unset("juju:charms-path")
    config.Set("host", "somehost")
    defer config.Unset("host")
    app := testing.NewFakeApp("trace", "python", 0)
    p := JujuProvisioner{}
    err := p.Provision(app)
    c.Assert(err, gocheck.IsNil)
    args := []string{
        "deploy", "--repository", "/etc/juju/charms", "local:python", "trace",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{
        "set", "trace", "app-repo=" + repository.GetReadOnlyUrl("trace"),
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}

// TestProvisionUndefinedCharmsPath: Provision fails with a clear message
// when "juju:charms-path" is not configured.
func (s *S) TestProvisionUndefinedCharmsPath(c *gocheck.C) {
    config.Unset("juju:charms-path")
    p := JujuProvisioner{}
    err := p.Provision(testing.NewFakeApp("eternity", "sandman", 0))
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, `Setting "juju:charms-path" is not defined.`)
}

// TestProvisionFailure: a failing juju binary produces a *provision.Error
// with juju's output as the reason and the exit status as the cause.
func (s *S) TestProvisionFailure(c *gocheck.C) {
    config.Set("juju:charms-path", "/home/charms")
    defer config.Unset("juju:charms-path")
    tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("trace", "python", 0)
    p := JujuProvisioner{}
    err = p.Provision(app)
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, "juju failed")
    c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 1")
}
// TestRestart checks that Restart runs the restart hook on the unit's
// machine via "juju ssh".
func (s *S) TestRestart(c *gocheck.C) {
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("cribcaged", "python", 1)
    p := JujuProvisioner{}
    err := p.Restart(app)
    c.Assert(err, gocheck.IsNil)
    args := []string{
        "ssh", "-o", "StrictHostKeyChecking no", "-q", "1", "/var/lib/tsuru/hooks/restart",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}

// TestRestartFailure: a failing ssh command is wrapped in *provision.Error.
func (s *S) TestRestartFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed to run command", 25)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("cribcaged", "python", 1)
    p := JujuProvisioner{}
    err = p.Restart(app)
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, "juju failed to run command\n")
    c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 25")
}

// TestDeploy checks that Deploy succeeds with a stubbed juju binary and a
// configured unit repository.
func (s *S) TestDeploy(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", "")
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    config.Set("git:unit-repo", "test/dir")
    defer func() {
        config.Unset("git:unit-repo")
    }()
    app := testing.NewFakeApp("cribcaged", "python", 1)
    w := &bytes.Buffer{}
    p := JujuProvisioner{}
    err = p.Deploy(app, w)
    c.Assert(err, gocheck.IsNil)
}
// TestDestroy checks that Destroy runs "juju destroy-service", terminates
// each unit's machine, and purges the stored unit documents.
func (s *S) TestDestroy(c *gocheck.C) {
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("cribcaged", "python", 3)
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    err := collection.Insert(
        instance{UnitName: "cribcaged/0"},
        instance{UnitName: "cribcaged/1"},
        instance{UnitName: "cribcaged/2"},
    )
    c.Assert(err, gocheck.IsNil)
    err = p.Destroy(app)
    c.Assert(err, gocheck.IsNil)
    args := []string{"destroy-service", "cribcaged"}
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{"terminate-machine", "1"}
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{"terminate-machine", "2"}
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{"terminate-machine", "3"}
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    // The unit documents must be gone after Destroy.
    n, err := collection.Find(bson.M{
        "_id": bson.M{
            "$in": []string{"cribcaged/0", "cribcaged/1", "cribcaged/2"},
        },
    }).Count()
    c.Assert(err, gocheck.IsNil)
    c.Assert(n, gocheck.Equals, 0)
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}

// TestDestroyFailure: a failing juju binary is wrapped in *provision.Error.
func (s *S) TestDestroyFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed to destroy the machine", 25)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("idioglossia", "static", 1)
    p := JujuProvisioner{}
    err = p.Destroy(app)
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, "juju failed to destroy the machine")
    c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 25")
}
// TestAddUnits checks that AddUnits runs "juju add-unit --num-units N",
// returns the new unit names, and (without ELB) enqueues no message.
func (s *S) TestAddUnits(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", addUnitsOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("resist", "rush", 0)
    p := JujuProvisioner{}
    units, err := p.AddUnits(app, 4)
    c.Assert(err, gocheck.IsNil)
    c.Assert(units, gocheck.HasLen, 4)
    names := make([]string, len(units))
    for i, unit := range units {
        names[i] = unit.Name
    }
    expected := []string{"resist/3", "resist/4", "resist/5", "resist/6"}
    c.Assert(names, gocheck.DeepEquals, expected)
    args := []string{
        "add-unit", "resist", "--num-units", "4",
    }
    c.Assert(commandmocker.Parameters(tmpdir), gocheck.DeepEquals, args)
    // No ELB: the queue should stay empty, so Get must fail.
    _, err = getQueue(queueName).Get(1e6)
    c.Assert(err, gocheck.NotNil)
}

// TestAddZeroUnits: asking for zero units is rejected up front.
func (s *S) TestAddZeroUnits(c *gocheck.C) {
    p := JujuProvisioner{}
    units, err := p.AddUnits(nil, 0)
    c.Assert(units, gocheck.IsNil)
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "Cannot add zero units.")
}

// TestAddUnitsFailure: a failing juju binary is wrapped in *provision.Error.
func (s *S) TestAddUnitsFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("headlong", "rush", 1)
    p := JujuProvisioner{}
    units, err := p.AddUnits(app, 1)
    c.Assert(units, gocheck.IsNil)
    c.Assert(err, gocheck.NotNil)
    e, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(e.Reason, gocheck.Equals, "juju failed")
    c.Assert(e.Err.Error(), gocheck.Equals, "exit status 1")
}
// TestRemoveUnit checks that RemoveUnit runs "juju remove-unit" and
// "juju terminate-machine" (asynchronously) and deletes the stored unit
// document.
func (s *S) TestRemoveUnit(c *gocheck.C) {
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("two", "rush", 3)
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    err := collection.Insert(instance{UnitName: "two/2", InstanceId: "i-00000439"})
    c.Assert(err, gocheck.IsNil)
    err = p.RemoveUnit(app, "two/2")
    c.Assert(err, gocheck.IsNil)
    ran := make(chan bool, 1)
    go func() {
        for {
            args1 := []string{"remove-unit", "two/2"}
            args2 := []string{"terminate-machine", "3"}
            if fexec.ExecutedCmd("juju", args1) && fexec.ExecutedCmd("juju", args2) {
                ran <- true
                // Bug fix: without this return the loop kept spinning and
                // blocked on a second send to the 1-buffered channel,
                // leaking the goroutine for the rest of the test run.
                return
            }
            runtime.Gosched()
        }
    }()
    select {
    case <-ran:
    case <-time.After(2e9): // 2s deadline
        c.Errorf("Did not run terminate-machine command after 2 seconds.")
    }
    n, err := collection.Find(bson.M{"_id": "two/2"}).Count()
    c.Assert(err, gocheck.IsNil)
    c.Assert(n, gocheck.Equals, 0)
}
// TestRemoveUnitUnknownByJuju: juju reporting "Service unit ... was not
// found" is tolerated — RemoveUnit still succeeds.
func (s *S) TestRemoveUnitUnknownByJuju(c *gocheck.C) {
    output := `013-01-11 20:02:07,883 INFO Connecting to environment...
2013-01-11 20:02:10,147 INFO Connected to environment.
2013-01-11 20:02:10,160 ERROR Service unit 'two/2' was not found`
    tmpdir, err := commandmocker.Error("juju", output, 1)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("two", "rush", 3)
    p := JujuProvisioner{}
    err = p.RemoveUnit(app, "two/2")
    c.Assert(err, gocheck.IsNil)
    c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
}

// TestRemoveUnknownUnit: removing a unit the app does not own fails with a
// descriptive error.
func (s *S) TestRemoveUnknownUnit(c *gocheck.C) {
    app := testing.NewFakeApp("tears", "rush", 2)
    p := JujuProvisioner{}
    err := p.RemoveUnit(app, "tears/2")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, `App "tears" does not have a unit named "tears/2".`)
}

// TestRemoveUnitFailure: other juju failures are wrapped in *provision.Error.
func (s *S) TestRemoveUnitFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed", 66)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("something", "rush", 1)
    p := JujuProvisioner{}
    err = p.RemoveUnit(app, "something/0")
    c.Assert(err, gocheck.NotNil)
    e, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(e.Reason, gocheck.Equals, "juju failed")
    c.Assert(e.Err.Error(), gocheck.Equals, "exit status 66")
}
// TestInstallDepsRunRelatedHook ensures InstallDeps triggers the
// dependencies hook on the application's units.
func (s *S) TestInstallDepsRunRelatedHook(c *gocheck.C) {
    var output bytes.Buffer
    fakeApp := testing.NewFakeApp("myapp", "python", 0)
    provisioner := &JujuProvisioner{}
    err := provisioner.InstallDeps(fakeApp, &output)
    c.Assert(err, gocheck.IsNil)
    wantCmds := []string{"ran /var/lib/tsuru/hooks/dependencies"}
    c.Assert(fakeApp.Commands, gocheck.DeepEquals, wantCmds)
}
// TestExecutedCmd checks that ExecuteCommand sshs into every unit's
// machine and labels each unit's output in the writer.
func (s *S) TestExecutedCmd(c *gocheck.C) {
    var buf bytes.Buffer
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("almah", "static", 2)
    p := JujuProvisioner{}
    err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lh")
    c.Assert(err, gocheck.IsNil)
    bufOutput := `Output from unit "almah/0":
Output from unit "almah/1":
`
    args := []string{
        "ssh",
        "-o",
        "StrictHostKeyChecking no",
        "-q",
        "1",
        "ls",
        "-lh",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{
        "ssh",
        "-o",
        "StrictHostKeyChecking no",
        "-q",
        "2",
        "ls",
        "-lh",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    c.Assert(buf.String(), gocheck.Equals, bufOutput)
}

// TestExecutedCmdFailure: the underlying command's error and output are
// propagated to the caller.
func (s *S) TestExecutedCmdFailure(c *gocheck.C) {
    var buf bytes.Buffer
    tmpdir, err := commandmocker.Error("juju", "failed", 2)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("frases", "static", 1)
    p := JujuProvisioner{}
    err = p.ExecuteCommand(&buf, &buf, app, "ls", "-l")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "exit status 2")
    c.Assert(buf.String(), gocheck.Equals, "failed\n")
}

// TestExecutedCmdOneUnit: with a single unit, the command runs once on
// that unit's machine.
func (s *S) TestExecutedCmdOneUnit(c *gocheck.C) {
    var buf bytes.Buffer
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("almah", "static", 1)
    p := JujuProvisioner{}
    err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lh")
    c.Assert(err, gocheck.IsNil)
    args := []string{
        "ssh",
        "-o",
        "StrictHostKeyChecking no",
        "-q",
        "1",
        "ls",
        "-lh",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
}

// TestExecutedCmdUnitDown: a unit marked as down is skipped (with an
// explanatory message in the output) while the others still run.
func (s *S) TestExecutedCmdUnitDown(c *gocheck.C) {
    var buf bytes.Buffer
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("almah", "static", 3)
    app.SetUnitStatus(provision.StatusDown, 1)
    p := JujuProvisioner{}
    err := p.ExecuteCommand(&buf, &buf, app, "ls", "-lha")
    c.Assert(err, gocheck.IsNil)
    args := []string{
        "ssh",
        "-o",
        "StrictHostKeyChecking no",
        "-q",
        "1",
        "ls",
        "-lha",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    args = []string{
        "ssh",
        "-o",
        "StrictHostKeyChecking no",
        "-q",
        "3",
        "ls",
        "-lha",
    }
    c.Assert(fexec.ExecutedCmd("juju", args), gocheck.Equals, true)
    bufOutput := `Output from unit "almah/0":
Output from unit "almah/1":
Unit state is "down", it must be "started" for running commands.
Output from unit "almah/2":
`
    c.Assert(buf.String(), gocheck.Equals, bufOutput)
}
// TestSaveBootstrapMachine checks that saveBootstrapMachine persists the
// machine document in the bootstrap collection.
func (s *S) TestSaveBootstrapMachine(c *gocheck.C) {
    p := JujuProvisioner{}
    m := machine{
        AgentState:    "state",
        IpAddress:     "ip",
        InstanceId:    "id",
        InstanceState: "state",
    }
    p.saveBootstrapMachine(m)
    conn, collection := p.bootstrapCollection()
    defer conn.Close()
    defer collection.Remove(m)
    var mach machine
    collection.Find(nil).One(&mach)
    c.Assert(mach, gocheck.DeepEquals, m)
}

// TestCollectStatusShouldNotAddBootstraTwice checks that running
// CollectStatus twice stores the bootstrap machine only once.
// NOTE(review): "Bootstra" in the name looks like a typo for "Bootstrap";
// left unchanged to keep the test name stable.
func (s *S) TestCollectStatusShouldNotAddBootstraTwice(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", collectOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    conn, collection := p.bootstrapCollection()
    defer conn.Close()
    l, err := collection.Find(nil).Count()
    c.Assert(err, gocheck.IsNil)
    c.Assert(l, gocheck.Equals, 1)
}
// TestCollectStatus checks that CollectStatus returns the units parsed
// from juju's output and asynchronously persists them in the units
// collection.
func (s *S) TestCollectStatus(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", collectOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "i-00000439"})
    c.Assert(err, gocheck.IsNil)
    defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
    expected := []provision.Unit{
        {
            Name:       "as_i_rise/0",
            AppName:    "as_i_rise",
            Type:       "django",
            Machine:    105,
            InstanceId: "i-00000439",
            Ip:         "10.10.10.163",
            Status:     provision.StatusStarted,
        },
        {
            Name:       "the_infanta/0",
            AppName:    "the_infanta",
            Type:       "gunicorn",
            Machine:    107,
            InstanceId: "i-0000043e",
            Ip:         "10.10.10.168",
            Status:     provision.StatusInstalling,
        },
    }
    units, err := p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    cp := make([]provision.Unit, len(units))
    copy(cp, units)
    // Unit ordering is not guaranteed; normalize before the deep compare.
    if cp[0].Type == "gunicorn" {
        cp[0], cp[1] = cp[1], cp[0]
    }
    c.Assert(cp, gocheck.DeepEquals, expected)
    c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
    done := make(chan int8)
    go func() {
        for {
            ct, err := collection.Find(nil).Count()
            c.Assert(err, gocheck.IsNil)
            if ct == 2 {
                done <- 1
                return
            }
            // Bug fix: every sibling polling loop yields the processor;
            // this one spun hot, which could starve the updater goroutine
            // under GOMAXPROCS=1.
            runtime.Gosched()
        }
    }()
    select {
    case <-done:
    case <-time.After(5e9): // 5s deadline
        c.Fatal("Did not save the unit after 5 seconds.")
    }
    var instances []instance
    err = collection.Find(nil).Sort("_id").All(&instances)
    c.Assert(err, gocheck.IsNil)
    c.Assert(instances, gocheck.HasLen, 2)
    c.Assert(instances[0].UnitName, gocheck.Equals, "as_i_rise/0")
    c.Assert(instances[0].InstanceId, gocheck.Equals, "i-00000439")
    c.Assert(instances[1].UnitName, gocheck.Equals, "the_infanta/0")
    c.Assert(instances[1].InstanceId, gocheck.Equals, "i-0000043e")
    // NOTE(review): this queries the units collection into a machine value;
    // it only asserts the query succeeds — presumably a sanity check.
    var b machine
    err = collection.Find(nil).One(&b)
    c.Assert(err, gocheck.IsNil)
}
// TestCollectStatusDirtyOutput verifies that CollectStatus parses juju
// status output polluted with extra noise (dirtyCollectOutput) and still
// reports the expected units.
func (s *S) TestCollectStatusDirtyOutput(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", dirtyCollectOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    expected := []provision.Unit{
        {
            Name:       "as_i_rise/0",
            AppName:    "as_i_rise",
            Type:       "django",
            Machine:    105,
            InstanceId: "i-00000439",
            Ip:         "10.10.10.163",
            Status:     provision.StatusStarted,
        },
        {
            Name:       "the_infanta/1",
            AppName:    "the_infanta",
            Type:       "gunicorn",
            Machine:    107,
            InstanceId: "i-0000043e",
            Ip:         "10.10.10.168",
            Status:     provision.StatusInstalling,
        },
    }
    p := JujuProvisioner{}
    units, err := p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    cp := make([]provision.Unit, len(units))
    copy(cp, units)
    // Unit ordering is not guaranteed; normalize before the deep compare.
    if cp[0].Type == "gunicorn" {
        cp[0], cp[1] = cp[1], cp[0]
    }
    c.Assert(cp, gocheck.DeepEquals, expected)
    c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
    var wg sync.WaitGroup
    wg.Add(1)
    conn, collection := p.unitsCollection()
    defer conn.Close()
    go func() {
        // Wait until both units have been persisted, then clean them up.
        q := bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/1"}}}
        for {
            if n, _ := collection.Find(q).Count(); n == 2 {
                break
            }
            runtime.Gosched()
        }
        collection.Remove(q)
        wg.Done()
    }()
    wg.Wait()
}
// TestCollectStatusIDChangeDisabledELB checks that CollectStatus updates a
// stored instance id that diverged from the one reported by juju, with ELB
// support disabled.
func (s *S) TestCollectStatusIDChangeDisabledELB(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", collectOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    // Seed the collection with a stale instance id.
    err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "i-00000239"})
    c.Assert(err, gocheck.IsNil)
    defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    done := make(chan int8)
    go func() {
        // Poll until the background updater rewrites the instance id.
        for {
            q := bson.M{"_id": "as_i_rise/0", "instanceid": "i-00000439"}
            ct, err := collection.Find(q).Count()
            c.Assert(err, gocheck.IsNil)
            if ct == 1 {
                done <- 1
                return
            }
            runtime.Gosched()
        }
    }()
    select {
    case <-done:
    case <-time.After(5e9): // 5s deadline
        c.Fatal("Did not update the unit after 5 seconds.")
    }
}

// TestCollectStatusIDChangeFromPending checks that a unit stored with the
// placeholder id "pending" gets its real instance id after CollectStatus.
func (s *S) TestCollectStatusIDChangeFromPending(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", collectOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    conn, collection := p.unitsCollection()
    defer conn.Close()
    err = collection.Insert(instance{UnitName: "as_i_rise/0", InstanceId: "pending"})
    c.Assert(err, gocheck.IsNil)
    defer collection.Remove(bson.M{"_id": bson.M{"$in": []string{"as_i_rise/0", "the_infanta/0"}}})
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    done := make(chan int8)
    go func() {
        for {
            q := bson.M{"_id": "as_i_rise/0", "instanceid": "i-00000439"}
            ct, err := collection.Find(q).Count()
            c.Assert(err, gocheck.IsNil)
            if ct == 1 {
                done <- 1
                return
            }
            runtime.Gosched()
        }
    }()
    select {
    case <-done:
    case <-time.After(5e9): // 5s deadline
        c.Fatal("Did not update the unit after 5 seconds.")
    }
}
// TestCollectStatusFailure verifies that a failing juju invocation surfaces
// as a *provision.Error carrying juju's output and the exit status.
func (s *S) TestCollectStatusFailure(c *gocheck.C) {
    tmpdir, err := commandmocker.Error("juju", "juju failed", 1)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, "juju failed")
    c.Assert(pErr.Err.Error(), gocheck.Equals, "exit status 1")
    c.Assert(commandmocker.Ran(tmpdir), gocheck.Equals, true)
}

// TestCollectStatusInvalidYAML verifies that malformed "juju status" output
// is reported as a *provision.Error wrapping a YAML parse error.
func (s *S) TestCollectStatusInvalidYAML(c *gocheck.C) {
    tmpdir, err := commandmocker.Add("juju", "local: somewhere::")
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    p := JujuProvisioner{}
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.NotNil)
    pErr, ok := err.(*provision.Error)
    c.Assert(ok, gocheck.Equals, true)
    c.Assert(pErr.Reason, gocheck.Equals, `"juju status" returned invalid data: local: somewhere::`)
    c.Assert(pErr.Err, gocheck.ErrorMatches, `^YAML error:.*$`)
}
// TestLoadBalancerEnabledElb: LoadBalancer returns a non-nil manager when
// the provisioner's cached elb flag is true.
func (s *S) TestLoadBalancerEnabledElb(c *gocheck.C) {
    p := JujuProvisioner{}
    p.elb = new(bool)
    *p.elb = true
    lb := p.LoadBalancer()
    c.Assert(lb, gocheck.NotNil)
}

// TestLoadBalancerDisabledElb: LoadBalancer returns nil when the cached
// elb flag is false (new(bool) zero value).
func (s *S) TestLoadBalancerDisabledElb(c *gocheck.C) {
    p := JujuProvisioner{}
    p.elb = new(bool)
    lb := p.LoadBalancer()
    c.Assert(lb, gocheck.IsNil)
}
// TestExecWithTimeout exercises execWithTimeout with three table-driven
// scenarios: a command that exceeds its timeout, one that finishes in time,
// and one that exits non-zero.
func (s *S) TestExecWithTimeout(c *gocheck.C) {
    var data = []struct {
        cmd     []string
        timeout time.Duration
        out     string
        err     error
    }{
        {
            cmd:     []string{"sleep", "2"},
            timeout: 1e6, // 1ms, forces the timeout path
            out:     "",
            err:     errors.New(`"sleep 2" ran for more than 1ms.`),
        },
        {
            cmd:     []string{"python", "-c", "import time; time.sleep(1); print('hello world!')"},
            timeout: 5e9,
            out:     "hello world!\n",
            err:     nil,
        },
        {
            cmd:     []string{"python", "-c", "import sys; print('hello world!'); exit(1)"},
            timeout: 5e9,
            out:     "hello world!\n",
            err:     errors.New("exit status 1"),
        },
    }
    for _, d := range data {
        out, err := execWithTimeout(d.timeout, d.cmd[0], d.cmd[1:]...)
        if string(out) != d.out {
            c.Errorf("Output. Want %q. Got %q.", d.out, out)
        }
        if d.err == nil && err != nil {
            c.Errorf("Error. Want %v. Got %v.", d.err, err)
        } else if d.err != nil && err == nil {
            // Bug fix: the original chained err.Error() without checking err
            // for nil, panicking when an error was expected but none occurred.
            c.Errorf("Error. Want %q. Got <nil>.", d.err.Error())
        } else if d.err != nil && err.Error() != d.err.Error() {
            c.Errorf("Error message. Want %q. Got %q.", d.err.Error(), err.Error())
        }
    }
}
// TestUnitStatus drives unitStatus through a table of (instance state,
// agent state, machine agent state) triples and checks the derived
// provision.Status for each combination.
func (s *S) TestUnitStatus(c *gocheck.C) {
    var tests = []struct {
        instance     string
        agent        string
        machineAgent string
        expected     provision.Status
    }{
        {"something", "nothing", "wut", provision.StatusPending},
        {"", "", "", provision.StatusCreating},
        {"", "", "pending", provision.StatusCreating},
        {"", "", "not-started", provision.StatusCreating},
        {"pending", "", "", provision.StatusCreating},
        {"", "not-started", "running", provision.StatusCreating},
        {"error", "install-error", "start-error", provision.StatusError},
        {"started", "start-error", "running", provision.StatusError},
        {"started", "charm-upgrade-error", "running", provision.StatusError},
        {"running", "pending", "running", provision.StatusInstalling},
        {"running", "started", "running", provision.StatusStarted},
        {"running", "down", "running", provision.StatusDown},
    }
    for _, t := range tests {
        got := unitStatus(t.instance, t.agent, t.machineAgent)
        if got != t.expected {
            c.Errorf("unitStatus(%q, %q, %q): Want %q. Got %q.", t.instance, t.agent, t.machineAgent, t.expected, got)
        }
    }
}
// TestAddr: without ELB, Addr falls back to the IP of the app's first unit.
func (s *S) TestAddr(c *gocheck.C) {
    app := testing.NewFakeApp("blue", "who", 1)
    p := JujuProvisioner{}
    addr, err := p.Addr(app)
    c.Assert(err, gocheck.IsNil)
    c.Assert(addr, gocheck.Equals, app.ProvisionUnits()[0].GetIp())
}

// TestAddrWithoutUnits: Addr fails with a descriptive error for an app
// that has no units.
func (s *S) TestAddrWithoutUnits(c *gocheck.C) {
    app := testing.NewFakeApp("squeeze", "who", 0)
    p := JujuProvisioner{}
    addr, err := p.Addr(app)
    c.Assert(addr, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, `App "squeeze" has no units.`)
}
// TestProvisionWithELB checks that provisioning an app also creates a load
// balancer and enqueues a message to register the app's units with it.
func (s *ELBSuite) TestProvisionWithELB(c *gocheck.C) {
    // Stub out command execution so no real external command runs.
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    config.Set("juju:charms-path", "/home/charms")
    defer config.Unset("juju:charms-path")
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    err := p.Provision(app)
    c.Assert(err, gocheck.IsNil)
    // After Provision the load balancer must exist and have an address.
    lb := p.LoadBalancer()
    defer lb.Destroy(app)
    addr, err := lb.Addr(app)
    c.Assert(err, gocheck.IsNil)
    c.Assert(addr, gocheck.Not(gocheck.Equals), "")
    // Provision must also enqueue a registration message for the app
    // (1e9 ns = 1 second queue timeout).
    msg, err := getQueue(queueName).Get(1e9)
    c.Assert(err, gocheck.IsNil)
    defer msg.Delete()
    c.Assert(msg.Action, gocheck.Equals, addUnitToLoadBalancer)
    c.Assert(msg.Args, gocheck.DeepEquals, []string{"jimmy"})
}
// TestDestroyWithELB checks that destroying a provisioned app also removes
// its load balancer.
func (s *ELBSuite) TestDestroyWithELB(c *gocheck.C) {
    config.Set("juju:charms-path", "/home/charms")
    defer config.Unset("juju:charms-path")
    // Stub out command execution so no real external command runs.
    fexec := &etesting.FakeExecutor{}
    execut = fexec
    defer func() {
        execut = nil
    }()
    app := testing.NewFakeApp("jimmy", "who", 0)
    p := JujuProvisioner{}
    err := p.Provision(app)
    c.Assert(err, gocheck.IsNil)
    err = p.Destroy(app)
    c.Assert(err, gocheck.IsNil)
    // After Destroy the load balancer must be gone.
    lb := p.LoadBalancer()
    defer lb.Destroy(app) // sanity
    addr, err := lb.Addr(app)
    c.Assert(addr, gocheck.Equals, "")
    c.Assert(err, gocheck.NotNil)
    c.Assert(err.Error(), gocheck.Equals, "not found")
    // Provision enqueued a registration message; consume it when it is ours,
    // otherwise put it back for whichever test owns it (1e9 ns = 1s timeout).
    q := getQueue(queueName)
    msg, err := q.Get(1e9)
    c.Assert(err, gocheck.IsNil)
    if msg.Action == addUnitToLoadBalancer && msg.Args[0] == "jimmy" {
        msg.Delete()
    } else {
        q.Release(msg, 0)
    }
}
// TestAddUnitsWithELB checks that adding units enqueues a message asking for
// the new units to be registered with the app's load balancer.
func (s *ELBSuite) TestAddUnitsWithELB(c *gocheck.C) {
    // Mock the "juju" command with canned add-unit output.
    tmpdir, err := commandmocker.Add("juju", addUnitsOutput)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    app := testing.NewFakeApp("resist", "rush", 0)
    p := JujuProvisioner{}
    _, err = p.AddUnits(app, 4)
    c.Assert(err, gocheck.IsNil)
    // The message carries the app name followed by the new unit names.
    expected := []string{
        "resist", "resist/3", "resist/4",
        "resist/5", "resist/6",
    }
    msg, err := getQueue(queueName).Get(1e9) // 1e9 ns = 1s timeout
    c.Assert(err, gocheck.IsNil)
    defer msg.Delete()
    c.Assert(msg.Action, gocheck.Equals, addUnitToLoadBalancer)
    c.Assert(msg.Args, gocheck.DeepEquals, expected)
}
// TestRemoveUnitWithELB checks that removing a unit deregisters its instance
// from the app's load balancer while leaving the other instances registered.
func (s *ELBSuite) TestRemoveUnitWithELB(c *gocheck.C) {
    // Create four fake instances and matching provision units.
    instIds := make([]string, 4)
    units := make([]provision.Unit, len(instIds))
    for i := 0; i < len(instIds); i++ {
        id := s.server.NewInstance()
        defer s.server.RemoveInstance(id)
        instIds[i] = id
        units[i] = provision.Unit{
            Name:       "radio/" + strconv.Itoa(i),
            InstanceId: id,
        }
    }
    // Stub out command execution, guarding the shared global with its mutex.
    fexec := &etesting.FakeExecutor{}
    execMut.Lock()
    execut = fexec
    execMut.Unlock()
    defer func() {
        execMut.Lock()
        defer execMut.Unlock()
        execut = nil
    }()
    app := testing.NewFakeApp("radio", "rush", 4)
    manager := ELBManager{}
    manager.e = s.client
    err := manager.Create(app)
    c.Assert(err, gocheck.IsNil)
    defer manager.Destroy(app)
    err = manager.Register(app, units...)
    c.Assert(err, gocheck.IsNil)
    p := JujuProvisioner{}
    // Remove the first unit; only its instance should leave the balancer.
    fUnit := testing.FakeUnit{Name: units[0].Name, InstanceId: units[0].InstanceId}
    err = p.removeUnit(app, &fUnit)
    c.Assert(err, gocheck.IsNil)
    resp, err := s.client.DescribeLoadBalancers(app.GetName())
    c.Assert(err, gocheck.IsNil)
    c.Assert(resp.LoadBalancerDescriptions, gocheck.HasLen, 1)
    c.Assert(resp.LoadBalancerDescriptions[0].Instances, gocheck.HasLen, len(units)-1)
    instance := resp.LoadBalancerDescriptions[0].Instances[0]
    c.Assert(instance.InstanceId, gocheck.Equals, instIds[1])
}
// TestCollectStatusWithELBAndIDChange checks that CollectStatus updates the
// stored instance id of a unit when juju reports a new one, and that the load
// balancer ends up registered with the new instance instead of the old one.
func (s *ELBSuite) TestCollectStatusWithELBAndIDChange(c *gocheck.C) {
    a := testing.NewFakeApp("symfonia", "symfonia", 0)
    p := JujuProvisioner{}
    lb := p.LoadBalancer()
    err := lb.Create(a)
    c.Assert(err, gocheck.IsNil)
    defer lb.Destroy(a)
    id1 := s.server.NewInstance()
    defer s.server.RemoveInstance(id1)
    id2 := s.server.NewInstance()
    defer s.server.RemoveInstance(id2)
    id3 := s.server.NewInstance()
    defer s.server.RemoveInstance(id3)
    conn, collection := p.unitsCollection()
    defer conn.Close()
    // The unit starts out associated with id3; the mocked juju output below
    // reports it as id1, forcing an id change.
    err = collection.Insert(instance{UnitName: "symfonia/0", InstanceId: id3})
    c.Assert(err, gocheck.IsNil)
    err = lb.Register(a, provision.Unit{InstanceId: id3}, provision.Unit{InstanceId: id2})
    c.Assert(err, gocheck.IsNil) // previously ignored; Register can fail
    q := bson.M{"_id": bson.M{"$in": []string{"symfonia/0", "symfonia/1", "symfonia/2", "raise/0"}}}
    defer collection.Remove(q)
    output := strings.Replace(simpleCollectOutput, "i-00004444", id1, 1)
    output = strings.Replace(output, "i-00004445", id2, 1)
    tmpdir, err := commandmocker.Add("juju", output)
    c.Assert(err, gocheck.IsNil)
    defer commandmocker.Remove(tmpdir)
    _, err = p.CollectStatus()
    c.Assert(err, gocheck.IsNil)
    // The id swap happens asynchronously; poll the collection until the unit
    // shows up with the new id (id1), or give up after five seconds.
    done := make(chan int8)
    go func() {
        for {
            q := bson.M{"_id": "symfonia/0", "instanceid": id1}
            ct, err := collection.Find(q).Count()
            c.Assert(err, gocheck.IsNil)
            if ct == 1 {
                done <- 1
                return
            }
            runtime.Gosched()
        }
    }()
    select {
    case <-done:
    case <-time.After(5e9):
        c.Fatal("Did not save the unit after 5 seconds.")
    }
    // The balancer should now know id2 (kept) and id1 (replacing id3).
    resp, err := s.client.DescribeLoadBalancers(a.GetName())
    c.Assert(err, gocheck.IsNil)
    c.Assert(resp.LoadBalancerDescriptions, gocheck.HasLen, 1)
    instances := resp.LoadBalancerDescriptions[0].Instances
    c.Assert(instances, gocheck.HasLen, 2)
    c.Assert(instances[0].InstanceId, gocheck.Equals, id2)
    c.Assert(instances[1].InstanceId, gocheck.Equals, id1)
}
// TestAddrWithELB checks that Addr returns the load balancer address when the
// app has an associated balancer.
func (s *ELBSuite) TestAddrWithELB(c *gocheck.C) {
    fakeApp := testing.NewFakeApp("jimmy", "who", 0)
    var provisioner JujuProvisioner
    balancer := provisioner.LoadBalancer()
    err := balancer.Create(fakeApp)
    c.Assert(err, gocheck.IsNil)
    defer balancer.Destroy(fakeApp)
    gotAddr, err := provisioner.Addr(fakeApp)
    c.Assert(err, gocheck.IsNil)
    wantAddr, err := balancer.Addr(fakeApp)
    c.Assert(err, gocheck.IsNil)
    c.Assert(gotAddr, gocheck.Equals, wantAddr)
}
// TestAddrWithUnknownELB checks that Addr fails with "not found" when the app
// has no load balancer.
func (s *ELBSuite) TestAddrWithUnknownELB(c *gocheck.C) {
    fakeApp := testing.NewFakeApp("jimmy", "who", 0)
    var provisioner JujuProvisioner
    address, err := provisioner.Addr(fakeApp)
    c.Assert(err, gocheck.NotNil)
    c.Assert(address, gocheck.Equals, "")
    c.Assert(err.Error(), gocheck.Equals, "not found")
}
|
// Copyright 2016 The Gosl Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fun
import (
"math"
"math/cmplx"
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/io"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/plt"
"github.com/cpmech/gosl/utl"
)
// Smoothing kinds
var (

    // SmoNoneKind specifies no smoothing
    SmoNoneKind = io.NewEnum("None", "fun.smoothing", "L", "No smoothing")

    // SmoLanczosKind specifies the Lanczos (sinc) smoothing kind
    SmoLanczosKind = io.NewEnum("Lanczos", "fun.smoothing", "L", "Lanczos (sinc) smoothing kind")

    // SmoCesaroKind specifies the Cesaro smoothing kind
    // (description fixed: was copy-pasted from the Lanczos entry)
    SmoCesaroKind = io.NewEnum("Cesaro", "fun.smoothing", "L", "Cesaro smoothing kind")

    // SmoRcosKind specifies the raised cosine smoothing kind
    // (description fixed: was copy-pasted from the Lanczos entry)
    SmoRcosKind = io.NewEnum("Rcos", "fun.smoothing", "L", "Raised cosine smoothing kind")
)
// FourierInterp performs interpolation using a truncated Fourier series
//
//               N/2 - 1
//                ————          +i k X[j]
//    f(x[j]) =   \     A[k] ⋅ e              with X[j] = 2 π j / N
//                /
//                ————
//               k = -N/2
//
//    x ϵ [0, 2π]
//
// Equation (2.1.27) of [1]. Note that f=u in [1] and A[k] is the tilde(u[k]) of [1]
//
// Reference:
// [1] Canuto C, Hussaini MY, Quarteroni A, Zang TA (2006) Spectral Methods: Fundamentals in
//     Single Domains. Springer. 563p
//
type FourierInterp struct {
    N int        // number of terms; must be even (ideally a power of 2 for the FFT)
    X la.Vector  // grid point coordinates == 2⋅π⋅j/N
    K la.Vector  // k values computed from j such that j = 0...N-1 ⇒ k = -N/2...N/2-1
    A la.VectorC // coefficients for interpolation, computed by CalcA (FFT)
    S la.VectorC // smoothing coefficients σ(k); all ones for SmoNoneKind
}
// NewFourierInterp allocates a new FourierInterp object
//
//               N - 1
//           1   ————           -i k X[j]
//    A[k] = ———  \    f(x[j]) ⋅ e            with X[j] = 2 π j / N
//           N    /
//               ————
//              j = 0
//
//   INPUT:
//     N         -- number of terms. must be even (only evenness is enforced;
//                  ideally a power of 2, i.e. N = 2ⁿ, so the FFT is fast)
//     smoothing -- type of smoothing: use SmoNoneKind for no smoothing
//
//   NOTE: (1) x ϵ [0, 2π]
//         (2) remember to call CalcA to calculate coefficients A!
//
// Equation (2.1.25) of [1]. Note that f=u in [1] and A[k] is the tilde(u[k]) of [1]
//
// Reference:
// [1] Canuto C, Hussaini MY, Quarteroni A, Zang TA (2006) Spectral Methods: Fundamentals in
//     Single Domains. Springer. 563p
//
func NewFourierInterp(N int, smoothing io.Enum) (o *FourierInterp, err error) {

    // check: an odd N cannot be split into the symmetric k = -N/2 ... N/2-1 range
    if N%2 != 0 {
        err = chk.Err("N must be even. N=%d is invalid\n", N)
        return
    }

    // allocate
    o = new(FourierInterp)
    o.N = N
    o.X = make([]float64, o.N)
    o.K = make([]float64, o.N)
    o.A = make([]complex128, o.N)
    o.S = make([]complex128, o.N)

    // point coordinates and k values (see CalcK for the j ⇒ k mapping)
    n := float64(o.N)
    for j := 0; j < o.N; j++ {
        o.X[j] = 2.0 * math.Pi * float64(j) / n
        o.K[j] = o.CalcK(j)
    }

    // compute smoothing coefficients; the default σ ≡ 1 means no smoothing
    σ := func(k float64) float64 { return 1.0 }
    switch smoothing {
    case SmoLanczosKind:
        σ = func(k float64) float64 { return Sinc(2 * k * π / n) }
    case SmoRcosKind:
        σ = func(k float64) float64 { return (1.0 + math.Cos(2*k*π/n)) / 2.0 }
    case SmoCesaroKind:
        σ = func(k float64) float64 { return 1.0 - math.Abs(k)/(1.0+n/2.0) }
    }
    for j := 0; j < o.N; j++ {
        o.S[j] = complex(σ(o.K[j]), 0)
    }
    return
}
// CalcA calculates the coefficients A of the interpolation using FFT
//   rule32 -- uses 3/2-rule to remove alias error (padding method)
//
//   NOTE: by using the 3/2-rule, the interpolatory property is not exact; i.e. I(xi)≈f(xi) only
//
func (o *FourierInterp) CalcA(f Ss, rule32 bool) (err error) {

    // aliasing removal by padding (3/2-rule)
    var fxj float64
    if rule32 {

        // sample f on the finer grid with M = 3⋅N/2 - 1 points and take its DFT
        M := 3*o.N/2 - 1
        m := float64(M)
        tmp := make([]complex128, M)
        for j := 0; j < M; j++ {
            xj := 2.0 * math.Pi * float64(j) / m
            fxj, err = f(xj)
            if err != nil {
                return
            }
            tmp[j] = complex(fxj/m, 0)
        }
        err = Dft1d(tmp, false)
        if err != nil {
            return
        }

        // keep only the N coefficients of interest: negative wavenumbers live
        // at the tail of the longer M-series (jM = M + k for k < 0)
        var jN, jM int // j's corresponding to the N and M series, respectively
        for jN = 0; jN < o.N; jN++ {
            k := int(o.K[jN])
            if k < 0 {
                jM = M + k
            } else {
                jM = k
            }
            o.A[jN] = tmp[jM]
        }
        return
    }

    // compute f(x[j]) and set A[j] with f(x[j]) / N
    n := float64(o.N)
    for j := 0; j < o.N; j++ {
        fxj, err = f(o.X[j])
        if err != nil {
            return
        }
        o.A[j] = complex(fxj/n, 0)
    }

    // perform Fourier transform to find A[j]
    err = Dft1d(o.A, false)
    return
}
// CalcK maps the FFT position j to the wavenumber index k
//
//   The FFT stores the A coefficients in the order:
//
//     {A[0], A[1], ..., A[N/2-1], A[-N/2], A[-N/2+1], ... A[-1]}
//
//   k ϵ [-N/2, N/2-1]   and   j ϵ [0, N-1]
//
//   Example with N = 8:
//     j=0,1,2,3 ⇒ k=0,1,2,3   and   j=4,5,6,7 ⇒ k=-4,-3,-2,-1
//
func (o *FourierInterp) CalcK(j int) float64 {
    half := o.N / 2
    wraps := j / half // 0 on the non-negative half, 1 on the negative half
    return float64(j - wraps*o.N)
}
// CalcJ maps the wavenumber index k back to the FFT position j
//
//   k ϵ [-N/2, N/2-1]   and   j ϵ [0, N-1]
//
//     j = N + k   if k < 0
//     j = k       otherwise
//
//   Example with N = 8:
//     k=0,1,2,3 ⇒ j=0,1,2,3   and   k=-4,-3,-2,-1 ⇒ j=4,5,6,7
//
func (o *FourierInterp) CalcJ(k float64) int {
    j := int(k)
    if k < 0 {
        j += o.N
    }
    return j
}
// I computes the (possibly smoothed) interpolation at x
//
//               N/2 - 1
//                ————          +i k x
//    I {f}(x) =  \     A[k] ⋅ e                x ϵ [0, 2π]
//     N          /
//                ————
//               k = -N/2
//
// Equation (2.1.28) of Canuto C, Hussaini MY, Quarteroni A, Zang TA (2006)
// Spectral Methods: Fundamentals in Single Domains. Springer. 563p
//
// NOTE: remember to call CalcA to calculate coefficients A!
//
func (o *FourierInterp) I(x float64) float64 {
    sum := complex(0, 0)
    for j, k := range o.K {
        sum += o.S[j] * o.A[j] * cmplx.Exp(complex(0, k*x))
    }
    return real(sum)
}
// DI computes the p-derivative of the (possibly smoothed) interpolation at x
//
//     p           d^p(I{f})     N/2 - 1      p           +i k x
//   DI{f}(x)  =  ——————————  =    Σ     (i⋅k)  ⋅ A[k] ⋅ e             x ϵ [0, 2π]
//     N            dx^p        k = -N/2
//
// NOTE: remember to call CalcA to calculate coefficients A!
//
func (o *FourierInterp) DI(p int, x float64) float64 {
    sum := complex(0, 0)
    order := complex(float64(p), 0)
    for j, k := range o.K {
        ik := complex(0, k)
        sum += cmplx.Pow(ik, order) * o.S[j] * o.A[j] * cmplx.Exp(complex(0, k*x))
    }
    return real(sum)
}
// Plot plots the interpolated curve (and optional analytic references)
//    option -- 1: plot only f(x)
//              2: plot both f(x) and df/dx(x)
//              3: plot all f(x), df/dx(x) and d^2f/dx^2
//              4: plot only df/dx(x)
//              5: plot only d^2f/dx^2(x)
//              6: plot df^p/dx^p
//    p      -- order of the derivative to plot if option == 6
//    f      -- the analytic f(x) [optional]
//    dfdx   -- is the analytic df/dx(x) [optional]
//    d2fdx2 -- is the analytic d^2f/dx^2(x) [optional]
//    args*  -- plot formatting arguments; defaults are used when nil
//
// NOTE: argsF.L and argsI.L are overwritten when derivative curves are drawn.
func (o *FourierInterp) Plot(option, p int, f, dfdx, d2fdx2 Ss, argsF, argsI, argsD1, argsD2 *plt.A) {

    // set default plot arguments
    if argsF == nil {
        argsF = &plt.A{L: "f(x)", C: plt.C(0, 1), NoClip: true}
    }
    if argsI == nil {
        argsI = &plt.A{L: "I{f}(x)", C: plt.C(1, 1), NoClip: true}
    }
    if argsD1 == nil {
        argsD1 = &plt.A{L: "D1I{f}(x)", C: plt.C(2, 1), NoClip: true}
    }
    if argsD2 == nil {
        argsD2 = &plt.A{L: "D2I{f}(x)", C: plt.C(3, 1), NoClip: true}
    }

    // graph points spanning the full domain [0, 2π]
    npts := 2001
    xx := utl.LinSpace(0, 2.0*math.Pi, npts)

    // which curve families the chosen option requires
    withF := option == 1 || option == 2 || option == 3
    firstD := option == 2 || option == 3 || option == 4
    secondD := option == 3 || option == 5

    // allocate arrays only for the requested curves
    var y1, y2 []float64
    if withF {
        y1 = make([]float64, npts)
        y2 = make([]float64, npts)
    }
    var y3, y4 []float64
    if firstD {
        y3 = make([]float64, npts)
        y4 = make([]float64, npts)
    }
    var y5, y6 []float64
    if secondD {
        y5 = make([]float64, npts)
        y6 = make([]float64, npts)
    }
    var y7 []float64
    if option == 6 {
        y7 = make([]float64, npts)
    }

    // compute values; analytic curves are skipped when their callback is nil
    for i := 0; i < npts; i++ {
        x := xx[i]
        if withF {
            if f != nil {
                fx, err := f(x)
                if err != nil {
                    chk.Panic("f(x) failed:\n%v\n", err)
                }
                y1[i] = fx
            }
            y2[i] = o.I(x)
        }
        if firstD {
            if dfdx != nil {
                dfx, err := dfdx(x)
                if err != nil {
                    chk.Panic("df/dx(x) failed:\n%v\n", err)
                }
                y3[i] = dfx
            }
            y4[i] = o.DI(1, x)
        }
        if secondD {
            if d2fdx2 != nil {
                ddfx, err := d2fdx2(x)
                if err != nil {
                    chk.Panic("d2f/dx2(x) failed:\n%v\n", err)
                }
                y5[i] = ddfx
            }
            y6[i] = o.DI(2, x)
        }
        if option == 6 {
            y7[i] = o.DI(p, x)
        }
    }

    // plot; options 2 and 3 use vertically stacked subplots
    if option == 2 {
        plt.Subplot(2, 1, 1)
    }
    if option == 3 {
        plt.Subplot(3, 1, 1)
    }
    if withF {
        if f != nil {
            plt.Plot(xx, y1, argsF)
        }
        plt.Plot(xx, y2, argsI)
        plt.HideTRborders()
        plt.Gll("$x$", "$f(x)$", nil)
    }
    if option == 2 {
        plt.Subplot(2, 1, 2)
    }
    if option == 3 {
        plt.Subplot(3, 1, 2)
    }
    if firstD {
        argsF.L = "dfdx" // relabel analytic curve for this subplot
        //plt.Plot(X, yX, argsX)
        if dfdx != nil {
            plt.Plot(xx, y3, argsF)
        }
        plt.Plot(xx, y4, argsD1)
        plt.HideTRborders()
        plt.Gll("$x$", "$\\frac{\\mathrm{d}f(x)}{\\mathrm{d}x}$", nil)
    }
    if option == 3 {
        plt.Subplot(3, 1, 3)
    }
    if secondD {
        argsF.L = "d2fdx2" // relabel analytic curve for this subplot
        if d2fdx2 != nil {
            plt.Plot(xx, y5, argsF)
        }
        plt.Plot(xx, y6, argsD2)
        plt.HideTRborders()
        plt.Gll("$x$", "$\\frac{\\mathrm{d}^2f(x)}{\\mathrm{d}x^2}$", nil)
    }
    if option == 6 {
        argsI.L = io.Sf("D%d", p)
        plt.Plot(xx, y7, argsI)
        plt.HideTRborders()
        plt.Gll("$x$", io.Sf("$\\frac{\\mathrm{d}^{%d}f(x)}{\\mathrm{d}x^{%d}}$", p, p), nil)
    }
}
Improve comments
// Copyright 2016 The Gosl Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fun
import (
"math"
"math/cmplx"
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/io"
"github.com/cpmech/gosl/la"
"github.com/cpmech/gosl/plt"
"github.com/cpmech/gosl/utl"
)
// Smoothing kinds
var (

    // SmoNoneKind specifies no smoothing
    SmoNoneKind = io.NewEnum("None", "fun.smoothing", "L", "No smoothing")

    // SmoLanczosKind specifies the Lanczos (sinc) smoothing kind
    SmoLanczosKind = io.NewEnum("Lanczos", "fun.smoothing", "L", "Lanczos (sinc) smoothing kind")

    // SmoCesaroKind specifies the Cesaro smoothing kind
    // (description fixed: was copy-pasted from the Lanczos entry)
    SmoCesaroKind = io.NewEnum("Cesaro", "fun.smoothing", "L", "Cesaro smoothing kind")

    // SmoRcosKind specifies the raised cosine smoothing kind
    // (description fixed: was copy-pasted from the Lanczos entry)
    SmoRcosKind = io.NewEnum("Rcos", "fun.smoothing", "L", "Raised cosine smoothing kind")
)
// FourierInterp performs interpolation using truncated Fourier series
//
//               N/2 - 1
//                ————          +i k X[j]
//    f(x[j]) =   \     A[k] ⋅ e              with X[j] = 2 π j / N
//                /
//                ————
//               k = -N/2                 Eq (2.1.27) of [1]    x ϵ [0, 2π]
//
//   where:
//
//               N - 1
//           1   ————           -i k X[j]
//    A[k] = ———  \    f(x[j]) ⋅ e            with X[j] = 2 π j / N
//           N    /
//               ————
//              j = 0                    Eq (2.1.25) of [1]
//
//   NOTE: f=u in [1] and A[k] is the tilde(u[k]) of [1]
//
// Reference:
// [1] Canuto C, Hussaini MY, Quarteroni A, Zang TA (2006) Spectral Methods: Fundamentals in
//     Single Domains. Springer. 563p
//
type FourierInterp struct {
    N int        // number of terms; must be even (ideally a power of 2 for the FFT)
    X la.Vector  // grid point coordinates == 2⋅π⋅j/N
    K la.Vector  // k values computed from j such that j = 0...N-1 ⇒ k = -N/2...N/2-1
    A la.VectorC // coefficients for interpolation, computed by CalcA (FFT)
    S la.VectorC // smoothing coefficients σ(k); all ones for SmoNoneKind
}
// NewFourierInterp allocates a new FourierInterp object
//   N         -- number of terms. must be even; ideally power of 2, e.g. N = 2ⁿ
//   smoothing -- type of smoothing: use SmoNoneKind for no smoothing
//
//   NOTE: remember to call CalcA to calculate coefficients A!
//
func NewFourierInterp(N int, smoothing io.Enum) (o *FourierInterp, err error) {

    // check: an odd N cannot be split into the symmetric k = -N/2 ... N/2-1 range
    if N%2 != 0 {
        err = chk.Err("N must be even. N=%d is invalid\n", N)
        return
    }

    // allocate
    o = new(FourierInterp)
    o.N = N
    o.X = make([]float64, o.N)
    o.K = make([]float64, o.N)
    o.A = make([]complex128, o.N)
    o.S = make([]complex128, o.N)

    // point coordinates and k values (see CalcK for the j ⇒ k mapping)
    n := float64(o.N)
    for j := 0; j < o.N; j++ {
        o.X[j] = 2.0 * math.Pi * float64(j) / n
        o.K[j] = o.CalcK(j)
    }

    // compute smoothing coefficients; the default σ ≡ 1 means no smoothing
    σ := func(k float64) float64 { return 1.0 }
    switch smoothing {
    case SmoLanczosKind:
        σ = func(k float64) float64 { return Sinc(2 * k * π / n) }
    case SmoRcosKind:
        σ = func(k float64) float64 { return (1.0 + math.Cos(2*k*π/n)) / 2.0 }
    case SmoCesaroKind:
        σ = func(k float64) float64 { return 1.0 - math.Abs(k)/(1.0+n/2.0) }
    }
    for j := 0; j < o.N; j++ {
        o.S[j] = complex(σ(o.K[j]), 0)
    }
    return
}
// CalcA calculates the coefficients A of the interpolation using (fwd) FFT
//
//               N - 1
//           1   ————           -i k X[j]
//    A[k] = ———  \    f(x[j]) ⋅ e            with X[j] = 2 π j / N
//           N    /
//               ————
//              j = 0                    Eq (2.1.25) of [1]
//
//   rule32 -- uses 3/2-rule to remove alias error (padding method)
//
//   NOTE: by using the 3/2-rule, the interpolatory property is not exact;
//         i.e. I(xi)≈f(xi) only
//
func (o *FourierInterp) CalcA(f Ss, rule32 bool) (err error) {

    // aliasing removal by padding (3/2-rule)
    var fxj float64
    if rule32 {

        // sample f on the finer grid with M = 3⋅N/2 - 1 points and take its DFT
        M := 3*o.N/2 - 1
        m := float64(M)
        tmp := make([]complex128, M)
        for j := 0; j < M; j++ {
            xj := 2.0 * math.Pi * float64(j) / m
            fxj, err = f(xj)
            if err != nil {
                return
            }
            tmp[j] = complex(fxj/m, 0)
        }
        err = Dft1d(tmp, false)
        if err != nil {
            return
        }

        // keep only the N coefficients of interest: negative wavenumbers live
        // at the tail of the longer M-series (jM = M + k for k < 0)
        var jN, jM int // j's corresponding to the N and M series, respectively
        for jN = 0; jN < o.N; jN++ {
            k := int(o.K[jN])
            if k < 0 {
                jM = M + k
            } else {
                jM = k
            }
            o.A[jN] = tmp[jM]
        }
        return
    }

    // compute f(x[j]) and set A[j] with f(x[j]) / N
    n := float64(o.N)
    for j := 0; j < o.N; j++ {
        fxj, err = f(o.X[j])
        if err != nil {
            return
        }
        o.A[j] = complex(fxj/n, 0)
    }

    // perform Fourier transform to find A[j]
    err = Dft1d(o.A, false)
    return
}
// CalcK maps the FFT position j to the wavenumber index k
//
//   The FFT stores the A coefficients in the order:
//
//     {A[0], A[1], ..., A[N/2-1], A[-N/2], A[-N/2+1], ... A[-1]}
//
//   k ϵ [-N/2, N/2-1]   and   j ϵ [0, N-1]
//
//   Example with N = 8:
//     j=0,1,2,3 ⇒ k=0,1,2,3   and   j=4,5,6,7 ⇒ k=-4,-3,-2,-1
//
func (o *FourierInterp) CalcK(j int) float64 {
    half := o.N / 2
    wraps := j / half // 0 on the non-negative half, 1 on the negative half
    return float64(j - wraps*o.N)
}
// CalcJ maps the wavenumber index k back to the FFT position j
//
//   k ϵ [-N/2, N/2-1]   and   j ϵ [0, N-1]
//
//     j = N + k   if k < 0
//     j = k       otherwise
//
//   Example with N = 8:
//     k=0,1,2,3 ⇒ j=0,1,2,3   and   k=-4,-3,-2,-1 ⇒ j=4,5,6,7
//
func (o *FourierInterp) CalcJ(k float64) int {
    j := int(k)
    if k < 0 {
        j += o.N
    }
    return j
}
// I computes the interpolation (with smoothing or not)
//
//               N/2 - 1
//                ————          +i k x
//    I {f}(x) =  \     A[k] ⋅ e                x ϵ [0, 2π]
//     N          /
//                ————
//               k = -N/2                Eq (2.1.28) of [1]
//
// NOTE: remember to call CalcA to calculate coefficients A!
//
func (o *FourierInterp) I(x float64) float64 {
    sum := complex(0, 0)
    for j, k := range o.K {
        sum += o.S[j] * o.A[j] * cmplx.Exp(complex(0, k*x))
    }
    return real(sum)
}
// DI computes the p-derivative of the interpolation (with smoothing or not)
//
//     p           d^p(I{f})     N/2 - 1      p           +i k x
//   DI{f}(x)  =  ——————————  =    Σ     (i⋅k)  ⋅ A[k] ⋅ e             x ϵ [0, 2π]
//     N            dx^p        k = -N/2
//
// NOTE: remember to call CalcA to calculate coefficients A!
//
func (o *FourierInterp) DI(p int, x float64) float64 {
    sum := complex(0, 0)
    order := complex(float64(p), 0)
    for j, k := range o.K {
        ik := complex(0, k)
        sum += cmplx.Pow(ik, order) * o.S[j] * o.A[j] * cmplx.Exp(complex(0, k*x))
    }
    return real(sum)
}
// Plot plots the interpolated curve (and optional analytic references)
//    option -- 1: plot only f(x)
//              2: plot both f(x) and df/dx(x)
//              3: plot all f(x), df/dx(x) and d^2f/dx^2
//              4: plot only df/dx(x)
//              5: plot only d^2f/dx^2(x)
//              6: plot df^p/dx^p
//    p      -- order of the derivative to plot if option == 6
//    f      -- the analytic f(x) [optional]
//    dfdx   -- is the analytic df/dx(x) [optional]
//    d2fdx2 -- is the analytic d^2f/dx^2(x) [optional]
//    args*  -- plot formatting arguments; defaults are used when nil
//
// NOTE: argsF.L and argsI.L are overwritten when derivative curves are drawn.
func (o *FourierInterp) Plot(option, p int, f, dfdx, d2fdx2 Ss, argsF, argsI, argsD1, argsD2 *plt.A) {

    // set default plot arguments
    if argsF == nil {
        argsF = &plt.A{L: "f(x)", C: plt.C(0, 1), NoClip: true}
    }
    if argsI == nil {
        argsI = &plt.A{L: "I{f}(x)", C: plt.C(1, 1), NoClip: true}
    }
    if argsD1 == nil {
        argsD1 = &plt.A{L: "D1I{f}(x)", C: plt.C(2, 1), NoClip: true}
    }
    if argsD2 == nil {
        argsD2 = &plt.A{L: "D2I{f}(x)", C: plt.C(3, 1), NoClip: true}
    }

    // graph points spanning the full domain [0, 2π]
    npts := 2001
    xx := utl.LinSpace(0, 2.0*math.Pi, npts)

    // which curve families the chosen option requires
    withF := option == 1 || option == 2 || option == 3
    firstD := option == 2 || option == 3 || option == 4
    secondD := option == 3 || option == 5

    // allocate arrays only for the requested curves
    var y1, y2 []float64
    if withF {
        y1 = make([]float64, npts)
        y2 = make([]float64, npts)
    }
    var y3, y4 []float64
    if firstD {
        y3 = make([]float64, npts)
        y4 = make([]float64, npts)
    }
    var y5, y6 []float64
    if secondD {
        y5 = make([]float64, npts)
        y6 = make([]float64, npts)
    }
    var y7 []float64
    if option == 6 {
        y7 = make([]float64, npts)
    }

    // compute values; analytic curves are skipped when their callback is nil
    for i := 0; i < npts; i++ {
        x := xx[i]
        if withF {
            if f != nil {
                fx, err := f(x)
                if err != nil {
                    chk.Panic("f(x) failed:\n%v\n", err)
                }
                y1[i] = fx
            }
            y2[i] = o.I(x)
        }
        if firstD {
            if dfdx != nil {
                dfx, err := dfdx(x)
                if err != nil {
                    chk.Panic("df/dx(x) failed:\n%v\n", err)
                }
                y3[i] = dfx
            }
            y4[i] = o.DI(1, x)
        }
        if secondD {
            if d2fdx2 != nil {
                ddfx, err := d2fdx2(x)
                if err != nil {
                    chk.Panic("d2f/dx2(x) failed:\n%v\n", err)
                }
                y5[i] = ddfx
            }
            y6[i] = o.DI(2, x)
        }
        if option == 6 {
            y7[i] = o.DI(p, x)
        }
    }

    // plot; options 2 and 3 use vertically stacked subplots
    if option == 2 {
        plt.Subplot(2, 1, 1)
    }
    if option == 3 {
        plt.Subplot(3, 1, 1)
    }
    if withF {
        if f != nil {
            plt.Plot(xx, y1, argsF)
        }
        plt.Plot(xx, y2, argsI)
        plt.HideTRborders()
        plt.Gll("$x$", "$f(x)$", nil)
    }
    if option == 2 {
        plt.Subplot(2, 1, 2)
    }
    if option == 3 {
        plt.Subplot(3, 1, 2)
    }
    if firstD {
        argsF.L = "dfdx" // relabel analytic curve for this subplot
        //plt.Plot(X, yX, argsX)
        if dfdx != nil {
            plt.Plot(xx, y3, argsF)
        }
        plt.Plot(xx, y4, argsD1)
        plt.HideTRborders()
        plt.Gll("$x$", "$\\frac{\\mathrm{d}f(x)}{\\mathrm{d}x}$", nil)
    }
    if option == 3 {
        plt.Subplot(3, 1, 3)
    }
    if secondD {
        argsF.L = "d2fdx2" // relabel analytic curve for this subplot
        if d2fdx2 != nil {
            plt.Plot(xx, y5, argsF)
        }
        plt.Plot(xx, y6, argsD2)
        plt.HideTRborders()
        plt.Gll("$x$", "$\\frac{\\mathrm{d}^2f(x)}{\\mathrm{d}x^2}$", nil)
    }
    if option == 6 {
        argsI.L = io.Sf("D%d", p)
        plt.Plot(xx, y7, argsI)
        plt.HideTRborders()
        plt.Gll("$x$", io.Sf("$\\frac{\\mathrm{d}^{%d}f(x)}{\\mathrm{d}x^{%d}}$", p, p), nil)
    }
}
|
/*
Copyright 2021 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package funcr implements github.com/go-logr/logr.Logger in terms of
// an arbitrary "write" function.
package funcr
import (
"bytes"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync/atomic"
"github.com/go-logr/logr"
)
// New returns a logr.Logger which is implemented by a function.
func New(fn func(prefix, args string), opts Options) logr.Logger {
    // level, prefix and values start at their zero values.
    logger := fnlogger{
        write:     fn,
        logCaller: opts.LogCaller,
    }
    return logger
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
    // LogCaller tells funcr to add a "caller" key to some or all log lines.
    // This has some overhead, so some users might not want it.
    LogCaller MessageClass
}
// MessageClass indicates which category or categories of messages a setting
// (such as Options.LogCaller) applies to.
type MessageClass int

const (
    // None applies to no messages.
    None MessageClass = iota
    // All applies to both info and error messages.
    All
    // Info applies to info messages only.
    Info
    // Error applies to error messages only.
    Error
)
// fnlogger is a logr.Logger implementation that renders each log line to a
// string and hands it to a user-supplied write function.
type fnlogger struct {
    level     int                       // accumulated V level; only 0 is enabled
    prefix    string                    // name segments joined with '/' (see WithName)
    values    []interface{}             // saved key/value pairs from WithValues
    write     func(prefix, args string) // sink for rendered log lines
    logCaller MessageClass              // which messages get a "caller" key
}
// autogeneratedFrameName is the magic file name the runtime reports for
// intermediate (compiler-generated) frames that we should ignore.
const autogeneratedFrameName = "<autogenerated>"

// framesAtomic caches the depth of this interface's log functions, discovered
// lazily by framesToCaller; zero means "not yet computed".
var framesAtomic int32 // atomic
// Discover how many frames we need to climb to find the caller. This approach
// was suggested by Ian Lance Taylor of the Go team, so it *should* be safe
// enough (famous last words) and should survive changes in Go's optimizer.
//
// This assumes that all logging paths are the same depth from the caller,
// which should be a reasonable assumption since they are part of the same
// interface.
func framesToCaller() int {
    // Figuring out the current depth is somewhat expensive. Saving the value
    // amortizes most of that runtime cost. Racing stores write the same value,
    // so at worst the depth is recomputed a few times.
    if atomic.LoadInt32(&framesAtomic) != 0 {
        return int(framesAtomic)
    }
    // 1 is the immediate caller. 3 should be too many.
    for i := 1; i < 3; i++ {
        _, file, _, _ := runtime.Caller(i + 1) // +1 for this function's frame
        if file != autogeneratedFrameName {
            atomic.StoreInt32(&framesAtomic, int32(i))
            return i
        }
    }
    return 1 // something went wrong, this is safe
}
// flatten renders a key/value list as a single space-separated string of
// "key"=value pairs. An odd trailing key gets the value "<no-value>".
func flatten(kvList ...interface{}) string {
    if len(kvList)%2 != 0 {
        kvList = append(kvList, "<no-value>")
    }
    // Empirically bytes.Buffer is faster than strings.Builder for this.
    out := bytes.NewBuffer(make([]byte, 0, 1024))
    for i := 0; i < len(kvList); i += 2 {
        key, isString := kvList[i].(string)
        if !isString {
            key = fmt.Sprintf("<non-string-key-%d>", i/2)
        }
        if i > 0 {
            out.WriteRune(' ')
        }
        out.WriteString(`"` + key + `"=`)
        out.WriteString(pretty(kvList[i+1]))
    }
    return out.String()
}
// pretty renders an arbitrary value as a JSON-ish string.
func pretty(value interface{}) string {
    return prettyWithFlags(value, 0)
}

const (
    // flagRawString suppresses the surrounding quotes on string values.
    flagRawString = 0x1
)
// prettyWithFlags renders an arbitrary value as a JSON-ish string; pass
// flagRawString to omit the quotes around a string value.
//
// TODO: This is not fast. Most of the overhead goes here.
func prettyWithFlags(value interface{}, flags uint32) string {
    // Handling the most common types without reflect is a small perf win.
    switch v := value.(type) {
    case bool:
        return strconv.FormatBool(v)
    case string:
        if flags&flagRawString > 0 {
            return v
        }
        // This is empirically faster than strings.Builder.
        return `"` + v + `"`
    case int:
        return strconv.FormatInt(int64(v), 10)
    case int8:
        return strconv.FormatInt(int64(v), 10)
    case int16:
        return strconv.FormatInt(int64(v), 10)
    case int32:
        return strconv.FormatInt(int64(v), 10)
    case int64:
        return strconv.FormatInt(int64(v), 10)
    case uint:
        return strconv.FormatUint(uint64(v), 10)
    case uint8:
        return strconv.FormatUint(uint64(v), 10)
    case uint16:
        return strconv.FormatUint(uint64(v), 10)
    case uint32:
        return strconv.FormatUint(uint64(v), 10)
    case uint64:
        return strconv.FormatUint(v, 10)
    case uintptr:
        return strconv.FormatUint(uint64(v), 10)
    case float32:
        return strconv.FormatFloat(float64(v), 'f', -1, 32)
    case float64:
        return strconv.FormatFloat(v, 'f', -1, 64)
    }

    // Fall back to reflection for everything else.
    buf := bytes.NewBuffer(make([]byte, 0, 256))
    t := reflect.TypeOf(value)
    if t == nil {
        return "null"
    }
    v := reflect.ValueOf(value)
    switch t.Kind() {
    case reflect.Bool:
        return strconv.FormatBool(v.Bool())
    case reflect.String:
        if flags&flagRawString > 0 {
            return v.String()
        }
        // This is empirically faster than strings.Builder.
        return `"` + v.String() + `"`
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return strconv.FormatInt(int64(v.Int()), 10)
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return strconv.FormatUint(uint64(v.Uint()), 10)
    case reflect.Float32:
        return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
    case reflect.Float64:
        return strconv.FormatFloat(v.Float(), 'f', -1, 64)
    case reflect.Struct:
        buf.WriteRune('{')
        for i := 0; i < t.NumField(); i++ {
            f := t.Field(i)
            if f.PkgPath != "" {
                // reflect says this field is only defined for non-exported fields.
                continue
            }
            if i > 0 {
                buf.WriteRune(',')
            }
            buf.WriteRune('"')
            // Honor a `json:"name,..."` tag when present.
            name := f.Name
            if tag, found := f.Tag.Lookup("json"); found {
                if comma := strings.Index(tag, ","); comma != -1 {
                    name = tag[:comma]
                } else {
                    name = tag
                }
            }
            buf.WriteString(name)
            buf.WriteRune('"')
            buf.WriteRune(':')
            buf.WriteString(pretty(v.Field(i).Interface()))
        }
        buf.WriteRune('}')
        return buf.String()
    case reflect.Slice, reflect.Array:
        buf.WriteRune('[')
        for i := 0; i < v.Len(); i++ {
            if i > 0 {
                buf.WriteRune(',')
            }
            e := v.Index(i)
            buf.WriteString(pretty(e.Interface()))
        }
        buf.WriteRune(']')
        return buf.String()
    case reflect.Map:
        buf.WriteRune('{')
        // This does not sort the map keys, for best perf.
        it := v.MapRange()
        i := 0
        for it.Next() {
            if i > 0 {
                buf.WriteRune(',')
            }
            // JSON only does string keys.
            buf.WriteRune('"')
            buf.WriteString(prettyWithFlags(it.Key().Interface(), flagRawString))
            buf.WriteRune('"')
            buf.WriteRune(':')
            buf.WriteString(pretty(it.Value().Interface()))
            i++
        }
        buf.WriteRune('}')
        return buf.String()
    case reflect.Ptr, reflect.Interface:
        // NOTE(review): Elem().Interface() on a nil pointer/interface would
        // panic; presumably inputs here are non-nil — confirm with callers.
        return pretty(v.Elem().Interface())
    }
    return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
// callerID identifies the file and line of a log call site; the json tags
// determine the key names used when it is rendered by pretty().
type callerID struct {
    File string `json:"file"`
    Line int    `json:"line"`
}
// caller returns the base file name and line of the code that called into
// this logger, skipping the funcr and logr frames.
func (l fnlogger) caller() callerID {
    // +1 for this frame, +1 for logr itself.
    // FIXME: Maybe logr should offer a clue as to how many frames are
    // needed here?  Or is it part of the contract to LogSinks?
    _, file, line, ok := runtime.Caller(framesToCaller() + 2)
    if !ok {
        return callerID{"<unknown>", 0}
    }
    return callerID{filepath.Base(file), line}
}
// Enabled reports whether this logger emits Info logs: only the base logger
// (V level 0) is enabled.
func (l fnlogger) Enabled() bool {
    return l.level == 0
}
// Info renders a non-error log line ("level", "msg", saved values, then the
// given key/values) and hands it to the write function, but only when the
// logger is enabled.
func (l fnlogger) Info(msg string, kvList ...interface{}) {
    if l.Enabled() {
        args := make([]interface{}, 0, 64) // using a constant here impacts perf
        if l.logCaller == All || l.logCaller == Info {
            args = append(args, "caller", l.caller())
        }
        args = append(args, "level", l.level, "msg", msg)
        args = append(args, l.values...)
        args = append(args, kvList...)
        argsStr := flatten(args...)
        l.write(l.prefix, argsStr)
    }
}
// Error renders an error log line ("msg", "error", saved values, then the
// given key/values) and hands it to the write function. Unlike Info, error
// logs are always written, regardless of level.
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
    args := make([]interface{}, 0, 64) // using a constant here impacts perf
    if l.logCaller == All || l.logCaller == Error {
        args = append(args, "caller", l.caller())
    }
    args = append(args, "msg", msg)
    // A nil error is logged as a null value rather than panicking.
    var loggableErr interface{}
    if err != nil {
        loggableErr = err.Error()
    }
    args = append(args, "error", loggableErr)
    args = append(args, l.values...)
    args = append(args, kvList...)
    argsStr := flatten(args...)
    l.write(l.prefix, argsStr)
}
// V returns a logger with its verbosity level increased by the given amount.
// The receiver is a value, so l is already a copy and safe to mutate.
func (l fnlogger) V(level int) logr.Logger {
    l.level += level
    return l
}
// WithName returns a new Logger with the specified name appended.  funcr
// uses '/' characters to separate name elements.  Callers should not pass '/'
// in the provided name string, but this library does not actually enforce that.
func (l fnlogger) WithName(name string) logr.Logger {
    // Value receiver: l is already a copy, safe to mutate.
    if l.prefix != "" {
        l.prefix = l.prefix + "/"
    }
    l.prefix += name
    return l
}
// WithValues returns a new Logger with the given key/value pairs appended to
// the saved values.
func (l fnlogger) WithValues(kvList ...interface{}) logr.Logger {
    // Three slice args forces a copy, so siblings do not share a backing array.
    n := len(l.values)
    l.values = append(l.values[:n:n], kvList...)
    return l
}

// Compile-time check that fnlogger implements logr.Logger.
var _ logr.Logger = fnlogger{}
Implement funcr verbosity
/*
Copyright 2021 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package funcr implements github.com/go-logr/logr.Logger in terms of
// an arbitrary "write" function.
package funcr
import (
"bytes"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync/atomic"
"github.com/go-logr/logr"
)
// New returns a logr.Logger which is implemented by a function. Output lines
// are handed to fn; opts controls caller annotation and verbosity gating.
func New(fn func(prefix, args string), opts Options) logr.Logger {
	// level, prefix and values start at their zero values.
	l := fnlogger{
		write:     fn,
		logCaller: opts.LogCaller,
		verbosity: opts.Verbosity,
	}
	return l
}
// Options configures a funcr logger created by New.
type Options struct {
	// LogCaller tells funcr to add a "caller" key to some or all log lines.
	// This has some overhead, so some users might not want it.
	LogCaller MessageClass

	// Verbosity tells funcr which V logs to write. Higher values enable
	// more logs.
	Verbosity int
}
// MessageClass indicates which category or categories of messages an option
// (e.g. Options.LogCaller) applies to.
type MessageClass int

const (
	// None applies to no messages.
	None MessageClass = iota
	// All applies to both info and error messages.
	All
	// Info applies to info messages only.
	Info
	// Error applies to error messages only.
	Error
)
// fnlogger implements logr.Logger by rendering each line to a string and
// handing it to a user-supplied write function. All methods use a value
// receiver, so V/WithName/WithValues return modified copies and never
// mutate the original logger.
type fnlogger struct {
	level     int                       // accumulated V level
	prefix    string                    // name components joined with '/'
	values    []interface{}             // key/value pairs from WithValues
	write     func(prefix, args string) // sink for rendered log lines
	logCaller MessageClass              // which message classes get a "caller" key
	verbosity int                       // maximum V level accepted by Enabled
}
// Magic string for intermediate frames that we should ignore.
const autogeneratedFrameName = "<autogenerated>"

// Cached depth of this interface's log functions. Zero means "not yet
// computed"; see framesToCaller.
var framesAtomic int32 // atomic
// Discover how many frames we need to climb to find the caller. This approach
// was suggested by Ian Lance Taylor of the Go team, so it *should* be safe
// enough (famous last words) and should survive changes in Go's optimizer.
//
// This assumes that all logging paths are the same depth from the caller,
// which should be a reasonable assumption since they are part of the same
// interface.
func framesToCaller() int {
	// Figuring out the current depth is somewhat expensive. Saving the value
	// amortizes most of that runtime cost.
	if atomic.LoadInt32(&framesAtomic) != 0 {
		return int(framesAtomic)
	}
	// 1 is the immediate caller. 3 should be too many.
	for i := 1; i < 3; i++ {
		_, file, _, _ := runtime.Caller(i + 1) // +1 for this function's frame
		if file != autogeneratedFrameName {
			// Cache the discovered depth. Concurrent first calls may race to
			// store, but per the assumption above they compute the same value.
			atomic.StoreInt32(&framesAtomic, int32(i))
			return i
		}
	}
	return 1 // something went wrong, this is safe
}
// flatten renders a key/value list as space-separated `"key"=value` pairs.
// Non-string keys are replaced with a placeholder naming their pair index;
// an odd-length list is padded with a "<no-value>" sentinel.
func flatten(kvList ...interface{}) string {
	if len(kvList)%2 != 0 {
		kvList = append(kvList, "<no-value>")
	}
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	out := bytes.NewBuffer(make([]byte, 0, 1024))
	for i := 0; i < len(kvList); i += 2 {
		key, isString := kvList[i].(string)
		if !isString {
			key = fmt.Sprintf("<non-string-key-%d>", i/2)
		}
		if i > 0 {
			out.WriteByte(' ')
		}
		out.WriteString(`"` + key + `"=`)
		out.WriteString(pretty(kvList[i+1]))
	}
	return out.String()
}
// pretty renders value with default flags (string values are quoted).
func pretty(value interface{}) string {
	return prettyWithFlags(value, 0)
}

const (
	// flagRawString suppresses the quotes around a string value; used for
	// map keys, which supply their own surrounding quotes.
	flagRawString = 0x1
)
// TODO: This is not fast. Most of the overhead goes here.
//
// prettyWithFlags renders an arbitrary value as a JSON-ish string. flags is
// a bitmask of the flag* constants (flagRawString drops the quotes around a
// string result). Common scalar types are handled without reflection; the
// rest fall through to a reflect-based walk.
func prettyWithFlags(value interface{}, flags uint32) string {
	// Handling the most common types without reflect is a small perf win.
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case string:
		if flags&flagRawString > 0 {
			return v
		}
		// This is empirically faster than strings.Builder.
		return `"` + v + `"`
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(v, 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case uintptr:
		return strconv.FormatUint(uint64(v), 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	}

	buf := bytes.NewBuffer(make([]byte, 0, 256))
	t := reflect.TypeOf(value)
	if t == nil {
		return "null"
	}
	v := reflect.ValueOf(value)
	switch t.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(v.Bool())
	case reflect.String:
		if flags&flagRawString > 0 {
			return v.String()
		}
		// This is empirically faster than strings.Builder.
		return `"` + v.String() + `"`
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(v.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(v.Uint(), 10)
	case reflect.Float32:
		return strconv.FormatFloat(v.Float(), 'f', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
	case reflect.Struct:
		buf.WriteRune('{')
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			if f.PkgPath != "" {
				// reflect says this field is only defined for non-exported fields.
				continue
			}
			if i > 0 {
				buf.WriteRune(',')
			}
			buf.WriteRune('"')
			name := f.Name
			// Honor a `json` struct tag as the rendered field name.
			if tag, found := f.Tag.Lookup("json"); found {
				if comma := strings.Index(tag, ","); comma != -1 {
					name = tag[:comma]
				} else {
					name = tag
				}
			}
			buf.WriteString(name)
			buf.WriteRune('"')
			buf.WriteRune(':')
			buf.WriteString(pretty(v.Field(i).Interface()))
		}
		buf.WriteRune('}')
		return buf.String()
	case reflect.Slice, reflect.Array:
		buf.WriteRune('[')
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				buf.WriteRune(',')
			}
			e := v.Index(i)
			buf.WriteString(pretty(e.Interface()))
		}
		buf.WriteRune(']')
		return buf.String()
	case reflect.Map:
		buf.WriteRune('{')
		// This does not sort the map keys, for best perf.
		it := v.MapRange()
		i := 0
		for it.Next() {
			if i > 0 {
				buf.WriteRune(',')
			}
			// JSON only does string keys.
			buf.WriteRune('"')
			buf.WriteString(prettyWithFlags(it.Key().Interface(), flagRawString))
			buf.WriteRune('"')
			buf.WriteRune(':')
			buf.WriteString(pretty(it.Value().Interface()))
			i++
		}
		buf.WriteRune('}')
		return buf.String()
	case reflect.Ptr, reflect.Interface:
		// BUG FIX: a typed-nil pointer or nil interface has no element;
		// calling v.Elem().Interface() on it panics. Render it as JSON null,
		// consistent with the t == nil case above.
		if v.IsNil() {
			return "null"
		}
		return pretty(v.Elem().Interface())
	}
	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
// callerID identifies the source location that invoked the logger.
type callerID struct {
	File string `json:"file"`
	Line int    `json:"line"`
}

// caller resolves the file/line of the code that called into logr, or a
// placeholder when the stack cannot be resolved.
func (l fnlogger) caller() callerID {
	// +1 for this frame, +1 for logr itself.
	// FIXME: Maybe logr should offer a clue as to how many frames are
	// needed here? Or is it part of the contract to LogSinks?
	_, file, line, ok := runtime.Caller(framesToCaller() + 2)
	if !ok {
		return callerID{"<unknown>", 0}
	}
	return callerID{filepath.Base(file), line}
}
// Enabled reports whether this logger's accumulated V level is within the
// configured verbosity threshold.
func (l fnlogger) Enabled() bool {
	return l.level <= l.verbosity
}
// Info logs a non-error message, but only when the logger's V level is
// within the configured verbosity.
func (l fnlogger) Info(msg string, kvList ...interface{}) {
	if !l.Enabled() {
		return
	}
	kv := make([]interface{}, 0, 64) // literal capacity: a named constant here costs perf
	if l.logCaller == All || l.logCaller == Info {
		kv = append(kv, "caller", l.caller())
	}
	kv = append(kv, "level", l.level, "msg", msg)
	kv = append(kv, l.values...)
	kv = append(kv, kvList...)
	l.write(l.prefix, flatten(kv...))
}
// Error logs an error message. Unlike Info, output is not gated by
// verbosity: errors are always written.
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
	args := make([]interface{}, 0, 64) // using a constant here impacts perf
	if l.logCaller == All || l.logCaller == Error {
		args = append(args, "caller", l.caller())
	}
	args = append(args, "msg", msg)
	// A nil error renders as a null value rather than calling Error() on nil.
	var loggableErr interface{}
	if err != nil {
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	args = append(args, l.values...)
	args = append(args, kvList...)
	argsStr := flatten(args...)
	l.write(l.prefix, argsStr)
}
// V returns a copy of the logger with the given amount added to its V
// level; the copy is enabled only while the total stays <= verbosity.
func (l fnlogger) V(level int) logr.Logger {
	l.level += level
	return l
}

// WithName returns a new Logger with the specified name appended. funcr
// uses '/' characters to separate name elements. Callers should not pass '/'
// in the provided name string, but this library does not actually enforce that.
func (l fnlogger) WithName(name string) logr.Logger {
	if len(l.prefix) > 0 {
		l.prefix = l.prefix + "/"
	}
	l.prefix += name
	return l
}

// WithValues returns a copy of the logger that renders the given key/value
// pairs on every subsequent line.
func (l fnlogger) WithValues(kvList ...interface{}) logr.Logger {
	// Three slice args forces a copy.
	n := len(l.values)
	l.values = append(l.values[:n:n], kvList...)
	return l
}
var _ logr.Logger = fnlogger{}
|
package test
import (
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"testing"
"time"
"github.com/anacrolix/missinggo/v2/filecache"
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/internal/testutil"
"github.com/anacrolix/torrent/storage"
"golang.org/x/time/rate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testClientTransferParams configures a single seeder->leecher transfer test.
type testClientTransferParams struct {
	Responsive                 bool  // put the leecher's reader in responsive mode
	Readahead                  int64 // reader readahead; applied when SetReadahead
	SetReadahead               bool
	ExportClientStatus         bool
	LeecherStorage             func(string) storage.ClientImplCloser
	SeederStorage              func(string) storage.ClientImplCloser
	SeederUploadRateLimiter    *rate.Limiter
	LeecherDownloadRateLimiter *rate.Limiter
	ConfigureSeeder            ConfigureClient
	ConfigureLeecher           ConfigureClient
}
// assertReadAllGreeting seeks r back to the start and asserts that reading
// it to EOF yields exactly the greeting torrent's file contents.
func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
	pos, err := r.Seek(0, io.SeekStart)
	assert.NoError(t, err)
	assert.EqualValues(t, 0, pos)
	got, err := ioutil.ReadAll(r)
	assert.NoError(t, err)
	assert.EqualValues(t, testutil.GreetingFileContents, string(got))
}
// Creates a seeder and a leecher, and ensures the data transfers when a read
// is attempted on the leecher.
func testClientTransfer(t *testing.T, ps testClientTransferParams) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	// Create seeder and a Torrent.
	cfg := torrent.TestingConfig()
	cfg.Seed = true
	if ps.SeederUploadRateLimiter != nil {
		cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
	}
	// cfg.ListenAddr = "localhost:4000"
	if ps.SeederStorage != nil {
		storage := ps.SeederStorage(greetingTempDir)
		defer storage.Close()
		cfg.DefaultStorage = storage
	} else {
		cfg.DataDir = greetingTempDir
	}
	if ps.ConfigureSeeder.Config != nil {
		ps.ConfigureSeeder.Config(cfg)
	}
	seeder, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	if ps.ConfigureSeeder.Client != nil {
		ps.ConfigureSeeder.Client(seeder)
	}
	if ps.ExportClientStatus {
		defer testutil.ExportStatusWriter(seeder, "s")()
	}
	seederTorrent, _, _ := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
	// Run a Stats right after Closing the Client. This will trigger the Stats
	// panic in #214 caused by RemoteAddr on Closed uTP sockets.
	defer seederTorrent.Stats()
	defer seeder.Close()
	seederTorrent.VerifyData()
	// Create leecher and a Torrent.
	leecherDataDir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(leecherDataDir)
	cfg = torrent.TestingConfig()
	if ps.LeecherStorage == nil {
		cfg.DataDir = leecherDataDir
	} else {
		storage := ps.LeecherStorage(leecherDataDir)
		defer storage.Close()
		cfg.DefaultStorage = storage
	}
	if ps.LeecherDownloadRateLimiter != nil {
		cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
	}
	cfg.Seed = false
	if ps.ConfigureLeecher.Config != nil {
		ps.ConfigureLeecher.Config(cfg)
	}
	leecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	if ps.ConfigureLeecher.Client != nil {
		ps.ConfigureLeecher.Client(leecher)
	}
	if ps.ExportClientStatus {
		defer testutil.ExportStatusWriter(leecher, "l")()
	}
	leecherTorrent, new, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, new)
	//// This was used when observing coalescing of piece state changes.
	//logPieceStateChanges(leecherTorrent)
	// Now do some things with leecher and seeder.
	added := leecherTorrent.AddClientPeer(seeder)
	// The Torrent should not be interested in obtaining peers, so the one we
	// just added should be the only one.
	assert.False(t, leecherTorrent.Seeding())
	assert.EqualValues(t, added, leecherTorrent.Stats().PendingPeers)
	r := leecherTorrent.NewReader()
	defer r.Close()
	if ps.Responsive {
		r.SetResponsive()
	}
	if ps.SetReadahead {
		r.SetReadahead(ps.Readahead)
	}
	assertReadAllGreeting(t, r)
	assert.NotEmpty(t, seederTorrent.PeerConns())
	leecherPeerConns := leecherTorrent.PeerConns()
	assert.NotEmpty(t, leecherPeerConns)
	// BUG FIX: previously this required *every* leecher conn to report all
	// pieces complete, which races: a second conn to the seeder may exist
	// whose completed-piece information hasn't arrived yet. Require only that
	// at least one conn (the seeder) reports all pieces, matching the later
	// revision of this test.
	foundSeeder := false
	for _, pc := range leecherPeerConns {
		completed := pc.PeerPieces().Len()
		t.Logf("peer conn %v has %v completed pieces", pc, completed)
		if completed == leecherTorrent.Info().NumPieces() {
			foundSeeder = true
		}
	}
	if !foundSeeder {
		t.Errorf("didn't find seeder amongst leecher peer conns")
	}
	seederStats := seederTorrent.Stats()
	assert.True(t, 13 <= seederStats.BytesWrittenData.Int64())
	assert.True(t, 8 <= seederStats.ChunksWritten.Int64())
	leecherStats := leecherTorrent.Stats()
	assert.True(t, 13 <= leecherStats.BytesReadData.Int64())
	assert.True(t, 8 <= leecherStats.ChunksRead.Int64())
	// Try reading through again for the cases where the torrent data size
	// exceeds the size of the cache.
	assertReadAllGreeting(t, r)
}
// fileCacheClientStorageFactoryParams configures a filecache-backed storage
// factory: an optional capacity cap and a wrapper that adapts the cache to
// client storage.
type fileCacheClientStorageFactoryParams struct {
	Capacity    int64
	SetCapacity bool
	Wrapper     func(*filecache.Cache) storage.ClientImplCloser
}

// newFileCacheClientStorageFactory returns a storageFactory that creates a
// filecache rooted at the given data dir, optionally caps its size, and
// wraps it via ps.Wrapper. Cache construction failure panics.
func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) storageFactory {
	return func(dataDir string) storage.ClientImplCloser {
		fc, err := filecache.NewCache(dataDir)
		if err != nil {
			panic(err)
		}
		if ps.SetCapacity {
			fc.SetCapacity(ps.Capacity)
		}
		return ps.Wrapper(fc)
	}
}

// storageFactory builds a client storage implementation rooted at a data dir.
type storageFactory func(string) storage.ClientImplCloser
// Baseline transfer using filecache-backed leecher storage.
func TestClientTransferDefault(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		ExportClientStatus: true,
		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		}),
	})
}

// Transfer with the seeder's upload rate limited; asserts the limiter
// actually slowed the transfer.
func TestClientTransferRateLimitedUpload(t *testing.T) {
	started := time.Now()
	testClientTransfer(t, testClientTransferParams{
		// We are uploading 13 bytes (the length of the greeting torrent). The
		// chunks are 2 bytes in length. Then the smallest burst we can run
		// with is 2. Time taken is (13-burst)/rate.
		SeederUploadRateLimiter: rate.NewLimiter(11, 2),
		ExportClientStatus:      true,
	})
	// Per the math above, at least (13-2)/11 = 1s should have elapsed.
	require.True(t, time.Since(started) > time.Second)
}

// Transfer with the leecher's download rate limited.
func TestClientTransferRateLimitedDownload(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
	})
}

// fileCachePieceResourceStorage adapts a filecache to piece-resource storage.
func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImplCloser {
	return storage.NewResourcePieces(fc.AsResourceProvider())
}
// testClientTransferSmallCache runs a transfer where the leecher's cache
// capacity (5 bytes) is smaller than the torrent, forcing eviction and
// re-reads; readahead is optionally configured.
func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
	testClientTransfer(t, testClientTransferParams{
		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			SetCapacity: true,
			// Going below the piece length means it can't complete a piece so
			// that it can be hashed.
			Capacity: 5,
			Wrapper:  fileCachePieceResourceStorage,
		}),
		SetReadahead: setReadahead,
		// Can't readahead too far or the cache will thrash and drop data we
		// thought we had.
		Readahead:          readahead,
		ExportClientStatus: true,
		// These tests don't work well with more than 1 connection to the seeder.
		ConfigureLeecher: ConfigureClient{
			Config: func(cfg *torrent.ClientConfig) {
				cfg.DropDuplicatePeerIds = true
				//cfg.DisableIPv6 = true
				//cfg.DisableUTP = true
			},
		},
	})
}

func TestClientTransferSmallCachePieceSizedReadahead(t *testing.T) {
	testClientTransferSmallCache(t, true, 5)
}

func TestClientTransferSmallCacheLargeReadahead(t *testing.T) {
	testClientTransferSmallCache(t, true, 15)
}

func TestClientTransferSmallCacheDefaultReadahead(t *testing.T) {
	testClientTransferSmallCache(t, false, -1)
}
// TestClientTransferVarious runs the transfer across the cross product of
// leecher storage, seeder storage, responsiveness, and readahead settings.
func TestClientTransferVarious(t *testing.T) {
	// Leecher storage
	for _, ls := range []struct {
		name string
		f    storageFactory
	}{
		{"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		})},
		{"Boltdb", storage.NewBoltDB},
	} {
		t.Run(fmt.Sprintf("LeecherStorage=%s", ls.name), func(t *testing.T) {
			// Seeder storage
			for _, ss := range []struct {
				name string
				f    func(string) storage.ClientImplCloser
			}{
				{"File", storage.NewFile},
				{"Mmap", storage.NewMMap},
			} {
				t.Run(fmt.Sprintf("%sSeederStorage", ss.name), func(t *testing.T) {
					for _, responsive := range []bool{false, true} {
						t.Run(fmt.Sprintf("Responsive=%v", responsive), func(t *testing.T) {
							t.Run("NoReadahead", func(t *testing.T) {
								testClientTransfer(t, testClientTransferParams{
									Responsive:     responsive,
									SeederStorage:  ss.f,
									LeecherStorage: ls.f,
								})
							})
							// Sweep readahead from "default" (-1) through past
							// the 13-byte torrent length.
							for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
								t.Run(fmt.Sprintf("readahead=%v", readahead), func(t *testing.T) {
									testClientTransfer(t, testClientTransferParams{
										SeederStorage:  ss.f,
										Responsive:     responsive,
										SetReadahead:   true,
										Readahead:      readahead,
										LeecherStorage: ls.f,
									})
								})
							}
						})
					}
				})
			}
		})
	}
}
// Check that after completing leeching, a leecher transitions to a seeding
// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
func TestSeedAfterDownloading(t *testing.T) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	cfg := torrent.TestingConfig()
	cfg.Seed = true
	cfg.DataDir = greetingTempDir
	seeder, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer seeder.Close()
	defer testutil.ExportStatusWriter(seeder, "s")()
	seederTorrent, ok, err := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
	require.NoError(t, err)
	assert.True(t, ok)
	seederTorrent.VerifyData()
	cfg = torrent.TestingConfig()
	cfg.Seed = true
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	leecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	defer testutil.ExportStatusWriter(leecher, "l")()
	cfg = torrent.TestingConfig()
	cfg.Seed = false
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	// BUG FIX: the error from NewClient was previously discarded with `_`, so
	// the require.NoError below re-checked the stale TempDir error. Capture
	// the construction error so it is actually validated.
	leecherLeecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecherLeecher.Close()
	defer testutil.ExportStatusWriter(leecherLeecher, "ll")()
	leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, ok)
	llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 3
		return
	}())
	require.NoError(t, err)
	assert.True(t, ok)
	// Simultaneously DownloadAll in Leecher, and read the contents
	// consecutively in LeecherLeecher. This non-deterministically triggered a
	// case where the leecher wouldn't unchoke the LeecherLeecher.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		r := llg.NewReader()
		defer r.Close()
		b, err := ioutil.ReadAll(r)
		require.NoError(t, err)
		assert.EqualValues(t, testutil.GreetingFileContents, b)
	}()
	done := make(chan struct{})
	defer close(done)
	go leecherGreeting.AddClientPeer(seeder)
	go leecherGreeting.AddClientPeer(leecherLeecher)
	wg.Add(1)
	go func() {
		defer wg.Done()
		leecherGreeting.DownloadAll()
		leecher.WaitAll()
	}()
	wg.Wait()
}
// ConfigureClient holds optional hooks: Config mutates a client's config
// before construction; Client runs against the constructed client.
type ConfigureClient struct {
	Config func(*torrent.ClientConfig)
	Client func(*torrent.Client)
}
Fix the transfer test's check of seeder piece counts.
I suspect there's a race where a connection is established to the seeder, but we haven't received its completed-piece information yet, and we have already finished reading all the data we need from another connection. This probably surfaces now because pending peers with the same address no longer clobber each other, since that was fixed.
package test
import (
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"testing"
"time"
"github.com/anacrolix/missinggo/v2/filecache"
"github.com/anacrolix/torrent"
"github.com/anacrolix/torrent/internal/testutil"
"github.com/anacrolix/torrent/storage"
"golang.org/x/time/rate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testClientTransferParams configures a single seeder->leecher transfer test.
type testClientTransferParams struct {
	Responsive                 bool  // put the leecher's reader in responsive mode
	Readahead                  int64 // reader readahead; applied when SetReadahead
	SetReadahead               bool
	ExportClientStatus         bool
	LeecherStorage             func(string) storage.ClientImplCloser
	SeederStorage              func(string) storage.ClientImplCloser
	SeederUploadRateLimiter    *rate.Limiter
	LeecherDownloadRateLimiter *rate.Limiter
	ConfigureSeeder            ConfigureClient
	ConfigureLeecher           ConfigureClient
}

// assertReadAllGreeting seeks r back to the start and asserts that reading
// it to EOF yields exactly the greeting torrent's file contents.
func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
	pos, err := r.Seek(0, io.SeekStart)
	assert.NoError(t, err)
	assert.EqualValues(t, 0, pos)
	_greeting, err := ioutil.ReadAll(r)
	assert.NoError(t, err)
	assert.EqualValues(t, testutil.GreetingFileContents, string(_greeting))
}
// Creates a seeder and a leecher, and ensures the data transfers when a read
// is attempted on the leecher.
func testClientTransfer(t *testing.T, ps testClientTransferParams) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	// Create seeder and a Torrent.
	cfg := torrent.TestingConfig()
	cfg.Seed = true
	if ps.SeederUploadRateLimiter != nil {
		cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
	}
	// cfg.ListenAddr = "localhost:4000"
	if ps.SeederStorage != nil {
		storage := ps.SeederStorage(greetingTempDir)
		defer storage.Close()
		cfg.DefaultStorage = storage
	} else {
		cfg.DataDir = greetingTempDir
	}
	if ps.ConfigureSeeder.Config != nil {
		ps.ConfigureSeeder.Config(cfg)
	}
	seeder, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	if ps.ConfigureSeeder.Client != nil {
		ps.ConfigureSeeder.Client(seeder)
	}
	if ps.ExportClientStatus {
		defer testutil.ExportStatusWriter(seeder, "s")()
	}
	seederTorrent, _, _ := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
	// Run a Stats right after Closing the Client. This will trigger the Stats
	// panic in #214 caused by RemoteAddr on Closed uTP sockets.
	defer seederTorrent.Stats()
	defer seeder.Close()
	seederTorrent.VerifyData()
	// Create leecher and a Torrent.
	leecherDataDir, err := ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(leecherDataDir)
	cfg = torrent.TestingConfig()
	if ps.LeecherStorage == nil {
		cfg.DataDir = leecherDataDir
	} else {
		storage := ps.LeecherStorage(leecherDataDir)
		defer storage.Close()
		cfg.DefaultStorage = storage
	}
	if ps.LeecherDownloadRateLimiter != nil {
		cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
	}
	cfg.Seed = false
	if ps.ConfigureLeecher.Config != nil {
		ps.ConfigureLeecher.Config(cfg)
	}
	leecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	if ps.ConfigureLeecher.Client != nil {
		ps.ConfigureLeecher.Client(leecher)
	}
	if ps.ExportClientStatus {
		defer testutil.ExportStatusWriter(leecher, "l")()
	}
	leecherTorrent, new, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, new)
	//// This was used when observing coalescing of piece state changes.
	//logPieceStateChanges(leecherTorrent)
	// Now do some things with leecher and seeder.
	added := leecherTorrent.AddClientPeer(seeder)
	// The Torrent should not be interested in obtaining peers, so the one we
	// just added should be the only one.
	assert.False(t, leecherTorrent.Seeding())
	assert.EqualValues(t, added, leecherTorrent.Stats().PendingPeers)
	r := leecherTorrent.NewReader()
	defer r.Close()
	if ps.Responsive {
		r.SetResponsive()
	}
	if ps.SetReadahead {
		r.SetReadahead(ps.Readahead)
	}
	assertReadAllGreeting(t, r)
	assert.NotEmpty(t, seederTorrent.PeerConns())
	leecherPeerConns := leecherTorrent.PeerConns()
	assert.NotEmpty(t, leecherPeerConns)
	// Require only that at least one leecher conn (the seeder) reports all
	// pieces complete; another conn's completed-piece info may not have
	// arrived yet.
	foundSeeder := false
	for _, pc := range leecherPeerConns {
		completed := pc.PeerPieces().Len()
		t.Logf("peer conn %v has %v completed pieces", pc, completed)
		if completed == leecherTorrent.Info().NumPieces() {
			foundSeeder = true
		}
	}
	if !foundSeeder {
		t.Errorf("didn't find seeder amongst leecher peer conns")
	}
	seederStats := seederTorrent.Stats()
	assert.True(t, 13 <= seederStats.BytesWrittenData.Int64())
	assert.True(t, 8 <= seederStats.ChunksWritten.Int64())
	leecherStats := leecherTorrent.Stats()
	assert.True(t, 13 <= leecherStats.BytesReadData.Int64())
	assert.True(t, 8 <= leecherStats.ChunksRead.Int64())
	// Try reading through again for the cases where the torrent data size
	// exceeds the size of the cache.
	assertReadAllGreeting(t, r)
}
// fileCacheClientStorageFactoryParams configures a filecache-backed storage
// factory: an optional capacity cap and a wrapper that adapts the cache to
// client storage.
type fileCacheClientStorageFactoryParams struct {
	Capacity    int64
	SetCapacity bool
	Wrapper     func(*filecache.Cache) storage.ClientImplCloser
}

// newFileCacheClientStorageFactory returns a storageFactory that creates a
// filecache rooted at the given data dir, optionally caps its size, and
// wraps it via ps.Wrapper. Cache construction failure panics.
func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) storageFactory {
	return func(dataDir string) storage.ClientImplCloser {
		fc, err := filecache.NewCache(dataDir)
		if err != nil {
			panic(err)
		}
		if ps.SetCapacity {
			fc.SetCapacity(ps.Capacity)
		}
		return ps.Wrapper(fc)
	}
}

// storageFactory builds a client storage implementation rooted at a data dir.
type storageFactory func(string) storage.ClientImplCloser

// Baseline transfer using filecache-backed leecher storage.
func TestClientTransferDefault(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		ExportClientStatus: true,
		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		}),
	})
}

// Transfer with the seeder's upload rate limited; asserts the limiter
// actually slowed the transfer.
func TestClientTransferRateLimitedUpload(t *testing.T) {
	started := time.Now()
	testClientTransfer(t, testClientTransferParams{
		// We are uploading 13 bytes (the length of the greeting torrent). The
		// chunks are 2 bytes in length. Then the smallest burst we can run
		// with is 2. Time taken is (13-burst)/rate.
		SeederUploadRateLimiter: rate.NewLimiter(11, 2),
		ExportClientStatus:      true,
	})
	// Per the math above, at least (13-2)/11 = 1s should have elapsed.
	require.True(t, time.Since(started) > time.Second)
}

// Transfer with the leecher's download rate limited.
func TestClientTransferRateLimitedDownload(t *testing.T) {
	testClientTransfer(t, testClientTransferParams{
		LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
	})
}

// fileCachePieceResourceStorage adapts a filecache to piece-resource storage.
func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImplCloser {
	return storage.NewResourcePieces(fc.AsResourceProvider())
}
// testClientTransferSmallCache runs a transfer where the leecher's cache
// capacity (5 bytes) is smaller than the torrent, forcing eviction and
// re-reads; readahead is optionally configured.
func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
	testClientTransfer(t, testClientTransferParams{
		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			SetCapacity: true,
			// Going below the piece length means it can't complete a piece so
			// that it can be hashed.
			Capacity: 5,
			Wrapper:  fileCachePieceResourceStorage,
		}),
		SetReadahead: setReadahead,
		// Can't readahead too far or the cache will thrash and drop data we
		// thought we had.
		Readahead:          readahead,
		ExportClientStatus: true,
		// These tests don't work well with more than 1 connection to the seeder.
		ConfigureLeecher: ConfigureClient{
			Config: func(cfg *torrent.ClientConfig) {
				cfg.DropDuplicatePeerIds = true
				//cfg.DisableIPv6 = true
				//cfg.DisableUTP = true
			},
		},
	})
}

func TestClientTransferSmallCachePieceSizedReadahead(t *testing.T) {
	testClientTransferSmallCache(t, true, 5)
}

func TestClientTransferSmallCacheLargeReadahead(t *testing.T) {
	testClientTransferSmallCache(t, true, 15)
}

func TestClientTransferSmallCacheDefaultReadahead(t *testing.T) {
	testClientTransferSmallCache(t, false, -1)
}
// TestClientTransferVarious runs the transfer across the cross product of
// leecher storage, seeder storage, responsiveness, and readahead settings.
func TestClientTransferVarious(t *testing.T) {
	// Leecher storage
	for _, ls := range []struct {
		name string
		f    storageFactory
	}{
		{"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
			Wrapper: fileCachePieceResourceStorage,
		})},
		{"Boltdb", storage.NewBoltDB},
	} {
		t.Run(fmt.Sprintf("LeecherStorage=%s", ls.name), func(t *testing.T) {
			// Seeder storage
			for _, ss := range []struct {
				name string
				f    func(string) storage.ClientImplCloser
			}{
				{"File", storage.NewFile},
				{"Mmap", storage.NewMMap},
			} {
				t.Run(fmt.Sprintf("%sSeederStorage", ss.name), func(t *testing.T) {
					for _, responsive := range []bool{false, true} {
						t.Run(fmt.Sprintf("Responsive=%v", responsive), func(t *testing.T) {
							t.Run("NoReadahead", func(t *testing.T) {
								testClientTransfer(t, testClientTransferParams{
									Responsive:     responsive,
									SeederStorage:  ss.f,
									LeecherStorage: ls.f,
								})
							})
							// Sweep readahead from "default" (-1) through past
							// the 13-byte torrent length.
							for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
								t.Run(fmt.Sprintf("readahead=%v", readahead), func(t *testing.T) {
									testClientTransfer(t, testClientTransferParams{
										SeederStorage:  ss.f,
										Responsive:     responsive,
										SetReadahead:   true,
										Readahead:      readahead,
										LeecherStorage: ls.f,
									})
								})
							}
						})
					}
				})
			}
		})
	}
}
// Check that after completing leeching, a leecher transitions to a seeding
// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
func TestSeedAfterDownloading(t *testing.T) {
	greetingTempDir, mi := testutil.GreetingTestTorrent()
	defer os.RemoveAll(greetingTempDir)
	cfg := torrent.TestingConfig()
	cfg.Seed = true
	cfg.DataDir = greetingTempDir
	seeder, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer seeder.Close()
	defer testutil.ExportStatusWriter(seeder, "s")()
	seederTorrent, ok, err := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
	require.NoError(t, err)
	assert.True(t, ok)
	seederTorrent.VerifyData()
	cfg = torrent.TestingConfig()
	cfg.Seed = true
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	leecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecher.Close()
	defer testutil.ExportStatusWriter(leecher, "l")()
	cfg = torrent.TestingConfig()
	cfg.Seed = false
	cfg.DataDir, err = ioutil.TempDir("", "")
	require.NoError(t, err)
	defer os.RemoveAll(cfg.DataDir)
	// BUG FIX: the error from NewClient was previously discarded with `_`, so
	// the require.NoError below re-checked the stale TempDir error. Capture
	// the construction error so it is actually validated.
	leecherLeecher, err := torrent.NewClient(cfg)
	require.NoError(t, err)
	defer leecherLeecher.Close()
	defer testutil.ExportStatusWriter(leecherLeecher, "ll")()
	leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 2
		return
	}())
	require.NoError(t, err)
	assert.True(t, ok)
	llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
		ret = torrent.TorrentSpecFromMetaInfo(mi)
		ret.ChunkSize = 3
		return
	}())
	require.NoError(t, err)
	assert.True(t, ok)
	// Simultaneously DownloadAll in Leecher, and read the contents
	// consecutively in LeecherLeecher. This non-deterministically triggered a
	// case where the leecher wouldn't unchoke the LeecherLeecher.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		r := llg.NewReader()
		defer r.Close()
		b, err := ioutil.ReadAll(r)
		require.NoError(t, err)
		assert.EqualValues(t, testutil.GreetingFileContents, b)
	}()
	done := make(chan struct{})
	defer close(done)
	go leecherGreeting.AddClientPeer(seeder)
	go leecherGreeting.AddClientPeer(leecherLeecher)
	wg.Add(1)
	go func() {
		defer wg.Done()
		leecherGreeting.DownloadAll()
		leecher.WaitAll()
	}()
	wg.Wait()
}
// ConfigureClient holds optional hooks: Config mutates a client's config
// before construction; Client runs against the constructed client.
type ConfigureClient struct {
	Config func(*torrent.ClientConfig)
	Client func(*torrent.Client)
}
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuseops
import (
"fmt"
"os"
"time"
"unsafe"
"github.com/jacobsa/fuse/internal/fusekernel"
"github.com/jacobsa/fuse/internal/fuseshim"
"golang.org/x/net/context"
)
// A common interface implemented by all ops in this package. Use a type switch
// to find particular concrete types, responding with fuse.ENOSYS if a type is
// not supported.
type Op interface {
	// A short description of the op, to be used in logging.
	ShortDesc() string

	// A context that can be used for long-running operations.
	Context() context.Context

	// Respond to the operation with the supplied error. If there is no error, set
	// any necessary output fields and then call Respond(nil). The user must not
	// call with a nil error for unrecognized ops; instead, use ENOSYS.
	//
	// Once this is invoked, the user must exclude any further calls to any
	// method of this op.
	Respond(error)

	// Log information tied to this operation, with semantics equivalent to
	// log.Printf, except that the format is different and logging is suppressed
	// if no debug logger was set when mounting.
	Logf(format string, v ...interface{})
}
////////////////////////////////////////////////////////////////////////
// Inodes
////////////////////////////////////////////////////////////////////////
// Look up a child by name within a parent directory. The kernel sends this
// when resolving user paths to dentry structs, which are then cached.
type LookUpInodeOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of the directory inode to which the child belongs.
	Parent InodeID

	// The name of the child of interest, relative to the parent. For example, in
	// this directory structure:
	//
	//     foo/
	//         bar/
	//         baz
	//
	// the file system may receive a request to look up the child named "bar" for
	// the parent foo/.
	Name string

	// The resulting entry. Must be filled out by the file system.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line human-readable summary of this op, suitable
// for log output.
func (o *LookUpInodeOp) ShortDesc() (desc string) {
	return fmt.Sprintf("LookUpInode(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes o.Entry into the kernel's EntryOut wire format.
func (o *LookUpInodeOp) kernelResponse() (msg []byte) {
	outSize := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(outSize)
	entryOut := (*fusekernel.EntryOut)(b.Alloc(outSize))
	convertChildInodeEntry(&o.Entry, entryOut)
	return b
}
// Refresh the attributes for an inode whose ID was previously returned in a
// LookUpInodeOp. The kernel sends this when the FUSE VFS layer's cache of
// inode attributes is stale. This is controlled by the AttributesExpiration
// field of ChildInodeEntry, etc.
type GetInodeAttributesOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The inode of interest.
	Inode InodeID

	// Set by the file system: attributes for the inode, and the time at which
	// they should expire. See notes on ChildInodeEntry.AttributesExpiration for
	// more.
	Attributes           InodeAttributes
	AttributesExpiration time.Time
}
// kernelResponse serializes the inode attributes and their expiration into
// the kernel's AttrOut wire format.
func (o *GetInodeAttributesOp) kernelResponse() (msg []byte) {
	outSize := fusekernel.AttrOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(outSize)
	attrOut := (*fusekernel.AttrOut)(b.Alloc(outSize))
	attrOut.AttrValid, attrOut.AttrValidNsec = convertExpirationTime(o.AttributesExpiration)
	convertAttributes(o.Inode, &o.Attributes, &attrOut.Attr)
	return b
}
// Change attributes for an inode.
//
// The kernel sends this for obvious cases like chmod(2), and for less obvious
// cases like ftruncate(2).
type SetInodeAttributesOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The inode of interest.
	Inode InodeID

	// The attributes to modify, or nil for attributes that don't need a change.
	Size  *uint64
	Mode  *os.FileMode
	Atime *time.Time
	Mtime *time.Time

	// Set by the file system: the new attributes for the inode, and the time at
	// which they should expire. See notes on
	// ChildInodeEntry.AttributesExpiration for more.
	Attributes           InodeAttributes
	AttributesExpiration time.Time
}
// kernelResponse serializes the post-change attributes and their expiration
// into the kernel's AttrOut wire format.
func (o *SetInodeAttributesOp) kernelResponse() (msg []byte) {
	outSize := fusekernel.AttrOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(outSize)
	attrOut := (*fusekernel.AttrOut)(b.Alloc(outSize))
	attrOut.AttrValid, attrOut.AttrValidNsec = convertExpirationTime(o.AttributesExpiration)
	convertAttributes(o.Inode, &o.Attributes, &attrOut.Attr)
	return b
}
// Decrement the reference count for an inode ID previously issued by the file
// system.
//
// The comments for the ops that implicitly increment the reference count
// contain a note of this (but see also the note about the root inode below).
// For example, LookUpInodeOp and MkDirOp. The authoritative source is the
// libfuse documentation, which states that any op that returns
// fuse_reply_entry fuse_reply_create implicitly increments (cf.
// http://goo.gl/o5C7Dx).
//
// If the reference count hits zero, the file system can forget about that ID
// entirely, and even re-use it in future responses. The kernel guarantees that
// it will not otherwise use it again.
//
// The reference count corresponds to fuse_inode::nlookup
// (http://goo.gl/ut48S4). Some examples of where the kernel manipulates it:
//
// * (http://goo.gl/vPD9Oh) Any caller to fuse_iget increases the count.
// * (http://goo.gl/B6tTTC) fuse_lookup_name calls fuse_iget.
// * (http://goo.gl/IlcxWv) fuse_create_open calls fuse_iget.
// * (http://goo.gl/VQMQul) fuse_dentry_revalidate increments after
// revalidating.
//
// In contrast to all other inodes, RootInodeID begins with an implicit
// lookup count of one, without a corresponding op to increase it. (There
// could be no such op, because the root cannot be referred to by name.) Code
// walk:
//
// * (http://goo.gl/gWAheU) fuse_fill_super calls fuse_get_root_inode.
//
// * (http://goo.gl/AoLsbb) fuse_get_root_inode calls fuse_iget without
// sending any particular request.
//
// * (http://goo.gl/vPD9Oh) fuse_iget increments nlookup.
//
// File systems should tolerate but not rely on receiving forget ops for
// remaining inodes when the file system unmounts, including the root inode.
// Rather they should take fuse.Connection.ReadOp returning io.EOF as
// implicitly decrementing all lookup counts to zero.
type ForgetInodeOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The inode whose reference count should be decremented.
	Inode InodeID

	// The amount to decrement the reference count.
	N uint64
}
// kernelResponse always panics: the FUSE protocol requires no reply for
// forget requests, and this code path should never be reached for them.
func (o *ForgetInodeOp) kernelResponse() (msg []byte) {
	panic("TODO: Signal that no response should happen here.")
}
////////////////////////////////////////////////////////////////////////
// Inode creation
////////////////////////////////////////////////////////////////////////
// Create a directory inode as a child of an existing directory inode. The
// kernel sends this in response to a mkdir(2) call.
//
// The Linux kernel appears to verify the name doesn't already exist (mkdir
// calls mkdirat calls user_path_create calls filename_create, which verifies:
// http://goo.gl/FZpLu5). Indeed, the tests in samples/memfs that call in
// parallel appear to bear this out. But osxfuse does not appear to guarantee
// this (cf. https://goo.gl/PqzZDv). And if names may be created outside of the
// kernel's control, it doesn't matter what the kernel does anyway.
//
// Therefore the file system should return EEXIST if the name already exists.
type MkDirOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of parent directory inode within which to create the child.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode

	// Set by the file system: information about the inode that was created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line human-readable summary of this op, suitable
// for log output.
func (o *MkDirOp) ShortDesc() (desc string) {
	return fmt.Sprintf("MkDir(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes the created entry into the kernel's EntryOut
// wire format.
func (o *MkDirOp) kernelResponse() (msg []byte) {
	outSize := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(outSize)
	entryOut := (*fusekernel.EntryOut)(b.Alloc(outSize))
	convertChildInodeEntry(&o.Entry, entryOut)
	return b
}
// Create a file inode and open it.
//
// The kernel sends this when the user asks to open a file with the O_CREAT
// flag and the kernel has observed that the file doesn't exist. (See for
// example lookup_open, http://goo.gl/PlqE9d). However, osxfuse doesn't appear
// to make this check atomically (cf. https://goo.gl/PqzZDv). And if names may
// be created outside of the kernel's control, it doesn't matter what the
// kernel does anyway.
//
// Therefore the file system should return EEXIST if the name already exists.
type CreateFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of parent directory inode within which to create the child file.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode

	// Set by the file system: information about the inode that was created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry

	// Set by the file system: an opaque ID that will be echoed in follow-up
	// calls for this file using the same struct file in the kernel. In practice
	// this usually means follow-up calls using the file descriptor returned by
	// open(2).
	//
	// The handle may be supplied in future ops like ReadFileOp that contain a
	// file handle. The file system must ensure this ID remains valid until a
	// later call to ReleaseFileHandle.
	Handle HandleID
}
// ShortDesc returns a one-line human-readable summary of this op, suitable
// for log output.
func (o *CreateFileOp) ShortDesc() (desc string) {
	return fmt.Sprintf("CreateFile(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes both the created entry and the open-file reply
// (EntryOut followed by OpenOut) into a single buffer, as the kernel expects
// for a create.
func (o *CreateFileOp) kernelResponse() (msg []byte) {
	entrySize := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	openSize := unsafe.Sizeof(fusekernel.OpenOut{})
	b := fuseshim.NewBuffer(entrySize + openSize)

	// Order matters: the entry record precedes the open record on the wire.
	entryOut := (*fusekernel.EntryOut)(b.Alloc(entrySize))
	convertChildInodeEntry(&o.Entry, entryOut)

	openOut := (*fusekernel.OpenOut)(b.Alloc(openSize))
	openOut.Fh = uint64(o.Handle)

	return b
}
// Create a symlink inode. If the name already exists, the file system should
// return EEXIST (cf. the notes on CreateFileOp and MkDirOp).
type CreateSymlinkOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of parent directory inode within which to create the child symlink.
	Parent InodeID

	// The name of the symlink to create.
	Name string

	// The target of the symlink.
	Target string

	// Set by the file system: information about the symlink inode that was
	// created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line human-readable summary of this op, suitable
// for log output.
func (o *CreateSymlinkOp) ShortDesc() (desc string) {
	return fmt.Sprintf(
		"CreateSymlink(parent=%v, name=%q, target=%q)",
		o.Parent, o.Name, o.Target)
}
// kernelResponse serializes the created symlink's entry into the kernel's
// EntryOut wire format.
func (o *CreateSymlinkOp) kernelResponse() (msg []byte) {
	outSize := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(outSize)
	entryOut := (*fusekernel.EntryOut)(b.Alloc(outSize))
	convertChildInodeEntry(&o.Entry, entryOut)
	return b
}
////////////////////////////////////////////////////////////////////////
// Unlinking
////////////////////////////////////////////////////////////////////////
// Rename a file or directory, given the IDs of the original parent directory
// and the new one (which may be the same).
//
// In Linux, this is called by vfs_rename (https://goo.gl/eERItT), which is
// called by sys_renameat2 (https://goo.gl/fCC9qC).
//
// The kernel takes care of ensuring that the source and destination are not
// identical (in which case it does nothing), that the rename is not across
// file system boundaries, and that the destination doesn't already exist with
// the wrong type. Some subtleties that the file system must care about:
//
// * If the new name is an existing directory, the file system must ensure it
// is empty before replacing it, returning ENOTEMPTY otherwise. (This is
// per the posix spec: http://goo.gl/4XtT79)
//
// * The rename must be atomic from the point of view of an observer of the
// new name. That is, if the new name already exists, there must be no
// point at which it doesn't exist.
//
// * It is okay for the new name to be modified before the old name is
// removed; these need not be atomic. In fact, the Linux man page
// explicitly says this is likely (cf. https://goo.gl/Y1wVZc).
//
// * Linux bends over backwards (https://goo.gl/pLDn3r) to ensure that
// neither the old nor the new parent can be concurrently modified. But
// it's not clear whether OS X does this, and in any case it doesn't matter
// for file systems that may be modified remotely. Therefore a careful file
// system implementor should probably ensure if possible that the unlink
// step in the "link new name, unlink old name" process doesn't unlink a
// different inode than the one that was linked to the new name. Still,
// posix and the man pages are imprecise about the actual semantics of a
// rename if it's not atomic, so it is probably not disastrous to be loose
// about this.
//
type RenameOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The old parent directory, and the name of the entry within it to be
	// relocated.
	OldParent InodeID
	OldName   string

	// The new parent directory, and the name of the entry to be created or
	// overwritten within it.
	NewParent InodeID
	NewName   string
}
// kernelResponse returns an empty reply: a successful rename carries no
// payload beyond the response header.
func (o *RenameOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Unlink a directory from its parent. Because directories cannot have a link
// count above one, this means the directory inode should be deleted as well
// once the kernel sends ForgetInodeOp.
//
// The file system is responsible for checking that the directory is empty.
//
// Sample implementation in ext2: ext2_rmdir (http://goo.gl/B9QmFf)
type RmDirOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of parent directory inode, and the name of the directory being
	// removed within it.
	Parent InodeID
	Name   string
}
// kernelResponse returns an empty reply: a successful rmdir carries no
// payload beyond the response header.
func (o *RmDirOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Unlink a file or symlink from its parent. If this brings the inode's link
// count to zero, the inode should be deleted once the kernel sends
// ForgetInodeOp. It may still be referenced before then if a user still has
// the file open.
//
// Sample implementation in ext2: ext2_unlink (http://goo.gl/hY6r6C)
type UnlinkOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of parent directory inode, and the name of the entry being removed
	// within it.
	Parent InodeID
	Name   string
}
// kernelResponse returns an empty reply: a successful unlink carries no
// payload beyond the response header.
func (o *UnlinkOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
////////////////////////////////////////////////////////////////////////
// Directory handles
////////////////////////////////////////////////////////////////////////
// Open a directory inode.
//
// On Linux the kernel sends this when setting up a struct file for a particular inode
// with type directory, usually in response to an open(2) call from a
// user-space process. On OS X it may not be sent for every open(2) (cf.
// https://github.com/osxfuse/osxfuse/issues/199).
type OpenDirOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of the inode to be opened.
	Inode InodeID

	// Set by the file system: an opaque ID that will be echoed in follow-up
	// calls for this directory using the same struct file in the kernel. In
	// practice this usually means follow-up calls using the file descriptor
	// returned by open(2).
	//
	// The handle may be supplied in future ops like ReadDirOp that contain a
	// directory handle. The file system must ensure this ID remains valid until
	// a later call to ReleaseDirHandle.
	Handle HandleID
}
// kernelResponse serializes the assigned directory handle into the kernel's
// OpenOut wire format.
func (o *OpenDirOp) kernelResponse() (msg []byte) {
	outSize := unsafe.Sizeof(fusekernel.OpenOut{})
	b := fuseshim.NewBuffer(outSize)
	openOut := (*fusekernel.OpenOut)(b.Alloc(outSize))
	openOut.Fh = uint64(o.Handle)
	return b
}
// Read entries from a directory previously opened with OpenDir.
type ReadDirOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The directory inode that we are reading, and the handle previously
	// returned by OpenDir when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The offset within the directory at which to read.
	//
	// Warning: this field is not necessarily a count of bytes. Its legal values
	// are defined by the results returned in ReadDirResponse. See the notes
	// below and the notes on that struct.
	//
	// In the Linux kernel this ultimately comes from file::f_pos, which starts
	// at zero and is set by llseek and by the final consumed result returned by
	// each call to ReadDir:
	//
	//  *  (http://goo.gl/2nWJPL) iterate_dir, which is called by getdents(2) and
	//     readdir(2), sets dir_context::pos to file::f_pos before calling
	//     f_op->iterate, and then does the opposite assignment afterward.
	//
	//  *  (http://goo.gl/rTQVSL) fuse_readdir, which implements iterate for fuse
	//     directories, passes dir_context::pos as the offset to fuse_read_fill,
	//     which passes it on to user-space. fuse_readdir later calls
	//     parse_dirfile with the same context.
	//
	//  *  (http://goo.gl/vU5ukv) For each returned result (except perhaps the
	//     last, which may be truncated by the page boundary), parse_dirfile
	//     updates dir_context::pos with fuse_dirent::off.
	//
	// It is affected by the Posix directory stream interfaces in the following
	// manner:
	//
	//  *  (http://goo.gl/fQhbyn, http://goo.gl/ns1kDF) opendir initially causes
	//     filepos to be set to zero.
	//
	//  *  (http://goo.gl/ezNKyR, http://goo.gl/xOmDv0) readdir allows the user
	//     to iterate through the directory one entry at a time. As each entry is
	//     consumed, its d_off field is stored in __dirstream::filepos.
	//
	//  *  (http://goo.gl/WEOXG8, http://goo.gl/rjSXl3) telldir allows the user
	//     to obtain the d_off field from the most recently returned entry.
	//
	//  *  (http://goo.gl/WG3nDZ, http://goo.gl/Lp0U6W) seekdir allows the user
	//     to seek backward to an offset previously returned by telldir. It
	//     stores the new offset in filepos, and calls llseek to update the
	//     kernel's struct file.
	//
	//  *  (http://goo.gl/gONQhz, http://goo.gl/VlrQkc) rewinddir allows the user
	//     to go back to the beginning of the directory, obtaining a fresh view.
	//     It updates filepos and calls llseek to update the kernel's struct
	//     file.
	//
	// Unfortunately, FUSE offers no way to intercept seeks
	// (http://goo.gl/H6gEXa), so there is no way to cause seekdir or rewinddir
	// to fail. Additionally, there is no way to distinguish an explicit
	// rewinddir followed by readdir from the initial readdir, or a rewinddir
	// from a seekdir to the value returned by telldir just after opendir.
	//
	// Luckily, Posix is vague about what the user will see if they seek
	// backwards, and requires the user not to seek to an old offset after a
	// rewind. The only requirement on freshness is that rewinddir results in
	// something that looks like a newly-opened directory. So FUSE file systems
	// may e.g. cache an entire fresh listing for each ReadDir with a zero
	// offset, and return array offsets into that cached listing.
	Offset DirOffset

	// The maximum number of bytes to return in ReadDirResponse.Data. A smaller
	// number is acceptable.
	Size int

	// Set by the file system: a buffer consisting of a sequence of FUSE
	// directory entries in the format generated by fuse_add_direntry
	// (http://goo.gl/qCcHCV), which is consumed by parse_dirfile
	// (http://goo.gl/2WUmD2). Use fuseutil.AppendDirent to generate this data.
	//
	// The buffer must not exceed the length specified in ReadDirRequest.Size. It
	// is okay for the final entry to be truncated; parse_dirfile copes with this
	// by ignoring the partial record.
	//
	// Each entry returned exposes a directory offset to the user that may later
	// show up in ReadDirRequest.Offset. See notes on that field for more
	// information.
	//
	// An empty buffer indicates the end of the directory has been reached.
	Data []byte
}
// kernelResponse appends the pre-formatted dirent data to a fresh response
// buffer sized for it.
func (o *ReadDirOp) kernelResponse() (msg []byte) {
	b := fuseshim.NewBuffer(uintptr(len(o.Data)))
	return append(b, o.Data...)
}
// Release a previously-minted directory handle. The kernel sends this when
// there are no more references to an open directory: all file descriptors are
// closed and all memory mappings are unmapped.
//
// The kernel guarantees that the handle ID will not be used in further ops
// sent to the file system (unless it is reissued by the file system).
//
// Errors from this op are ignored by the kernel (cf. http://goo.gl/RL38Do).
type ReleaseDirHandleOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}
// kernelResponse returns an empty reply: releasing a directory handle
// carries no payload beyond the response header.
func (o *ReleaseDirHandleOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
////////////////////////////////////////////////////////////////////////
// File handles
////////////////////////////////////////////////////////////////////////
// Open a file inode.
//
// On Linux the kernel sends this when setting up a struct file for a particular inode
// with type file, usually in response to an open(2) call from a user-space
// process. On OS X it may not be sent for every open(2)
// (cf.https://github.com/osxfuse/osxfuse/issues/199).
type OpenFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The ID of the inode to be opened.
	Inode InodeID

	// An opaque ID that will be echoed in follow-up calls for this file using
	// the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied in future ops like ReadFileOp that contain a
	// file handle. The file system must ensure this ID remains valid until a
	// later call to ReleaseFileHandle.
	Handle HandleID
}
// kernelResponse serializes the assigned file handle into the kernel's
// OpenOut wire format.
func (o *OpenFileOp) kernelResponse() (msg []byte) {
	outSize := unsafe.Sizeof(fusekernel.OpenOut{})
	b := fuseshim.NewBuffer(outSize)
	openOut := (*fusekernel.OpenOut)(b.Alloc(outSize))
	openOut.Fh = uint64(o.Handle)
	return b
}
// Read data from a file previously opened with CreateFile or OpenFile.
//
// Note that this op is not sent for every call to read(2) by the end user;
// some reads may be served by the page cache. See notes on WriteFileOp for
// more.
type ReadFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The file inode that we are reading, and the handle previously returned by
	// CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The range of the file to read.
	//
	// The FUSE documentation requires that exactly the number of bytes be
	// returned, except in the case of EOF or error (http://goo.gl/ZgfBkF). This
	// appears to be because it uses file mmapping machinery
	// (http://goo.gl/SGxnaN) to read a page at a time. It appears to understand
	// where EOF is by checking the inode size (http://goo.gl/0BkqKD), returned
	// by a previous call to LookUpInode, GetInodeAttributes, etc.
	Offset int64
	Size   int

	// Set by the file system: the data read. If this is less than the requested
	// size, it indicates EOF. An error should not be returned in this case.
	Data []byte
}
// kernelResponse appends the read data to a fresh response buffer sized
// for it.
func (o *ReadFileOp) kernelResponse() (msg []byte) {
	b := fuseshim.NewBuffer(uintptr(len(o.Data)))
	return append(b, o.Data...)
}
// Write data to a file previously opened with CreateFile or OpenFile.
//
// When the user writes data using write(2), the write goes into the page
// cache and the page is marked dirty. Later the kernel may write back the
// page via the FUSE VFS layer, causing this op to be sent:
//
// * The kernel calls address_space_operations::writepage when a dirty page
// needs to be written to backing store (cf. http://goo.gl/Ezbewg). Fuse
// sets this to fuse_writepage (cf. http://goo.gl/IeNvLT).
//
// * (http://goo.gl/Eestuy) fuse_writepage calls fuse_writepage_locked.
//
// * (http://goo.gl/RqYIxY) fuse_writepage_locked makes a write request to
// the userspace server.
//
// Note that the kernel *will* ensure that writes are received and acknowledged
// by the file system before sending a FlushFileOp when closing the file
// descriptor to which they were written:
//
// * (http://goo.gl/PheZjf) fuse_flush calls write_inode_now, which appears
// to start a writeback in the background (it talks about a "flusher
// thread").
//
// * (http://goo.gl/1IiepM) fuse_flush then calls fuse_sync_writes, which
// "[waits] for all pending writepages on the inode to finish".
//
// * (http://goo.gl/zzvxWv) Only then does fuse_flush finally send the
// flush request.
//
// (See also http://goo.gl/ocdTdM, fuse-devel thread "Fuse guarantees on
// concurrent requests".)
type WriteFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The file inode that we are modifying, and the handle previously returned
	// by CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The offset at which to write the data below.
	//
	// The man page for pwrite(2) implies that aside from changing the file
	// handle's offset, using pwrite is equivalent to using lseek(2) and then
	// write(2). The man page for lseek(2) says the following:
	//
	// "The lseek() function allows the file offset to be set beyond the end of
	// the file (but this does not change the size of the file). If data is later
	// written at this point, subsequent reads of the data in the gap (a "hole")
	// return null bytes (aq\0aq) until data is actually written into the gap."
	//
	// It is therefore reasonable to assume that the kernel is looking for
	// the following semantics:
	//
	// *   If the offset is less than or equal to the current size, extend the
	//     file as necessary to fit any data that goes past the end of the file.
	//
	// *   If the offset is greater than the current size, extend the file
	//     with null bytes until it is not, then do the above.
	//
	Offset int64

	// The data to write.
	//
	// The FUSE documentation requires that exactly the number of bytes supplied
	// be written, except on error (http://goo.gl/KUpwwn). This appears to be
	// because it uses file mmapping machinery (http://goo.gl/SGxnaN) to write a
	// page at a time.
	Data []byte
}
// kernelResponse serializes a WriteOut record reporting that the full data
// length was written.
func (o *WriteFileOp) kernelResponse() (msg []byte) {
	outSize := unsafe.Sizeof(fusekernel.WriteOut{})
	b := fuseshim.NewBuffer(outSize)
	writeOut := (*fusekernel.WriteOut)(b.Alloc(outSize))
	writeOut.Size = uint32(len(o.Data))
	return b
}
// Synchronize the current contents of an open file to storage.
//
// vfs.txt documents this as being called for by the fsync(2) system call
// (cf. http://goo.gl/j9X8nB). Code walk for that case:
//
// * (http://goo.gl/IQkWZa) sys_fsync calls do_fsync, calls vfs_fsync, calls
// vfs_fsync_range.
//
// * (http://goo.gl/5L2SMy) vfs_fsync_range calls f_op->fsync.
//
// Note that this is also sent by fdatasync(2) (cf. http://goo.gl/01R7rF), and
// may be sent for msync(2) with the MS_SYNC flag (see the notes on
// FlushFileOp).
//
// See also: FlushFileOp, which may perform a similar function when closing a
// file (but which is not used in "real" file systems).
type SyncFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The file and handle being sync'd.
	Inode  InodeID
	Handle HandleID
}
// kernelResponse returns an empty reply: a successful fsync carries no
// payload beyond the response header.
func (o *SyncFileOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Flush the current state of an open file to storage upon closing a file
// descriptor.
//
// vfs.txt documents this as being sent for each close(2) system call (cf.
// http://goo.gl/FSkbrq). Code walk for that case:
//
// * (http://goo.gl/e3lv0e) sys_close calls __close_fd, calls filp_close.
// * (http://goo.gl/nI8fxD) filp_close calls f_op->flush (fuse_flush).
//
// But note that this is also sent in other contexts where a file descriptor is
// closed, such as dup2(2) (cf. http://goo.gl/NQDvFS). In the case of close(2),
// a flush error is returned to the user. For dup2(2), it is not.
//
// One potentially significant case where this may not be sent is mmap'd files,
// where the behavior is complicated:
//
// * munmap(2) does not cause flushes (cf. http://goo.gl/j8B9g0).
//
// * On OS X, if a user modifies a mapped file via the mapping before
// closing the file with close(2), the WriteFileOps for the modifications
// may not be received before the FlushFileOp for the close(2) (cf.
// http://goo.gl/kVmNcx).
//
// * However, even on OS X you can arrange for writes via a mapping to be
// flushed by calling msync(2) followed by close(2). On OS X msync(2)
// will cause a WriteFileOps to go through and close(2) will cause a
// FlushFile as usual (cf. http://goo.gl/kVmNcx). On Linux, msync(2) does
// nothing unless you set the MS_SYNC flag, in which case it causes a
// SyncFileOp to be sent (cf. http://goo.gl/P3mErk).
//
// In summary: if you make data durable in both FlushFile and SyncFile, then
// your users can get safe behavior from mapped files on both operating systems
// by calling msync(2) with MS_SYNC, followed by munmap(2), followed by
// close(2). On Linux, the msync(2) is optional (cf. http://goo.gl/EIhAxv and
// the notes on WriteFileOp).
//
// Because of cases like dup2(2), FlushFileOps are not necessarily one to one
// with OpenFileOps. They should not be used for reference counting, and the
// handle must remain valid even after the flush op is received (use
// ReleaseFileHandleOp for disposing of it).
//
// Typical "real" file systems do not implement this, presumably relying on
// the kernel to write out the page cache to the block device eventually.
// They can get away with this because a later open(2) will see the same
// data. A file system that writes to remote storage however probably wants
// to at least schedule a real flush, and maybe do it immediately in order to
// return any errors that occur.
type FlushFileOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The file and handle being flushed.
	Inode  InodeID
	Handle HandleID
}
// kernelResponse returns an empty reply: a successful flush carries no
// payload beyond the response header.
func (o *FlushFileOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Release a previously-minted file handle. The kernel calls this when there
// are no more references to an open file: all file descriptors are closed
// and all memory mappings are unmapped.
//
// The kernel guarantees that the handle ID will not be used in further calls
// to the file system (unless it is reissued by the file system).
//
// Errors from this op are ignored by the kernel (cf. http://goo.gl/RL38Do).
type ReleaseFileHandleOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}
// kernelResponse returns an empty reply: releasing a file handle carries no
// payload beyond the response header.
func (o *ReleaseFileHandleOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// A sentinel used for unknown ops. The user is expected to respond with a
// non-nil error.
type unknownOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The raw FUSE opcode that was not recognized.
	opCode uint32

	// The inode the unrecognized request referred to.
	inode InodeID
}
// ShortDesc returns a one-line human-readable summary of this op, suitable
// for log output.
func (o *unknownOp) ShortDesc() (desc string) {
	return fmt.Sprintf("<opcode %d>(inode=%v)", o.opCode, o.inode)
}
// kernelResponse always panics: the user is expected to respond to unknown
// ops with a non-nil error, so a successful reply should never be serialized.
func (o *unknownOp) kernelResponse() (msg []byte) {
	panic(fmt.Sprintf("Should never get here for unknown op: %s", o.ShortDesc()))
}
////////////////////////////////////////////////////////////////////////
// Reading symlinks
////////////////////////////////////////////////////////////////////////
// Read the target of a symlink inode.
type ReadSymlinkOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The symlink inode that we are reading.
	Inode InodeID

	// Set by the file system: the target of the symlink.
	Target string
}
// kernelResponse appends the symlink target string to a fresh response
// buffer sized for it.
func (o *ReadSymlinkOp) kernelResponse() (msg []byte) {
	b := fuseshim.NewBuffer(uintptr(len(o.Target)))
	return append(b, o.Target...)
}
////////////////////////////////////////////////////////////////////////
// Internal
////////////////////////////////////////////////////////////////////////
// TODO(jacobsa): Untangle the way ops work and move these to an internal
// package, along with Convert. I think all of the behavior wants to be on
// Connection. Ops have only String methods. Connection.ReadRequest returns an
// interface{} and a context. If we must restore debug logging later, we can
// stuff an op ID in that context and add a Connection.Logf method.
// Do not use this struct directly. See the TODO in fuseops/ops.go.
// InternalStatFSOp carries a statfs(2) request. It has no input or output
// fields beyond the embedded common op plumbing.
type InternalStatFSOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp
}
// kernelResponse serializes a zeroed StatfsOut record (no statistics are
// currently reported).
func (o *InternalStatFSOp) kernelResponse() (msg []byte) {
	buf := fuseshim.NewBuffer(unsafe.Sizeof(fusekernel.StatfsOut{}))
	buf.Alloc(unsafe.Sizeof(fusekernel.StatfsOut{}))

	// Bug fix: the allocated buffer was previously never assigned to msg,
	// so this method replied with a nil message. Every other kernelResponse
	// in this file ends by handing its buffer back via msg.
	msg = buf
	return
}
// Do not use this struct directly. See the TODO in fuseops/ops.go.
type InternalInterruptOp struct {
	// Embedded shared op machinery (defined elsewhere in this package).
	commonOp

	// The kernel-assigned ID of the in-flight request to be interrupted.
	FuseID uint64
}
// kernelResponse always panics: interrupt requests are handled internally
// and never produce a serialized reply through this path.
func (o *InternalInterruptOp) kernelResponse() (msg []byte) {
	panic("Shouldn't get here.")
}
// Fixed a bug.
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuseops
import (
"fmt"
"os"
"time"
"unsafe"
"github.com/jacobsa/fuse/internal/fusekernel"
"github.com/jacobsa/fuse/internal/fuseshim"
"golang.org/x/net/context"
)
// A common interface implemented by all ops in this package. Use a type switch
// to find particular concrete types, responding with fuse.ENOSYS if a type is
// not supported.
type Op interface {
	// A short description of the op, to be used in logging.
	ShortDesc() string

	// A context that can be used for long-running operations.
	Context() context.Context

	// Respond to the operation with the supplied error. If there is no error, set
	// any necessary output fields and then call Respond(nil). The user must not
	// call with a nil error for unrecognized ops; instead, use ENOSYS.
	//
	// Once this is invoked, the user must exclude any further calls to any
	// method of this op.
	Respond(error)

	// Log information tied to this operation, with semantics equivalent to
	// log.Printf, except that the format is different and logging is suppressed
	// if no debug logger was set when mounting.
	Logf(format string, v ...interface{})
}
////////////////////////////////////////////////////////////////////////
// Inodes
////////////////////////////////////////////////////////////////////////
// Look up a child by name within a parent directory. The kernel sends this
// when resolving user paths to dentry structs, which are then cached.
type LookUpInodeOp struct {
	commonOp

	// The ID of the directory inode to which the child belongs.
	Parent InodeID

	// The name of the child of interest, relative to the parent. For example, in
	// this directory structure:
	//
	//     foo/
	//         bar/
	//             baz
	//
	// the file system may receive a request to look up the child named "bar" for
	// the parent foo/.
	Name string

	// The resulting entry. Must be filled out by the file system.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line summary of the op suitable for logging.
func (o *LookUpInodeOp) ShortDesc() (desc string) {
	return fmt.Sprintf("LookUpInode(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes an EntryOut struct describing the looked-up child.
func (o *LookUpInodeOp) kernelResponse() (msg []byte) {
	sz := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(sz)
	convertChildInodeEntry(&o.Entry, (*fusekernel.EntryOut)(b.Alloc(sz)))
	return b
}
// Refresh the attributes for an inode whose ID was previously returned in a
// LookUpInodeOp. The kernel sends this when the FUSE VFS layer's cache of
// inode attributes is stale. This is controlled by the AttributesExpiration
// field of ChildInodeEntry, etc.
type GetInodeAttributesOp struct {
	commonOp

	// The inode of interest.
	Inode InodeID

	// Set by the file system: attributes for the inode, and the time at which
	// they should expire. See notes on ChildInodeEntry.AttributesExpiration for
	// more.
	Attributes           InodeAttributes
	AttributesExpiration time.Time
}
// kernelResponse serializes an AttrOut struct carrying the inode attributes
// and their expiration time.
func (o *GetInodeAttributesOp) kernelResponse() (msg []byte) {
	sz := fusekernel.AttrOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(sz)
	out := (*fusekernel.AttrOut)(b.Alloc(sz))
	convertAttributes(o.Inode, &o.Attributes, &out.Attr)
	out.AttrValid, out.AttrValidNsec = convertExpirationTime(o.AttributesExpiration)
	return b
}
// Change attributes for an inode.
//
// The kernel sends this for obvious cases like chmod(2), and for less obvious
// cases like ftruncate(2).
type SetInodeAttributesOp struct {
	commonOp

	// The inode of interest.
	Inode InodeID

	// The attributes to modify, or nil for attributes that don't need a change.
	Size  *uint64
	Mode  *os.FileMode
	Atime *time.Time
	Mtime *time.Time

	// Set by the file system: the new attributes for the inode, and the time at
	// which they should expire. See notes on
	// ChildInodeEntry.AttributesExpiration for more.
	Attributes           InodeAttributes
	AttributesExpiration time.Time
}
// kernelResponse serializes an AttrOut struct carrying the updated attributes
// and their expiration time.
func (o *SetInodeAttributesOp) kernelResponse() (msg []byte) {
	sz := fusekernel.AttrOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(sz)
	out := (*fusekernel.AttrOut)(b.Alloc(sz))
	convertAttributes(o.Inode, &o.Attributes, &out.Attr)
	out.AttrValid, out.AttrValidNsec = convertExpirationTime(o.AttributesExpiration)
	return b
}
// Decrement the reference count for an inode ID previously issued by the file
// system.
//
// The comments for the ops that implicitly increment the reference count
// contain a note of this (but see also the note about the root inode below).
// For example, LookUpInodeOp and MkDirOp. The authoritative source is the
// libfuse documentation, which states that any op that returns
// fuse_reply_entry fuse_reply_create implicitly increments (cf.
// http://goo.gl/o5C7Dx).
//
// If the reference count hits zero, the file system can forget about that ID
// entirely, and even re-use it in future responses. The kernel guarantees that
// it will not otherwise use it again.
//
// The reference count corresponds to fuse_inode::nlookup
// (http://goo.gl/ut48S4). Some examples of where the kernel manipulates it:
//
// * (http://goo.gl/vPD9Oh) Any caller to fuse_iget increases the count.
// * (http://goo.gl/B6tTTC) fuse_lookup_name calls fuse_iget.
// * (http://goo.gl/IlcxWv) fuse_create_open calls fuse_iget.
// * (http://goo.gl/VQMQul) fuse_dentry_revalidate increments after
// revalidating.
//
// In contrast to all other inodes, RootInodeID begins with an implicit
// lookup count of one, without a corresponding op to increase it. (There
// could be no such op, because the root cannot be referred to by name.) Code
// walk:
//
// * (http://goo.gl/gWAheU) fuse_fill_super calls fuse_get_root_inode.
//
// * (http://goo.gl/AoLsbb) fuse_get_root_inode calls fuse_iget without
// sending any particular request.
//
// * (http://goo.gl/vPD9Oh) fuse_iget increments nlookup.
//
// File systems should tolerate but not rely on receiving forget ops for
// remaining inodes when the file system unmounts, including the root inode.
// Rather they should take fuse.Connection.ReadOp returning io.EOF as
// implicitly decrementing all lookup counts to zero.
type ForgetInodeOp struct {
	commonOp

	// The inode whose reference count should be decremented.
	Inode InodeID

	// The amount to decrement the reference count.
	N uint64
}
// kernelResponse is unimplemented for forget ops: the kernel expects no reply
// to a forget request, and the response machinery does not yet model that.
func (o *ForgetInodeOp) kernelResponse() (msg []byte) {
	panic("TODO: Signal that no response should happen here.")
}
////////////////////////////////////////////////////////////////////////
// Inode creation
////////////////////////////////////////////////////////////////////////
// Create a directory inode as a child of an existing directory inode. The
// kernel sends this in response to a mkdir(2) call.
//
// The Linux kernel appears to verify the name doesn't already exist (mkdir
// calls mkdirat calls user_path_create calls filename_create, which verifies:
// http://goo.gl/FZpLu5). Indeed, the tests in samples/memfs that call in
// parallel appear to bear this out. But osxfuse does not appear to guarantee
// this (cf. https://goo.gl/PqzZDv). And if names may be created outside of the
// kernel's control, it doesn't matter what the kernel does anyway.
//
// Therefore the file system should return EEXIST if the name already exists.
type MkDirOp struct {
	commonOp

	// The ID of parent directory inode within which to create the child.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode

	// Set by the file system: information about the inode that was created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line summary of the op suitable for logging.
func (o *MkDirOp) ShortDesc() (desc string) {
	return fmt.Sprintf("MkDir(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes an EntryOut struct describing the new directory.
func (o *MkDirOp) kernelResponse() (msg []byte) {
	sz := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(sz)
	convertChildInodeEntry(&o.Entry, (*fusekernel.EntryOut)(b.Alloc(sz)))
	return b
}
// Create a file inode and open it.
//
// The kernel sends this when the user asks to open a file with the O_CREAT
// flag and the kernel has observed that the file doesn't exist. (See for
// example lookup_open, http://goo.gl/PlqE9d). However, osxfuse doesn't appear
// to make this check atomically (cf. https://goo.gl/PqzZDv). And if names may
// be created outside of the kernel's control, it doesn't matter what the
// kernel does anyway.
//
// Therefore the file system should return EEXIST if the name already exists.
type CreateFileOp struct {
	commonOp

	// The ID of parent directory inode within which to create the child file.
	Parent InodeID

	// The name of the child to create, and the mode with which to create it.
	Name string
	Mode os.FileMode

	// Set by the file system: information about the inode that was created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry

	// Set by the file system: an opaque ID that will be echoed in follow-up
	// calls for this file using the same struct file in the kernel. In practice
	// this usually means follow-up calls using the file descriptor returned by
	// open(2).
	//
	// The handle may be supplied in future ops like ReadFileOp that contain a
	// file handle. The file system must ensure this ID remains valid until a
	// later call to ReleaseFileHandle.
	Handle HandleID
}
// ShortDesc returns a one-line summary of the op suitable for logging.
func (o *CreateFileOp) ShortDesc() (desc string) {
	return fmt.Sprintf("CreateFile(parent=%v, name=%q)", o.Parent, o.Name)
}
// kernelResponse serializes an EntryOut struct for the new inode followed by
// an OpenOut struct carrying the file handle, in a single buffer.
func (o *CreateFileOp) kernelResponse() (msg []byte) {
	entrySize := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	openSize := unsafe.Sizeof(fusekernel.OpenOut{})

	b := fuseshim.NewBuffer(entrySize + openSize)
	convertChildInodeEntry(&o.Entry, (*fusekernel.EntryOut)(b.Alloc(entrySize)))

	openOut := (*fusekernel.OpenOut)(b.Alloc(openSize))
	openOut.Fh = uint64(o.Handle)

	return b
}
// Create a symlink inode. If the name already exists, the file system should
// return EEXIST (cf. the notes on CreateFileOp and MkDirOp).
type CreateSymlinkOp struct {
	commonOp

	// The ID of parent directory inode within which to create the child symlink.
	Parent InodeID

	// The name of the symlink to create.
	Name string

	// The target of the symlink.
	Target string

	// Set by the file system: information about the symlink inode that was
	// created.
	//
	// The lookup count for the inode is implicitly incremented. See notes on
	// ForgetInodeOp for more information.
	Entry ChildInodeEntry
}
// ShortDesc returns a one-line summary of the op suitable for logging.
func (o *CreateSymlinkOp) ShortDesc() (desc string) {
	return fmt.Sprintf(
		"CreateSymlink(parent=%v, name=%q, target=%q)",
		o.Parent, o.Name, o.Target)
}
// kernelResponse serializes an EntryOut struct describing the new symlink.
func (o *CreateSymlinkOp) kernelResponse() (msg []byte) {
	sz := fusekernel.EntryOutSize(fusekernel.Protocol{0, 0})
	b := fuseshim.NewBuffer(sz)
	convertChildInodeEntry(&o.Entry, (*fusekernel.EntryOut)(b.Alloc(sz)))
	return b
}
////////////////////////////////////////////////////////////////////////
// Unlinking
////////////////////////////////////////////////////////////////////////
// Rename a file or directory, given the IDs of the original parent directory
// and the new one (which may be the same).
//
// In Linux, this is called by vfs_rename (https://goo.gl/eERItT), which is
// called by sys_renameat2 (https://goo.gl/fCC9qC).
//
// The kernel takes care of ensuring that the source and destination are not
// identical (in which case it does nothing), that the rename is not across
// file system boundaries, and that the destination doesn't already exist with
// the wrong type. Some subtleties that the file system must care about:
//
// * If the new name is an existing directory, the file system must ensure it
// is empty before replacing it, returning ENOTEMPTY otherwise. (This is
// per the posix spec: http://goo.gl/4XtT79)
//
// * The rename must be atomic from the point of view of an observer of the
// new name. That is, if the new name already exists, there must be no
// point at which it doesn't exist.
//
// * It is okay for the new name to be modified before the old name is
// removed; these need not be atomic. In fact, the Linux man page
// explicitly says this is likely (cf. https://goo.gl/Y1wVZc).
//
// * Linux bends over backwards (https://goo.gl/pLDn3r) to ensure that
// neither the old nor the new parent can be concurrently modified. But
// it's not clear whether OS X does this, and in any case it doesn't matter
// for file systems that may be modified remotely. Therefore a careful file
// system implementor should probably ensure if possible that the unlink
// step in the "link new name, unlink old name" process doesn't unlink a
// different inode than the one that was linked to the new name. Still,
// posix and the man pages are imprecise about the actual semantics of a
// rename if it's not atomic, so it is probably not disastrous to be loose
// about this.
//
type RenameOp struct {
	commonOp

	// The old parent directory, and the name of the entry within it to be
	// relocated.
	OldParent InodeID
	OldName   string

	// The new parent directory, and the name of the entry to be created or
	// overwritten within it.
	NewParent InodeID
	NewName   string
}
// kernelResponse serializes an empty (header-only) reply; rename carries no
// output payload.
func (o *RenameOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Unlink a directory from its parent. Because directories cannot have a link
// count above one, this means the directory inode should be deleted as well
// once the kernel sends ForgetInodeOp.
//
// The file system is responsible for checking that the directory is empty.
//
// Sample implementation in ext2: ext2_rmdir (http://goo.gl/B9QmFf)
type RmDirOp struct {
	commonOp

	// The ID of parent directory inode, and the name of the directory being
	// removed within it.
	Parent InodeID
	Name   string
}
// kernelResponse serializes an empty (header-only) reply; rmdir carries no
// output payload.
func (o *RmDirOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Unlink a file or symlink from its parent. If this brings the inode's link
// count to zero, the inode should be deleted once the kernel sends
// ForgetInodeOp. It may still be referenced before then if a user still has
// the file open.
//
// Sample implementation in ext2: ext2_unlink (http://goo.gl/hY6r6C)
type UnlinkOp struct {
	commonOp

	// The ID of parent directory inode, and the name of the entry being removed
	// within it.
	Parent InodeID
	Name   string
}
// kernelResponse serializes an empty (header-only) reply; unlink carries no
// output payload.
func (o *UnlinkOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
////////////////////////////////////////////////////////////////////////
// Directory handles
////////////////////////////////////////////////////////////////////////
// Open a directory inode.
//
// On Linux the kernel sends this when setting up a struct file for a particular inode
// with type directory, usually in response to an open(2) call from a
// user-space process. On OS X it may not be sent for every open(2) (cf.
// https://github.com/osxfuse/osxfuse/issues/199).
type OpenDirOp struct {
	commonOp

	// The ID of the inode to be opened.
	Inode InodeID

	// Set by the file system: an opaque ID that will be echoed in follow-up
	// calls for this directory using the same struct file in the kernel. In
	// practice this usually means follow-up calls using the file descriptor
	// returned by open(2).
	//
	// The handle may be supplied in future ops like ReadDirOp that contain a
	// directory handle. The file system must ensure this ID remains valid until
	// a later call to ReleaseDirHandle.
	Handle HandleID
}
// kernelResponse serializes an OpenOut struct carrying the directory handle.
func (o *OpenDirOp) kernelResponse() (msg []byte) {
	sz := unsafe.Sizeof(fusekernel.OpenOut{})
	b := fuseshim.NewBuffer(sz)
	(*fusekernel.OpenOut)(b.Alloc(sz)).Fh = uint64(o.Handle)
	return b
}
// Read entries from a directory previously opened with OpenDir.
type ReadDirOp struct {
	commonOp

	// The directory inode that we are reading, and the handle previously
	// returned by OpenDir when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The offset within the directory at which to read.
	//
	// Warning: this field is not necessarily a count of bytes. Its legal values
	// are defined by the results returned in ReadDirResponse. See the notes
	// below and the notes on that struct.
	//
	// In the Linux kernel this ultimately comes from file::f_pos, which starts
	// at zero and is set by llseek and by the final consumed result returned by
	// each call to ReadDir:
	//
	//  *  (http://goo.gl/2nWJPL) iterate_dir, which is called by getdents(2) and
	//     readdir(2), sets dir_context::pos to file::f_pos before calling
	//     f_op->iterate, and then does the opposite assignment afterward.
	//
	//  *  (http://goo.gl/rTQVSL) fuse_readdir, which implements iterate for fuse
	//     directories, passes dir_context::pos as the offset to fuse_read_fill,
	//     which passes it on to user-space. fuse_readdir later calls
	//     parse_dirfile with the same context.
	//
	//  *  (http://goo.gl/vU5ukv) For each returned result (except perhaps the
	//     last, which may be truncated by the page boundary), parse_dirfile
	//     updates dir_context::pos with fuse_dirent::off.
	//
	// It is affected by the Posix directory stream interfaces in the following
	// manner:
	//
	//  *  (http://goo.gl/fQhbyn, http://goo.gl/ns1kDF) opendir initially causes
	//     filepos to be set to zero.
	//
	//  *  (http://goo.gl/ezNKyR, http://goo.gl/xOmDv0) readdir allows the user
	//     to iterate through the directory one entry at a time. As each entry is
	//     consumed, its d_off field is stored in __dirstream::filepos.
	//
	//  *  (http://goo.gl/WEOXG8, http://goo.gl/rjSXl3) telldir allows the user
	//     to obtain the d_off field from the most recently returned entry.
	//
	//  *  (http://goo.gl/WG3nDZ, http://goo.gl/Lp0U6W) seekdir allows the user
	//     to seek backward to an offset previously returned by telldir. It
	//     stores the new offset in filepos, and calls llseek to update the
	//     kernel's struct file.
	//
	//  *  (http://goo.gl/gONQhz, http://goo.gl/VlrQkc) rewinddir allows the user
	//     to go back to the beginning of the directory, obtaining a fresh view.
	//     It updates filepos and calls llseek to update the kernel's struct
	//     file.
	//
	// Unfortunately, FUSE offers no way to intercept seeks
	// (http://goo.gl/H6gEXa), so there is no way to cause seekdir or rewinddir
	// to fail. Additionally, there is no way to distinguish an explicit
	// rewinddir followed by readdir from the initial readdir, or a rewinddir
	// from a seekdir to the value returned by telldir just after opendir.
	//
	// Luckily, Posix is vague about what the user will see if they seek
	// backwards, and requires the user not to seek to an old offset after a
	// rewind. The only requirement on freshness is that rewinddir results in
	// something that looks like a newly-opened directory. So FUSE file systems
	// may e.g. cache an entire fresh listing for each ReadDir with a zero
	// offset, and return array offsets into that cached listing.
	Offset DirOffset

	// The maximum number of bytes to return in ReadDirResponse.Data. A smaller
	// number is acceptable.
	Size int

	// Set by the file system: a buffer consisting of a sequence of FUSE
	// directory entries in the format generated by fuse_add_direntry
	// (http://goo.gl/qCcHCV), which is consumed by parse_dirfile
	// (http://goo.gl/2WUmD2). Use fuseutil.AppendDirent to generate this data.
	//
	// The buffer must not exceed the length specified in ReadDirRequest.Size. It
	// is okay for the final entry to be truncated; parse_dirfile copes with this
	// by ignoring the partial record.
	//
	// Each entry returned exposes a directory offset to the user that may later
	// show up in ReadDirRequest.Offset. See notes on that field for more
	// information.
	//
	// An empty buffer indicates the end of the directory has been reached.
	Data []byte
}
// kernelResponse serializes the header followed by the raw dirent data.
func (o *ReadDirOp) kernelResponse() (msg []byte) {
	buf := fuseshim.NewBuffer(uintptr(len(o.Data)))
	return append(buf, o.Data...)
}
// Release a previously-minted directory handle. The kernel sends this when
// there are no more references to an open directory: all file descriptors are
// closed and all memory mappings are unmapped.
//
// The kernel guarantees that the handle ID will not be used in further ops
// sent to the file system (unless it is reissued by the file system).
//
// Errors from this op are ignored by the kernel (cf. http://goo.gl/RL38Do).
type ReleaseDirHandleOp struct {
	commonOp

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}
// kernelResponse serializes an empty (header-only) reply; release carries no
// output payload.
func (o *ReleaseDirHandleOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
////////////////////////////////////////////////////////////////////////
// File handles
////////////////////////////////////////////////////////////////////////
// Open a file inode.
//
// On Linux the kernel sends this when setting up a struct file for a particular inode
// with type file, usually in response to an open(2) call from a user-space
// process. On OS X it may not be sent for every open(2)
// (cf.https://github.com/osxfuse/osxfuse/issues/199).
type OpenFileOp struct {
	commonOp

	// The ID of the inode to be opened.
	Inode InodeID

	// An opaque ID that will be echoed in follow-up calls for this file using
	// the same struct file in the kernel. In practice this usually means
	// follow-up calls using the file descriptor returned by open(2).
	//
	// The handle may be supplied in future ops like ReadFileOp that contain a
	// file handle. The file system must ensure this ID remains valid until a
	// later call to ReleaseFileHandle.
	Handle HandleID
}
// kernelResponse serializes an OpenOut struct carrying the file handle.
func (o *OpenFileOp) kernelResponse() (msg []byte) {
	sz := unsafe.Sizeof(fusekernel.OpenOut{})
	b := fuseshim.NewBuffer(sz)
	(*fusekernel.OpenOut)(b.Alloc(sz)).Fh = uint64(o.Handle)
	return b
}
// Read data from a file previously opened with CreateFile or OpenFile.
//
// Note that this op is not sent for every call to read(2) by the end user;
// some reads may be served by the page cache. See notes on WriteFileOp for
// more.
type ReadFileOp struct {
	commonOp

	// The file inode that we are reading, and the handle previously returned by
	// CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The range of the file to read.
	//
	// The FUSE documentation requires that exactly the number of bytes be
	// returned, except in the case of EOF or error (http://goo.gl/ZgfBkF). This
	// appears to be because it uses file mmapping machinery
	// (http://goo.gl/SGxnaN) to read a page at a time. It appears to understand
	// where EOF is by checking the inode size (http://goo.gl/0BkqKD), returned
	// by a previous call to LookUpInode, GetInodeAttributes, etc.
	Offset int64
	Size   int

	// Set by the file system: the data read. If this is less than the requested
	// size, it indicates EOF. An error should not be returned in this case.
	Data []byte
}
// kernelResponse serializes the header followed by the raw file data.
func (o *ReadFileOp) kernelResponse() (msg []byte) {
	buf := fuseshim.NewBuffer(uintptr(len(o.Data)))
	return append(buf, o.Data...)
}
// Write data to a file previously opened with CreateFile or OpenFile.
//
// When the user writes data using write(2), the write goes into the page
// cache and the page is marked dirty. Later the kernel may write back the
// page via the FUSE VFS layer, causing this op to be sent:
//
// * The kernel calls address_space_operations::writepage when a dirty page
// needs to be written to backing store (cf. http://goo.gl/Ezbewg). Fuse
// sets this to fuse_writepage (cf. http://goo.gl/IeNvLT).
//
// * (http://goo.gl/Eestuy) fuse_writepage calls fuse_writepage_locked.
//
// * (http://goo.gl/RqYIxY) fuse_writepage_locked makes a write request to
// the userspace server.
//
// Note that the kernel *will* ensure that writes are received and acknowledged
// by the file system before sending a FlushFileOp when closing the file
// descriptor to which they were written:
//
// * (http://goo.gl/PheZjf) fuse_flush calls write_inode_now, which appears
// to start a writeback in the background (it talks about a "flusher
// thread").
//
// * (http://goo.gl/1IiepM) fuse_flush then calls fuse_sync_writes, which
// "[waits] for all pending writepages on the inode to finish".
//
// * (http://goo.gl/zzvxWv) Only then does fuse_flush finally send the
// flush request.
//
// (See also http://goo.gl/ocdTdM, fuse-devel thread "Fuse guarantees on
// concurrent requests".)
type WriteFileOp struct {
	commonOp

	// The file inode that we are modifying, and the handle previously returned
	// by CreateFile or OpenFile when opening that inode.
	Inode  InodeID
	Handle HandleID

	// The offset at which to write the data below.
	//
	// The man page for pwrite(2) implies that aside from changing the file
	// handle's offset, using pwrite is equivalent to using lseek(2) and then
	// write(2). The man page for lseek(2) says the following:
	//
	// "The lseek() function allows the file offset to be set beyond the end of
	// the file (but this does not change the size of the file). If data is later
	// written at this point, subsequent reads of the data in the gap (a "hole")
	// return null bytes (aq\0aq) until data is actually written into the gap."
	//
	// It is therefore reasonable to assume that the kernel is looking for
	// the following semantics:
	//
	// *   If the offset is less than or equal to the current size, extend the
	//     file as necessary to fit any data that goes past the end of the file.
	//
	// *   If the offset is greater than the current size, extend the file
	//     with null bytes until it is not, then do the above.
	//
	Offset int64

	// The data to write.
	//
	// The FUSE documentation requires that exactly the number of bytes supplied
	// be written, except on error (http://goo.gl/KUpwwn). This appears to be
	// because it uses file mmapping machinery (http://goo.gl/SGxnaN) to write a
	// page at a time.
	Data []byte
}
// kernelResponse serializes a WriteOut struct acknowledging how many bytes
// were written (always the full length of Data).
func (o *WriteFileOp) kernelResponse() (msg []byte) {
	sz := unsafe.Sizeof(fusekernel.WriteOut{})
	b := fuseshim.NewBuffer(sz)
	(*fusekernel.WriteOut)(b.Alloc(sz)).Size = uint32(len(o.Data))
	return b
}
// Synchronize the current contents of an open file to storage.
//
// vfs.txt documents this as being called for by the fsync(2) system call
// (cf. http://goo.gl/j9X8nB). Code walk for that case:
//
// * (http://goo.gl/IQkWZa) sys_fsync calls do_fsync, calls vfs_fsync, calls
// vfs_fsync_range.
//
// * (http://goo.gl/5L2SMy) vfs_fsync_range calls f_op->fsync.
//
// Note that this is also sent by fdatasync(2) (cf. http://goo.gl/01R7rF), and
// may be sent for msync(2) with the MS_SYNC flag (see the notes on
// FlushFileOp).
//
// See also: FlushFileOp, which may perform a similar function when closing a
// file (but which is not used in "real" file systems).
type SyncFileOp struct {
	commonOp

	// The file and handle being sync'd.
	Inode  InodeID
	Handle HandleID
}
// kernelResponse serializes an empty (header-only) reply; fsync carries no
// output payload.
func (o *SyncFileOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Flush the current state of an open file to storage upon closing a file
// descriptor.
//
// vfs.txt documents this as being sent for each close(2) system call (cf.
// http://goo.gl/FSkbrq). Code walk for that case:
//
// * (http://goo.gl/e3lv0e) sys_close calls __close_fd, calls filp_close.
// * (http://goo.gl/nI8fxD) filp_close calls f_op->flush (fuse_flush).
//
// But note that this is also sent in other contexts where a file descriptor is
// closed, such as dup2(2) (cf. http://goo.gl/NQDvFS). In the case of close(2),
// a flush error is returned to the user. For dup2(2), it is not.
//
// One potentially significant case where this may not be sent is mmap'd files,
// where the behavior is complicated:
//
// * munmap(2) does not cause flushes (cf. http://goo.gl/j8B9g0).
//
// * On OS X, if a user modifies a mapped file via the mapping before
// closing the file with close(2), the WriteFileOps for the modifications
// may not be received before the FlushFileOp for the close(2) (cf.
// http://goo.gl/kVmNcx).
//
// * However, even on OS X you can arrange for writes via a mapping to be
// flushed by calling msync(2) followed by close(2). On OS X msync(2)
// will cause a WriteFileOps to go through and close(2) will cause a
// FlushFile as usual (cf. http://goo.gl/kVmNcx). On Linux, msync(2) does
// nothing unless you set the MS_SYNC flag, in which case it causes a
// SyncFileOp to be sent (cf. http://goo.gl/P3mErk).
//
// In summary: if you make data durable in both FlushFile and SyncFile, then
// your users can get safe behavior from mapped files on both operating systems
// by calling msync(2) with MS_SYNC, followed by munmap(2), followed by
// close(2). On Linux, the msync(2) is optional (cf. http://goo.gl/EIhAxv and
// the notes on WriteFileOp).
//
// Because of cases like dup2(2), FlushFileOps are not necessarily one to one
// with OpenFileOps. They should not be used for reference counting, and the
// handle must remain valid even after the flush op is received (use
// ReleaseFileHandleOp for disposing of it).
//
// Typical "real" file systems do not implement this, presumably relying on
// the kernel to write out the page cache to the block device eventually.
// They can get away with this because a later open(2) will see the same
// data. A file system that writes to remote storage however probably wants
// to at least schedule a real flush, and maybe do it immediately in order to
// return any errors that occur.
type FlushFileOp struct {
	commonOp

	// The file and handle being flushed.
	Inode  InodeID
	Handle HandleID
}
// kernelResponse serializes an empty (header-only) reply; flush carries no
// output payload.
func (o *FlushFileOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// Release a previously-minted file handle. The kernel calls this when there
// are no more references to an open file: all file descriptors are closed
// and all memory mappings are unmapped.
//
// The kernel guarantees that the handle ID will not be used in further calls
// to the file system (unless it is reissued by the file system).
//
// Errors from this op are ignored by the kernel (cf. http://goo.gl/RL38Do).
type ReleaseFileHandleOp struct {
	commonOp

	// The handle ID to be released. The kernel guarantees that this ID will not
	// be used in further calls to the file system (unless it is reissued by the
	// file system).
	Handle HandleID
}
// kernelResponse serializes an empty (header-only) reply; release carries no
// output payload.
func (o *ReleaseFileHandleOp) kernelResponse() (msg []byte) {
	return fuseshim.NewBuffer(0)
}
// A sentinel used for unknown ops. The user is expected to respond with a
// non-nil error.
type unknownOp struct {
	commonOp

	// The raw FUSE opcode and inode from the unrecognized request, kept for
	// logging purposes only.
	opCode uint32
	inode  InodeID
}
// ShortDesc returns a one-line summary of the unrecognized op for logging.
func (o *unknownOp) ShortDesc() (desc string) {
	return fmt.Sprintf("<opcode %d>(inode=%v)", o.opCode, o.inode)
}
// kernelResponse must never be called: the user is expected to respond to an
// unknown op with a non-nil error, which short-circuits serialization.
func (o *unknownOp) kernelResponse() (msg []byte) {
	panic(fmt.Sprintf("Should never get here for unknown op: %s", o.ShortDesc()))
}
////////////////////////////////////////////////////////////////////////
// Reading symlinks
////////////////////////////////////////////////////////////////////////
// Read the target of a symlink inode.
type ReadSymlinkOp struct {
	commonOp

	// The symlink inode that we are reading.
	Inode InodeID

	// Set by the file system: the target of the symlink.
	Target string
}
// kernelResponse serializes the header followed by the raw target string.
func (o *ReadSymlinkOp) kernelResponse() (msg []byte) {
	buf := fuseshim.NewBuffer(uintptr(len(o.Target)))
	return append(buf, o.Target...)
}
////////////////////////////////////////////////////////////////////////
// Internal
////////////////////////////////////////////////////////////////////////
// TODO(jacobsa): Untangle the way ops work and move these to an internal
// package, along with Convert. I think all of the behavior wants to be on
// Connection. Ops have only String methods. Connection.ReadRequest returns an
// interface{} and a context. If we must restore debug logging later, we can
// stuff an op ID in that context and add a Connection.Logf method.
// Do not use this struct directly. See the TODO in fuseops/ops.go.
// InternalStatFSOp is sent when the kernel requests file system statistics
// (statfs). It carries no input fields; only the embedded common plumbing.
type InternalStatFSOp struct {
	commonOp
}
// kernelResponse serializes the statfs reply. The StatfsOut struct is left
// zeroed; only space for it is reserved in the buffer.
func (o *InternalStatFSOp) kernelResponse() (msg []byte) {
	sz := unsafe.Sizeof(fusekernel.StatfsOut{})
	b := fuseshim.NewBuffer(sz)
	b.Alloc(sz)
	return b
}
// Do not use this struct directly. See the TODO in fuseops/ops.go.
// InternalInterruptOp is sent by the kernel to interrupt a previously-issued
// op, identified by its FUSE unique request ID.
type InternalInterruptOp struct {
	commonOp

	// The kernel's unique ID for the op to be interrupted.
	FuseID uint64
}
// kernelResponse must never be called for interrupt ops; they are handled
// specially by the connection rather than serialized as a normal reply.
func (o *InternalInterruptOp) kernelResponse() (msg []byte) {
	panic("Shouldn't get here.")
}
// (end of fuseops definitions)
// Copyright 2015 monsterqueue authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mongodb
import (
"errors"
"fmt"
"os"
"runtime"
"sync"
"time"
"github.com/tsuru/monsterqueue"
"github.com/tsuru/monsterqueue/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// queueMongoDB is a monsterqueue.Queue implementation backed by MongoDB.
type queueMongoDB struct {
	config  *QueueConfig  // connection and collection settings
	session *mgo.Session  // root session; per-operation copies are made
	tasks   map[string]monsterqueue.Task // registered tasks, keyed by task name
	tasksMut sync.RWMutex // guards tasks
	done    chan bool     // closed/signaled to stop the processing loop
	wg      sync.WaitGroup // tracks in-flight background work
}
// QueueConfig holds the connection and behavior settings used by NewQueue.
type QueueConfig struct {
    Url              string // MongoDB connection url
    Database         string // MongoDB database name
    CollectionPrefix string // Prefix for all collections created in MongoDB
    PollingInterval  time.Duration // Pause between polls when the queue is empty; 1s when zero (see ProcessLoop)
}
// Creates a new queue. The QueueConfig parameter will tell us how to connect
// to mongodb. This command will fail if the MongoDB server is not available.
//
// Tasks registered in this queue instance will run when `ProcessLoop` is
// called in this *same* instance.
// Creates a new queue. The QueueConfig parameter will tell us how to connect
// to mongodb. This command will fail if the MongoDB server is not available.
//
// Tasks registered in this queue instance will run when `ProcessLoop` is
// called in this *same* instance.
func NewQueue(conf QueueConfig) (monsterqueue.Queue, error) {
    q := &queueMongoDB{
        config: &conf,
        tasks:  make(map[string]monsterqueue.Task),
        done:   make(chan bool),
    }
    if conf.Url == "" {
        return nil, errors.New("setting QueueConfig.Url is required")
    }
    dialInfo, err := mgo.ParseURL(conf.Url)
    if err != nil {
        return nil, err
    }
    // Fail fast so an unreachable server is reported immediately instead of
    // retrying until the full dial timeout expires.
    dialInfo.FailFast = true
    q.session, err = mgo.DialWithInfo(dialInfo)
    if err != nil {
        return nil, err
    }
    q.session.SetSyncTimeout(10 * time.Second)
    q.session.SetSocketTimeout(1 * time.Minute)
    // When neither the URL nor the config names a database, mgo falls back to
    // "test"; treat that as a configuration error.
    db := q.session.DB(conf.Database)
    if db.Name == "test" {
        q.session.Close()
        return nil, errors.New("database name should be set in QueueConfig.Url or QueueConfig.Database")
    }
    return q, nil
}
// tasksColl returns the jobs collection on a fresh copy of the base session.
// The caller owns that copy and must close it, conventionally via
// `defer coll.Database.Session.Close()`.
func (q *queueMongoDB) tasksColl() *mgo.Collection {
    s := q.session.Copy()
    name := "queue_tasks"
    if q.config.CollectionPrefix != "" {
        name = fmt.Sprintf("%s_%s", q.config.CollectionPrefix, name)
    }
    return s.DB(q.config.Database).C(name)
}
// RegisterTask makes task available to this queue instance, keyed by its
// Name(). Registering the same name twice is an error.
func (q *queueMongoDB) RegisterTask(task monsterqueue.Task) error {
    q.tasksMut.Lock()
    defer q.tasksMut.Unlock()
    name := task.Name()
    if _, dup := q.tasks[name]; dup {
        return errors.New("task already registered")
    }
    q.tasks[name] = task
    return nil
}
// Enqueue inserts a new pending job for taskName and returns it without
// waiting for execution; a worker running ProcessLoop picks it up later.
func (q *queueMongoDB) Enqueue(taskName string, params monsterqueue.JobParams) (monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    j := q.initialJob(taskName, params)
    err := coll.Insert(j)
    if err != nil {
        return nil, err
    }
    return &j, nil
}
// getDoneJob fetches the job with the given id only if it has finished
// (resultmessage.done) and its "waited" flag has been cleared (see
// publishResult). Returns (nil, nil) when no such job exists yet.
func (q *queueMongoDB) getDoneJob(jobId bson.ObjectId) (*jobMongoDB, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var resultJob jobMongoDB
    err := coll.Find(bson.M{"_id": jobId, "resultmessage.done": true, "waited": false}).One(&resultJob)
    if err != nil {
        if err == mgo.ErrNotFound {
            return nil, nil
        }
        return nil, err
    }
    return &resultJob, nil
}
// EnqueueWait inserts a new job and blocks until its result is published or
// timeout elapses. On timeout it returns the job plus
// monsterqueue.ErrQueueWaitTimeout, unless the result arrives while the
// timeout is being processed, in which case the result wins.
func (q *queueMongoDB) EnqueueWait(taskName string, params monsterqueue.JobParams, timeout time.Duration) (monsterqueue.Job, error) {
    j := q.initialJob(taskName, params)
    // Waited marks the job so the worker publishes its result to us.
    j.Waited = true
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    err := coll.Insert(j)
    if err != nil {
        return nil, err
    }
    result := make(chan *jobMongoDB, 1)
    quit := make(chan bool)
    // Poll for the published result until found or told to quit.
    go func() {
        defer close(result)
        for {
            job, err := q.getDoneJob(j.Id)
            if err != nil {
                log.Errorf("error trying to get job %s: %s", j.Id, err.Error())
            }
            if job != nil {
                result <- job
                return
            }
            select {
            case <-quit:
                return
            case <-time.After(200 * time.Millisecond):
            }
        }
    }()
    var resultJob *jobMongoDB
    select {
    case resultJob = <-result:
        return resultJob, nil
    case <-time.After(timeout):
        close(quit)
    }
    // Drain: the goroutine may still deliver a result that raced the timeout;
    // a nil read means it exited via quit and closed the channel.
    resultJob = <-result
    if resultJob != nil {
        return resultJob, nil
    }
    // Give up waiting: clear the waited flag so the worker does not publish
    // the result to a waiter that no longer exists.
    err = coll.Update(bson.M{
        "_id":    j.Id,
        "waited": true,
    }, bson.M{"$set": bson.M{"waited": false}})
    if err == mgo.ErrNotFound {
        // Flag already cleared: the job finished between timeout and update;
        // fetch its result directly.
        resultJob, err = q.getDoneJob(j.Id)
    }
    if err != nil {
        return &j, err
    }
    if resultJob != nil {
        return resultJob, nil
    }
    return &j, monsterqueue.ErrQueueWaitTimeout
}
// ProcessLoop claims and runs pending jobs until Stop is called. It blocks
// the calling goroutine, sleeping for PollingInterval (default 1s) whenever
// the queue is empty.
func (q *queueMongoDB) ProcessLoop() {
    interval := q.config.PollingInterval
    if interval == 0 {
        interval = 1 * time.Second
    }
    for {
        // Account for the message being handled; waitForMessage (or the
        // goroutine it spawns) is responsible for the matching Done.
        q.wg.Add(1)
        hasMessage, err := q.waitForMessage()
        if err != nil {
            log.Debugf("error getting message from queue: %s", err.Error())
        }
        if hasMessage {
            // Work is available: only check for a stop request, don't sleep.
            select {
            case <-q.done:
                return
            default:
            }
            continue
        }
        select {
        case <-time.After(interval):
        case <-q.done:
            return
        }
    }
}
// Stop signals ProcessLoop to exit and waits for in-flight jobs to finish.
// NOTE(review): the send on q.done blocks until ProcessLoop receives it, so
// Stop hangs if ProcessLoop is not running — confirm callers always start it.
func (q *queueMongoDB) Stop() {
    q.done <- true
    q.Wait()
}
// Wait blocks until all in-flight message handling tracked by the wait group
// has completed.
func (q *queueMongoDB) Wait() {
    q.wg.Wait()
}
// ResetStorage drops the tasks collection, discarding all jobs.
// NOTE(review): it also closes the queue's base session, leaving the queue
// unusable afterwards — presumably intended for teardown only; confirm.
func (q *queueMongoDB) ResetStorage() error {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    defer q.session.Close()
    return coll.DropCollection()
}
// RetrieveJob loads a job by its hex object id, returning
// monsterqueue.ErrNoSuchJob when it does not exist.
// NOTE(review): bson.ObjectIdHex panics on malformed ids; callers must pass
// a valid 24-character hex string.
func (q *queueMongoDB) RetrieveJob(jobId string) (monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var job jobMongoDB
    err := coll.FindId(bson.ObjectIdHex(jobId)).One(&job)
    if err != nil {
        if err == mgo.ErrNotFound {
            return nil, monsterqueue.ErrNoSuchJob
        }
        return nil, err
    }
    return &job, err
}
// ListJobs returns every job currently stored in the tasks collection.
func (q *queueMongoDB) ListJobs() ([]monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var stored []jobMongoDB
    if err := coll.Find(nil).All(&stored); err != nil {
        return nil, err
    }
    // Convert to the interface slice, taking the address of each element so
    // pointer-receiver methods are available.
    result := make([]monsterqueue.Job, len(stored))
    for i := range stored {
        result[i] = &stored[i]
    }
    return result, nil
}
// DeleteJob removes the job with the given hex object id.
// NOTE(review): bson.ObjectIdHex panics on malformed ids.
func (q *queueMongoDB) DeleteJob(jobId string) error {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    return coll.RemoveId(bson.ObjectIdHex(jobId))
}
// initialJob builds an in-memory job record for taskName with a fresh object
// id, a UTC timestamp, and the caller's stack trace (recorded for debugging
// where the job was enqueued).
func (q *queueMongoDB) initialJob(taskName string, params monsterqueue.JobParams) jobMongoDB {
    // Capture this goroutine's stack, truncated to StackTraceLimit bytes.
    buf := make([]byte, monsterqueue.StackTraceLimit)
    buf = buf[:runtime.Stack(buf, false)]
    return jobMongoDB{
        Id:        bson.NewObjectId(),
        Task:      taskName,
        Params:    params,
        Timestamp: time.Now().UTC(),
        Stack:     string(buf),
        queue:     q,
    }
}
// waitForMessage tries to claim one pending job from the tasks collection
// and, on success, runs its task in a new goroutine.
//
// It returns (true, nil) when a job was claimed, (false, nil) when no
// matching job exists, and a non-nil error on query failure or when the
// claimed job references an unregistered task.
//
// The caller (ProcessLoop) increments q.wg before calling; every return path
// here performs the matching q.wg.Done, either directly or in the worker
// goroutine.
func (q *queueMongoDB) waitForMessage() (bool, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var job jobMongoDB
    hostname, _ := os.Hostname()
    // Owner identifies this process so other workers skip the claimed job.
    ownerData := jobOwnership{
        Name:      fmt.Sprintf("%s_%d", hostname, os.Getpid()),
        Owned:     true,
        Timestamp: time.Now().UTC(),
    }
    q.tasksMut.RLock()
    taskNames := make([]string, 0, len(q.tasks))
    for taskName := range q.tasks {
        taskNames = append(taskNames, taskName)
    }
    q.tasksMut.RUnlock()
    // Atomically claim the oldest unowned, unfinished job whose task is
    // registered here (findAndModify via Query.Apply).
    _, err := coll.Find(bson.M{
        "task":               bson.M{"$in": taskNames},
        "owner.owned":        false,
        "resultmessage.done": false,
    }).Sort("_id").Apply(mgo.Change{
        Update: bson.M{
            "$set": bson.M{"owner": ownerData},
        },
    }, &job)
    if err != nil {
        q.wg.Done()
        if err == mgo.ErrNotFound {
            return false, nil
        }
        return false, err
    }
    // err is known to be nil past this point (the unreachable duplicate
    // err-check that used to follow has been removed).
    job.queue = q
    q.tasksMut.RLock()
    task := q.tasks[job.Task]
    q.tasksMut.RUnlock()
    if task == nil {
        err := fmt.Errorf("unregistered task name %q", job.Task)
        q.moveToResult(&job, nil, err)
        q.wg.Done()
        return true, err
    }
    go func() {
        defer q.wg.Done()
        task.Run(&job)
        // A task that never set a result still gets a terminal state.
        if !job.ResultMessage.Done {
            q.moveToResult(&job, nil, monsterqueue.ErrNoJobResultSet)
        }
    }()
    return true, nil
}
// moveToResult stores the job's outcome (result and/or error) in both the
// in-memory job and its document, and releases ownership so the job shows up
// as finished.
func (q *queueMongoDB) moveToResult(job *jobMongoDB, result monsterqueue.JobResult, jobErr error) error {
    var resultMsg jobResultMessage
    resultMsg.Result = result
    resultMsg.Timestamp = time.Now().UTC()
    resultMsg.Done = true
    if jobErr != nil {
        resultMsg.Error = jobErr.Error()
    }
    job.ResultMessage = resultMsg
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    return coll.UpdateId(job.Id, bson.M{"$set": bson.M{"resultmessage": resultMsg, "owner.owned": false}})
}
// publishResult clears the job's "waited" flag, handing the finished job to
// a waiter blocked in EnqueueWait. It reports whether a waiter was pending.
func (q *queueMongoDB) publishResult(job *jobMongoDB) (bool, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    selector := bson.M{"_id": job.Id, "waited": true}
    change := bson.M{"$set": bson.M{"waited": false}}
    switch err := coll.Update(selector, change); err {
    case nil:
        return true, nil
    case mgo.ErrNotFound:
        return false, nil
    default:
        return false, err
    }
}
// NOTE: no need for a buffered channel anymore.
// Copyright 2015 monsterqueue authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mongodb
import (
"errors"
"fmt"
"os"
"runtime"
"sync"
"time"
"github.com/tsuru/monsterqueue"
"github.com/tsuru/monsterqueue/log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// queueMongoDB is the MongoDB-backed implementation of monsterqueue.Queue.
type queueMongoDB struct {
    config   *QueueConfig
    session  *mgo.Session                 // base session; per-operation code copies it via tasksColl
    tasks    map[string]monsterqueue.Task // registered tasks, keyed by task name
    tasksMut sync.RWMutex                 // guards tasks
    done     chan bool                    // signals ProcessLoop to stop
    wg       sync.WaitGroup               // tracks in-flight message handling
}
// QueueConfig holds the connection and behavior settings used by NewQueue.
type QueueConfig struct {
    Url              string // MongoDB connection url
    Database         string // MongoDB database name
    CollectionPrefix string // Prefix for all collections created in MongoDB
    PollingInterval  time.Duration // Pause between polls when the queue is empty; 1s when zero (see ProcessLoop)
}
// Creates a new queue. The QueueConfig parameter will tell us how to connect
// to mongodb. This command will fail if the MongoDB server is not available.
//
// Tasks registered in this queue instance will run when `ProcessLoop` is
// called in this *same* instance.
// Creates a new queue. The QueueConfig parameter will tell us how to connect
// to mongodb. This command will fail if the MongoDB server is not available.
//
// Tasks registered in this queue instance will run when `ProcessLoop` is
// called in this *same* instance.
func NewQueue(conf QueueConfig) (monsterqueue.Queue, error) {
    q := &queueMongoDB{
        config: &conf,
        tasks:  make(map[string]monsterqueue.Task),
        done:   make(chan bool),
    }
    if conf.Url == "" {
        return nil, errors.New("setting QueueConfig.Url is required")
    }
    dialInfo, err := mgo.ParseURL(conf.Url)
    if err != nil {
        return nil, err
    }
    // Fail fast so an unreachable server is reported immediately instead of
    // retrying until the full dial timeout expires.
    dialInfo.FailFast = true
    q.session, err = mgo.DialWithInfo(dialInfo)
    if err != nil {
        return nil, err
    }
    q.session.SetSyncTimeout(10 * time.Second)
    q.session.SetSocketTimeout(1 * time.Minute)
    // When neither the URL nor the config names a database, mgo falls back to
    // "test"; treat that as a configuration error.
    db := q.session.DB(conf.Database)
    if db.Name == "test" {
        q.session.Close()
        return nil, errors.New("database name should be set in QueueConfig.Url or QueueConfig.Database")
    }
    return q, nil
}
// tasksColl returns the jobs collection on a fresh copy of the base session.
// The caller owns that copy and must close it, conventionally via
// `defer coll.Database.Session.Close()`.
func (q *queueMongoDB) tasksColl() *mgo.Collection {
    s := q.session.Copy()
    name := "queue_tasks"
    if q.config.CollectionPrefix != "" {
        name = fmt.Sprintf("%s_%s", q.config.CollectionPrefix, name)
    }
    return s.DB(q.config.Database).C(name)
}
// RegisterTask makes task available to this queue instance, keyed by its
// Name(). Registering the same name twice is an error.
func (q *queueMongoDB) RegisterTask(task monsterqueue.Task) error {
    q.tasksMut.Lock()
    defer q.tasksMut.Unlock()
    name := task.Name()
    if _, dup := q.tasks[name]; dup {
        return errors.New("task already registered")
    }
    q.tasks[name] = task
    return nil
}
// Enqueue inserts a new pending job for taskName and returns it without
// waiting for execution; a worker running ProcessLoop picks it up later.
func (q *queueMongoDB) Enqueue(taskName string, params monsterqueue.JobParams) (monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    j := q.initialJob(taskName, params)
    err := coll.Insert(j)
    if err != nil {
        return nil, err
    }
    return &j, nil
}
// getDoneJob fetches the job with the given id only if it has finished
// (resultmessage.done) and its "waited" flag has been cleared (see
// publishResult). Returns (nil, nil) when no such job exists yet.
func (q *queueMongoDB) getDoneJob(jobId bson.ObjectId) (*jobMongoDB, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var resultJob jobMongoDB
    err := coll.Find(bson.M{"_id": jobId, "resultmessage.done": true, "waited": false}).One(&resultJob)
    if err != nil {
        if err == mgo.ErrNotFound {
            return nil, nil
        }
        return nil, err
    }
    return &resultJob, nil
}
// EnqueueWait inserts a new job and blocks until its result is published or
// timeout elapses. On timeout it returns the job plus
// monsterqueue.ErrQueueWaitTimeout, unless the result arrives while the
// timeout is being processed, in which case the result wins.
func (q *queueMongoDB) EnqueueWait(taskName string, params monsterqueue.JobParams, timeout time.Duration) (monsterqueue.Job, error) {
    j := q.initialJob(taskName, params)
    // Waited marks the job so the worker publishes its result to us.
    j.Waited = true
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    err := coll.Insert(j)
    if err != nil {
        return nil, err
    }
    result := make(chan *jobMongoDB)
    quit := make(chan bool)
    // Poll for the published result until found or told to quit.
    go func() {
        defer close(result)
        for {
            job, err := q.getDoneJob(j.Id)
            if err != nil {
                log.Errorf("error trying to get job %s: %s", j.Id, err.Error())
            }
            if job != nil {
                result <- job
                return
            }
            select {
            case <-quit:
                return
            case <-time.After(200 * time.Millisecond):
            }
        }
    }()
    var resultJob *jobMongoDB
    select {
    case resultJob = <-result:
        return resultJob, nil
    case <-time.After(timeout):
        close(quit)
    }
    // Drain: the goroutine may still deliver a result that raced the timeout;
    // a nil read means it exited via quit and closed the channel.
    resultJob = <-result
    if resultJob != nil {
        return resultJob, nil
    }
    // Give up waiting: clear the waited flag so the worker does not publish
    // the result to a waiter that no longer exists.
    err = coll.Update(bson.M{
        "_id":    j.Id,
        "waited": true,
    }, bson.M{"$set": bson.M{"waited": false}})
    if err == mgo.ErrNotFound {
        // Flag already cleared: the job finished between timeout and update;
        // fetch its result directly.
        resultJob, err = q.getDoneJob(j.Id)
    }
    if err != nil {
        return &j, err
    }
    if resultJob != nil {
        return resultJob, nil
    }
    return &j, monsterqueue.ErrQueueWaitTimeout
}
// ProcessLoop claims and runs pending jobs until Stop is called. It blocks
// the calling goroutine, sleeping for PollingInterval (default 1s) whenever
// the queue is empty.
func (q *queueMongoDB) ProcessLoop() {
    interval := q.config.PollingInterval
    if interval == 0 {
        interval = 1 * time.Second
    }
    for {
        // Account for the message being handled; waitForMessage (or the
        // goroutine it spawns) is responsible for the matching Done.
        q.wg.Add(1)
        hasMessage, err := q.waitForMessage()
        if err != nil {
            log.Debugf("error getting message from queue: %s", err.Error())
        }
        if hasMessage {
            // Work is available: only check for a stop request, don't sleep.
            select {
            case <-q.done:
                return
            default:
            }
            continue
        }
        select {
        case <-time.After(interval):
        case <-q.done:
            return
        }
    }
}
// Stop signals ProcessLoop to exit and waits for in-flight jobs to finish.
// NOTE(review): the send on q.done blocks until ProcessLoop receives it, so
// Stop hangs if ProcessLoop is not running — confirm callers always start it.
func (q *queueMongoDB) Stop() {
    q.done <- true
    q.Wait()
}
// Wait blocks until all in-flight message handling tracked by the wait group
// has completed.
func (q *queueMongoDB) Wait() {
    q.wg.Wait()
}
// ResetStorage drops the tasks collection, discarding all jobs.
// NOTE(review): it also closes the queue's base session, leaving the queue
// unusable afterwards — presumably intended for teardown only; confirm.
func (q *queueMongoDB) ResetStorage() error {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    defer q.session.Close()
    return coll.DropCollection()
}
// RetrieveJob loads a job by its hex object id, returning
// monsterqueue.ErrNoSuchJob when it does not exist.
// NOTE(review): bson.ObjectIdHex panics on malformed ids; callers must pass
// a valid 24-character hex string.
func (q *queueMongoDB) RetrieveJob(jobId string) (monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var job jobMongoDB
    err := coll.FindId(bson.ObjectIdHex(jobId)).One(&job)
    if err != nil {
        if err == mgo.ErrNotFound {
            return nil, monsterqueue.ErrNoSuchJob
        }
        return nil, err
    }
    return &job, err
}
// ListJobs returns every job currently stored in the tasks collection.
func (q *queueMongoDB) ListJobs() ([]monsterqueue.Job, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var stored []jobMongoDB
    if err := coll.Find(nil).All(&stored); err != nil {
        return nil, err
    }
    // Convert to the interface slice, taking the address of each element so
    // pointer-receiver methods are available.
    result := make([]monsterqueue.Job, len(stored))
    for i := range stored {
        result[i] = &stored[i]
    }
    return result, nil
}
// DeleteJob removes the job with the given hex object id.
// NOTE(review): bson.ObjectIdHex panics on malformed ids.
func (q *queueMongoDB) DeleteJob(jobId string) error {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    return coll.RemoveId(bson.ObjectIdHex(jobId))
}
// initialJob builds an in-memory job record for taskName with a fresh object
// id, a UTC timestamp, and the caller's stack trace (recorded for debugging
// where the job was enqueued).
func (q *queueMongoDB) initialJob(taskName string, params monsterqueue.JobParams) jobMongoDB {
    // Capture this goroutine's stack, truncated to StackTraceLimit bytes.
    buf := make([]byte, monsterqueue.StackTraceLimit)
    buf = buf[:runtime.Stack(buf, false)]
    return jobMongoDB{
        Id:        bson.NewObjectId(),
        Task:      taskName,
        Params:    params,
        Timestamp: time.Now().UTC(),
        Stack:     string(buf),
        queue:     q,
    }
}
// waitForMessage tries to claim one pending job from the tasks collection
// and, on success, runs its task in a new goroutine.
//
// It returns (true, nil) when a job was claimed, (false, nil) when no
// matching job exists, and a non-nil error on query failure or when the
// claimed job references an unregistered task.
//
// The caller (ProcessLoop) increments q.wg before calling; every return path
// here performs the matching q.wg.Done, either directly or in the worker
// goroutine.
func (q *queueMongoDB) waitForMessage() (bool, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    var job jobMongoDB
    hostname, _ := os.Hostname()
    // Owner identifies this process so other workers skip the claimed job.
    ownerData := jobOwnership{
        Name:      fmt.Sprintf("%s_%d", hostname, os.Getpid()),
        Owned:     true,
        Timestamp: time.Now().UTC(),
    }
    q.tasksMut.RLock()
    taskNames := make([]string, 0, len(q.tasks))
    for taskName := range q.tasks {
        taskNames = append(taskNames, taskName)
    }
    q.tasksMut.RUnlock()
    // Atomically claim the oldest unowned, unfinished job whose task is
    // registered here (findAndModify via Query.Apply).
    _, err := coll.Find(bson.M{
        "task":               bson.M{"$in": taskNames},
        "owner.owned":        false,
        "resultmessage.done": false,
    }).Sort("_id").Apply(mgo.Change{
        Update: bson.M{
            "$set": bson.M{"owner": ownerData},
        },
    }, &job)
    if err != nil {
        q.wg.Done()
        if err == mgo.ErrNotFound {
            return false, nil
        }
        return false, err
    }
    // err is known to be nil past this point (the unreachable duplicate
    // err-check that used to follow has been removed).
    job.queue = q
    q.tasksMut.RLock()
    task := q.tasks[job.Task]
    q.tasksMut.RUnlock()
    if task == nil {
        err := fmt.Errorf("unregistered task name %q", job.Task)
        q.moveToResult(&job, nil, err)
        q.wg.Done()
        return true, err
    }
    go func() {
        defer q.wg.Done()
        task.Run(&job)
        // A task that never set a result still gets a terminal state.
        if !job.ResultMessage.Done {
            q.moveToResult(&job, nil, monsterqueue.ErrNoJobResultSet)
        }
    }()
    return true, nil
}
// moveToResult stores the job's outcome (result and/or error) in both the
// in-memory job and its document, and releases ownership so the job shows up
// as finished.
func (q *queueMongoDB) moveToResult(job *jobMongoDB, result monsterqueue.JobResult, jobErr error) error {
    var resultMsg jobResultMessage
    resultMsg.Result = result
    resultMsg.Timestamp = time.Now().UTC()
    resultMsg.Done = true
    if jobErr != nil {
        resultMsg.Error = jobErr.Error()
    }
    job.ResultMessage = resultMsg
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    return coll.UpdateId(job.Id, bson.M{"$set": bson.M{"resultmessage": resultMsg, "owner.owned": false}})
}
// publishResult clears the job's "waited" flag, handing the finished job to
// a waiter blocked in EnqueueWait. It reports whether a waiter was pending.
func (q *queueMongoDB) publishResult(job *jobMongoDB) (bool, error) {
    coll := q.tasksColl()
    defer coll.Database.Session.Close()
    selector := bson.M{"_id": job.Id, "waited": true}
    change := bson.M{"$set": bson.M{"waited": false}}
    switch err := coll.Update(selector, change); err {
    case nil:
        return true, nil
    case mgo.ErrNotFound:
        return false, nil
    default:
        return false, err
    }
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/kubernetes-incubator/external-storage/lib/controller/metrics"
"github.com/kubernetes-incubator/external-storage/lib/util"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/time/rate"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
// annClass annotation represents the storage class associated with a resource:
// - in PersistentVolumeClaim it represents required class to match.
//   Only PersistentVolumes with the same class (i.e. annotation with the same
//   value) can be bound to the claim. In case no such volume exists, the
//   controller will provision a new one using StorageClass instance with
//   the same name as the annotation value.
// - in PersistentVolume it represents storage class to which the persistent
//   volume belongs.
const annClass = "volume.beta.kubernetes.io/storage-class"
// This annotation is added to a PV that has been dynamically provisioned by
// Kubernetes. Its value is name of volume plugin that created the volume.
// It serves both user (to show where a PV comes from) and Kubernetes (to
// recognize dynamically provisioned PVs in its decisions).
const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
// annStorageProvisioner names the provisioner expected to provision a volume
// for the PVC it is set on; this controller watches it for its own name.
const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"
// This annotation is added to a PVC that has been triggered by scheduler to
// be dynamically provisioned. Its value is the name of the selected node.
const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"
// ProvisionController is a controller that provisions PersistentVolumes for
// PersistentVolumeClaims.
// ProvisionController is a controller that provisions PersistentVolumes for
// PersistentVolumeClaims.
type ProvisionController struct {
    client kubernetes.Interface
    // The name of the provisioner for which this controller dynamically
    // provisions volumes. The value of annDynamicallyProvisioned and
    // annStorageProvisioner to set & watch for, respectively
    provisionerName string
    // The provisioner the controller will use to provision and delete volumes.
    // Presumably this implementer of Provisioner carries its own
    // volume-specific options and such that it needs in order to provision
    // volumes.
    provisioner Provisioner
    // Kubernetes cluster server version:
    // * 1.4: storage classes introduced as beta. Technically out-of-tree dynamic
    //   provisioning is not officially supported, though it works
    // * 1.5: storage classes stay in beta. Out-of-tree dynamic provisioning is
    //   officially supported
    // * 1.6: storage classes enter GA
    kubeVersion *utilversion.Version
    // Informer/store/controller triples caching PVCs, PVs and StorageClasses
    // (shared informers when supplied via options, private ones otherwise).
    claimInformer    cache.SharedInformer
    claims           cache.Store
    claimController  cache.Controller
    volumeInformer   cache.SharedInformer
    volumes          cache.Store
    volumeController cache.Controller
    classInformer    cache.SharedInformer
    classes          cache.Store
    classController  cache.Controller
    // Rate-limited work queues of claims/volumes to process, fed by the
    // informer event handlers.
    claimQueue  workqueue.RateLimitingInterface
    volumeQueue workqueue.RateLimitingInterface
    // Identity of this controller, generated at creation time and not persisted
    // across restarts. Useful only for debugging, for seeing the source of
    // events. controller.provisioner may have its own, different notion of
    // identity which may/may not persist across restarts
    id            string
    component     string
    eventRecorder record.EventRecorder
    resyncPeriod time.Duration
    exponentialBackOffOnError bool
    threadiness               int
    createProvisionedPVRetryCount int
    createProvisionedPVInterval   time.Duration
    failedProvisionThreshold, failedDeleteThreshold int
    // The port for metrics server to serve on.
    metricsPort int32
    // The IP address for metrics server to serve on.
    metricsAddress string
    // The path of metrics endpoint path.
    metricsPath string
    // Whether to do kubernetes leader election at all. It should basically
    // always be done when possible to avoid duplicate Provision attempts.
    leaderElection          bool
    leaderElectionNamespace string
    // Parameters of leaderelection.LeaderElectionConfig.
    leaseDuration, renewDeadline, retryPeriod time.Duration
    // hasRun flips to true once Run starts; guarded by hasRunLock.
    // Option functions consult it (via HasRun) to refuse late configuration.
    hasRun     bool
    hasRunLock *sync.Mutex
}
// Defaults applied by NewProvisionController when the corresponding option
// function is not supplied.
const (
    // DefaultResyncPeriod is used when option function ResyncPeriod is omitted
    DefaultResyncPeriod = 15 * time.Minute
    // DefaultThreadiness is used when option function Threadiness is omitted
    DefaultThreadiness = 4
    // DefaultExponentialBackOffOnError is used when option function ExponentialBackOffOnError is omitted
    DefaultExponentialBackOffOnError = true
    // DefaultCreateProvisionedPVRetryCount is used when option function CreateProvisionedPVRetryCount is omitted
    DefaultCreateProvisionedPVRetryCount = 5
    // DefaultCreateProvisionedPVInterval is used when option function CreateProvisionedPVInterval is omitted
    DefaultCreateProvisionedPVInterval = 10 * time.Second
    // DefaultFailedProvisionThreshold is used when option function FailedProvisionThreshold is omitted
    DefaultFailedProvisionThreshold = 15
    // DefaultFailedDeleteThreshold is used when option function FailedDeleteThreshold is omitted
    DefaultFailedDeleteThreshold = 15
    // DefaultLeaderElection is used when option function LeaderElection is omitted
    DefaultLeaderElection = true
    // DefaultLeaseDuration is used when option function LeaseDuration is omitted
    DefaultLeaseDuration = 15 * time.Second
    // DefaultRenewDeadline is used when option function RenewDeadline is omitted
    DefaultRenewDeadline = 10 * time.Second
    // DefaultRetryPeriod is used when option function RetryPeriod is omitted
    DefaultRetryPeriod = 2 * time.Second
    // DefaultMetricsPort is used when option function MetricsPort is omitted
    DefaultMetricsPort = 0
    // DefaultMetricsAddress is used when option function MetricsAddress is omitted
    DefaultMetricsAddress = "0.0.0.0"
    // DefaultMetricsPath is used when option function MetricsPath is omitted
    DefaultMetricsPath = "/metrics"
)
var errRuntime = fmt.Errorf("cannot call option functions after controller has Run")
// ResyncPeriod is how often the controller relists PVCs, PVs, & storage
// classes. OnUpdate will be called even if nothing has changed, meaning failed
// operations may be retried on a PVC/PV every resyncPeriod regardless of
// whether it changed. Defaults to 15 minutes.
// Returns errRuntime when applied after the controller has Run.
func ResyncPeriod(resyncPeriod time.Duration) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.resyncPeriod = resyncPeriod
        return nil
    }
}
// Threadiness is the number of claim and volume workers each to launch.
// Defaults to 4.
// Returns errRuntime when applied after the controller has Run.
func Threadiness(threadiness int) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.threadiness = threadiness
        return nil
    }
}
// ExponentialBackOffOnError determines whether to exponentially back off from
// failures of Provision and Delete. Defaults to true.
// Returns errRuntime when applied after the controller has Run.
func ExponentialBackOffOnError(exponentialBackOffOnError bool) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.exponentialBackOffOnError = exponentialBackOffOnError
        return nil
    }
}
// CreateProvisionedPVRetryCount is the number of retries when we create a PV
// object for a provisioned volume. Defaults to 5.
// Returns errRuntime when applied after the controller has Run.
func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.createProvisionedPVRetryCount = createProvisionedPVRetryCount
        return nil
    }
}
// CreateProvisionedPVInterval is the interval between retries when we create a
// PV object for a provisioned volume. Defaults to 10 seconds.
// Returns errRuntime when applied after the controller has Run.
func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.createProvisionedPVInterval = createProvisionedPVInterval
        return nil
    }
}
// FailedProvisionThreshold is the threshold for max number of retries on
// failures of Provision. Defaults to 15.
// Returns errRuntime when applied after the controller has Run.
func FailedProvisionThreshold(failedProvisionThreshold int) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.failedProvisionThreshold = failedProvisionThreshold
        return nil
    }
}
// FailedDeleteThreshold is the threshold for max number of retries on failures
// of Delete. Defaults to 15.
// Returns errRuntime when applied after the controller has Run.
func FailedDeleteThreshold(failedDeleteThreshold int) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.failedDeleteThreshold = failedDeleteThreshold
        return nil
    }
}
// LeaderElection determines whether to enable leader election or not. Defaults
// to true.
// Returns errRuntime when applied after the controller has Run.
func LeaderElection(leaderElection bool) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.leaderElection = leaderElection
        return nil
    }
}
// LeaderElectionNamespace is the kubernetes namespace in which to create the
// leader election object. Defaults to the same namespace in which the
// controller runs.
// Returns errRuntime when applied after the controller has Run.
func LeaderElectionNamespace(leaderElectionNamespace string) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.leaderElectionNamespace = leaderElectionNamespace
        return nil
    }
}
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack. Defaults to 15 seconds.
// Returns errRuntime when applied after the controller has Run.
func LeaseDuration(leaseDuration time.Duration) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.leaseDuration = leaseDuration
        return nil
    }
}
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up. Defaults to 10 seconds.
// Returns errRuntime when applied after the controller has Run.
func RenewDeadline(renewDeadline time.Duration) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.renewDeadline = renewDeadline
        return nil
    }
}
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions. Defaults to 2 seconds.
// Returns errRuntime when applied after the controller has Run.
func RetryPeriod(retryPeriod time.Duration) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.retryPeriod = retryPeriod
        return nil
    }
}
// ClaimsInformer sets the informer to use for accessing PersistentVolumeClaims.
// Defaults to using a private (non-shared) informer.
// Returns errRuntime when applied after the controller has Run.
func ClaimsInformer(informer cache.SharedInformer) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.claimInformer = informer
        return nil
    }
}
// VolumesInformer sets the informer to use for accessing PersistentVolumes.
// Defaults to using a private (non-shared) informer.
// Returns errRuntime when applied after the controller has Run.
func VolumesInformer(informer cache.SharedInformer) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.volumeInformer = informer
        return nil
    }
}
// ClassesInformer sets the informer to use for accessing StorageClasses.
// The informer must use the versioned resource appropriate for the Kubernetes cluster version
// (that is, v1.StorageClass for >= 1.6, and v1beta1.StorageClass for < 1.6).
// Defaults to using a private (non-shared) informer.
// Returns errRuntime when applied after the controller has Run.
func ClassesInformer(informer cache.SharedInformer) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.classInformer = informer
        return nil
    }
}
// MetricsPort sets the port that metrics server serves on. Default: 0, set to non-zero to enable.
// Returns errRuntime when applied after the controller has Run.
func MetricsPort(metricsPort int32) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.metricsPort = metricsPort
        return nil
    }
}
// MetricsAddress sets the ip address that the metrics server serves on.
// Returns errRuntime when applied after the controller has Run.
func MetricsAddress(metricsAddress string) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.metricsAddress = metricsAddress
        return nil
    }
}
// MetricsPath sets the endpoint path of the metrics server.
// Returns errRuntime when applied after the controller has Run.
func MetricsPath(metricsPath string) func(*ProvisionController) error {
    return func(c *ProvisionController) error {
        if c.HasRun() {
            return errRuntime
        }
        c.metricsPath = metricsPath
        return nil
    }
}
// HasRun returns whether the controller has Run. Option functions use it to
// refuse configuration changes after startup.
func (ctrl *ProvisionController) HasRun() bool {
    ctrl.hasRunLock.Lock()
    defer ctrl.hasRunLock.Unlock()
    return ctrl.hasRun
}
// NewProvisionController creates a new provision controller using
// the given configuration parameters and with private (non-shared) informers.
func NewProvisionController(
client kubernetes.Interface,
provisionerName string,
provisioner Provisioner,
kubeVersion string,
options ...func(*ProvisionController) error,
) *ProvisionController {
id, err := os.Hostname()
if err != nil {
glog.Fatalf("Error getting hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id = id + "_" + string(uuid.NewUUID())
component := provisionerName + "_" + id
v1.AddToScheme(scheme.Scheme)
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events(v1.NamespaceAll)})
eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
controller := &ProvisionController{
client: client,
provisionerName: provisionerName,
provisioner: provisioner,
kubeVersion: utilversion.MustParseSemantic(kubeVersion),
id: id,
component: component,
eventRecorder: eventRecorder,
resyncPeriod: DefaultResyncPeriod,
exponentialBackOffOnError: DefaultExponentialBackOffOnError,
threadiness: DefaultThreadiness,
createProvisionedPVRetryCount: DefaultCreateProvisionedPVRetryCount,
createProvisionedPVInterval: DefaultCreateProvisionedPVInterval,
failedProvisionThreshold: DefaultFailedProvisionThreshold,
failedDeleteThreshold: DefaultFailedDeleteThreshold,
leaderElection: DefaultLeaderElection,
leaderElectionNamespace: getInClusterNamespace(),
leaseDuration: DefaultLeaseDuration,
renewDeadline: DefaultRenewDeadline,
retryPeriod: DefaultRetryPeriod,
metricsPort: DefaultMetricsPort,
metricsAddress: DefaultMetricsAddress,
metricsPath: DefaultMetricsPath,
hasRun: false,
hasRunLock: &sync.Mutex{},
}
for _, option := range options {
option(controller)
}
ratelimiter := workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 1000*time.Second),
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
if !controller.exponentialBackOffOnError {
ratelimiter = workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 15*time.Second),
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
controller.claimQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "claims")
controller.volumeQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "volumes")
// ----------------------
// PersistentVolumeClaims
claimSource := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().PersistentVolumeClaims(v1.NamespaceAll).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
},
}
claimHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.claimQueue, obj) },
}
if controller.claimInformer != nil {
controller.claimInformer.AddEventHandlerWithResyncPeriod(claimHandler, controller.resyncPeriod)
controller.claims, controller.claimController =
controller.claimInformer.GetStore(),
controller.claimInformer.GetController()
} else {
controller.claims, controller.claimController =
cache.NewInformer(
claimSource,
&v1.PersistentVolumeClaim{},
controller.resyncPeriod,
claimHandler,
)
}
// -----------------
// PersistentVolumes
volumeSource := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().PersistentVolumes().List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().PersistentVolumes().Watch(options)
},
}
volumeHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.volumeQueue, obj) },
}
if controller.volumeInformer != nil {
controller.volumeInformer.AddEventHandlerWithResyncPeriod(volumeHandler, controller.resyncPeriod)
controller.volumes, controller.volumeController =
controller.volumeInformer.GetStore(),
controller.volumeInformer.GetController()
} else {
controller.volumes, controller.volumeController =
cache.NewInformer(
volumeSource,
&v1.PersistentVolume{},
controller.resyncPeriod,
volumeHandler,
)
}
// --------------
// StorageClasses
var versionedClassType runtime.Object
var classSource cache.ListerWatcher
if controller.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.6.0")) {
versionedClassType = &storage.StorageClass{}
classSource = &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.StorageV1().StorageClasses().List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.StorageV1().StorageClasses().Watch(options)
},
}
} else {
versionedClassType = &storagebeta.StorageClass{}
classSource = &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.StorageV1beta1().StorageClasses().List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.StorageV1beta1().StorageClasses().Watch(options)
},
}
}
classHandler := cache.ResourceEventHandlerFuncs{
// We don't need an actual event handler for StorageClasses,
// but we must pass a non-nil one to cache.NewInformer()
AddFunc: nil,
UpdateFunc: nil,
DeleteFunc: nil,
}
if controller.classInformer != nil {
// no resource event handler needed for StorageClasses
controller.classes, controller.classController =
controller.classInformer.GetStore(),
controller.classInformer.GetController()
} else {
controller.classes, controller.classController = cache.NewInformer(
classSource,
versionedClassType,
controller.resyncPeriod,
classHandler,
)
}
return controller
}
// enqueueWork takes an obj and converts it into a namespace/name string which
// is then put onto the given work queue.
func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterface, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	// Re-Adding is harmless but try to add it to the queue only if it is not
	// already there, because if it is already there we *must* be retrying it
	if queue.NumRequeues(key) == 0 {
		queue.Add(key)
	}
}
// forgetWork Forgets an obj from the given work queue, telling the queue to
// stop tracking its retries because e.g. the obj was deleted
func (ctrl *ProvisionController) forgetWork(queue workqueue.RateLimitingInterface, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	// Drop the retry counter for the key, then mark it done.
	queue.Forget(key)
	queue.Done(key)
}
// Run starts all of this controller's control loops
func (ctrl *ProvisionController) Run(_ <-chan struct{}) {
	// TODO: arg is as of 1.12 unused. Nothing can ever be cancelled. Should
	// accept a context instead and use it instead of context.TODO(), but would
	// break API. Not urgent: realistically, users are simply passing in
	// wait.NeverStop() anyway.
	run := func(ctx context.Context) {
		glog.Infof("Starting provisioner controller %s!", ctrl.component)
		defer utilruntime.HandleCrash()
		defer ctrl.claimQueue.ShutDown()
		defer ctrl.volumeQueue.ShutDown()
		// Record that the controller has started at least once (guarded by
		// hasRunLock so HasRun-style readers see a consistent value).
		ctrl.hasRunLock.Lock()
		ctrl.hasRun = true
		ctrl.hasRunLock.Unlock()
		// A metricsPort > 0 enables the Prometheus metrics endpoint.
		if ctrl.metricsPort > 0 {
			prometheus.MustRegister([]prometheus.Collector{
				metrics.PersistentVolumeClaimProvisionTotal,
				metrics.PersistentVolumeClaimProvisionFailedTotal,
				metrics.PersistentVolumeClaimProvisionDurationSeconds,
				metrics.PersistentVolumeDeleteTotal,
				metrics.PersistentVolumeDeleteFailedTotal,
				metrics.PersistentVolumeDeleteDurationSeconds,
			}...)
			http.Handle(ctrl.metricsPath, promhttp.Handler())
			address := net.JoinHostPort(ctrl.metricsAddress, strconv.FormatInt(int64(ctrl.metricsPort), 10))
			glog.Infof("Starting metrics server at %s\n", address)
			// Keep the metrics server alive: restart it every 5s if it exits.
			go wait.Forever(func() {
				err := http.ListenAndServe(address, nil)
				if err != nil {
					glog.Errorf("Failed to listen on %s: %v", address, err)
				}
			}, 5*time.Second)
		}
		// If a SharedInformer has been passed in, this controller should not
		// call Run again
		if ctrl.claimInformer == nil {
			go ctrl.claimController.Run(ctx.Done())
		}
		if ctrl.volumeInformer == nil {
			go ctrl.volumeController.Run(ctx.Done())
		}
		if ctrl.classInformer == nil {
			go ctrl.classController.Run(ctx.Done())
		}
		// Wait for all three caches to sync before starting any workers so
		// they never act on a partially-populated view of the cluster.
		if !cache.WaitForCacheSync(ctx.Done(), ctrl.claimController.HasSynced, ctrl.volumeController.HasSynced, ctrl.classController.HasSynced) {
			return
		}
		// Start `threadiness` workers for each queue.
		for i := 0; i < ctrl.threadiness; i++ {
			go wait.Until(ctrl.runClaimWorker, time.Second, context.TODO().Done())
			go wait.Until(ctrl.runVolumeWorker, time.Second, context.TODO().Done())
		}
		glog.Infof("Started provisioner controller %s!", ctrl.component)
		// Block forever; see the TODO above — nothing can be cancelled.
		select {}
	}
	if ctrl.leaderElection {
		// Leader-elect on an Endpoints lock named after the provisioner so
		// only one instance provisions at a time.
		rl, err := resourcelock.New("endpoints",
			ctrl.leaderElectionNamespace,
			strings.Replace(ctrl.provisionerName, "/", "-", -1),
			ctrl.client.CoreV1(),
			resourcelock.ResourceLockConfig{
				Identity:      ctrl.id,
				EventRecorder: ctrl.eventRecorder,
			})
		if err != nil {
			glog.Fatalf("Error creating lock: %v", err)
		}
		leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
			Lock:          rl,
			LeaseDuration: ctrl.leaseDuration,
			RenewDeadline: ctrl.renewDeadline,
			RetryPeriod:   ctrl.retryPeriod,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: run,
				OnStoppedLeading: func() {
					// Losing the lease is fatal: exit and let the pod restart
					// rather than risk two active provisioners.
					glog.Fatalf("leaderelection lost")
				},
			},
		})
		panic("unreachable")
	} else {
		run(context.TODO())
	}
}
// runClaimWorker drains the claim queue until it is shut down.
func (ctrl *ProvisionController) runClaimWorker() {
	for {
		if !ctrl.processNextClaimWorkItem() {
			return
		}
	}
}
// runVolumeWorker drains the volume queue until it is shut down.
func (ctrl *ProvisionController) runVolumeWorker() {
	for {
		if !ctrl.processNextVolumeWorkItem() {
			return
		}
	}
}
// processNextClaimWorkItem processes items from claimQueue. It returns false
// only when the queue has been shut down, signalling the worker to exit.
func (ctrl *ProvisionController) processNextClaimWorkItem() bool {
	obj, shutdown := ctrl.claimQueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Always mark the item done so the queue can hand it out again later.
		defer ctrl.claimQueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			ctrl.claimQueue.Forget(obj)
			return fmt.Errorf("expected string in workqueue but got %#v", obj)
		}
		if err := ctrl.syncClaimHandler(key); err != nil {
			// Retry with rate-limited backoff until the failure threshold.
			if ctrl.claimQueue.NumRequeues(obj) < ctrl.failedProvisionThreshold {
				glog.Warningf("Retrying syncing claim %q because failures %v < threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold)
				ctrl.claimQueue.AddRateLimited(obj)
			} else {
				glog.Errorf("Giving up syncing claim %q because failures %v >= threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold)
				// Done but do not Forget: it will not be in the queue but NumRequeues
				// will be saved until the obj is deleted from kubernetes
			}
			return fmt.Errorf("error syncing claim %q: %s", key, err.Error())
		}
		// Success: reset the retry counter for this key.
		ctrl.claimQueue.Forget(obj)
		return nil
	}(obj)
	// An error never stops the worker; it is logged and the loop continues.
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}
// processNextVolumeWorkItem processes items from volumeQueue. It returns false
// only when the queue has been shut down, signalling the worker to exit.
func (ctrl *ProvisionController) processNextVolumeWorkItem() bool {
	obj, shutdown := ctrl.volumeQueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Always mark the item done so the queue can hand it out again later.
		defer ctrl.volumeQueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			ctrl.volumeQueue.Forget(obj)
			return fmt.Errorf("expected string in workqueue but got %#v", obj)
		}
		if err := ctrl.syncVolumeHandler(key); err != nil {
			// Retry with rate-limited backoff until the delete failure
			// threshold. (Fixed: the log messages previously printed
			// failedProvisionThreshold although the condition compares
			// against failedDeleteThreshold.)
			if ctrl.volumeQueue.NumRequeues(obj) < ctrl.failedDeleteThreshold {
				glog.Warningf("Retrying syncing volume %q because failures %v < threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold)
				ctrl.volumeQueue.AddRateLimited(obj)
			} else {
				glog.Errorf("Giving up syncing volume %q because failures %v >= threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold)
				// Done but do not Forget: it will not be in the queue but NumRequeues
				// will be saved until the obj is deleted from kubernetes
			}
			return fmt.Errorf("error syncing volume %q: %s", key, err.Error())
		}
		// Success: reset the retry counter for this key.
		ctrl.volumeQueue.Forget(obj)
		return nil
	}(obj)
	// An error never stops the worker; it is logged and the loop continues.
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}
// syncClaimHandler gets the claim from informer's cache then calls syncClaim
func (ctrl *ProvisionController) syncClaimHandler(key string) error {
	claimObj, exists, err := ctrl.claims.GetByKey(key)
	switch {
	case err != nil:
		return err
	case !exists:
		// The claim was deleted after being queued; nothing left to do.
		utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key))
		return nil
	}
	return ctrl.syncClaim(claimObj)
}
// syncVolumeHandler gets the volume from informer's cache then calls syncVolume
func (ctrl *ProvisionController) syncVolumeHandler(key string) error {
	volumeObj, exists, err := ctrl.volumes.GetByKey(key)
	switch {
	case err != nil:
		return err
	case !exists:
		// The volume was deleted after being queued; nothing left to do.
		utilruntime.HandleError(fmt.Errorf("volume %q in work queue no longer exists", key))
		return nil
	}
	return ctrl.syncVolume(volumeObj)
}
// syncClaim checks if the claim should have a volume provisioned for it and
// provisions one if so.
func (ctrl *ProvisionController) syncClaim(obj interface{}) error {
	claim, ok := obj.(*v1.PersistentVolumeClaim)
	if !ok {
		return fmt.Errorf("expected claim but got %+v", obj)
	}
	if !ctrl.shouldProvision(claim) {
		return nil
	}
	// Time the provision attempt for the duration metric.
	startTime := time.Now()
	err := ctrl.provisionClaimOperation(claim)
	ctrl.updateProvisionStats(claim, err, startTime)
	return err
}
// syncVolume checks if the volume should be deleted and deletes if so
func (ctrl *ProvisionController) syncVolume(obj interface{}) error {
	volume, ok := obj.(*v1.PersistentVolume)
	if !ok {
		return fmt.Errorf("expected volume but got %+v", obj)
	}
	if !ctrl.shouldDelete(volume) {
		return nil
	}
	// Time the delete attempt for the duration metric.
	startTime := time.Now()
	err := ctrl.deleteVolumeOperation(volume)
	ctrl.updateDeleteStats(volume, err, startTime)
	return err
}
// shouldProvision returns whether a claim should have a volume provisioned for
// it, i.e. whether a Provision is "desired"
func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) bool {
	// Claim is already bound (or being bound) to a volume: nothing to do.
	if claim.Spec.VolumeName != "" {
		return false
	}
	// Give the provisioner a chance to veto the claim if it implements the
	// optional Qualifier interface.
	if qualifier, ok := ctrl.provisioner.(Qualifier); ok {
		if !qualifier.ShouldProvision(claim) {
			return false
		}
	}
	// Kubernetes 1.5 provisioning with annStorageProvisioner
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) {
		// The annotation names the provisioner responsible for the claim;
		// only act when it names us. A claim without the annotation is
		// never provisioned (falls through to the final false).
		if provisioner, found := claim.Annotations[annStorageProvisioner]; found {
			if provisioner == ctrl.provisionerName {
				return true
			}
			return false
		}
	} else {
		// Kubernetes 1.4 provisioning, evaluating class.Provisioner
		claimClass := helper.GetPersistentVolumeClaimClass(claim)
		provisioner, _, err := ctrl.getStorageClassFields(claimClass)
		if err != nil {
			glog.Errorf("Error getting claim %q's StorageClass's fields: %v", claimToClaimKey(claim), err)
			return false
		}
		if provisioner != ctrl.provisionerName {
			return false
		}
		return true
	}
	return false
}
// shouldDelete returns whether a volume should have its backing volume
// deleted, i.e. whether a Delete is "desired"
func (ctrl *ProvisionController) shouldDelete(volume *v1.PersistentVolume) bool {
	// In 1.9+ PV protection means the object will exist briefly with a
	// deletion timestamp even after our successful Delete. Ignore it.
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.9.0")) {
		if volume.ObjectMeta.DeletionTimestamp != nil {
			return false
		}
	}
	// In 1.5+ we delete only if the volume is in state Released. In 1.4 we must
	// delete if the volume is in state Failed too.
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) {
		if volume.Status.Phase != v1.VolumeReleased {
			return false
		}
	} else {
		if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
			return false
		}
	}
	// Only Delete-reclaim volumes are ours to remove.
	if volume.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
		return false
	}
	// Only delete volumes that were dynamically provisioned by *this*
	// provisioner, as recorded in the provisioned-by annotation.
	if !metav1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) {
		return false
	}
	if ann := volume.Annotations[annDynamicallyProvisioned]; ann != ctrl.provisionerName {
		return false
	}
	return true
}
// canProvision returns error if provisioner can't provision claim.
func (ctrl *ProvisionController) canProvision(claim *v1.PersistentVolumeClaim) error {
	// Filesystem-mode claims are always acceptable.
	if !util.CheckPersistentVolumeClaimModeBlock(claim) {
		return nil
	}
	// Block-mode claims need a provisioner that supports block volumes.
	if ctrl.supportsBlock() {
		return nil
	}
	return fmt.Errorf("%s does not support block volume provisioning", ctrl.provisionerName)
}
// updateProvisionStats records success/failure and duration metrics for one
// provision attempt, labelled by the claim's storage class.
func (ctrl *ProvisionController) updateProvisionStats(claim *v1.PersistentVolumeClaim, err error, startTime time.Time) {
	var class string
	if name := claim.Spec.StorageClassName; name != nil {
		class = *name
	}
	if err != nil {
		metrics.PersistentVolumeClaimProvisionFailedTotal.WithLabelValues(class).Inc()
		return
	}
	metrics.PersistentVolumeClaimProvisionDurationSeconds.WithLabelValues(class).Observe(time.Since(startTime).Seconds())
	metrics.PersistentVolumeClaimProvisionTotal.WithLabelValues(class).Inc()
}
// updateDeleteStats records success/failure and duration metrics for one
// delete attempt, labelled by the volume's storage class.
func (ctrl *ProvisionController) updateDeleteStats(volume *v1.PersistentVolume, err error, startTime time.Time) {
	class := volume.Spec.StorageClassName
	if err != nil {
		metrics.PersistentVolumeDeleteFailedTotal.WithLabelValues(class).Inc()
		return
	}
	metrics.PersistentVolumeDeleteDurationSeconds.WithLabelValues(class).Observe(time.Since(startTime).Seconds())
	metrics.PersistentVolumeDeleteTotal.WithLabelValues(class).Inc()
}
// provisionClaimOperation attempts to provision a volume for the given claim.
// Returns error, which indicates whether provisioning should be retried
// (requeue the claim) or not
//
// A nil return is used both for success and for permanent failures that a
// retry cannot fix (unknown provisioner, bad claim reference, etc.).
func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) error {
	// Most code here is identical to that found in controller.go of kube's PV controller...
	claimClass := helper.GetPersistentVolumeClaimClass(claim)
	operation := fmt.Sprintf("provision %q class %q", claimToClaimKey(claim), claimClass)
	glog.Infof(logOperation(operation, "started"))
	// A previous doProvisionClaim may just have finished while we were waiting for
	// the locks. Check that PV (with deterministic name) hasn't been provisioned
	// yet.
	pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
	volume, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
	if err == nil && volume != nil {
		// Volume has been already provisioned, nothing to do.
		glog.Infof(logOperation(operation, "persistentvolume %q already exists, skipping", pvName))
		return nil
	}
	// Prepare a claimRef to the claim early (to fail before a volume is
	// provisioned)
	claimRef, err := ref.GetReference(scheme.Scheme, claim)
	if err != nil {
		glog.Errorf(logOperation(operation, "unexpected error getting claim reference: %v", err))
		return nil
	}
	provisioner, parameters, err := ctrl.getStorageClassFields(claimClass)
	if err != nil {
		glog.Errorf(logOperation(operation, "error getting claim's StorageClass's fields: %v", err))
		return nil
	}
	if provisioner != ctrl.provisionerName {
		// class.Provisioner has either changed since shouldProvision() or
		// annDynamicallyProvisioned contains different provisioner than
		// class.Provisioner.
		glog.Errorf(logOperation(operation, "unknown provisioner %q requested in claim's StorageClass", provisioner))
		return nil
	}
	// Check if this provisioner can provision this claim.
	if err = ctrl.canProvision(claim); err != nil {
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
		glog.Errorf(logOperation(operation, "failed to provision volume: %v", err))
		return nil
	}
	// ReclaimPolicy is configurable on the StorageClass from 1.8 onwards;
	// before that, dynamically provisioned volumes are always Delete.
	reclaimPolicy := v1.PersistentVolumeReclaimDelete
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.8.0")) {
		reclaimPolicy, err = ctrl.fetchReclaimPolicy(claimClass)
		if err != nil {
			return err
		}
	}
	mountOptions, err := ctrl.fetchMountOptions(claimClass)
	if err != nil {
		return err
	}
	var selectedNode *v1.Node
	var allowedTopologies []v1.TopologySelectorTerm
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.11.0")) {
		// Get SelectedNode
		if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
			selectedNode, err = ctrl.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
			if err != nil {
				err = fmt.Errorf("failed to get target node: %v", err)
				ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
				return err
			}
		}
		// Get AllowedTopologies
		allowedTopologies, err = ctrl.fetchAllowedTopologies(claimClass)
		if err != nil {
			err = fmt.Errorf("failed to get AllowedTopologies from StorageClass: %v", err)
			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
			return err
		}
	}
	options := VolumeOptions{
		PersistentVolumeReclaimPolicy: reclaimPolicy,
		PVName:                        pvName,
		PVC:                           claim,
		MountOptions:                  mountOptions,
		Parameters:                    parameters,
		SelectedNode:                  selectedNode,
		AllowedTopologies:             allowedTopologies,
	}
	ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "Provisioning", fmt.Sprintf("External provisioner is provisioning volume for claim %q", claimToClaimKey(claim)))
	volume, err = ctrl.provisioner.Provision(options)
	if err != nil {
		if ierr, ok := err.(*IgnoredError); ok {
			// Provision ignored, do nothing and hope another provisioner will provision it.
			glog.Infof(logOperation(operation, "volume provision ignored: %v", ierr))
			return nil
		}
		err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
		return err
	}
	glog.Infof(logOperation(operation, "volume %q provisioned", volume.Name))
	// Set ClaimRef and the PV controller will bind and set annBoundByController for us
	volume.Spec.ClaimRef = claimRef
	metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, ctrl.provisionerName)
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.6.0")) {
		volume.Spec.StorageClassName = claimClass
	} else {
		metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annClass, claimClass)
	}
	// Try to create the PV object several times
	for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
		// (Fixed typo: "persistentvvolume" -> "persistentvolume".)
		glog.Infof(logOperation(operation, "trying to save persistentvolume %q", volume.Name))
		if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
			// Save succeeded.
			if err != nil {
				glog.Infof(logOperation(operation, "persistentvolume %q already exists, reusing", volume.Name))
				err = nil
			} else {
				glog.Infof(logOperation(operation, "persistentvolume %q saved", volume.Name))
			}
			break
		}
		// Save failed, try again after a while.
		glog.Infof(logOperation(operation, "failed to save persistentvolume %q: %v", volume.Name, err))
		time.Sleep(ctrl.createProvisionedPVInterval)
	}
	if err != nil {
		// Save failed. Now we have a storage asset outside of Kubernetes,
		// but we don't have appropriate PV object for it.
		// Emit some event here and try to delete the storage asset several
		// times.
		strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err)
		glog.Error(logOperation(operation, strerr))
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr)
		for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
			if err = ctrl.provisioner.Delete(volume); err == nil {
				// Delete succeeded
				glog.Infof(logOperation(operation, "cleaning volume %q succeeded", volume.Name))
				break
			}
			// Delete failed, try again after a while.
			glog.Infof(logOperation(operation, "failed to clean volume %q: %v", volume.Name, err))
			time.Sleep(ctrl.createProvisionedPVInterval)
		}
		if err != nil {
			// Delete failed several times. There is an orphaned volume and there
			// is nothing we can do about it.
			strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), err)
			glog.Error(logOperation(operation, strerr))
			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr)
		}
	} else {
		msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name)
		ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg)
	}
	glog.Infof(logOperation(operation, "succeeded"))
	return nil
}
// deleteVolumeOperation attempts to delete the volume backing the given
// volume. Returns error, which indicates whether deletion should be retried
// (requeue the volume) or not
func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
	operation := fmt.Sprintf("delete %q", volume.Name)
	glog.Infof(logOperation(operation, "started"))
	// This method may have been waiting for a volume lock for some time.
	// Our check does not have to be as sophisticated as PV controller's, we can
	// trust that the PV controller has set the PV to Released/Failed and it's
	// ours to delete
	newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
	if err != nil {
		// NOTE(review): fetch errors are deliberately swallowed (no retry);
		// the volume will be revisited on the next informer resync.
		return nil
	}
	// Re-check against the freshly fetched object in case it changed while
	// this item sat in the queue.
	if !ctrl.shouldDelete(newVolume) {
		glog.Infof(logOperation(operation, "persistentvolume no longer needs deletion, skipping"))
		return nil
	}
	// Delete the backing storage asset first, then the PV API object.
	err = ctrl.provisioner.Delete(volume)
	if err != nil {
		if ierr, ok := err.(*IgnoredError); ok {
			// Delete ignored, do nothing and hope another provisioner will delete it.
			glog.Infof(logOperation(operation, "volume deletion ignored: %v", ierr))
			return nil
		}
		// Delete failed, emit an event.
		glog.Errorf(logOperation(operation, "volume deletion failed: %v", err))
		ctrl.eventRecorder.Event(volume, v1.EventTypeWarning, "VolumeFailedDelete", err.Error())
		return err
	}
	glog.Infof(logOperation(operation, "volume deleted"))
	// Delete the volume
	if err = ctrl.client.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
		// Oops, could not delete the volume and therefore the controller will
		// try to delete the volume again on next update.
		glog.Infof(logOperation(operation, "failed to delete persistentvolume: %v", err))
		return err
	}
	glog.Infof(logOperation(operation, "persistentvolume deleted"))
	glog.Infof(logOperation(operation, "succeeded"))
	return nil
}
// logOperation renders "<operation>: <message>" where the message is the
// given format string expanded with args a.
func logOperation(operation, format string, a ...interface{}) string {
	combined := fmt.Sprintf("%s: %s", operation, format)
	return fmt.Sprintf(combined, a...)
}
// getInClusterNamespace returns the namespace in which the controller runs.
func getInClusterNamespace() string {
if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
return ns
}
// Fall back to the namespace associated with the service account token, if available
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return "default"
}
// getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume.
// The name must be unique.
func (ctrl *ProvisionController) getProvisionedVolumeNameForClaim(claim *v1.PersistentVolumeClaim) string {
	// The claim UID is unique cluster-wide, so "pvc-<uid>" is as well.
	return fmt.Sprintf("pvc-%s", claim.UID)
}
// getStorageClassFields returns the provisioner name and parameters of the
// StorageClass with the given name, looked up in the classes informer cache.
// Handles both storage.k8s.io/v1 and v1beta1 StorageClass objects.
//
// (Removed a stale, misplaced comment fragment that described claim-annotation
// lookup behavior this function does not perform.)
func (ctrl *ProvisionController) getStorageClassFields(name string) (string, map[string]string, error) {
	classObj, found, err := ctrl.classes.GetByKey(name)
	if err != nil {
		return "", nil, err
	}
	if !found {
		return "", nil, fmt.Errorf("storageClass %q not found", name)
	}
	switch class := classObj.(type) {
	case *storage.StorageClass:
		return class.Provisioner, class.Parameters, nil
	case *storagebeta.StorageClass:
		return class.Provisioner, class.Parameters, nil
	}
	return "", nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj)
}
// claimToClaimKey renders a claim as its "namespace/name" key for logging.
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
	return claim.Namespace + "/" + claim.Name
}
// fetchReclaimPolicy returns the ReclaimPolicy of the named StorageClass from
// the classes informer cache, defaulting to Delete when the field is nil.
func (ctrl *ProvisionController) fetchReclaimPolicy(storageClassName string) (v1.PersistentVolumeReclaimPolicy, error) {
	classObj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return "", err
	}
	if !found {
		return "", fmt.Errorf("storageClass %q not found", storageClassName)
	}
	switch class := classObj.(type) {
	case *storage.StorageClass:
		// ReclaimPolicy is a pointer; guard against nil (e.g. objects
		// created before API-server defaulting) instead of panicking.
		if class.ReclaimPolicy == nil {
			return v1.PersistentVolumeReclaimDelete, nil
		}
		return *class.ReclaimPolicy, nil
	case *storagebeta.StorageClass:
		if class.ReclaimPolicy == nil {
			return v1.PersistentVolumeReclaimDelete, nil
		}
		return *class.ReclaimPolicy, nil
	}
	return v1.PersistentVolumeReclaimDelete, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj)
}
// fetchMountOptions returns the MountOptions of the named StorageClass from
// the classes informer cache.
func (ctrl *ProvisionController) fetchMountOptions(storageClassName string) ([]string, error) {
	classObj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("storageClass %q not found", storageClassName)
	}
	// Accept either API version of StorageClass.
	if class, ok := classObj.(*storage.StorageClass); ok {
		return class.MountOptions, nil
	}
	if class, ok := classObj.(*storagebeta.StorageClass); ok {
		return class.MountOptions, nil
	}
	return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj)
}
// fetchAllowedTopologies returns the AllowedTopologies of the named
// StorageClass from the classes informer cache.
func (ctrl *ProvisionController) fetchAllowedTopologies(storageClassName string) ([]v1.TopologySelectorTerm, error) {
	classObj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("storageClass %q not found", storageClassName)
	}
	// Accept either API version of StorageClass.
	if class, ok := classObj.(*storage.StorageClass); ok {
		return class.AllowedTopologies, nil
	}
	if class, ok := classObj.(*storagebeta.StorageClass); ok {
		return class.AllowedTopologies, nil
	}
	return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj)
}
// supportsBlock returns whether a provisioner supports block volume.
// Provisioners that implement BlockProvisioner interface and return true to SupportsBlock
// will be regarded as supported for block volume.
func (ctrl *ProvisionController) supportsBlock() bool {
	blockProvisioner, ok := ctrl.provisioner.(BlockProvisioner)
	return ok && blockProvisioner.SupportsBlock()
}
// Moving selectedNode annotation to GA
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/kubernetes-incubator/external-storage/lib/controller/metrics"
"github.com/kubernetes-incubator/external-storage/lib/util"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/time/rate"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
// annClass annotation represents the storage class associated with a resource:
// - in PersistentVolumeClaim it represents required class to match.
// Only PersistentVolumes with the same class (i.e. annotation with the same
// value) can be bound to the claim. In case no such volume exists, the
// controller will provision a new one using StorageClass instance with
// the same name as the annotation value.
// - in PersistentVolume it represents storage class to which the persistent
// volume belongs.
const annClass = "volume.beta.kubernetes.io/storage-class"

// annDynamicallyProvisioned: this annotation is added to a PV that has been
// dynamically provisioned by Kubernetes. Its value is name of volume plugin
// that created the volume. It serves both user (to show where a PV comes
// from) and Kubernetes (to recognize dynamically provisioned PVs in its
// decisions).
const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"

// annStorageProvisioner names the provisioner expected to provision a claim;
// shouldProvision acts only on claims whose value matches provisionerName
// (Kubernetes 1.5+ path).
const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"

// annSelectedNode: this annotation is added to a PVC that has been triggered
// by scheduler to be dynamically provisioned. Its value is the name of the
// selected node.
const annSelectedNode = "volume.kubernetes.io/selected-node"
// ProvisionController is a controller that provisions PersistentVolumes for
// PersistentVolumeClaims.
type ProvisionController struct {
	// Kubernetes API client used for all reads/writes against the cluster.
	client kubernetes.Interface
	// The name of the provisioner for which this controller dynamically
	// provisions volumes. The value of annDynamicallyProvisioned and
	// annStorageProvisioner to set & watch for, respectively
	provisionerName string
	// The provisioner the controller will use to provision and delete volumes.
	// Presumably this implementer of Provisioner carries its own
	// volume-specific options and such that it needs in order to provision
	// volumes.
	provisioner Provisioner
	// Kubernetes cluster server version:
	// * 1.4: storage classes introduced as beta. Technically out-of-tree dynamic
	// provisioning is not officially supported, though it works
	// * 1.5: storage classes stay in beta. Out-of-tree dynamic provisioning is
	// officially supported
	// * 1.6: storage classes enter GA
	kubeVersion *utilversion.Version

	// Informer/store/controller triple for PersistentVolumeClaims. When
	// claimInformer is non-nil a caller-supplied SharedInformer is used and
	// the controller does not Run its own.
	claimInformer   cache.SharedInformer
	claims          cache.Store
	claimController cache.Controller

	// Informer/store/controller triple for PersistentVolumes (same rule).
	volumeInformer   cache.SharedInformer
	volumes          cache.Store
	volumeController cache.Controller

	// Informer/store/controller triple for StorageClasses (same rule).
	classInformer   cache.SharedInformer
	classes         cache.Store
	classController cache.Controller

	// Rate-limited work queues fed by the claim/volume event handlers.
	claimQueue  workqueue.RateLimitingInterface
	volumeQueue workqueue.RateLimitingInterface

	// Identity of this controller, generated at creation time and not persisted
	// across restarts. Useful only for debugging, for seeing the source of
	// events. controller.provisioner may have its own, different notion of
	// identity which may/may not persist across restarts
	id string
	// component is "<provisionerName>_<id>"; used as the event source.
	component     string
	eventRecorder record.EventRecorder

	// How often the informers fully relist their resources.
	resyncPeriod time.Duration

	// Whether sync failures are retried with exponential backoff.
	exponentialBackOffOnError bool
	// Number of concurrent claim and volume workers each.
	threadiness int

	// How many times, and how long between attempts, to try saving a
	// provisioned PV object (and cleaning up on failure).
	createProvisionedPVRetryCount int
	createProvisionedPVInterval   time.Duration

	// Per-item retry ceilings for claim provisioning and volume deletion.
	failedProvisionThreshold, failedDeleteThreshold int

	// The port for metrics server to serve on.
	metricsPort int32
	// The IP address for metrics server to serve on.
	metricsAddress string
	// The path of metrics endpoint path.
	metricsPath string

	// Whether to do kubernetes leader election at all. It should basically
	// always be done when possible to avoid duplicate Provision attempts.
	leaderElection          bool
	leaderElectionNamespace string
	// Parameters of leaderelection.LeaderElectionConfig.
	leaseDuration, renewDeadline, retryPeriod time.Duration

	// Whether Run has been called at least once; guarded by hasRunLock.
	hasRun     bool
	hasRunLock *sync.Mutex
}
// Default values for every tunable; each is used when the corresponding
// option function is not passed to NewProvisionController.
const (
	// DefaultResyncPeriod is used when option function ResyncPeriod is omitted
	DefaultResyncPeriod = 15 * time.Minute
	// DefaultThreadiness is used when option function Threadiness is omitted
	DefaultThreadiness = 4
	// DefaultExponentialBackOffOnError is used when option function ExponentialBackOffOnError is omitted
	DefaultExponentialBackOffOnError = true
	// DefaultCreateProvisionedPVRetryCount is used when option function CreateProvisionedPVRetryCount is omitted
	DefaultCreateProvisionedPVRetryCount = 5
	// DefaultCreateProvisionedPVInterval is used when option function CreateProvisionedPVInterval is omitted
	DefaultCreateProvisionedPVInterval = 10 * time.Second
	// DefaultFailedProvisionThreshold is used when option function FailedProvisionThreshold is omitted
	DefaultFailedProvisionThreshold = 15
	// DefaultFailedDeleteThreshold is used when option function FailedDeleteThreshold is omitted
	DefaultFailedDeleteThreshold = 15
	// DefaultLeaderElection is used when option function LeaderElection is omitted
	DefaultLeaderElection = true
	// DefaultLeaseDuration is used when option function LeaseDuration is omitted
	DefaultLeaseDuration = 15 * time.Second
	// DefaultRenewDeadline is used when option function RenewDeadline is omitted
	DefaultRenewDeadline = 10 * time.Second
	// DefaultRetryPeriod is used when option function RetryPeriod is omitted
	DefaultRetryPeriod = 2 * time.Second
	// DefaultMetricsPort is used when option function MetricsPort is omitted
	DefaultMetricsPort = 0
	// DefaultMetricsAddress is used when option function MetricsAddress is omitted
	DefaultMetricsAddress = "0.0.0.0"
	// DefaultMetricsPath is used when option function MetricsPath is omitted
	DefaultMetricsPath = "/metrics"
)
var errRuntime = fmt.Errorf("cannot call option functions after controller has Run")
// ResyncPeriod is how often the controller relists PVCs, PVs, & storage
// classes. OnUpdate will be called even if nothing has changed, meaning failed
// operations may be retried on a PVC/PV every resyncPeriod regardless of
// whether it changed. Defaults to 15 minutes.
func ResyncPeriod(resyncPeriod time.Duration) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.resyncPeriod = resyncPeriod
			return nil
		}
		return errRuntime
	}
}
// Threadiness is the number of claim and volume workers each to launch.
// Defaults to 4.
func Threadiness(threadiness int) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.threadiness = threadiness
			return nil
		}
		return errRuntime
	}
}
// ExponentialBackOffOnError determines whether to exponentially back off from
// failures of Provision and Delete. Defaults to true.
func ExponentialBackOffOnError(exponentialBackOffOnError bool) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.exponentialBackOffOnError = exponentialBackOffOnError
			return nil
		}
		return errRuntime
	}
}
// CreateProvisionedPVRetryCount is the number of retries when we create a PV
// object for a provisioned volume. Defaults to 5.
func CreateProvisionedPVRetryCount(createProvisionedPVRetryCount int) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.createProvisionedPVRetryCount = createProvisionedPVRetryCount
			return nil
		}
		return errRuntime
	}
}
// CreateProvisionedPVInterval is the interval between retries when we create a
// PV object for a provisioned volume. Defaults to 10 seconds.
func CreateProvisionedPVInterval(createProvisionedPVInterval time.Duration) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.createProvisionedPVInterval = createProvisionedPVInterval
			return nil
		}
		return errRuntime
	}
}
// FailedProvisionThreshold is the threshold for max number of retries on
// failures of Provision. Defaults to 15.
func FailedProvisionThreshold(failedProvisionThreshold int) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.failedProvisionThreshold = failedProvisionThreshold
			return nil
		}
		return errRuntime
	}
}
// FailedDeleteThreshold is the threshold for max number of retries on failures
// of Delete. Defaults to 15.
func FailedDeleteThreshold(failedDeleteThreshold int) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.failedDeleteThreshold = failedDeleteThreshold
			return nil
		}
		return errRuntime
	}
}
// LeaderElection determines whether to enable leader election or not. Defaults
// to true.
func LeaderElection(leaderElection bool) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.leaderElection = leaderElection
			return nil
		}
		return errRuntime
	}
}
// LeaderElectionNamespace is the kubernetes namespace in which to create the
// leader election object. Defaults to the same namespace in which the
// controller runs.
func LeaderElectionNamespace(leaderElectionNamespace string) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.leaderElectionNamespace = leaderElectionNamespace
			return nil
		}
		return errRuntime
	}
}
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack. Defaults to 15 seconds.
func LeaseDuration(leaseDuration time.Duration) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.leaseDuration = leaseDuration
			return nil
		}
		return errRuntime
	}
}
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up. Defaults to 10 seconds.
func RenewDeadline(renewDeadline time.Duration) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.renewDeadline = renewDeadline
			return nil
		}
		return errRuntime
	}
}
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions. Defaults to 2 seconds.
func RetryPeriod(retryPeriod time.Duration) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.retryPeriod = retryPeriod
			return nil
		}
		return errRuntime
	}
}
// ClaimsInformer sets the informer to use for accessing PersistentVolumeClaims.
// Defaults to using a private (non-shared) informer.
func ClaimsInformer(informer cache.SharedInformer) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.claimInformer = informer
			return nil
		}
		return errRuntime
	}
}
// VolumesInformer sets the informer to use for accessing PersistentVolumes.
// Defaults to using a private (non-shared) informer.
func VolumesInformer(informer cache.SharedInformer) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.volumeInformer = informer
			return nil
		}
		return errRuntime
	}
}
// ClassesInformer sets the informer to use for accessing StorageClasses.
// The informer must use the versioned resource appropriate for the Kubernetes cluster version
// (that is, v1.StorageClass for >= 1.6, and v1beta1.StorageClass for < 1.6).
// Defaults to using a private (non-shared) informer.
func ClassesInformer(informer cache.SharedInformer) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.classInformer = informer
			return nil
		}
		return errRuntime
	}
}
// MetricsPort sets the port that metrics server serves on. Default: 0, set to non-zero to enable.
func MetricsPort(metricsPort int32) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.metricsPort = metricsPort
			return nil
		}
		return errRuntime
	}
}
// MetricsAddress sets the ip address that the metrics server serves on.
func MetricsAddress(metricsAddress string) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.metricsAddress = metricsAddress
			return nil
		}
		return errRuntime
	}
}
// MetricsPath sets the endpoint path of the metrics server.
func MetricsPath(metricsPath string) func(*ProvisionController) error {
	return func(ctrl *ProvisionController) error {
		if !ctrl.HasRun() {
			ctrl.metricsPath = metricsPath
			return nil
		}
		return errRuntime
	}
}
// HasRun reports whether the controller's Run loop has started. Reads of
// hasRun are serialized with writes via hasRunLock.
func (ctrl *ProvisionController) HasRun() bool {
	ctrl.hasRunLock.Lock()
	hasRun := ctrl.hasRun
	ctrl.hasRunLock.Unlock()
	return hasRun
}
// NewProvisionController creates a new provision controller using
// the given configuration parameters and with private (non-shared) informers.
//
// kubeVersion must be a parseable semantic version ("1.9.4" etc.); the
// controller uses it to choose between beta/GA StorageClass APIs and other
// version-dependent behavior. Option functions (ResyncPeriod, Threadiness,
// ...) are applied in order after defaults are set; their returned errors are
// ignored here, so pass options before Run and validate inputs yourself.
func NewProvisionController(
	client kubernetes.Interface,
	provisionerName string,
	provisioner Provisioner,
	kubeVersion string,
	options ...func(*ProvisionController) error,
) *ProvisionController {
	id, err := os.Hostname()
	if err != nil {
		glog.Fatalf("Error getting hostname: %v", err)
	}
	// add a uniquifier so that two processes on the same host don't accidentally both become active
	id = id + "_" + string(uuid.NewUUID())
	component := provisionerName + "_" + id

	// Event broadcaster: events are both logged and recorded to the API server.
	v1.AddToScheme(scheme.Scheme)
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events(v1.NamespaceAll)})
	eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})

	controller := &ProvisionController{
		client: client,
		provisionerName: provisionerName,
		provisioner: provisioner,
		kubeVersion: utilversion.MustParseSemantic(kubeVersion),
		id: id,
		component: component,
		eventRecorder: eventRecorder,
		resyncPeriod: DefaultResyncPeriod,
		exponentialBackOffOnError: DefaultExponentialBackOffOnError,
		threadiness: DefaultThreadiness,
		createProvisionedPVRetryCount: DefaultCreateProvisionedPVRetryCount,
		createProvisionedPVInterval: DefaultCreateProvisionedPVInterval,
		failedProvisionThreshold: DefaultFailedProvisionThreshold,
		failedDeleteThreshold: DefaultFailedDeleteThreshold,
		leaderElection: DefaultLeaderElection,
		leaderElectionNamespace: getInClusterNamespace(),
		leaseDuration: DefaultLeaseDuration,
		renewDeadline: DefaultRenewDeadline,
		retryPeriod: DefaultRetryPeriod,
		metricsPort: DefaultMetricsPort,
		metricsAddress: DefaultMetricsAddress,
		metricsPath: DefaultMetricsPath,
		hasRun: false,
		hasRunLock: &sync.Mutex{},
	}

	// Apply caller-supplied option functions; errors are discarded (the only
	// error possible before Run is errRuntime, which cannot occur here).
	for _, option := range options {
		option(controller)
	}

	// Per-item exponential backoff capped at 1000s, combined with an overall
	// 10 qps / burst 100 token bucket.
	ratelimiter := workqueue.NewMaxOfRateLimiter(
		workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 1000*time.Second),
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)
	if !controller.exponentialBackOffOnError {
		// Same structure but a flat 15s per-item delay (base == cap).
		ratelimiter = workqueue.NewMaxOfRateLimiter(
			workqueue.NewItemExponentialFailureRateLimiter(15*time.Second, 15*time.Second),
			&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
		)
	}
	controller.claimQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "claims")
	controller.volumeQueue = workqueue.NewNamedRateLimitingQueue(ratelimiter, "volumes")

	// ----------------------
	// PersistentVolumeClaims
	claimSource := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().PersistentVolumeClaims(v1.NamespaceAll).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
		},
	}
	claimHandler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
		UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) },
		DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.claimQueue, obj) },
	}
	// Use the shared informer if one was supplied; otherwise build a private one.
	if controller.claimInformer != nil {
		controller.claimInformer.AddEventHandlerWithResyncPeriod(claimHandler, controller.resyncPeriod)
		controller.claims, controller.claimController =
			controller.claimInformer.GetStore(),
			controller.claimInformer.GetController()
	} else {
		controller.claims, controller.claimController =
			cache.NewInformer(
				claimSource,
				&v1.PersistentVolumeClaim{},
				controller.resyncPeriod,
				claimHandler,
			)
	}

	// -----------------
	// PersistentVolumes
	volumeSource := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().PersistentVolumes().List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().PersistentVolumes().Watch(options)
		},
	}
	volumeHandler := cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
		UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) },
		DeleteFunc: func(obj interface{}) { controller.forgetWork(controller.volumeQueue, obj) },
	}
	if controller.volumeInformer != nil {
		controller.volumeInformer.AddEventHandlerWithResyncPeriod(volumeHandler, controller.resyncPeriod)
		controller.volumes, controller.volumeController =
			controller.volumeInformer.GetStore(),
			controller.volumeInformer.GetController()
	} else {
		controller.volumes, controller.volumeController =
			cache.NewInformer(
				volumeSource,
				&v1.PersistentVolume{},
				controller.resyncPeriod,
				volumeHandler,
			)
	}

	// --------------
	// StorageClasses
	// Pick the GA (>= 1.6) or beta (< 1.6) StorageClass API by cluster version.
	var versionedClassType runtime.Object
	var classSource cache.ListerWatcher
	if controller.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.6.0")) {
		versionedClassType = &storage.StorageClass{}
		classSource = &cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return client.StorageV1().StorageClasses().List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return client.StorageV1().StorageClasses().Watch(options)
			},
		}
	} else {
		versionedClassType = &storagebeta.StorageClass{}
		classSource = &cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return client.StorageV1beta1().StorageClasses().List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return client.StorageV1beta1().StorageClasses().Watch(options)
			},
		}
	}
	classHandler := cache.ResourceEventHandlerFuncs{
		// We don't need an actual event handler for StorageClasses,
		// but we must pass a non-nil one to cache.NewInformer()
		AddFunc: nil,
		UpdateFunc: nil,
		DeleteFunc: nil,
	}
	if controller.classInformer != nil {
		// no resource event handler needed for StorageClasses
		controller.classes, controller.classController =
			controller.classInformer.GetStore(),
			controller.classInformer.GetController()
	} else {
		controller.classes, controller.classController = cache.NewInformer(
			classSource,
			versionedClassType,
			controller.resyncPeriod,
			classHandler,
		)
	}
	return controller
}
// enqueueWork derives a namespace/name key from obj and adds it to the given
// work queue, unless the key is currently being retried.
func (ctrl *ProvisionController) enqueueWork(queue workqueue.RateLimitingInterface, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	// Re-Adding is harmless but try to add it to the queue only if it is not
	// already there, because if it is already there we *must* be retrying it
	if queue.NumRequeues(key) == 0 {
		queue.Add(key)
	}
}
// forgetWork tells the given work queue to stop tracking retries for obj
// (e.g. because the object was deleted) and marks it done.
func (ctrl *ProvisionController) forgetWork(queue workqueue.RateLimitingInterface, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	queue.Forget(key)
	queue.Done(key)
}
// Run starts all of this controller's control loops. If leader election is
// enabled, the loops only start once this instance acquires the lock; a lost
// lease is fatal (glog.Fatalf). The stop-channel argument is ignored — see
// the TODO below.
func (ctrl *ProvisionController) Run(_ <-chan struct{}) {
	// TODO: arg is as of 1.12 unused. Nothing can ever be cancelled. Should
	// accept a context instead and use it instead of context.TODO(), but would
	// break API. Not urgent: realistically, users are simply passing in
	// wait.NeverStop() anyway.
	run := func(ctx context.Context) {
		glog.Infof("Starting provisioner controller %s!", ctrl.component)
		defer utilruntime.HandleCrash()
		defer ctrl.claimQueue.ShutDown()
		defer ctrl.volumeQueue.ShutDown()

		// Mark the controller as running; option functions will refuse to
		// mutate it from now on.
		ctrl.hasRunLock.Lock()
		ctrl.hasRun = true
		ctrl.hasRunLock.Unlock()

		// Optional Prometheus metrics endpoint, enabled by a non-zero port.
		if ctrl.metricsPort > 0 {
			prometheus.MustRegister([]prometheus.Collector{
				metrics.PersistentVolumeClaimProvisionTotal,
				metrics.PersistentVolumeClaimProvisionFailedTotal,
				metrics.PersistentVolumeClaimProvisionDurationSeconds,
				metrics.PersistentVolumeDeleteTotal,
				metrics.PersistentVolumeDeleteFailedTotal,
				metrics.PersistentVolumeDeleteDurationSeconds,
			}...)
			http.Handle(ctrl.metricsPath, promhttp.Handler())
			address := net.JoinHostPort(ctrl.metricsAddress, strconv.FormatInt(int64(ctrl.metricsPort), 10))
			glog.Infof("Starting metrics server at %s\n", address)
			// Restart the server forever if it exits (e.g. listen error).
			go wait.Forever(func() {
				err := http.ListenAndServe(address, nil)
				if err != nil {
					glog.Errorf("Failed to listen on %s: %v", address, err)
				}
			}, 5*time.Second)
		}

		// If a SharedInformer has been passed in, this controller should not
		// call Run again
		if ctrl.claimInformer == nil {
			go ctrl.claimController.Run(ctx.Done())
		}
		if ctrl.volumeInformer == nil {
			go ctrl.volumeController.Run(ctx.Done())
		}
		if ctrl.classInformer == nil {
			go ctrl.classController.Run(ctx.Done())
		}

		// Wait for all three caches before processing any work items.
		if !cache.WaitForCacheSync(ctx.Done(), ctrl.claimController.HasSynced, ctrl.volumeController.HasSynced, ctrl.classController.HasSynced) {
			return
		}

		// NOTE(review): workers deliberately use context.TODO().Done() (a nil,
		// never-closed channel) rather than ctx.Done(), so they run until the
		// process exits — consistent with the TODO above.
		for i := 0; i < ctrl.threadiness; i++ {
			go wait.Until(ctrl.runClaimWorker, time.Second, context.TODO().Done())
			go wait.Until(ctrl.runVolumeWorker, time.Second, context.TODO().Done())
		}
		glog.Infof("Started provisioner controller %s!", ctrl.component)
		select {}
	}

	if ctrl.leaderElection {
		// Endpoints-based resource lock keyed on the provisioner name
		// ("/" is not a legal object-name character, so replace it).
		rl, err := resourcelock.New("endpoints",
			ctrl.leaderElectionNamespace,
			strings.Replace(ctrl.provisionerName, "/", "-", -1),
			ctrl.client.CoreV1(),
			resourcelock.ResourceLockConfig{
				Identity: ctrl.id,
				EventRecorder: ctrl.eventRecorder,
			})
		if err != nil {
			glog.Fatalf("Error creating lock: %v", err)
		}
		leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
			Lock: rl,
			LeaseDuration: ctrl.leaseDuration,
			RenewDeadline: ctrl.renewDeadline,
			RetryPeriod: ctrl.retryPeriod,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: run,
				OnStoppedLeading: func() {
					glog.Fatalf("leaderelection lost")
				},
			},
		})
		// RunOrDie blocks and OnStoppedLeading is fatal, so this is never reached.
		panic("unreachable")
	} else {
		run(context.TODO())
	}
}
// runClaimWorker drains claimQueue until it is shut down.
func (ctrl *ProvisionController) runClaimWorker() {
	for {
		if !ctrl.processNextClaimWorkItem() {
			return
		}
	}
}
// runVolumeWorker drains volumeQueue until it is shut down.
func (ctrl *ProvisionController) runVolumeWorker() {
	for {
		if !ctrl.processNextVolumeWorkItem() {
			return
		}
	}
}
// processNextClaimWorkItem pops one item from claimQueue and syncs it.
// Returns false only when the queue has been shut down.
func (ctrl *ProvisionController) processNextClaimWorkItem() bool {
	obj, shutdown := ctrl.claimQueue.Get()
	if shutdown {
		return false
	}
	// Always mark the item done so the queue can hand out the next one.
	defer ctrl.claimQueue.Done(obj)

	key, ok := obj.(string)
	if !ok {
		ctrl.claimQueue.Forget(obj)
		utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
		return true
	}

	if err := ctrl.syncClaimHandler(key); err != nil {
		if ctrl.claimQueue.NumRequeues(obj) < ctrl.failedProvisionThreshold {
			glog.Warningf("Retrying syncing claim %q because failures %v < threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold)
			ctrl.claimQueue.AddRateLimited(obj)
		} else {
			glog.Errorf("Giving up syncing claim %q because failures %v >= threshold %v", key, ctrl.claimQueue.NumRequeues(obj), ctrl.failedProvisionThreshold)
			// Done but do not Forget: it will not be in the queue but NumRequeues
			// will be saved until the obj is deleted from kubernetes
		}
		utilruntime.HandleError(fmt.Errorf("error syncing claim %q: %s", key, err.Error()))
		return true
	}

	ctrl.claimQueue.Forget(obj)
	return true
}
// processNextVolumeWorkItem pops one item from volumeQueue and syncs it.
// Returns false only when the queue has been shut down.
func (ctrl *ProvisionController) processNextVolumeWorkItem() bool {
	obj, shutdown := ctrl.volumeQueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Always mark the item done so the queue can hand out the next one.
		defer ctrl.volumeQueue.Done(obj)
		var key string
		var ok bool
		if key, ok = obj.(string); !ok {
			ctrl.volumeQueue.Forget(obj)
			return fmt.Errorf("expected string in workqueue but got %#v", obj)
		}
		if err := ctrl.syncVolumeHandler(key); err != nil {
			// Fix: log failedDeleteThreshold — the value actually compared
			// against — not failedProvisionThreshold as before.
			if ctrl.volumeQueue.NumRequeues(obj) < ctrl.failedDeleteThreshold {
				glog.Warningf("Retrying syncing volume %q because failures %v < threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold)
				ctrl.volumeQueue.AddRateLimited(obj)
			} else {
				glog.Errorf("Giving up syncing volume %q because failures %v >= threshold %v", key, ctrl.volumeQueue.NumRequeues(obj), ctrl.failedDeleteThreshold)
				// Done but do not Forget: it will not be in the queue but NumRequeues
				// will be saved until the obj is deleted from kubernetes
			}
			return fmt.Errorf("error syncing volume %q: %s", key, err.Error())
		}
		ctrl.volumeQueue.Forget(obj)
		return nil
	}(obj)
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}
// syncClaimHandler looks the claim up in the informer cache by key and hands
// it to syncClaim. A claim that vanished from the cache is not an error.
func (ctrl *ProvisionController) syncClaimHandler(key string) error {
	claimObj, exists, err := ctrl.claims.GetByKey(key)
	switch {
	case err != nil:
		return err
	case !exists:
		utilruntime.HandleError(fmt.Errorf("claim %q in work queue no longer exists", key))
		return nil
	}
	return ctrl.syncClaim(claimObj)
}
// syncVolumeHandler looks the volume up in the informer cache by key and
// hands it to syncVolume. A volume that vanished from the cache is not an error.
func (ctrl *ProvisionController) syncVolumeHandler(key string) error {
	volumeObj, exists, err := ctrl.volumes.GetByKey(key)
	switch {
	case err != nil:
		return err
	case !exists:
		utilruntime.HandleError(fmt.Errorf("volume %q in work queue no longer exists", key))
		return nil
	}
	return ctrl.syncVolume(volumeObj)
}
// syncClaim provisions a volume for the claim if one is desired, recording
// provision metrics either way.
func (ctrl *ProvisionController) syncClaim(obj interface{}) error {
	claim, ok := obj.(*v1.PersistentVolumeClaim)
	if !ok {
		return fmt.Errorf("expected claim but got %+v", obj)
	}
	if !ctrl.shouldProvision(claim) {
		return nil
	}
	start := time.Now()
	err := ctrl.provisionClaimOperation(claim)
	ctrl.updateProvisionStats(claim, err, start)
	return err
}
// syncVolume deletes the volume's backing storage if deletion is desired,
// recording delete metrics either way.
func (ctrl *ProvisionController) syncVolume(obj interface{}) error {
	volume, ok := obj.(*v1.PersistentVolume)
	if !ok {
		return fmt.Errorf("expected volume but got %+v", obj)
	}
	if !ctrl.shouldDelete(volume) {
		return nil
	}
	start := time.Now()
	err := ctrl.deleteVolumeOperation(volume)
	ctrl.updateDeleteStats(volume, err, start)
	return err
}
// shouldProvision reports whether this controller should provision a volume
// for the claim: the claim must be unbound, pass the provisioner's optional
// Qualifier check, and name this provisioner (via the annStorageProvisioner
// annotation on 1.5+, or via its StorageClass's provisioner field on 1.4).
func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) bool {
	// Already bound — nothing to provision.
	if claim.Spec.VolumeName != "" {
		return false
	}

	// Give the provisioner a veto if it implements Qualifier.
	if qualifier, ok := ctrl.provisioner.(Qualifier); ok && !qualifier.ShouldProvision(claim) {
		return false
	}

	// Kubernetes 1.5+ provisioning with annStorageProvisioner
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) {
		provisioner, found := claim.Annotations[annStorageProvisioner]
		return found && provisioner == ctrl.provisionerName
	}

	// Kubernetes 1.4 provisioning, evaluating class.Provisioner
	claimClass := helper.GetPersistentVolumeClaimClass(claim)
	provisioner, _, err := ctrl.getStorageClassFields(claimClass)
	if err != nil {
		glog.Errorf("Error getting claim %q's StorageClass's fields: %v", claimToClaimKey(claim), err)
		return false
	}
	return provisioner == ctrl.provisionerName
}
// shouldDelete reports whether this controller should delete the volume's
// backing storage: the volume must be Released (or Failed on 1.4), carry the
// Delete reclaim policy, and have been provisioned by this provisioner.
func (ctrl *ProvisionController) shouldDelete(volume *v1.PersistentVolume) bool {
	// In 1.9+ PV protection means the object will exist briefly with a
	// deletion timestamp even after our successful Delete. Ignore it.
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.9.0")) && volume.ObjectMeta.DeletionTimestamp != nil {
		return false
	}

	// In 1.5+ we delete only if the volume is in state Released. In 1.4 we must
	// delete if the volume is in state Failed too.
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.5.0")) {
		if volume.Status.Phase != v1.VolumeReleased {
			return false
		}
	} else if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed {
		return false
	}

	if volume.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
		return false
	}
	if !metav1.HasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) {
		return false
	}
	// Only delete volumes we ourselves provisioned.
	return volume.Annotations[annDynamicallyProvisioned] == ctrl.provisionerName
}
// canProvision returns a non-nil error if the provisioner cannot handle the
// claim — currently only when a Block-mode claim meets a provisioner without
// block support.
func (ctrl *ProvisionController) canProvision(claim *v1.PersistentVolumeClaim) error {
	if util.CheckPersistentVolumeClaimModeBlock(claim) {
		if !ctrl.supportsBlock() {
			return fmt.Errorf("%s does not support block volume provisioning", ctrl.provisionerName)
		}
	}
	return nil
}
// updateProvisionStats records provision success/failure metrics, labeled by
// the claim's storage class ("" when unset).
func (ctrl *ProvisionController) updateProvisionStats(claim *v1.PersistentVolumeClaim, err error, startTime time.Time) {
	var class string
	if claim.Spec.StorageClassName != nil {
		class = *claim.Spec.StorageClassName
	}
	if err == nil {
		metrics.PersistentVolumeClaimProvisionDurationSeconds.WithLabelValues(class).Observe(time.Since(startTime).Seconds())
		metrics.PersistentVolumeClaimProvisionTotal.WithLabelValues(class).Inc()
	} else {
		metrics.PersistentVolumeClaimProvisionFailedTotal.WithLabelValues(class).Inc()
	}
}
// updateDeleteStats records delete success/failure metrics, labeled by the
// volume's storage class.
func (ctrl *ProvisionController) updateDeleteStats(volume *v1.PersistentVolume, err error, startTime time.Time) {
	class := volume.Spec.StorageClassName
	if err == nil {
		metrics.PersistentVolumeDeleteDurationSeconds.WithLabelValues(class).Observe(time.Since(startTime).Seconds())
		metrics.PersistentVolumeDeleteTotal.WithLabelValues(class).Inc()
	} else {
		metrics.PersistentVolumeDeleteFailedTotal.WithLabelValues(class).Inc()
	}
}
// provisionClaimOperation attempts to provision a volume for the given claim.
// Returns error, which indicates whether provisioning should be retried
// (requeue the claim) or not. A nil return means either success or a
// permanent condition that retrying will not fix.
func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) error {
	// Most code here is identical to that found in controller.go of kube's PV controller...
	claimClass := helper.GetPersistentVolumeClaimClass(claim)
	operation := fmt.Sprintf("provision %q class %q", claimToClaimKey(claim), claimClass)
	glog.Infof(logOperation(operation, "started"))

	// A previous doProvisionClaim may just have finished while we were waiting for
	// the locks. Check that PV (with deterministic name) hasn't been provisioned
	// yet.
	pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
	volume, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
	if err == nil && volume != nil {
		// Volume has been already provisioned, nothing to do.
		glog.Infof(logOperation(operation, "persistentvolume %q already exists, skipping", pvName))
		return nil
	}

	// Prepare a claimRef to the claim early (to fail before a volume is
	// provisioned)
	claimRef, err := ref.GetReference(scheme.Scheme, claim)
	if err != nil {
		glog.Errorf(logOperation(operation, "unexpected error getting claim reference: %v", err))
		return nil
	}

	provisioner, parameters, err := ctrl.getStorageClassFields(claimClass)
	if err != nil {
		glog.Errorf(logOperation(operation, "error getting claim's StorageClass's fields: %v", err))
		return nil
	}
	if provisioner != ctrl.provisionerName {
		// class.Provisioner has either changed since shouldProvision() or
		// annDynamicallyProvisioned contains different provisioner than
		// class.Provisioner.
		glog.Errorf(logOperation(operation, "unknown provisioner %q requested in claim's StorageClass", provisioner))
		return nil
	}

	// Check if this provisioner can provision this claim.
	if err = ctrl.canProvision(claim); err != nil {
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
		glog.Errorf(logOperation(operation, "failed to provision volume: %v", err))
		return nil
	}

	// ReclaimPolicy is configurable on the StorageClass only in 1.8+.
	reclaimPolicy := v1.PersistentVolumeReclaimDelete
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.8.0")) {
		reclaimPolicy, err = ctrl.fetchReclaimPolicy(claimClass)
		if err != nil {
			return err
		}
	}

	mountOptions, err := ctrl.fetchMountOptions(claimClass)
	if err != nil {
		return err
	}

	// Topology-aware provisioning (selected node, allowed topologies) is 1.11+.
	var selectedNode *v1.Node
	var allowedTopologies []v1.TopologySelectorTerm
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.11.0")) {
		// Get SelectedNode
		if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
			selectedNode, err = ctrl.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
			if err != nil {
				err = fmt.Errorf("failed to get target node: %v", err)
				ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
				return err
			}
		}
		// Get AllowedTopologies
		allowedTopologies, err = ctrl.fetchAllowedTopologies(claimClass)
		if err != nil {
			err = fmt.Errorf("failed to get AllowedTopologies from StorageClass: %v", err)
			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
			return err
		}
	}

	options := VolumeOptions{
		PersistentVolumeReclaimPolicy: reclaimPolicy,
		PVName: pvName,
		PVC: claim,
		MountOptions: mountOptions,
		Parameters: parameters,
		SelectedNode: selectedNode,
		AllowedTopologies: allowedTopologies,
	}

	ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "Provisioning", fmt.Sprintf("External provisioner is provisioning volume for claim %q", claimToClaimKey(claim)))

	volume, err = ctrl.provisioner.Provision(options)
	if err != nil {
		if ierr, ok := err.(*IgnoredError); ok {
			// Provision ignored, do nothing and hope another provisioner will provision it.
			glog.Infof(logOperation(operation, "volume provision ignored: %v", ierr))
			return nil
		}
		err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
		return err
	}

	glog.Infof(logOperation(operation, "volume %q provisioned", volume.Name))

	// Set ClaimRef and the PV controller will bind and set annBoundByController for us
	volume.Spec.ClaimRef = claimRef

	metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, ctrl.provisionerName)
	if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.6.0")) {
		volume.Spec.StorageClassName = claimClass
	} else {
		metav1.SetMetaDataAnnotation(&volume.ObjectMeta, annClass, claimClass)
	}

	// Try to create the PV object several times
	for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
		// Fix: log message previously read "persistentvvolume".
		glog.Infof(logOperation(operation, "trying to save persistentvolume %q", volume.Name))
		if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
			// Save succeeded.
			if err != nil {
				glog.Infof(logOperation(operation, "persistentvolume %q already exists, reusing", volume.Name))
				err = nil
			} else {
				glog.Infof(logOperation(operation, "persistentvolume %q saved", volume.Name))
			}
			break
		}
		// Save failed, try again after a while.
		glog.Infof(logOperation(operation, "failed to save persistentvolume %q: %v", volume.Name, err))
		time.Sleep(ctrl.createProvisionedPVInterval)
	}

	if err != nil {
		// Save failed. Now we have a storage asset outside of Kubernetes,
		// but we don't have appropriate PV object for it.
		// Emit some event here and try to delete the storage asset several
		// times.
		strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err)
		glog.Error(logOperation(operation, strerr))
		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", strerr)
		for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
			if err = ctrl.provisioner.Delete(volume); err == nil {
				// Delete succeeded
				glog.Infof(logOperation(operation, "cleaning volume %q succeeded", volume.Name))
				break
			}
			// Delete failed, try again after a while.
			glog.Infof(logOperation(operation, "failed to clean volume %q: %v", volume.Name, err))
			time.Sleep(ctrl.createProvisionedPVInterval)
		}
		if err != nil {
			// Delete failed several times. There is an orphaned volume and there
			// is nothing we can do about it.
			strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), err)
			glog.Error(logOperation(operation, strerr))
			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningCleanupFailed", strerr)
		}
	} else {
		msg := fmt.Sprintf("Successfully provisioned volume %s", volume.Name)
		ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "ProvisioningSucceeded", msg)
	}

	glog.Infof(logOperation(operation, "succeeded"))
	return nil
}
// deleteVolumeOperation attempts to delete the volume backing the given
// volume. Returns error, which indicates whether deletion should be retried
// (requeue the volume) or not
//
// The deletion is two-phase: first the backing storage asset is removed via
// the provisioner plugin, then the PV API object itself is deleted. If the
// second phase fails, the error is returned so the operation is retried.
func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
operation := fmt.Sprintf("delete %q", volume.Name)
glog.Infof(logOperation(operation, "started"))
// This method may have been waiting for a volume lock for some time.
// Our check does not have to be as sophisticated as PV controller's, we can
// trust that the PV controller has set the PV to Released/Failed and it's
// ours to delete
newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
if err != nil {
// NOTE(review): a failed Get deliberately returns nil (no retry) —
// presumably because the PV is already gone; confirm this is also the
// desired behavior for transient API-server errors.
return nil
}
if !ctrl.shouldDelete(newVolume) {
glog.Infof(logOperation(operation, "persistentvolume no longer needs deletion, skipping"))
return nil
}
// Phase 1: ask the provisioner plugin to destroy the backing storage asset.
err = ctrl.provisioner.Delete(volume)
if err != nil {
if ierr, ok := err.(*IgnoredError); ok {
// Delete ignored, do nothing and hope another provisioner will delete it.
glog.Infof(logOperation(operation, "volume deletion ignored: %v", ierr))
return nil
}
// Delete failed, emit an event.
glog.Errorf(logOperation(operation, "volume deletion failed: %v", err))
ctrl.eventRecorder.Event(volume, v1.EventTypeWarning, "VolumeFailedDelete", err.Error())
return err
}
glog.Infof(logOperation(operation, "volume deleted"))
// Phase 2: delete the PV API object itself.
// Delete the volume
if err = ctrl.client.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
// Oops, could not delete the volume and therefore the controller will
// try to delete the volume again on next update.
glog.Infof(logOperation(operation, "failed to delete persistentvolume: %v", err))
return err
}
glog.Infof(logOperation(operation, "persistentvolume deleted"))
glog.Infof(logOperation(operation, "succeeded"))
return nil
}
// logOperation renders a log line tagged with its operation:
// "<operation>: <message formatted from format and a>".
func logOperation(operation, format string, a ...interface{}) string {
	prefixed := operation + ": " + format
	return fmt.Sprintf(prefixed, a...)
}
// getInClusterNamespace returns the namespace in which the controller runs.
func getInClusterNamespace() string {
if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
return ns
}
// Fall back to the namespace associated with the service account token, if available
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return "default"
}
// getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume.
// The name must be unique; it is derived from the claim's UID, which the API
// server guarantees to be unique.
func (ctrl *ProvisionController) getProvisionedVolumeNameForClaim(claim *v1.PersistentVolumeClaim) string {
	return fmt.Sprintf("pvc-%s", claim.UID)
}
// getStorageClassFields looks up the StorageClass with the given name in the
// controller's class cache and returns its provisioner name and parameters.
// An error is returned when the cache lookup fails, the class is absent, or
// the cached object has an unexpected type.
//
// FIX: removed a stray design-document comment block ("3. It tries to find a
// StorageClass instance referenced by annotation ...") that had been pasted
// inside the !found branch and did not describe this function.
func (ctrl *ProvisionController) getStorageClassFields(name string) (string, map[string]string, error) {
	classObj, found, err := ctrl.classes.GetByKey(name)
	if err != nil {
		return "", nil, err
	}
	if !found {
		return "", nil, fmt.Errorf("storageClass %q not found", name)
	}
	// The cache may contain either the GA or the beta StorageClass type,
	// depending on which informer populated it.
	switch class := classObj.(type) {
	case *storage.StorageClass:
		return class.Provisioner, class.Parameters, nil
	case *storagebeta.StorageClass:
		return class.Provisioner, class.Parameters, nil
	}
	return "", nil, fmt.Errorf("cannot convert object to StorageClass: %+v", classObj)
}
// claimToClaimKey returns the "namespace/name" key identifying the claim.
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
	return claim.Namespace + "/" + claim.Name
}
// fetchReclaimPolicy returns the ReclaimPolicy of the named StorageClass
// from the controller's class cache. Callers must check the error; the
// Delete policy returned on a type-conversion failure is only a placeholder.
func (ctrl *ProvisionController) fetchReclaimPolicy(storageClassName string) (v1.PersistentVolumeReclaimPolicy, error) {
	obj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return "", err
	}
	if !found {
		return "", fmt.Errorf("storageClass %q not found", storageClassName)
	}
	switch sc := obj.(type) {
	case *storage.StorageClass:
		return *sc.ReclaimPolicy, nil
	case *storagebeta.StorageClass:
		return *sc.ReclaimPolicy, nil
	default:
		return v1.PersistentVolumeReclaimDelete, fmt.Errorf("cannot convert object to StorageClass: %+v", obj)
	}
}
// fetchMountOptions returns the MountOptions of the named StorageClass from
// the controller's class cache.
func (ctrl *ProvisionController) fetchMountOptions(storageClassName string) ([]string, error) {
	obj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("storageClass %q not found", storageClassName)
	}
	switch sc := obj.(type) {
	case *storage.StorageClass:
		return sc.MountOptions, nil
	case *storagebeta.StorageClass:
		return sc.MountOptions, nil
	default:
		return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", obj)
	}
}
// fetchAllowedTopologies returns the AllowedTopologies of the named
// StorageClass from the controller's class cache.
func (ctrl *ProvisionController) fetchAllowedTopologies(storageClassName string) ([]v1.TopologySelectorTerm, error) {
	obj, found, err := ctrl.classes.GetByKey(storageClassName)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("storageClass %q not found", storageClassName)
	}
	switch sc := obj.(type) {
	case *storage.StorageClass:
		return sc.AllowedTopologies, nil
	case *storagebeta.StorageClass:
		return sc.AllowedTopologies, nil
	default:
		return nil, fmt.Errorf("cannot convert object to StorageClass: %+v", obj)
	}
}
// supportsBlock reports whether the configured provisioner can provision
// block volumes: it must implement the BlockProvisioner interface and its
// SupportsBlock method must return true.
func (ctrl *ProvisionController) supportsBlock() bool {
	bp, ok := ctrl.provisioner.(BlockProvisioner)
	return ok && bp.SupportsBlock()
}
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/sys"
"unsafe"
)
// Signal-related constants from DragonFly's <signal.h>.
const (
_NSIG = 33
_SI_USER = 0
_SS_DISABLE = 4
_SIG_BLOCK = 1
_SIG_UNBLOCK = 2
_SIG_SETMASK = 3
)
// mOS holds the OS-specific fields of an m; DragonFly needs none.
type mOS struct{}
// The bodyless functions below are implemented in assembly elsewhere in the
// runtime; //go:noescape promises their pointer arguments do not escape.
//go:noescape
func lwp_create(param *lwpparams) int32
//go:noescape
func sigaltstack(new, old *stackt)
//go:noescape
func sigaction(sig uint32, new, old *sigactiont)
//go:noescape
func sigprocmask(how int32, new, old *sigset)
//go:noescape
func setitimer(mode int32, new, old *itimerval)
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func raiseproc(sig uint32)
func lwp_gettid() int32
func lwp_kill(pid, tid int32, sig int)
//go:noescape
func sys_umtx_sleep(addr *uint32, val, timeout int32) int32
//go:noescape
func sys_umtx_wakeup(addr *uint32, val int32) int32
func osyield()
// osyield_no_g yields the CPU; nosplit so it is callable without a g.
//go:nosplit
func osyield_no_g() {
osyield()
}
func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
func setNonblock(fd int32)
// From DragonFly's <sys/sysctl.h>
const (
_CTL_HW = 6
_HW_NCPU = 3
_HW_PAGESIZE = 7
)
// sigset_all has every signal bit set; used to block all signals.
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
// getncpu returns the number of CPUs reported by the hw.ncpu sysctl,
// or 1 if the query fails.
func getncpu() int32 {
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	var v uint32
	sz := unsafe.Sizeof(v)
	if sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&v)), &sz, nil, 0) >= 0 {
		return int32(v)
	}
	return 1
}
// getPageSize returns the system page size reported by the hw.pagesize
// sysctl, or 0 if the query fails.
func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	var v uint32
	sz := unsafe.Sizeof(v)
	if sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&v)), &sz, nil, 0) >= 0 {
		return uintptr(v)
	}
	return 0
}
// futexsleep sleeps while *addr == val, for at most ns nanoseconds
// (negative ns means no timeout). The real work runs on the system stack.
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
systemstack(func() {
futexsleep1(addr, val, ns)
})
}
// futexsleep1 implements futexsleep on top of DragonFly's sys_umtx_sleep.
func futexsleep1(addr *uint32, val uint32, ns int64) {
var timeout int32
if ns >= 0 {
// The timeout is specified in microseconds - ensure that we
// do not end up dividing to zero, which would put us to sleep
// indefinitely...
timeout = timediv(ns, 1000, nil)
if timeout == 0 {
timeout = 1
}
}
// sys_umtx_sleep will return EWOULDBLOCK (EAGAIN) when the timeout
// expires or EBUSY if the mutex value does not match.
ret := sys_umtx_sleep(addr, int32(val), timeout)
if ret >= 0 || ret == -_EINTR || ret == -_EAGAIN || ret == -_EBUSY {
return
}
print("umtx_sleep addr=", addr, " val=", val, " ret=", ret, "\n")
// Unexpected errno: deliberately fault by storing to an unmapped low
// address; the 0x1005 value marks this crash site.
*(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
}
// futexwakeup wakes at most cnt sleepers blocked in futexsleep on addr.
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
ret := sys_umtx_wakeup(addr, int32(cnt))
if ret >= 0 {
return
}
systemstack(func() {
print("umtx_wake_addr=", addr, " ret=", ret, "\n")
// Deliberate crash on unexpected error (see futexsleep1).
*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
})
}
func lwp_start(uintptr)
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
start_func: funcPC(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
tid2: nil,
}
// TODO: Check for error.
lwp_create(¶ms)
sigprocmask(_SIG_SETMASK, &oset, nil)
}
// osinit runs once at startup to record basic system parameters.
func osinit() {
ncpu = getncpu()
if physPageSize == 0 {
// Not learned from the auxv (see sysauxv); ask sysctl instead.
physPageSize = getPageSize()
}
}
// urandom_dev is a NUL-terminated path for the raw open call below.
var urandom_dev = []byte("/dev/urandom\x00")
// getRandomData fills r with random bytes from /dev/urandom.
//go:nosplit
func getRandomData(r []byte) {
fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
closefd(fd)
// extendRandom fills out r from the n bytes actually read (defined
// elsewhere in the runtime).
extendRandom(r, int(n))
}
// goenvs delegates to the shared Unix implementation.
func goenvs() {
goenvs_unix()
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
// Allocate a 32 KB signal-handling goroutine stack for this m.
mp.gsignal = malg(32 * 1024)
mp.gsignal.m = mp
}
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
// Record the lwp tid so signalM can target this thread later.
getg().m.procid = uint64(lwp_gettid())
minitSignals()
}
// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
unminitSignals()
}
// Called from exitm, but not from drop, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m) {
}
// sigtramp is the assembly entry point for signal delivery.
func sigtramp()
// sigactiont mirrors DragonFly's struct sigaction.
type sigactiont struct {
sa_sigaction uintptr
sa_flags int32
sa_mask sigset
}
// setsig installs fn as the handler for signal i with all signals blocked
// during delivery and SA_SIGINFO|SA_ONSTACK|SA_RESTART set. When fn is the
// Go sighandler, the assembly trampoline sigtramp is installed instead
// (sigtramp sets up the environment before dispatching to sighandler).
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
if fn == funcPC(sighandler) {
fn = funcPC(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
}
// setsigstack is not used on this platform; reaching it is a fatal error.
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
throw("setsigstack")
}
// getsig returns the currently installed handler PC for signal i.
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
var sa sigactiont
sigaction(i, nil, &sa)
return sa.sa_sigaction
}
// setSignalstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp
}
// sigaddset adds signal i to mask. Signals are numbered from 1, hence the
// (i-1) bit index into the 32-bit words of the set.
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}
// sigdelset removes signal i from mask.
func sigdelset(mask *sigset, i int) {
mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}
// fixsigcode is a per-OS hook to adjust a signal's code; it is a no-op here.
//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}
// sysargs locates the ELF auxiliary vector, which follows the
// NULL-terminated argv and envp arrays, and hands it to sysauxv.
func sysargs(argc int32, argv **byte) {
n := argc + 1
// skip over argv, envp to get to auxv
for argv_index(argv, n) != nil {
n++
}
// skip NULL separator
n++
auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
sysauxv(auxv[:])
}
// Auxiliary-vector tags used below.
const (
_AT_NULL = 0
_AT_PAGESZ = 6
)
// sysauxv scans (tag, value) pairs until AT_NULL, recording the values the
// runtime cares about (currently only the physical page size).
func sysauxv(auxv []uintptr) {
for i := 0; auxv[i] != _AT_NULL; i += 2 {
tag, val := auxv[i], auxv[i+1]
switch tag {
case _AT_PAGESZ:
physPageSize = val
}
}
}
// raise sends a signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
// pid -1: presumably "current process" — confirm against lwp_kill(2).
lwp_kill(-1, lwp_gettid(), int(sig))
}
// signalM sends sig to the thread running mp, identified by the lwp tid
// recorded in minit (mp.procid).
func signalM(mp *m, sig int) {
lwp_kill(-1, int32(mp.procid), sig)
}
[dev.typeparams] runtime: use internal/abi.FuncPCABI0 for sigtramp PC on DragonflyBSD
Same as CL 313230, for DragonflyBSD. sigtramp is the only one we need.
Change-Id: Ic11d0aedc7422512b43b2e4505e8f95056f915bd
Reviewed-on: https://go-review.googlesource.com/c/go/+/321312
Trust: Cherry Mui <d62e63aa42ce272d7b6a5055d97e942b33a34679@google.com>
Run-TryBot: Cherry Mui <d62e63aa42ce272d7b6a5055d97e942b33a34679@google.com>
Reviewed-by: Than McIntosh <4b2922593166fc595a9b1a408f34f5d3817fe9d2@google.com>
Reviewed-by: Michael Knyszek <4473450dc5c88f06e1fb787f303d7805ec2aee65@google.com>
TryBot-Result: Go Bot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"internal/abi"
"runtime/internal/sys"
"unsafe"
)
// Signal-related constants from DragonFly's <signal.h>.
const (
_NSIG = 33
_SI_USER = 0
_SS_DISABLE = 4
_SIG_BLOCK = 1
_SIG_UNBLOCK = 2
_SIG_SETMASK = 3
)
// mOS holds the OS-specific fields of an m; DragonFly needs none.
type mOS struct{}
// The bodyless functions below are implemented in assembly elsewhere in the
// runtime; //go:noescape promises their pointer arguments do not escape.
//go:noescape
func lwp_create(param *lwpparams) int32
//go:noescape
func sigaltstack(new, old *stackt)
//go:noescape
func sigaction(sig uint32, new, old *sigactiont)
//go:noescape
func sigprocmask(how int32, new, old *sigset)
//go:noescape
func setitimer(mode int32, new, old *itimerval)
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func raiseproc(sig uint32)
func lwp_gettid() int32
func lwp_kill(pid, tid int32, sig int)
//go:noescape
func sys_umtx_sleep(addr *uint32, val, timeout int32) int32
//go:noescape
func sys_umtx_wakeup(addr *uint32, val int32) int32
func osyield()
// osyield_no_g yields the CPU; nosplit so it is callable without a g.
//go:nosplit
func osyield_no_g() {
osyield()
}
func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
func setNonblock(fd int32)
// From DragonFly's <sys/sysctl.h>
const (
_CTL_HW = 6
_HW_NCPU = 3
_HW_PAGESIZE = 7
)
// sigset_all has every signal bit set; used to block all signals.
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
// getncpu returns the number of CPUs reported by the hw.ncpu sysctl,
// or 1 if the query fails.
func getncpu() int32 {
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	var v uint32
	sz := unsafe.Sizeof(v)
	if sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&v)), &sz, nil, 0) >= 0 {
		return int32(v)
	}
	return 1
}
// getPageSize returns the system page size reported by the hw.pagesize
// sysctl, or 0 if the query fails.
func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	var v uint32
	sz := unsafe.Sizeof(v)
	if sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&v)), &sz, nil, 0) >= 0 {
		return uintptr(v)
	}
	return 0
}
// futexsleep sleeps while *addr == val, for at most ns nanoseconds
// (negative ns means no timeout). The real work runs on the system stack.
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
systemstack(func() {
futexsleep1(addr, val, ns)
})
}
// futexsleep1 implements futexsleep on top of DragonFly's sys_umtx_sleep.
func futexsleep1(addr *uint32, val uint32, ns int64) {
var timeout int32
if ns >= 0 {
// The timeout is specified in microseconds - ensure that we
// do not end up dividing to zero, which would put us to sleep
// indefinitely...
timeout = timediv(ns, 1000, nil)
if timeout == 0 {
timeout = 1
}
}
// sys_umtx_sleep will return EWOULDBLOCK (EAGAIN) when the timeout
// expires or EBUSY if the mutex value does not match.
ret := sys_umtx_sleep(addr, int32(val), timeout)
if ret >= 0 || ret == -_EINTR || ret == -_EAGAIN || ret == -_EBUSY {
return
}
print("umtx_sleep addr=", addr, " val=", val, " ret=", ret, "\n")
// Unexpected errno: deliberately fault by storing to an unmapped low
// address; the 0x1005 value marks this crash site.
*(*int32)(unsafe.Pointer(uintptr(0x1005))) = 0x1005
}
// futexwakeup wakes at most cnt sleepers blocked in futexsleep on addr.
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
ret := sys_umtx_wakeup(addr, int32(cnt))
if ret >= 0 {
return
}
systemstack(func() {
print("umtx_wake_addr=", addr, " ret=", ret, "\n")
// Deliberate crash on unexpected error (see futexsleep1).
*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
})
}
func lwp_start(uintptr)
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
start_func: funcPC(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
tid2: nil,
}
// TODO: Check for error.
lwp_create(¶ms)
sigprocmask(_SIG_SETMASK, &oset, nil)
}
// osinit runs once at startup to record basic system parameters.
func osinit() {
ncpu = getncpu()
if physPageSize == 0 {
// Not learned from the auxv (see sysauxv); ask sysctl instead.
physPageSize = getPageSize()
}
}
// urandom_dev is a NUL-terminated path for the raw open call below.
var urandom_dev = []byte("/dev/urandom\x00")
// getRandomData fills r with random bytes from /dev/urandom.
//go:nosplit
func getRandomData(r []byte) {
fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
closefd(fd)
// extendRandom fills out r from the n bytes actually read (defined
// elsewhere in the runtime).
extendRandom(r, int(n))
}
// goenvs delegates to the shared Unix implementation.
func goenvs() {
goenvs_unix()
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
// Allocate a 32 KB signal-handling goroutine stack for this m.
mp.gsignal = malg(32 * 1024)
mp.gsignal.m = mp
}
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
// Record the lwp tid so signalM can target this thread later.
getg().m.procid = uint64(lwp_gettid())
minitSignals()
}
// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
unminitSignals()
}
// Called from exitm, but not from drop, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m) {
}
// sigtramp is the assembly entry point for signal delivery.
func sigtramp()
// sigactiont mirrors DragonFly's struct sigaction.
type sigactiont struct {
sa_sigaction uintptr
sa_flags int32
sa_mask sigset
}
// setsig installs fn as the handler for signal i with all signals blocked
// during delivery and SA_SIGINFO|SA_ONSTACK|SA_RESTART set. When fn is the
// Go sighandler, the assembly trampoline sigtramp is installed instead;
// abi.FuncPCABI0 yields sigtramp's ABI0 entry PC, since the kernel calls
// the raw assembly symbol.
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
if fn == funcPC(sighandler) {
fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
}
// setsigstack is not used on this platform; reaching it is a fatal error.
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
throw("setsigstack")
}
// getsig returns the currently installed handler PC for signal i.
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
var sa sigactiont
sigaction(i, nil, &sa)
return sa.sa_sigaction
}
// setSignalstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
s.ss_sp = sp
}
// sigaddset adds signal i to mask. Signals are numbered from 1, hence the
// (i-1) bit index into the 32-bit words of the set.
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}
// sigdelset removes signal i from mask.
func sigdelset(mask *sigset, i int) {
mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}
// fixsigcode is a per-OS hook to adjust a signal's code; it is a no-op here.
//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}
// sysargs locates the ELF auxiliary vector, which follows the
// NULL-terminated argv and envp arrays, and hands it to sysauxv.
func sysargs(argc int32, argv **byte) {
n := argc + 1
// skip over argv, envp to get to auxv
for argv_index(argv, n) != nil {
n++
}
// skip NULL separator
n++
auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
sysauxv(auxv[:])
}
// Auxiliary-vector tags used below.
const (
_AT_NULL = 0
_AT_PAGESZ = 6
)
// sysauxv scans (tag, value) pairs until AT_NULL, recording the values the
// runtime cares about (currently only the physical page size).
func sysauxv(auxv []uintptr) {
for i := 0; auxv[i] != _AT_NULL; i += 2 {
tag, val := auxv[i], auxv[i+1]
switch tag {
case _AT_PAGESZ:
physPageSize = val
}
}
}
// raise sends a signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
// pid -1: presumably "current process" — confirm against lwp_kill(2).
lwp_kill(-1, lwp_gettid(), int(sig))
}
// signalM sends sig to the thread running mp, identified by the lwp tid
// recorded in minit (mp.procid).
func signalM(mp *m, sig int) {
lwp_kill(-1, int32(mp.procid), sig)
}
|
/*
Copyright 2017 Matthew Lord (mattalord@gmail.com)
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package main
import (
"os"
"fmt"
"log"
"github.com/mattlord/myarbitratord/group_replication/instances"
"time"
"flag"
"sort"
"encoding/json"
"io/ioutil"
)
// MembersByOnlineNodes implements sort.Interface (see Len/Swap/Less below),
// ordering instances by their Online_participants count.
type MembersByOnlineNodes []instances.Instance
// debug gates extra diagnostic output in this file.
// NOTE(review): verify that the -debug flag actually enables this; main
// appears to set only instances.Debug.
var debug = false
// main parses flags, optionally loads MySQL credentials from a JSON file,
// then monitors the Group Replication cluster starting from the seed node.
// Exits 1 on usage/credential/monitoring errors (via log.Fatal), 0 on a
// clean stop.
func main() {
	var seed_host string
	var seed_port string
	var debug_mode bool
	var mysql_user string
	var mysql_pass string
	var mysql_auth_file string

	// Expected JSON auth file shape: {"user": "...", "password": "..."}
	type json_mysql_auth struct {
		User     string
		Password string
	}

	flag.StringVar(&seed_host, "seed_host", "", "IP/Hostname of the seed node used to start monitoring the Group Replication cluster")
	flag.StringVar(&seed_port, "seed_port", "3306", "Port of the seed node used to start monitoring the Group Replication cluster")
	flag.BoolVar(&debug_mode, "debug", false, "Execute in debug mode with all debug logging enabled")
	flag.StringVar(&mysql_user, "mysql_user", "root", "The mysql user account to be used when connecting to any node in the cluster")
	flag.StringVar(&mysql_pass, "mysql_password", "", "The mysql user account password to be used when connecting to any node in the cluster")
	flag.StringVar(&mysql_auth_file, "mysql_auth_file", "", "The JSON encoded file containining user and password entities for the mysql account to be used when connecting to any node in the cluster")
	flag.Parse()

	// ToDo: I need to handle the password on the command-line more securely
	// I need to do some data masking for the processlist

	// A host is required, the default port of 3306 will then be attempted
	if seed_host == "" {
		fmt.Println("myarbitratord usage: myarbitratord -seed_host=<seed_host> [-seed_port=<seed_port>] [-mysql_user=<mysql_user>] [-mysql_password=<mysql_pass>] [-mysql_auth_file=<path to json file>] [-debug=true]")
		os.Exit(1)
	}

	if debug_mode {
		instances.Debug = true
		// FIX: the package-level debug flag was never enabled before, so the
		// "if( debug )" branches in MonitorCluster could not fire.
		debug = true
	}

	if mysql_auth_file != "" && mysql_pass == "" {
		if debug_mode {
			fmt.Printf("Reading MySQL credentials from file: %s\n", mysql_auth_file)
		}

		jsonfile, err := ioutil.ReadFile(mysql_auth_file)
		if err != nil {
			log.Fatal("Could not read mysql credentials from specified file: " + mysql_auth_file)
		}

		var jsonauth json_mysql_auth
		// FIX: the Unmarshal error was previously ignored; report malformed
		// JSON directly instead of falling through to the generic check below.
		if err = json.Unmarshal(jsonfile, &jsonauth); err != nil {
			log.Fatal("Could not parse mysql credentials from specified file: " + mysql_auth_file + ": " + err.Error())
		}

		mysql_user = jsonauth.User
		mysql_pass = jsonauth.Password

		if mysql_user == "" || mysql_pass == "" {
			errstr := "Failed to read user and password from " + mysql_auth_file + ". Ensure that the file contents are in the required format: \n{\n \"user\": \"myser\",\n \"password\": \"mypass\"\n }"
			log.Fatal(errstr)
		}

		if debug_mode {
			// SECURITY FIX: the debug logs previously echoed the raw password
			// (both here and via the unmarshaled-struct dump); mask it.
			fmt.Printf("Read mysql auth info from file. user: %s, password: ****\n", mysql_user)
		}
	}

	fmt.Println("Welcome to the MySQL Group Replication Arbitrator!")
	fmt.Printf("Starting operations from seed node: '%s:%s'\n", seed_host, seed_port)

	seed_node := instances.New(seed_host, seed_port, mysql_user, mysql_pass)

	if err := MonitorCluster(seed_node); err != nil {
		// log.Fatal already exits with status 1; the old "os.Exit(100)" that
		// followed it was unreachable dead code and has been removed.
		log.Fatal(err)
	}
	os.Exit(0)
}
// MonitorCluster polls the group from seed_node on a 2-second cycle: it
// refreshes the membership view, shuts down unhealthy members while a quorum
// exists, and tries to pick/force a new primary partition when quorum is
// lost. It returns the error that ended the loop (but see the shadowing
// note below).
func MonitorCluster( seed_node *instances.Instance ) error {
loop := true
var err error
// last known membership view, used to find a replacement seed node
last_view := []instances.Instance{}
for( loop == true ){
// let's check the status of the current seed node
// if the seed node
err = seed_node.Connect()
if( err != nil || seed_node.Member_state != "ONLINE" ){
// if we couldn't connect to the current seed node or it's no longer part of the group
// let's try and get a new seed node from the last known membership view
fmt.Println( "Attempting to get a new seed node..." )
for _, member := range last_view {
err = member.Connect()
if( err == nil && member.Member_state == "ONLINE" ){
// NOTE(review): address of the range variable — only safe because of the
// immediate break before the next iteration overwrites it.
seed_node = &member
fmt.Printf( "Updated seed node! New seed node is: '%s:%s'\n", seed_node.Mysql_host, seed_node.Mysql_port )
break
}
}
}
// NOTE(review): ":=" shadows the function-scoped err for the rest of this
// loop body, so the final "return err" below returns the OUTER err (last set
// by a Connect call), which may be nil even when the loop ended on a real
// error. Also note the bare "continue"s retry immediately, skipping the
// 2-second sleep — a tight retry loop while the seed node is broken.
members, err := seed_node.GetMembers()
if( err != nil ){
// something is up with our current seed node, let's loop again
continue
}
// save this view in case the seed node is no longer valid next time
//last_view = copy( last_view, members[:] )
last_view = *members
quorum, err := seed_node.HasQuorum()
if( err != nil ){
// something is up with our current seed node, let's loop again
continue
}
if( quorum ){
// Let's try and shutdown the nodes NOT in the primary partition if we can reach them from the arbitrator
for _, member := range *members {
if( member.Member_state == "ERROR" || member.Member_state == "UNREACHABLE" ){
fmt.Printf( "Shutting down non-healthy node: '%s:%s'\n", member.Mysql_host, member.Mysql_port )
err = member.Connect()
if( err != nil ){
fmt.Printf( "Could not connect to '%s:%s' in order to shut it down\n", member.Mysql_host, member.Mysql_port )
}
// NOTE(review): Shutdown is attempted even when the Connect above failed —
// best-effort, but confirm Shutdown tolerates an unconnected instance.
err = member.Shutdown()
if( err != nil ){
fmt.Printf( "Could not shutdown instance: '%s:%s'\n", member.Mysql_host, member.Mysql_port )
}
}
}
} else {
// handling other network partitions and split brain scenarios will be much trickier... I'll need to try and
// contact each member in the last seen view and try to determine which partition should become the
// primary one. We'll then need to contact 1 node in the new primary partition and explicitly set the new
// membership with 'set global group_replication_force_members="<node_list>"'. Finally we'll need to try
// and connect to the nodes on the losing side(s) of the partition and attempt to shutdown the mysqlds
fmt.Println( "Network partition detected! Attempting to handle... " )
// does anyone have a quorum? Let's double check before forcing the membership
primary_partition := false
for _, member := range last_view {
member.Connect()
quorum, err := member.HasQuorum()
if( err == nil && quorum ){
// NOTE(review): address of the range variable again — safe only due to the
// immediate break.
seed_node = &member
primary_partition = true
break
}
}
// If no one in fact has a quorum, then let's see which partition has the most
// online/participating/communicating members. The participants in that partition
// will then be the ones that we use to force the new membership and unlock the cluster
// ToDo: should we consider GTID_EXECUTED sets when choosing the primary partition???
if( primary_partition == false ){
fmt.Println( "No primary partition found! Attempting to choose and force a new one ... " )
sort.Sort( MembersByOnlineNodes(last_view) )
// now the last element in the array is the one to use as it's coordinating with the most nodes
seed_node = &last_view[len(last_view)-1]
err = seed_node.Connect()
if( err != nil ){
// let's just loop again
continue
}
if( debug ){
fmt.Printf( "Member view sorted by number of online nodes: %v\n", last_view )
}
// let's build a string of '<host>:<port>' combinations that we want to use for the new membership view
members, _ := seed_node.GetMembers()
force_member_string := ""
// NOTE(review): the comma guard uses the slice index i, not a count of
// members actually appended — if member 0 is not ONLINE, the string gets a
// leading comma.
for i, member := range *members {
err = member.Connect()
if( err == nil && member.Member_state == "ONLINE" ){
if( i != 0 ){
force_member_string = force_member_string + ","
}
force_member_string = force_member_string + member.Mysql_host + ":" + member.Mysql_port
}
}
if( force_member_string != "" ){
fmt.Printf( "Forcing group membership to form new primary partition! Using: '%s'\n", force_member_string )
err = seed_node.ForceMembers( force_member_string )
} else {
fmt.Println( "No valid group membership to force!" )
}
}
}
if( err != nil ){
loop = false
} else {
time.Sleep( time.Millisecond * 2000 )
}
}
return err
}
// The remaining functions are used to sort our membership slice
// MembersByOnlineNodes implements sort.Interface: sort.Sort arranges the
// slice in ascending order of Online_participants, so the last element is
// the member that sees the most online nodes.
func (a MembersByOnlineNodes) Len() int {
return len(a)
}
// Swap exchanges elements i and j.
func (a MembersByOnlineNodes) Swap( i, j int ) {
a[i], a[j] = a[j], a[i]
}
// Less orders members by how many online participants each one reports.
func (a MembersByOnlineNodes) Less( i, j int ) bool {
return a[i].Online_participants < a[j].Online_participants
}
Cleaned up JSON auth file format example output
/*
Copyright 2017 Matthew Lord (mattalord@gmail.com)
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package main
import (
"os"
"fmt"
"log"
"github.com/mattlord/myarbitratord/group_replication/instances"
"time"
"flag"
"sort"
"encoding/json"
"io/ioutil"
)
// MembersByOnlineNodes implements sort.Interface, ordering instances by
// their Online_participants count.
type MembersByOnlineNodes []instances.Instance
// debug gates extra diagnostic output in this file.
var debug = false
func main(){
var seed_host string
var seed_port string
var debug_mode bool
var mysql_user string
var mysql_pass string
var mysql_auth_file string
type json_mysql_auth struct {
User string
Password string
}
flag.StringVar( &seed_host, "seed_host", "", "IP/Hostname of the seed node used to start monitoring the Group Replication cluster" )
flag.StringVar( &seed_port, "seed_port", "3306", "Port of the seed node used to start monitoring the Group Replication cluster" )
flag.BoolVar( &debug_mode, "debug", false, "Execute in debug mode with all debug logging enabled" )
flag.StringVar( &mysql_user, "mysql_user", "root", "The mysql user account to be used when connecting to any node in the cluster" )
flag.StringVar( &mysql_pass, "mysql_password", "", "The mysql user account password to be used when connecting to any node in the cluster" )
flag.StringVar( &mysql_auth_file, "mysql_auth_file", "", "The JSON encoded file containining user and password entities for the mysql account to be used when connecting to any node in the cluster" )
flag.Parse()
// ToDo: I need to handle the password on the command-line more securely
// I need to do some data masking for the processlist
// A host is required, the default port of 3306 will then be attempted
if( seed_host == "" ){
fmt.Println( "myarbitratord usage: myarbitratord -seed_host=<seed_host> [-seed_port=<seed_port>] [-mysql_user=<mysql_user>] [-mysql_password=<mysql_pass>] [-mysql_auth_file=<path to json file>] [-debug=true]" )
os.Exit(1);
}
if( debug_mode ){
instances.Debug = true
}
if( mysql_auth_file != "" && mysql_pass == "" ){
if( debug_mode ){
fmt.Printf( "Reading MySQL credentials from file: %s\n", mysql_auth_file )
}
jsonfile, err := ioutil.ReadFile( mysql_auth_file )
if( err != nil ){
log.Fatal( "Could not read mysql credentials from specified file: " + mysql_auth_file )
}
var jsonauth json_mysql_auth
json.Unmarshal( jsonfile, &jsonauth )
if( debug_mode ){
fmt.Printf( "Unmarshaled mysql auth file contents: %v\n", jsonauth )
}
mysql_user = jsonauth.User
mysql_pass = jsonauth.Password
if( mysql_user == "" || mysql_pass == "" ){
errstr := "Failed to read user and password from " + mysql_auth_file + ". Ensure that the file contents are in the required format: \n{\n \"user\": \"myser\",\n \"password\": \"mypass\"\n}"
log.Fatal( errstr )
}
if( debug_mode ){
fmt.Printf( "Read mysql auth info from file. user: %s, password: %s\n", mysql_user, mysql_pass )
}
}
fmt.Println( "Welcome to the MySQL Group Replication Arbitrator!" )
fmt.Printf( "Starting operations from seed node: '%s:%s'\n", seed_host, seed_port )
seed_node := instances.New( seed_host, seed_port, mysql_user, mysql_pass )
err := MonitorCluster( seed_node )
if( err != nil ){
log.Fatal( err )
os.Exit( 100 )
} else {
os.Exit( 0 )
}
}
// MonitorCluster is the arbitrator's main loop. On every iteration it
// (re)connects to the current seed node, refreshes the group membership view
// and, when the group has quorum, shuts down unhealthy members. When quorum
// has been lost it attempts to pick the partition coordinating with the most
// nodes and force a new group membership on it.
// The loop runs until an unrecoverable error occurs, which is returned.
func MonitorCluster( seed_node *instances.Instance ) error {
	loop := true
	var err error
	var quorum bool
	var members *[]instances.Instance

	// the last known membership view; used to locate a replacement seed node
	// when the current one disappears
	last_view := []instances.Instance{}

	for( loop == true ){
		// let's check the status of the current seed node
		err = seed_node.Connect()

		if( err != nil || seed_node.Member_state != "ONLINE" ){
			// if we couldn't connect to the current seed node or it's no longer part of the group
			// let's try and get a new seed node from the last known membership view
			fmt.Println( "Attempting to get a new seed node..." )

			for _, member := range last_view {
				member := member // stable copy: taking &member of the range variable would alias the loop's shared slot

				err = member.Connect()

				if( err == nil && member.Member_state == "ONLINE" ){
					seed_node = &member
					fmt.Printf( "Updated seed node! New seed node is: '%s:%s'\n", seed_node.Mysql_host, seed_node.Mysql_port )
					break
				}
			}
		}

		// plain assignment (not ':=') so a failure here is not shadowed --
		// previously the loop could terminate on an error here yet the
		// function would return the stale (often nil) outer err
		members, err = seed_node.GetMembers()

		if( err != nil ){
			// something is up with our current seed node; pause briefly so we
			// don't busy-spin, then loop again
			time.Sleep( time.Millisecond * 2000 )
			continue
		}

		// save this view in case the seed node is no longer valid next time
		last_view = *members

		quorum, err = seed_node.HasQuorum()

		if( err != nil ){
			// something is up with our current seed node, let's loop again
			time.Sleep( time.Millisecond * 2000 )
			continue
		}

		if( quorum ){
			// Let's try and shutdown the nodes NOT in the primary partition if we can reach them from the arbitrator
			for _, member := range *members {
				if( member.Member_state == "ERROR" || member.Member_state == "UNREACHABLE" ){
					fmt.Printf( "Shutting down non-healthy node: '%s:%s'\n", member.Mysql_host, member.Mysql_port )

					err = member.Connect()
					if( err != nil ){
						fmt.Printf( "Could not connect to '%s:%s' in order to shut it down\n", member.Mysql_host, member.Mysql_port )
					}

					err = member.Shutdown()
					if( err != nil ){
						fmt.Printf( "Could not shutdown instance: '%s:%s'\n", member.Mysql_host, member.Mysql_port )
					}
				}
			}
		} else {
			// handling other network partitions and split brain scenarios will be much trickier... I'll need to try and
			// contact each member in the last seen view and try to determine which partition should become the
			// primary one. We'll then need to contact 1 node in the new primary partition and explicitly set the new
			// membership with 'set global group_replication_force_members="<node_list>"'. Finally we'll need to try
			// and connect to the nodes on the losing side(s) of the partition and attempt to shutdown the mysqlds
			fmt.Println( "Network partition detected! Attempting to handle... " )

			// does anyone have a quorum? Let's double check before forcing the membership
			primary_partition := false

			for _, member := range last_view {
				member := member // stable copy, see note above

				member.Connect()
				q, qerr := member.HasQuorum()

				if( qerr == nil && q ){
					seed_node = &member
					primary_partition = true
					break
				}
			}

			// If no one in fact has a quorum, then let's see which partition has the most
			// online/participating/communicating members. The participants in that partition
			// will then be the ones that we use to force the new membership and unlock the cluster
			// ToDo: should we consider GTID_EXECUTED sets when choosing the primary partition???
			if( primary_partition == false ){
				fmt.Println( "No primary partition found! Attempting to choose and force a new one ... " )

				sort.Sort( MembersByOnlineNodes(last_view) )

				// now the last element in the array is the one to use as it's coordinating with the most nodes
				seed_node = &last_view[len(last_view)-1]

				err = seed_node.Connect()

				if( err != nil ){
					// let's just loop again (after a pause, to avoid spinning)
					time.Sleep( time.Millisecond * 2000 )
					continue
				}

				if( debug ){
					fmt.Printf( "Member view sorted by number of online nodes: %v\n", last_view )
				}

				// let's build a string of '<host>:<port>' combinations that we want to use for the new membership view
				partition_members, gerr := seed_node.GetMembers()

				if( gerr != nil || partition_members == nil ){
					// the chosen node couldn't give us a view (the error was
					// previously discarded, leading to a nil dereference); retry
					time.Sleep( time.Millisecond * 2000 )
					continue
				}

				force_member_string := ""

				for _, member := range *partition_members {
					err = member.Connect()

					if( err == nil && member.Member_state == "ONLINE" ){
						// separate entries based on what has been built so far --
						// the old 'i != 0' test produced a leading comma whenever
						// the first member was not ONLINE
						if( force_member_string != "" ){
							force_member_string = force_member_string + ","
						}
						force_member_string = force_member_string + member.Mysql_host + ":" + member.Mysql_port
					}
				}

				if( force_member_string != "" ){
					fmt.Printf( "Forcing group membership to form new primary partition! Using: '%s'\n", force_member_string )
					err = seed_node.ForceMembers( force_member_string )
				} else {
					fmt.Println( "No valid group membership to force!" )
				}
			}
		}

		if( err != nil ){
			loop = false
		} else {
			time.Sleep( time.Millisecond * 2000 )
		}
	}

	return err
}
// The remaining functions implement sort.Interface for the membership slice.

// Len returns the number of instances in the collection.
func (members MembersByOnlineNodes) Len() int {
	return len(members)
}
// Swap exchanges the instances at positions i and j in place.
func (members MembersByOnlineNodes) Swap( i, j int ) {
	tmp := members[i]
	members[i] = members[j]
	members[j] = tmp
}
// Less orders members by the number of online participants each one can see,
// ascending (part of the sort.Interface implementation).
func (members MembersByOnlineNodes) Less( i, j int ) bool {
	left, right := members[i].Online_participants, members[j].Online_participants
	return left < right
}
|
package cmds
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
gosync "sync"
"syscall"
"golang.org/x/sync/errgroup"
"github.com/gogo/protobuf/jsonpb"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/limit"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
"github.com/pachyderm/pachyderm/src/server/pfs/fuse"
"github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/sync"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
"github.com/spf13/cobra"
)
const (
// codestart/codeend delimit the fenced shell code blocks embedded in the
// long-form help text of the commands below.
codestart = "```sh\n\n"
codeend = "\n```"
// DefaultParallelism is the default parallelism used by get-file
// and put-file.
DefaultParallelism = 10
)
// Cmds returns a slice containing pfs commands.
func Cmds(noMetrics *bool) []*cobra.Command {
metrics := !*noMetrics
raw := false
rawFlag := func(cmd *cobra.Command) {
cmd.Flags().BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
}
marshaller := &jsonpb.Marshaler{Indent: " "}
repo := &cobra.Command{
Use: "repo",
Short: "Docs for repos.",
Long: `Repos, short for repository, are the top level data object in Pachyderm.
Repos are created with create-repo.`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var description string
createRepo := &cobra.Command{
Use: "create-repo repo-name",
Short: "Create a new repo.",
Long: "Create a new repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
},
)
return grpcutil.ScrubGRPC(err)
}),
}
createRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
updateRepo := &cobra.Command{
Use: "update-repo repo-name",
Short: "Update a repo.",
Long: "Update a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
Update: true,
},
)
return grpcutil.ScrubGRPC(err)
}),
}
updateRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
inspectRepo := &cobra.Command{
Use: "inspect-repo repo-name",
Short: "Return info about a repo.",
Long: "Return info about a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
repoInfo, err := c.InspectRepo(args[0])
if err != nil {
return err
}
if repoInfo == nil {
return fmt.Errorf("repo %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, repoInfo)
}
return pretty.PrintDetailedRepoInfo(repoInfo)
}),
}
rawFlag(inspectRepo)
listRepo := &cobra.Command{
Use: "list-repo",
Short: "Return all repos.",
Long: "Return all repos.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
repoInfos, err := c.ListRepo()
if err != nil {
return err
}
if raw {
for _, repoInfo := range repoInfos {
if err := marshaller.Marshal(os.Stdout, repoInfo); err != nil {
return err
}
}
return nil
}
header := pretty.RepoHeader
if (len(repoInfos) > 0) && (repoInfos[0].AuthInfo != nil) {
header = pretty.RepoAuthHeader
}
writer := tabwriter.NewWriter(os.Stdout, header)
for _, repoInfo := range repoInfos {
pretty.PrintRepoInfo(writer, repoInfo)
}
return writer.Flush()
}),
}
rawFlag(listRepo)
var force bool
var all bool
deleteRepo := &cobra.Command{
Use: "delete-repo repo-name",
Short: "Delete a repo.",
Long: "Delete a repo.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if len(args) > 0 && all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
if len(args) == 0 && !all {
return fmt.Errorf("either a repo name or the --all flag needs to be provided")
}
if all {
_, err = client.PfsAPIClient.DeleteRepo(client.Ctx(),
&pfsclient.DeleteRepoRequest{
Force: force,
All: all,
})
} else {
err = client.DeleteRepo(args[0], force)
}
if err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
deleteRepo.Flags().BoolVarP(&force, "force", "f", false, "remove the repo regardless of errors; use with care")
deleteRepo.Flags().BoolVar(&all, "all", false, "remove all repos")
commit := &cobra.Command{
Use: "commit",
Short: "Docs for commits.",
Long: `Commits are atomic transactions on the content of a repo.
Creating a commit is a multistep process:
- start a new commit with start-commit
- write files to it through fuse or with put-file
- finish the new commit with finish-commit
Commits that have been started but not finished are NOT durable storage.
Commits become reliable (and immutable) when they are finished.
Commits can be created with another commit as a parent.
This layers the data in the commit over the data in the parent.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var parent string
startCommit := &cobra.Command{
Use: "start-commit repo-name [branch]",
Short: "Start a new commit.",
Long: `Start a new commit with parent-commit as the parent, or start a commit on the given branch; if the branch does not exist, it will be created.
Examples:
` + codestart + `# Start a new commit in repo "test" that's not on any branch
$ pachctl start-commit test
# Start a commit in repo "test" on branch "master"
$ pachctl start-commit test master
# Start a commit with "master" as the parent in repo "test", on a new branch "patch"; essentially a fork.
$ pachctl start-commit test patch -p master
# Start a commit with XXX as the parent in repo "test", not on any branch
$ pachctl start-commit test -p XXX
` + codeend,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) error {
cli, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var branch string
if len(args) == 2 {
branch = args[1]
}
commit, err := cli.PfsAPIClient.StartCommit(cli.Ctx(),
&pfsclient.StartCommitRequest{
Branch: branch,
Parent: client.NewCommit(args[0], parent),
Description: description,
})
if err != nil {
return grpcutil.ScrubGRPC(err)
}
fmt.Println(commit.ID)
return nil
}),
}
startCommit.Flags().StringVarP(&parent, "parent", "p", "", "The parent of the new commit, unneeded if branch is specified and you want to use the previous head of the branch as the parent.")
startCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents")
startCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
finishCommit := &cobra.Command{
Use: "finish-commit repo-name commit-id",
Short: "Finish a started commit.",
Long: "Finish a started commit. Commit-id must be a writeable commit.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
cli, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if description != "" {
_, err := cli.PfsAPIClient.FinishCommit(cli.Ctx(),
&pfsclient.FinishCommitRequest{
Commit: client.NewCommit(args[0], args[1]),
Description: description,
})
return grpcutil.ScrubGRPC(err)
}
return cli.FinishCommit(args[0], args[1])
}),
}
finishCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents (overwrites any existing commit description)")
finishCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
inspectCommit := &cobra.Command{
Use: "inspect-commit repo-name commit-id",
Short: "Return info about a commit.",
Long: "Return info about a commit.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
commitInfo, err := client.InspectCommit(args[0], args[1])
if err != nil {
return err
}
if commitInfo == nil {
return fmt.Errorf("commit %s not found", args[1])
}
if raw {
return marshaller.Marshal(os.Stdout, commitInfo)
}
return pretty.PrintDetailedCommitInfo(commitInfo)
}),
}
rawFlag(inspectCommit)
var from string
var number int
listCommit := &cobra.Command{
Use: "list-commit repo-name",
Short: "Return all commits on a set of repos.",
Long: `Return all commits on a set of repos.
Examples:
` + codestart + `# return commits in repo "foo"
$ pachctl list-commit foo
# return commits in repo "foo" on branch "master"
$ pachctl list-commit foo master
# return the last 20 commits in repo "foo" on branch "master"
$ pachctl list-commit foo master -n 20
# return commits that are the ancestors of XXX
$ pachctl list-commit foo XXX
# return commits in repo "foo" since commit XXX
$ pachctl list-commit foo master --from XXX
` + codeend,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var to string
if len(args) == 2 {
to = args[1]
}
if raw {
return c.ListCommitF(args[0], to, from, uint64(number), func(ci *pfsclient.CommitInfo) error {
return marshaller.Marshal(os.Stdout, ci)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
if err := c.ListCommitF(args[0], to, from, uint64(number), func(ci *pfsclient.CommitInfo) error {
pretty.PrintCommitInfo(writer, ci)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listCommit.Flags().StringVarP(&from, "from", "f", "", "list all commits since this commit")
listCommit.Flags().IntVarP(&number, "number", "n", 0, "list only this many commits; if set to zero, list all commits")
rawFlag(listCommit)
printCommitIter := func(commitIter client.CommitInfoIterator) error {
if raw {
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
if err := marshaller.Marshal(os.Stdout, commitInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
pretty.PrintCommitInfo(writer, commitInfo)
}
return writer.Flush()
}
var repos cmdutil.RepeatedStringArg
flushCommit := &cobra.Command{
Use: "flush-commit commit [commit ...]",
Short: "Wait for all commits caused by the specified commits to finish and return them.",
Long: `Wait for all commits caused by the specified commits to finish and return them.
Examples:
` + codestart + `# return commits caused by foo/XXX and bar/YYY
$ pachctl flush-commit foo/XXX bar/YYY
# return commits caused by foo/XXX leading to repos bar and baz
$ pachctl flush-commit foo/XXX -r bar -r baz
` + codeend,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var toRepos []*pfsclient.Repo
for _, repoName := range repos {
toRepos = append(toRepos, client.NewRepo(repoName))
}
commitIter, err := c.FlushCommit(commits, toRepos)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
flushCommit.Flags().VarP(&repos, "repos", "r", "Wait only for commits leading to a specific set of repos")
rawFlag(flushCommit)
var new bool
subscribeCommit := &cobra.Command{
Use: "subscribe-commit repo branch",
Short: "Print commits as they are created (finished).",
Long: `Print commits as they are created in the specified repo and
branch. By default, all existing commits on the specified branch are
returned first. A commit is only considered "created" when it's been
finished.
Examples:
` + codestart + `# subscribe to commits in repo "test" on branch "master"
$ pachctl subscribe-commit test master
# subscribe to commits in repo "test" on branch "master", but only since commit XXX.
$ pachctl subscribe-commit test master --from XXX
# subscribe to commits in repo "test" on branch "master", but only for new
# commits created from now on.
$ pachctl subscribe-commit test master --new
` + codeend,
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
repo, branch := args[0], args[1]
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if new && from != "" {
return fmt.Errorf("--new and --from cannot both be provided")
}
if new {
from = branch
}
commitIter, err := c.SubscribeCommit(repo, branch, from, pfsclient.CommitState_STARTED)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
subscribeCommit.Flags().StringVar(&from, "from", "", "subscribe to all commits since this commit")
subscribeCommit.Flags().BoolVar(&new, "new", false, "subscribe to only new commits created from now on")
rawFlag(subscribeCommit)
deleteCommit := &cobra.Command{
Use: "delete-commit repo-name commit-id",
Short: "Delete an input commit.",
Long: "Delete an input commit. An input is a commit which is not the output of a pipeline.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteCommit(args[0], args[1])
}),
}
var branchProvenance cmdutil.RepeatedStringArg
var head string
createBranch := &cobra.Command{
Use: "create-branch <repo-name> <branch-name> [flags]",
Short: "Create a new branch, or update an existing branch, on a repo.",
Long: "Create a new branch, or update an existing branch, on a repo, starting a commit on the branch will also create it, so there's often no need to call this.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
provenance, err := cmdutil.ParseBranches(branchProvenance)
if err != nil {
return err
}
return client.CreateBranch(args[0], args[1], head, provenance)
}),
}
createBranch.Flags().VarP(&branchProvenance, "provenance", "p", "The provenance for the branch.")
createBranch.Flags().StringVarP(&head, "head", "", "", "The head of the newly created branch.")
listBranch := &cobra.Command{
Use: "list-branch repo-name",
Short: "Return all branches on a repo.",
Long: "Return all branches on a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
branches, err := client.ListBranch(args[0])
if err != nil {
return err
}
if raw {
for _, branch := range branches {
if err := marshaller.Marshal(os.Stdout, branch); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.BranchHeader)
for _, branch := range branches {
pretty.PrintBranch(writer, branch)
}
return writer.Flush()
}),
}
rawFlag(listBranch)
setBranch := &cobra.Command{
Use: "set-branch repo-name commit-id/branch-name new-branch-name",
Short: "DEPRECATED Set a commit and its ancestors to a branch",
Long: `DEPRECATED Set a commit and its ancestors to a branch.
Examples:
` + codestart + `# Set commit XXX and its ancestors as branch master in repo foo.
$ pachctl set-branch foo XXX master
# Set the head of branch test as branch master in repo foo.
# After running this command, "test" and "master" both point to the
# same commit.
$ pachctl set-branch foo test master` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
fmt.Fprintf(os.Stderr, "set-branch is DEPRECATED, use create-branch instead.\n")
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.SetBranch(args[0], args[1], args[2])
}),
}
deleteBranch := &cobra.Command{
Use: "delete-branch repo-name branch-name",
Short: "Delete a branch",
Long: "Delete a branch, while leaving the commits intact",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteBranch(args[0], args[1], force)
}),
}
deleteBranch.Flags().BoolVarP(&force, "force", "f", false, "remove the branch regardless of errors; use with care")
file := &cobra.Command{
Use: "file",
Short: "Docs for files.",
Long: `Files are the lowest level data object in Pachyderm.
Files can be written to started (but not finished) commits with put-file.
Files can be read from finished commits with get-file.`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var filePaths []string
var recursive bool
var inputFile string
var parallelism int
var split string
var targetFileDatums uint
var targetFileBytes uint
var headerRecords uint
var putFileCommit bool
var overwrite bool
putFile := &cobra.Command{
Use: "put-file repo-name branch [path/to/file/in/pfs]",
Short: "Put a file into the filesystem.",
Long: `Put-file supports a number of ways to insert data into pfs:
` + codestart + `# Put data from stdin as repo/branch/path:
$ echo "data" | pachctl put-file repo branch path
# Put data from stdin as repo/branch/path and start / finish a new commit on the branch.
$ echo "data" | pachctl put-file -c repo branch path
# Put a file from the local filesystem as repo/branch/path:
$ pachctl put-file repo branch path -f file
# Put a file from the local filesystem as repo/branch/file:
$ pachctl put-file repo branch -f file
# Put the contents of a directory as repo/branch/path/dir/file:
$ pachctl put-file -r repo branch path -f dir
# Put the contents of a directory as repo/branch/dir/file:
$ pachctl put-file -r repo branch -f dir
# Put the contents of a directory as repo/branch/file, i.e. put files at the top level:
$ pachctl put-file -r repo branch / -f dir
# Put the data from a URL as repo/branch/path:
$ pachctl put-file repo branch path -f http://host/path
# Put the data from a URL as repo/branch/path:
$ pachctl put-file repo branch -f http://host/path
# Put the data from an S3 bucket as repo/branch/s3_object:
$ pachctl put-file repo branch -r -f s3://my_bucket
# Put several files or URLs that are listed in file.
# Files and URLs should be newline delimited.
$ pachctl put-file repo branch -i file
# Put several files or URLs that are listed at URL.
# NOTE this URL can reference local files, so it could cause you to put sensitive
# files into your Pachyderm cluster.
$ pachctl put-file repo branch -i http://host/path
` + codeend + `
NOTE there's a small performance overhead for using a branch name as opposed
to a commit ID in put-file. In most cases the performance overhead is
negligible, but if you are putting a large number of small files, you might
want to consider using commit IDs directly.
`,
Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
pfc, err := c.NewPutFileClient()
if err != nil {
return err
}
defer func() {
if err := pfc.Close(); err != nil && retErr == nil {
retErr = err
}
}()
repoName := args[0]
branch := args[1]
var path string
if len(args) == 3 {
path = args[2]
if url, err := url.Parse(path); err == nil && url.Scheme != "" {
fmt.Fprintf(os.Stderr, "warning: PFS destination \"%s\" looks like a URL; did you mean -f %s?\n", path, path)
}
}
if putFileCommit {
fmt.Fprintf(os.Stderr, "flag --commit / -c is deprecated; as of 1.7.2, you will get the same behavior without it\n")
}
limiter := limit.New(int(parallelism))
var sources []string
if inputFile != "" {
// User has provided a file listing sources, one per line. Read sources
var r io.Reader
if inputFile == "-" {
r = os.Stdin
} else if url, err := url.Parse(inputFile); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = resp.Body
} else {
inputFile, err := os.Open(inputFile)
if err != nil {
return err
}
defer func() {
if err := inputFile.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = inputFile
}
// scan line by line
scanner := bufio.NewScanner(r)
for scanner.Scan() {
if filePath := scanner.Text(); filePath != "" {
sources = append(sources, filePath)
}
}
} else {
// User has provided a single source
sources = filePaths
}
// Arguments parsed; create putFileHelper and begin copying data
var eg errgroup.Group
filesPut := &gosync.Map{}
for _, source := range sources {
source := source
if len(args) == 2 {
// The user has not specified a path so we use source as path.
if source == "-" {
return fmt.Errorf("must specify filename when reading data from stdin")
}
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, joinPaths("", source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) == 1 && len(args) == 3 {
// We have a single source and the user has specified a path,
// we use the path and ignore source (in terms of naming the file).
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, path, source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) > 1 && len(args) == 3 {
// We have multiple sources and the user has specified a path,
// we use that path as a prefix for the filepaths.
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, joinPaths(path, source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
}
}
return eg.Wait()
}),
}
putFile.Flags().StringSliceVarP(&filePaths, "file", "f", []string{"-"}, "The file to be put, it can be a local file or a URL.")
putFile.Flags().StringVarP(&inputFile, "input-file", "i", "", "Read filepaths or URLs from a file. If - is used, paths are read from the standard input.")
putFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively put the files in a directory.")
putFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be uploaded in parallel.")
putFile.Flags().StringVar(&split, "split", "", "Split the input file into smaller files, subject to the constraints of --target-file-datums and --target-file-bytes. Permissible values are `json` and `line`.")
putFile.Flags().UintVar(&targetFileDatums, "target-file-datums", 0, "The upper bound of the number of datums that each file contains, the last file will contain fewer if the datums don't divide evenly; needs to be used with --split.")
putFile.Flags().UintVar(&targetFileBytes, "target-file-bytes", 0, "The target upper bound of the number of bytes that each file contains; needs to be used with --split.")
putFile.Flags().UintVar(&headerRecords, "header-records", 0, "the number of records that will be converted to a PFS 'header', and prepended to future retrievals of any subset of data from PFS; needs to be used with --split=(json|line|csv)")
putFile.Flags().BoolVarP(&putFileCommit, "commit", "c", false, "DEPRECATED: Put file(s) in a new commit.")
putFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to put-file within this commit.")
copyFile := &cobra.Command{
Use: "copy-file src-repo src-commit src-path dst-repo dst-commit dst-path",
Short: "Copy files between pfs paths.",
Long: "Copy files between pfs paths.",
Run: cmdutil.RunFixedArgs(6, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
return c.CopyFile(args[0], args[1], args[2], args[3], args[4], args[5], overwrite)
}),
}
copyFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to put-file within this commit.")
var outputPath string
getFile := &cobra.Command{
Use: "get-file repo-name commit-id path/to/file",
Short: "Return the contents of a file.",
Long: `Return the contents of a file.
` + codestart + `# get file "XXX" on branch "master" in repo "foo"
$ pachctl get-file foo master XXX
# get file "XXX" in the parent of the current head of branch "master"
# in repo "foo"
$ pachctl get-file foo master^ XXX
# get file "XXX" in the grandparent of the current head of branch "master"
# in repo "foo"
$ pachctl get-file foo master^2 XXX
` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if recursive {
if outputPath == "" {
return fmt.Errorf("an output path needs to be specified when using the --recursive flag")
}
puller := sync.NewPuller()
return puller.Pull(client, outputPath, args[0], args[1], args[2], false, false, parallelism, nil, "")
}
var w io.Writer
// If an output path is given, print the output to stdout
if outputPath == "" {
w = os.Stdout
} else {
f, err := os.Create(outputPath)
if err != nil {
return err
}
defer f.Close()
w = f
}
return client.GetFile(args[0], args[1], args[2], 0, 0, w)
}),
}
getFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively download a directory.")
getFile.Flags().StringVarP(&outputPath, "output", "o", "", "The path where data will be downloaded.")
getFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be downloaded in parallel")
inspectFile := &cobra.Command{
Use: "inspect-file repo-name commit-id path/to/file",
Short: "Return info about a file.",
Long: "Return info about a file.",
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
fileInfo, err := client.InspectFile(args[0], args[1], args[2])
if err != nil {
return err
}
if fileInfo == nil {
return fmt.Errorf("file %s not found", args[2])
}
if raw {
return marshaller.Marshal(os.Stdout, fileInfo)
}
return pretty.PrintDetailedFileInfo(fileInfo)
}),
}
rawFlag(inspectFile)
var history int64
listFile := &cobra.Command{
Use: "list-file repo-name commit-id path/to/dir",
Short: "Return the files in a directory.",
Long: `Return the files in a directory.
Examples:
` + codestart + `# list top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master
# list files under directory "dir" on branch "master" in repo "foo"
$ pachctl list-file foo master dir
# list top-level files in the parent commit of the current head of "master"
# in repo "foo"
$ pachctl list-file foo master^
# list top-level files in the grandparent of the current head of "master"
# in repo "foo"
$ pachctl list-file foo master^2
# list the last n versions of top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master --history n
# list all versions of top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master --history -1
` + codeend,
Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var path string
if len(args) == 3 {
path = args[2]
}
if raw {
return client.ListFileF(args[0], args[1], path, history, func(fi *pfsclient.FileInfo) error {
return marshaller.Marshal(os.Stdout, fi)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
if err := client.ListFileF(args[0], args[1], path, history, func(fi *pfsclient.FileInfo) error {
pretty.PrintFileInfo(writer, fi)
return nil
}); err != nil {
return nil
}
return writer.Flush()
}),
}
rawFlag(listFile)
listFile.Flags().Int64Var(&history, "history", 0, "Return revision history for files.")
globFile := &cobra.Command{
Use: "glob-file repo-name commit-id pattern",
Short: "Return files that match a glob pattern in a commit.",
Long: `Return files that match a glob pattern in a commit (that is, match a glob pattern
in a repo at the state represented by a commit). Glob patterns are
documented [here](https://golang.org/pkg/path/filepath/#Match).
Examples:
` + codestart + `# Return files in repo "foo" on branch "master" that start
# with the character "A". Note how the double quotation marks around "A*" are
# necessary because otherwise your shell might interpret the "*".
$ pachctl glob-file foo master "A*"
# Return files in repo "foo" on branch "master" under directory "data".
$ pachctl glob-file foo master "data/*"
` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
fileInfos, err := client.GlobFile(args[0], args[1], args[2])
if err != nil {
return err
}
if raw {
for _, fileInfo := range fileInfos {
if err := marshaller.Marshal(os.Stdout, fileInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range fileInfos {
pretty.PrintFileInfo(writer, fileInfo)
}
return writer.Flush()
}),
}
rawFlag(globFile)
var shallow bool
diffFile := &cobra.Command{
Use: "diff-file new-repo-name new-commit-id new-path [old-repo-name old-commit-id old-path]",
Short: "Return a diff of two file trees.",
Long: `Return a diff of two file trees.
Examples:
` + codestart + `# Return the diff between foo master path and its parent.
$ pachctl diff-file foo master path
# Return the diff between foo master path1 and bar master path2.
$ pachctl diff-file foo master path1 bar master path2
` + codeend,
Run: cmdutil.RunBoundedArgs(3, 6, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var newFiles []*pfsclient.FileInfo
var oldFiles []*pfsclient.FileInfo
switch {
case len(args) == 3:
newFiles, oldFiles, err = client.DiffFile(args[0], args[1], args[2], "", "", "", shallow)
case len(args) == 6:
newFiles, oldFiles, err = client.DiffFile(args[0], args[1], args[2], args[3], args[4], args[5], shallow)
default:
return fmt.Errorf("diff-file expects either 3 or 6 args, got %d", len(args))
}
if err != nil {
return err
}
if len(newFiles) > 0 {
fmt.Println("New Files:")
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range newFiles {
pretty.PrintFileInfo(writer, fileInfo)
}
if err := writer.Flush(); err != nil {
return err
}
}
if len(oldFiles) > 0 {
fmt.Println("Old Files:")
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range oldFiles {
pretty.PrintFileInfo(writer, fileInfo)
}
if err := writer.Flush(); err != nil {
return err
}
}
return nil
}),
}
diffFile.Flags().BoolVarP(&shallow, "shallow", "s", false, "Specifies whether or not to diff subdirectories")
deleteFile := &cobra.Command{
Use: "delete-file repo-name commit-id path/to/file",
Short: "Delete a file.",
Long: "Delete a file.",
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteFile(args[0], args[1], args[2])
}),
}
getObject := &cobra.Command{
Use: "get-object hash",
Short: "Return the contents of an object",
Long: "Return the contents of an object",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.GetObject(args[0], os.Stdout)
}),
}
getTag := &cobra.Command{
Use: "get-tag tag",
Short: "Return the contents of a tag",
Long: "Return the contents of a tag",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.GetTag(args[0], os.Stdout)
}),
}
var debug bool
var commits cmdutil.RepeatedStringArg
mount := &cobra.Command{
Use: "mount path/to/mount/point",
Short: "Mount pfs locally. This command blocks.",
Long: "Mount pfs locally. This command blocks.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "fuse")
if err != nil {
return err
}
mountPoint := args[0]
commits, err := parseCommits(commits)
if err != nil {
return err
}
opts := &fuse.Options{
Fuse: &nodefs.Options{
Debug: debug,
},
Commits: commits,
}
return fuse.Mount(client, mountPoint, opts)
}),
}
mount.Flags().BoolVarP(&debug, "debug", "d", false, "Turn on debug messages.")
mount.Flags().VarP(&commits, "commits", "c", "Commits to mount for repos, arguments should be of the form \"repo:commit\"")
unmount := &cobra.Command{
Use: "unmount path/to/mount/point",
Short: "Unmount pfs.",
Long: "Unmount pfs.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
if len(args) == 1 {
return syscall.Unmount(args[0], 0)
}
if all {
stdin := strings.NewReader(`
mount | grep pfs:// | cut -f 3 -d " "
`)
var stdout bytes.Buffer
if err := cmdutil.RunIO(cmdutil.IO{
Stdin: stdin,
Stdout: &stdout,
Stderr: os.Stderr,
}, "sh"); err != nil {
return err
}
scanner := bufio.NewScanner(&stdout)
var mounts []string
for scanner.Scan() {
mounts = append(mounts, scanner.Text())
}
if len(mounts) == 0 {
fmt.Println("No mounts found.")
return nil
}
fmt.Printf("Unmount the following filesystems? yN\n")
for _, mount := range mounts {
fmt.Printf("%s\n", mount)
}
r := bufio.NewReader(os.Stdin)
bytes, err := r.ReadBytes('\n')
if err != nil {
return err
}
if bytes[0] == 'y' || bytes[0] == 'Y' {
for _, mount := range mounts {
if err := syscall.Unmount(mount, 0); err != nil {
return err
}
}
}
}
return nil
}),
}
unmount.Flags().BoolVarP(&all, "all", "a", false, "unmount all pfs mounts")
var result []*cobra.Command
result = append(result, repo)
result = append(result, createRepo)
result = append(result, updateRepo)
result = append(result, inspectRepo)
result = append(result, listRepo)
result = append(result, deleteRepo)
result = append(result, commit)
result = append(result, startCommit)
result = append(result, finishCommit)
result = append(result, inspectCommit)
result = append(result, listCommit)
result = append(result, flushCommit)
result = append(result, subscribeCommit)
result = append(result, deleteCommit)
result = append(result, createBranch)
result = append(result, listBranch)
result = append(result, setBranch)
result = append(result, deleteBranch)
result = append(result, file)
result = append(result, putFile)
result = append(result, copyFile)
result = append(result, getFile)
result = append(result, inspectFile)
result = append(result, listFile)
result = append(result, globFile)
result = append(result, diffFile)
result = append(result, deleteFile)
result = append(result, getObject)
result = append(result, getTag)
result = append(result, mount)
result = append(result, unmount)
return result
}
// parseCommits parses a list of "repo:commit" arguments (as passed to the
// mount command's --commits flag) into a map from repo name to commit ID.
//
// It returns an error for any argument that does not contain exactly one
// ':' separator. If the same repo appears more than once, the last
// occurrence wins (map assignment overwrites).
func parseCommits(args []string) (map[string]string, error) {
	result := make(map[string]string)
	for _, arg := range args {
		split := strings.Split(arg, ":")
		if len(split) != 2 {
			// Report the specific offending argument; the previous code
			// printed the entire args slice, which hid which one was bad.
			return nil, fmt.Errorf("malformed input %s, must be of the form repo:commit", arg)
		}
		result[split[0]] = split[1]
	}
	return result, nil
}
// putFileHelper puts the data read from `source` into PFS at
// repo/commit/path. `source` may be "-" (read stdin), a URL (handed to
// PutFileURL server-side), or a local path (optionally walked recursively).
// If `split` is non-empty, the data is split into multiple PFS records
// using the named delimiter via PutFileSplit. `limiter` bounds the number
// of concurrent uploads; `filesPut` records destination paths already
// written so that two sources never target the same PFS path.
// Returns the first error encountered; a Close error on the local file is
// surfaced through the named return if nothing else failed first.
// NOTE(review): assumes c and pfc stay valid for the duration of all
// recursive calls spawned by the directory walk — confirm against caller.
func putFileHelper(c *client.APIClient, pfc client.PutFileClient,
	repo, commit, path, source string, recursive, overwrite bool, // destination
	limiter limit.ConcurrencyLimiter,
	split string, targetFileDatums, targetFileBytes, headerRecords uint, // split
	filesPut *gosync.Map) (retErr error) {
	// Abort on a duplicate destination path: LoadOrStore both checks and
	// claims the path atomically, so concurrent walkers can't race past it.
	if _, ok := filesPut.LoadOrStore(path, nil); ok {
		return fmt.Errorf("multiple files put with the path %s, aborting, "+
			"some files may already have been put and should be cleaned up with "+
			"delete-file or delete-commit", path)
	}
	// putFile performs the actual upload of one reader, choosing between
	// plain PutFile, overwrite-aware PushFile, and delimiter-based
	// PutFileSplit depending on the flags captured from the enclosing call.
	putFile := func(reader io.ReadSeeker) error {
		if split == "" {
			if overwrite {
				return sync.PushFile(c, pfc, client.NewFile(repo, commit, path), reader)
			}
			_, err := pfc.PutFile(repo, commit, path, reader)
			return err
		}
		// Map the --split flag value onto the PFS delimiter enum.
		var delimiter pfsclient.Delimiter
		switch split {
		case "line":
			delimiter = pfsclient.Delimiter_LINE
		case "json":
			delimiter = pfsclient.Delimiter_JSON
		case "sql":
			delimiter = pfsclient.Delimiter_SQL
		case "csv":
			delimiter = pfsclient.Delimiter_CSV
		default:
			return fmt.Errorf("unrecognized delimiter '%s'; only accepts one of "+
				"{json,line,sql,csv}", split)
		}
		_, err := pfc.PutFileSplit(repo, commit, path, delimiter, int64(targetFileDatums), int64(targetFileBytes), int64(headerRecords), overwrite, reader)
		return err
	}
	if source == "-" {
		// Stdin is a single stream, so it cannot be combined with -r.
		if recursive {
			return errors.New("cannot set -r and read from stdin (must also set -f or -i)")
		}
		limiter.Acquire()
		defer limiter.Release()
		fmt.Fprintln(os.Stderr, "Reading from stdin.")
		return putFile(os.Stdin)
	}
	// try parsing the filename as a url, if it is one do a PutFileURL
	if url, err := url.Parse(source); err == nil && url.Scheme != "" {
		limiter.Acquire()
		defer limiter.Release()
		return pfc.PutFileURL(repo, commit, path, url.String(), recursive, overwrite)
	}
	if recursive {
		// Walk the local directory tree and upload each regular file
		// concurrently; errgroup collects the first error from any upload.
		var eg errgroup.Group
		if err := filepath.Walk(source, func(filePath string, info os.FileInfo, err error) error {
			// file doesn't exist
			if info == nil {
				return fmt.Errorf("%s doesn't exist", filePath)
			}
			if info.IsDir() {
				return nil
			}
			// Destination path mirrors the file's position under `source`.
			childDest := filepath.Join(path, strings.TrimPrefix(filePath, source))
			eg.Go(func() error {
				// don't do a second recursive put-file, just put the one file at
				// filePath into childDest, and then this walk loop will go on to the
				// next one
				return putFileHelper(c, pfc, repo, commit, childDest, filePath, false,
					overwrite, limiter, split, targetFileDatums, targetFileBytes,
					headerRecords, filesPut)
			})
			return nil
		}); err != nil {
			return err
		}
		return eg.Wait()
	}
	// Plain local file: acquire a concurrency slot, open, upload, and fold
	// any Close error into the named return if no earlier error occurred.
	limiter.Acquire()
	defer limiter.Release()
	f, err := os.Open(source)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return putFile(f)
}
func joinPaths(prefix, filePath string) string {
if url, err := url.Parse(filePath); err == nil && url.Scheme != "" {
if url.Scheme == "pfs" {
// pfs paths are of the form pfs://host/repo/branch/path we don't
// want to prefix every file with host/repo so we remove those
splitPath := strings.Split(strings.TrimPrefix(url.Path, "/"), "/")
if len(splitPath) < 3 {
return prefix
}
return filepath.Join(append([]string{prefix}, splitPath[2:]...)...)
}
return filepath.Join(prefix, strings.TrimPrefix(url.Path, "/"))
}
return filepath.Join(prefix, filePath)
}
// Killed outdated comment
package cmds
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
gosync "sync"
"syscall"
"golang.org/x/sync/errgroup"
"github.com/gogo/protobuf/jsonpb"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/limit"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
"github.com/pachyderm/pachyderm/src/server/pfs/fuse"
"github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/sync"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
"github.com/spf13/cobra"
)
const (
	// codestart and codeend delimit fenced shell-example blocks embedded
	// in the long help text of the cobra commands below.
	codestart = "```sh\n\n"
	codeend   = "\n```"
	// DefaultParallelism is the default parallelism used by get-file
	// and put-file.
	DefaultParallelism = 10
)
// Cmds returns a slice containing pfs commands.
func Cmds(noMetrics *bool) []*cobra.Command {
metrics := !*noMetrics
raw := false
rawFlag := func(cmd *cobra.Command) {
cmd.Flags().BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
}
marshaller := &jsonpb.Marshaler{Indent: " "}
repo := &cobra.Command{
Use: "repo",
Short: "Docs for repos.",
Long: `Repos, short for repository, are the top level data object in Pachyderm.
Repos are created with create-repo.`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var description string
createRepo := &cobra.Command{
Use: "create-repo repo-name",
Short: "Create a new repo.",
Long: "Create a new repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
},
)
return grpcutil.ScrubGRPC(err)
}),
}
createRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
updateRepo := &cobra.Command{
Use: "update-repo repo-name",
Short: "Update a repo.",
Long: "Update a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
Update: true,
},
)
return grpcutil.ScrubGRPC(err)
}),
}
updateRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
inspectRepo := &cobra.Command{
Use: "inspect-repo repo-name",
Short: "Return info about a repo.",
Long: "Return info about a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
repoInfo, err := c.InspectRepo(args[0])
if err != nil {
return err
}
if repoInfo == nil {
return fmt.Errorf("repo %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, repoInfo)
}
return pretty.PrintDetailedRepoInfo(repoInfo)
}),
}
rawFlag(inspectRepo)
listRepo := &cobra.Command{
Use: "list-repo",
Short: "Return all repos.",
Long: "Return all repos.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
repoInfos, err := c.ListRepo()
if err != nil {
return err
}
if raw {
for _, repoInfo := range repoInfos {
if err := marshaller.Marshal(os.Stdout, repoInfo); err != nil {
return err
}
}
return nil
}
header := pretty.RepoHeader
if (len(repoInfos) > 0) && (repoInfos[0].AuthInfo != nil) {
header = pretty.RepoAuthHeader
}
writer := tabwriter.NewWriter(os.Stdout, header)
for _, repoInfo := range repoInfos {
pretty.PrintRepoInfo(writer, repoInfo)
}
return writer.Flush()
}),
}
rawFlag(listRepo)
var force bool
var all bool
deleteRepo := &cobra.Command{
Use: "delete-repo repo-name",
Short: "Delete a repo.",
Long: "Delete a repo.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if len(args) > 0 && all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
if len(args) == 0 && !all {
return fmt.Errorf("either a repo name or the --all flag needs to be provided")
}
if all {
_, err = client.PfsAPIClient.DeleteRepo(client.Ctx(),
&pfsclient.DeleteRepoRequest{
Force: force,
All: all,
})
} else {
err = client.DeleteRepo(args[0], force)
}
if err != nil {
return grpcutil.ScrubGRPC(err)
}
return nil
}),
}
deleteRepo.Flags().BoolVarP(&force, "force", "f", false, "remove the repo regardless of errors; use with care")
deleteRepo.Flags().BoolVar(&all, "all", false, "remove all repos")
commit := &cobra.Command{
Use: "commit",
Short: "Docs for commits.",
Long: `Commits are atomic transactions on the content of a repo.
Creating a commit is a multistep process:
- start a new commit with start-commit
- write files to it through fuse or with put-file
- finish the new commit with finish-commit
Commits that have been started but not finished are NOT durable storage.
Commits become reliable (and immutable) when they are finished.
Commits can be created with another commit as a parent.
This layers the data in the commit over the data in the parent.
`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var parent string
startCommit := &cobra.Command{
Use: "start-commit repo-name [branch]",
Short: "Start a new commit.",
Long: `Start a new commit with parent-commit as the parent, or start a commit on the given branch; if the branch does not exist, it will be created.
Examples:
` + codestart + `# Start a new commit in repo "test" that's not on any branch
$ pachctl start-commit test
# Start a commit in repo "test" on branch "master"
$ pachctl start-commit test master
# Start a commit with "master" as the parent in repo "test", on a new branch "patch"; essentially a fork.
$ pachctl start-commit test patch -p master
# Start a commit with XXX as the parent in repo "test", not on any branch
$ pachctl start-commit test -p XXX
` + codeend,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) error {
cli, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var branch string
if len(args) == 2 {
branch = args[1]
}
commit, err := cli.PfsAPIClient.StartCommit(cli.Ctx(),
&pfsclient.StartCommitRequest{
Branch: branch,
Parent: client.NewCommit(args[0], parent),
Description: description,
})
if err != nil {
return grpcutil.ScrubGRPC(err)
}
fmt.Println(commit.ID)
return nil
}),
}
startCommit.Flags().StringVarP(&parent, "parent", "p", "", "The parent of the new commit, unneeded if branch is specified and you want to use the previous head of the branch as the parent.")
startCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents")
startCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
finishCommit := &cobra.Command{
Use: "finish-commit repo-name commit-id",
Short: "Finish a started commit.",
Long: "Finish a started commit. Commit-id must be a writeable commit.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
cli, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if description != "" {
_, err := cli.PfsAPIClient.FinishCommit(cli.Ctx(),
&pfsclient.FinishCommitRequest{
Commit: client.NewCommit(args[0], args[1]),
Description: description,
})
return grpcutil.ScrubGRPC(err)
}
return cli.FinishCommit(args[0], args[1])
}),
}
finishCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents (overwrites any existing commit description)")
finishCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
inspectCommit := &cobra.Command{
Use: "inspect-commit repo-name commit-id",
Short: "Return info about a commit.",
Long: "Return info about a commit.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
commitInfo, err := client.InspectCommit(args[0], args[1])
if err != nil {
return err
}
if commitInfo == nil {
return fmt.Errorf("commit %s not found", args[1])
}
if raw {
return marshaller.Marshal(os.Stdout, commitInfo)
}
return pretty.PrintDetailedCommitInfo(commitInfo)
}),
}
rawFlag(inspectCommit)
var from string
var number int
listCommit := &cobra.Command{
Use: "list-commit repo-name",
Short: "Return all commits on a set of repos.",
Long: `Return all commits on a set of repos.
Examples:
` + codestart + `# return commits in repo "foo"
$ pachctl list-commit foo
# return commits in repo "foo" on branch "master"
$ pachctl list-commit foo master
# return the last 20 commits in repo "foo" on branch "master"
$ pachctl list-commit foo master -n 20
# return commits that are the ancestors of XXX
$ pachctl list-commit foo XXX
# return commits in repo "foo" since commit XXX
$ pachctl list-commit foo master --from XXX
` + codeend,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var to string
if len(args) == 2 {
to = args[1]
}
if raw {
return c.ListCommitF(args[0], to, from, uint64(number), func(ci *pfsclient.CommitInfo) error {
return marshaller.Marshal(os.Stdout, ci)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
if err := c.ListCommitF(args[0], to, from, uint64(number), func(ci *pfsclient.CommitInfo) error {
pretty.PrintCommitInfo(writer, ci)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listCommit.Flags().StringVarP(&from, "from", "f", "", "list all commits since this commit")
listCommit.Flags().IntVarP(&number, "number", "n", 0, "list only this many commits; if set to zero, list all commits")
rawFlag(listCommit)
printCommitIter := func(commitIter client.CommitInfoIterator) error {
if raw {
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
if err := marshaller.Marshal(os.Stdout, commitInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
pretty.PrintCommitInfo(writer, commitInfo)
}
return writer.Flush()
}
var repos cmdutil.RepeatedStringArg
flushCommit := &cobra.Command{
Use: "flush-commit commit [commit ...]",
Short: "Wait for all commits caused by the specified commits to finish and return them.",
Long: `Wait for all commits caused by the specified commits to finish and return them.
Examples:
` + codestart + `# return commits caused by foo/XXX and bar/YYY
$ pachctl flush-commit foo/XXX bar/YYY
# return commits caused by foo/XXX leading to repos bar and baz
$ pachctl flush-commit foo/XXX -r bar -r baz
` + codeend,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var toRepos []*pfsclient.Repo
for _, repoName := range repos {
toRepos = append(toRepos, client.NewRepo(repoName))
}
commitIter, err := c.FlushCommit(commits, toRepos)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
flushCommit.Flags().VarP(&repos, "repos", "r", "Wait only for commits leading to a specific set of repos")
rawFlag(flushCommit)
var new bool
subscribeCommit := &cobra.Command{
Use: "subscribe-commit repo branch",
Short: "Print commits as they are created (finished).",
Long: `Print commits as they are created in the specified repo and
branch. By default, all existing commits on the specified branch are
returned first. A commit is only considered "created" when it's been
finished.
Examples:
` + codestart + `# subscribe to commits in repo "test" on branch "master"
$ pachctl subscribe-commit test master
# subscribe to commits in repo "test" on branch "master", but only since commit XXX.
$ pachctl subscribe-commit test master --from XXX
# subscribe to commits in repo "test" on branch "master", but only for new
# commits created from now on.
$ pachctl subscribe-commit test master --new
` + codeend,
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
repo, branch := args[0], args[1]
c, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if new && from != "" {
return fmt.Errorf("--new and --from cannot both be provided")
}
if new {
from = branch
}
commitIter, err := c.SubscribeCommit(repo, branch, from, pfsclient.CommitState_STARTED)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
subscribeCommit.Flags().StringVar(&from, "from", "", "subscribe to all commits since this commit")
subscribeCommit.Flags().BoolVar(&new, "new", false, "subscribe to only new commits created from now on")
rawFlag(subscribeCommit)
deleteCommit := &cobra.Command{
Use: "delete-commit repo-name commit-id",
Short: "Delete an input commit.",
Long: "Delete an input commit. An input is a commit which is not the output of a pipeline.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteCommit(args[0], args[1])
}),
}
var branchProvenance cmdutil.RepeatedStringArg
var head string
createBranch := &cobra.Command{
Use: "create-branch <repo-name> <branch-name> [flags]",
Short: "Create a new branch, or update an existing branch, on a repo.",
Long: "Create a new branch, or update an existing branch, on a repo, starting a commit on the branch will also create it, so there's often no need to call this.",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
provenance, err := cmdutil.ParseBranches(branchProvenance)
if err != nil {
return err
}
return client.CreateBranch(args[0], args[1], head, provenance)
}),
}
createBranch.Flags().VarP(&branchProvenance, "provenance", "p", "The provenance for the branch.")
createBranch.Flags().StringVarP(&head, "head", "", "", "The head of the newly created branch.")
listBranch := &cobra.Command{
Use: "list-branch repo-name",
Short: "Return all branches on a repo.",
Long: "Return all branches on a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
branches, err := client.ListBranch(args[0])
if err != nil {
return err
}
if raw {
for _, branch := range branches {
if err := marshaller.Marshal(os.Stdout, branch); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.BranchHeader)
for _, branch := range branches {
pretty.PrintBranch(writer, branch)
}
return writer.Flush()
}),
}
rawFlag(listBranch)
setBranch := &cobra.Command{
Use: "set-branch repo-name commit-id/branch-name new-branch-name",
Short: "DEPRECATED Set a commit and its ancestors to a branch",
Long: `DEPRECATED Set a commit and its ancestors to a branch.
Examples:
` + codestart + `# Set commit XXX and its ancestors as branch master in repo foo.
$ pachctl set-branch foo XXX master
# Set the head of branch test as branch master in repo foo.
# After running this command, "test" and "master" both point to the
# same commit.
$ pachctl set-branch foo test master` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
fmt.Fprintf(os.Stderr, "set-branch is DEPRECATED, use create-branch instead.\n")
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.SetBranch(args[0], args[1], args[2])
}),
}
deleteBranch := &cobra.Command{
Use: "delete-branch repo-name branch-name",
Short: "Delete a branch",
Long: "Delete a branch, while leaving the commits intact",
Run: cmdutil.RunFixedArgs(2, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteBranch(args[0], args[1], force)
}),
}
deleteBranch.Flags().BoolVarP(&force, "force", "f", false, "remove the branch regardless of errors; use with care")
file := &cobra.Command{
Use: "file",
Short: "Docs for files.",
Long: `Files are the lowest level data object in Pachyderm.
Files can be written to started (but not finished) commits with put-file.
Files can be read from finished commits with get-file.`,
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
return nil
}),
}
var filePaths []string
var recursive bool
var inputFile string
var parallelism int
var split string
var targetFileDatums uint
var targetFileBytes uint
var headerRecords uint
var putFileCommit bool
var overwrite bool
putFile := &cobra.Command{
Use: "put-file repo-name branch [path/to/file/in/pfs]",
Short: "Put a file into the filesystem.",
Long: `Put-file supports a number of ways to insert data into pfs:
` + codestart + `# Put data from stdin as repo/branch/path:
$ echo "data" | pachctl put-file repo branch path
# Put data from stdin as repo/branch/path and start / finish a new commit on the branch.
$ echo "data" | pachctl put-file -c repo branch path
# Put a file from the local filesystem as repo/branch/path:
$ pachctl put-file repo branch path -f file
# Put a file from the local filesystem as repo/branch/file:
$ pachctl put-file repo branch -f file
# Put the contents of a directory as repo/branch/path/dir/file:
$ pachctl put-file -r repo branch path -f dir
# Put the contents of a directory as repo/branch/dir/file:
$ pachctl put-file -r repo branch -f dir
# Put the contents of a directory as repo/branch/file, i.e. put files at the top level:
$ pachctl put-file -r repo branch / -f dir
# Put the data from a URL as repo/branch/path:
$ pachctl put-file repo branch path -f http://host/path
# Put the data from a URL as repo/branch/path:
$ pachctl put-file repo branch -f http://host/path
# Put the data from an S3 bucket as repo/branch/s3_object:
$ pachctl put-file repo branch -r -f s3://my_bucket
# Put several files or URLs that are listed in file.
# Files and URLs should be newline delimited.
$ pachctl put-file repo branch -i file
# Put several files or URLs that are listed at URL.
# NOTE this URL can reference local files, so it could cause you to put sensitive
# files into your Pachyderm cluster.
$ pachctl put-file repo branch -i http://host/path
` + codeend,
Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
pfc, err := c.NewPutFileClient()
if err != nil {
return err
}
defer func() {
if err := pfc.Close(); err != nil && retErr == nil {
retErr = err
}
}()
repoName := args[0]
branch := args[1]
var path string
if len(args) == 3 {
path = args[2]
if url, err := url.Parse(path); err == nil && url.Scheme != "" {
fmt.Fprintf(os.Stderr, "warning: PFS destination \"%s\" looks like a URL; did you mean -f %s?\n", path, path)
}
}
if putFileCommit {
fmt.Fprintf(os.Stderr, "flag --commit / -c is deprecated; as of 1.7.2, you will get the same behavior without it\n")
}
limiter := limit.New(int(parallelism))
var sources []string
if inputFile != "" {
// User has provided a file listing sources, one per line. Read sources
var r io.Reader
if inputFile == "-" {
r = os.Stdin
} else if url, err := url.Parse(inputFile); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = resp.Body
} else {
inputFile, err := os.Open(inputFile)
if err != nil {
return err
}
defer func() {
if err := inputFile.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = inputFile
}
// scan line by line
scanner := bufio.NewScanner(r)
for scanner.Scan() {
if filePath := scanner.Text(); filePath != "" {
sources = append(sources, filePath)
}
}
} else {
// User has provided a single source
sources = filePaths
}
// Arguments parsed; create putFileHelper and begin copying data
var eg errgroup.Group
filesPut := &gosync.Map{}
for _, source := range sources {
source := source
if len(args) == 2 {
// The user has not specified a path so we use source as path.
if source == "-" {
return fmt.Errorf("must specify filename when reading data from stdin")
}
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, joinPaths("", source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) == 1 && len(args) == 3 {
// We have a single source and the user has specified a path,
// we use the path and ignore source (in terms of naming the file).
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, path, source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) > 1 && len(args) == 3 {
// We have multiple sources and the user has specified a path,
// we use that path as a prefix for the filepaths.
eg.Go(func() error {
return putFileHelper(c, pfc, repoName, branch, joinPaths(path, source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
}
}
return eg.Wait()
}),
}
putFile.Flags().StringSliceVarP(&filePaths, "file", "f", []string{"-"}, "The file to be put, it can be a local file or a URL.")
putFile.Flags().StringVarP(&inputFile, "input-file", "i", "", "Read filepaths or URLs from a file. If - is used, paths are read from the standard input.")
putFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively put the files in a directory.")
putFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be uploaded in parallel.")
putFile.Flags().StringVar(&split, "split", "", "Split the input file into smaller files, subject to the constraints of --target-file-datums and --target-file-bytes. Permissible values are `json` and `line`.")
putFile.Flags().UintVar(&targetFileDatums, "target-file-datums", 0, "The upper bound of the number of datums that each file contains, the last file will contain fewer if the datums don't divide evenly; needs to be used with --split.")
putFile.Flags().UintVar(&targetFileBytes, "target-file-bytes", 0, "The target upper bound of the number of bytes that each file contains; needs to be used with --split.")
putFile.Flags().UintVar(&headerRecords, "header-records", 0, "the number of records that will be converted to a PFS 'header', and prepended to future retrievals of any subset of data from PFS; needs to be used with --split=(json|line|csv)")
putFile.Flags().BoolVarP(&putFileCommit, "commit", "c", false, "DEPRECATED: Put file(s) in a new commit.")
putFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to put-file within this commit.")
copyFile := &cobra.Command{
Use: "copy-file src-repo src-commit src-path dst-repo dst-commit dst-path",
Short: "Copy files between pfs paths.",
Long: "Copy files between pfs paths.",
Run: cmdutil.RunFixedArgs(6, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine(metrics, "user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
return c.CopyFile(args[0], args[1], args[2], args[3], args[4], args[5], overwrite)
}),
}
copyFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to put-file within this commit.")
var outputPath string
getFile := &cobra.Command{
Use: "get-file repo-name commit-id path/to/file",
Short: "Return the contents of a file.",
Long: `Return the contents of a file.
` + codestart + `# get file "XXX" on branch "master" in repo "foo"
$ pachctl get-file foo master XXX
# get file "XXX" in the parent of the current head of branch "master"
# in repo "foo"
$ pachctl get-file foo master^ XXX
# get file "XXX" in the grandparent of the current head of branch "master"
# in repo "foo"
$ pachctl get-file foo master^2 XXX
` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
if recursive {
if outputPath == "" {
return fmt.Errorf("an output path needs to be specified when using the --recursive flag")
}
puller := sync.NewPuller()
return puller.Pull(client, outputPath, args[0], args[1], args[2], false, false, parallelism, nil, "")
}
var w io.Writer
// If an output path is given, print the output to stdout
if outputPath == "" {
w = os.Stdout
} else {
f, err := os.Create(outputPath)
if err != nil {
return err
}
defer f.Close()
w = f
}
return client.GetFile(args[0], args[1], args[2], 0, 0, w)
}),
}
getFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively download a directory.")
getFile.Flags().StringVarP(&outputPath, "output", "o", "", "The path where data will be downloaded.")
getFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be downloaded in parallel")
inspectFile := &cobra.Command{
Use: "inspect-file repo-name commit-id path/to/file",
Short: "Return info about a file.",
Long: "Return info about a file.",
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
fileInfo, err := client.InspectFile(args[0], args[1], args[2])
if err != nil {
return err
}
if fileInfo == nil {
return fmt.Errorf("file %s not found", args[2])
}
if raw {
return marshaller.Marshal(os.Stdout, fileInfo)
}
return pretty.PrintDetailedFileInfo(fileInfo)
}),
}
rawFlag(inspectFile)
var history int64
listFile := &cobra.Command{
Use: "list-file repo-name commit-id path/to/dir",
Short: "Return the files in a directory.",
Long: `Return the files in a directory.
Examples:
` + codestart + `# list top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master
# list files under directory "dir" on branch "master" in repo "foo"
$ pachctl list-file foo master dir
# list top-level files in the parent commit of the current head of "master"
# in repo "foo"
$ pachctl list-file foo master^
# list top-level files in the grandparent of the current head of "master"
# in repo "foo"
$ pachctl list-file foo master^2
# list the last n versions of top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master --history n
# list all versions of top-level files on branch "master" in repo "foo"
$ pachctl list-file foo master --history -1
` + codeend,
Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var path string
if len(args) == 3 {
path = args[2]
}
if raw {
return client.ListFileF(args[0], args[1], path, history, func(fi *pfsclient.FileInfo) error {
return marshaller.Marshal(os.Stdout, fi)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
if err := client.ListFileF(args[0], args[1], path, history, func(fi *pfsclient.FileInfo) error {
pretty.PrintFileInfo(writer, fi)
return nil
}); err != nil {
return nil
}
return writer.Flush()
}),
}
rawFlag(listFile)
listFile.Flags().Int64Var(&history, "history", 0, "Return revision history for files.")
globFile := &cobra.Command{
Use: "glob-file repo-name commit-id pattern",
Short: "Return files that match a glob pattern in a commit.",
Long: `Return files that match a glob pattern in a commit (that is, match a glob pattern
in a repo at the state represented by a commit). Glob patterns are
documented [here](https://golang.org/pkg/path/filepath/#Match).
Examples:
` + codestart + `# Return files in repo "foo" on branch "master" that start
# with the character "A". Note how the double quotation marks around "A*" are
# necessary because otherwise your shell might interpret the "*".
$ pachctl glob-file foo master "A*"
# Return files in repo "foo" on branch "master" under directory "data".
$ pachctl glob-file foo master "data/*"
` + codeend,
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
fileInfos, err := client.GlobFile(args[0], args[1], args[2])
if err != nil {
return err
}
if raw {
for _, fileInfo := range fileInfos {
if err := marshaller.Marshal(os.Stdout, fileInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range fileInfos {
pretty.PrintFileInfo(writer, fileInfo)
}
return writer.Flush()
}),
}
rawFlag(globFile)
var shallow bool
diffFile := &cobra.Command{
Use: "diff-file new-repo-name new-commit-id new-path [old-repo-name old-commit-id old-path]",
Short: "Return a diff of two file trees.",
Long: `Return a diff of two file trees.
Examples:
` + codestart + `# Return the diff between foo master path and its parent.
$ pachctl diff-file foo master path
# Return the diff between foo master path1 and bar master path2.
$ pachctl diff-file foo master path1 bar master path2
` + codeend,
Run: cmdutil.RunBoundedArgs(3, 6, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
var newFiles []*pfsclient.FileInfo
var oldFiles []*pfsclient.FileInfo
switch {
case len(args) == 3:
newFiles, oldFiles, err = client.DiffFile(args[0], args[1], args[2], "", "", "", shallow)
case len(args) == 6:
newFiles, oldFiles, err = client.DiffFile(args[0], args[1], args[2], args[3], args[4], args[5], shallow)
default:
return fmt.Errorf("diff-file expects either 3 or 6 args, got %d", len(args))
}
if err != nil {
return err
}
if len(newFiles) > 0 {
fmt.Println("New Files:")
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range newFiles {
pretty.PrintFileInfo(writer, fileInfo)
}
if err := writer.Flush(); err != nil {
return err
}
}
if len(oldFiles) > 0 {
fmt.Println("Old Files:")
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range oldFiles {
pretty.PrintFileInfo(writer, fileInfo)
}
if err := writer.Flush(); err != nil {
return err
}
}
return nil
}),
}
diffFile.Flags().BoolVarP(&shallow, "shallow", "s", false, "Specifies whether or not to diff subdirectories")
deleteFile := &cobra.Command{
Use: "delete-file repo-name commit-id path/to/file",
Short: "Delete a file.",
Long: "Delete a file.",
Run: cmdutil.RunFixedArgs(3, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.DeleteFile(args[0], args[1], args[2])
}),
}
getObject := &cobra.Command{
Use: "get-object hash",
Short: "Return the contents of an object",
Long: "Return the contents of an object",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.GetObject(args[0], os.Stdout)
}),
}
getTag := &cobra.Command{
Use: "get-tag tag",
Short: "Return the contents of a tag",
Long: "Return the contents of a tag",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "user")
if err != nil {
return err
}
return client.GetTag(args[0], os.Stdout)
}),
}
var debug bool
var commits cmdutil.RepeatedStringArg
mount := &cobra.Command{
Use: "mount path/to/mount/point",
Short: "Mount pfs locally. This command blocks.",
Long: "Mount pfs locally. This command blocks.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
client, err := client.NewOnUserMachine(metrics, "fuse")
if err != nil {
return err
}
mountPoint := args[0]
commits, err := parseCommits(commits)
if err != nil {
return err
}
opts := &fuse.Options{
Fuse: &nodefs.Options{
Debug: debug,
},
Commits: commits,
}
return fuse.Mount(client, mountPoint, opts)
}),
}
mount.Flags().BoolVarP(&debug, "debug", "d", false, "Turn on debug messages.")
mount.Flags().VarP(&commits, "commits", "c", "Commits to mount for repos, arguments should be of the form \"repo:commit\"")
unmount := &cobra.Command{
Use: "unmount path/to/mount/point",
Short: "Unmount pfs.",
Long: "Unmount pfs.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
if len(args) == 1 {
return syscall.Unmount(args[0], 0)
}
if all {
stdin := strings.NewReader(`
mount | grep pfs:// | cut -f 3 -d " "
`)
var stdout bytes.Buffer
if err := cmdutil.RunIO(cmdutil.IO{
Stdin: stdin,
Stdout: &stdout,
Stderr: os.Stderr,
}, "sh"); err != nil {
return err
}
scanner := bufio.NewScanner(&stdout)
var mounts []string
for scanner.Scan() {
mounts = append(mounts, scanner.Text())
}
if len(mounts) == 0 {
fmt.Println("No mounts found.")
return nil
}
fmt.Printf("Unmount the following filesystems? yN\n")
for _, mount := range mounts {
fmt.Printf("%s\n", mount)
}
r := bufio.NewReader(os.Stdin)
bytes, err := r.ReadBytes('\n')
if err != nil {
return err
}
if bytes[0] == 'y' || bytes[0] == 'Y' {
for _, mount := range mounts {
if err := syscall.Unmount(mount, 0); err != nil {
return err
}
}
}
}
return nil
}),
}
unmount.Flags().BoolVarP(&all, "all", "a", false, "unmount all pfs mounts")
var result []*cobra.Command
result = append(result, repo)
result = append(result, createRepo)
result = append(result, updateRepo)
result = append(result, inspectRepo)
result = append(result, listRepo)
result = append(result, deleteRepo)
result = append(result, commit)
result = append(result, startCommit)
result = append(result, finishCommit)
result = append(result, inspectCommit)
result = append(result, listCommit)
result = append(result, flushCommit)
result = append(result, subscribeCommit)
result = append(result, deleteCommit)
result = append(result, createBranch)
result = append(result, listBranch)
result = append(result, setBranch)
result = append(result, deleteBranch)
result = append(result, file)
result = append(result, putFile)
result = append(result, copyFile)
result = append(result, getFile)
result = append(result, inspectFile)
result = append(result, listFile)
result = append(result, globFile)
result = append(result, diffFile)
result = append(result, deleteFile)
result = append(result, getObject)
result = append(result, getTag)
result = append(result, mount)
result = append(result, unmount)
return result
}
func parseCommits(args []string) (map[string]string, error) {
result := make(map[string]string)
for _, arg := range args {
split := strings.Split(arg, ":")
if len(split) != 2 {
return nil, fmt.Errorf("malformed input %s, must be of the form repo:commit", args)
}
result[split[0]] = split[1]
}
return result, nil
}
// putFileHelper uploads a single source (stdin, a URL, a local file, or —
// when recursive is set — a local directory tree) into PFS at
// repo/commit/path via the shared PutFileClient pfc.
//
// Parameters:
//   - c, pfc: API client and streaming put-file client (pfc is shared across
//     all concurrent uploads of one put-file invocation).
//   - repo, commit, path: destination coordinates in PFS.
//   - source: where to read data from; "-" means stdin, a string with a URL
//     scheme is fetched server-side via PutFileURL, anything else is treated
//     as a local filesystem path.
//   - recursive: walk a local directory and upload each regular file.
//   - overwrite: replace existing file content instead of appending.
//   - limiter: caps the number of concurrent uploads; acquired only around
//     the actual data transfer, not around the directory walk.
//   - split/targetFileDatums/targetFileBytes/headerRecords: options for
//     PutFileSplit when --split is in use.
//   - filesPut: concurrency-safe set of destination paths already written,
//     used to detect two sources mapping to the same PFS path.
//
// Returns an error if the destination path was already used, the source
// can't be read, or the upload fails. Named return retErr lets deferred
// Close() propagate its error.
func putFileHelper(c *client.APIClient, pfc client.PutFileClient,
	repo, commit, path, source string, recursive, overwrite bool, // destination
	limiter limit.ConcurrencyLimiter,
	split string, targetFileDatums, targetFileBytes, headerRecords uint, // split
	filesPut *gosync.Map) (retErr error) {
	// Guard against two sources resolving to the same destination path:
	// LoadOrStore returns ok=true if the path was already recorded by a
	// concurrent/earlier call, in which case we abort rather than silently
	// writing twice.
	if _, ok := filesPut.LoadOrStore(path, nil); ok {
		return fmt.Errorf("multiple files put with the path %s, aborting, "+
			"some files may already have been put and should be cleaned up with "+
			"delete-file or delete-commit", path)
	}
	// putFile performs the actual upload from an already-open reader,
	// dispatching between plain PutFile, overwrite-style PushFile, and
	// PutFileSplit depending on the flags.
	putFile := func(reader io.ReadSeeker) error {
		if split == "" {
			if overwrite {
				// sync.PushFile implements overwrite semantics (diff-aware push).
				return sync.PushFile(c, pfc, client.NewFile(repo, commit, path), reader)
			}
			_, err := pfc.PutFile(repo, commit, path, reader)
			return err
		}
		// --split was given: map the flag value to a PFS delimiter.
		var delimiter pfsclient.Delimiter
		switch split {
		case "line":
			delimiter = pfsclient.Delimiter_LINE
		case "json":
			delimiter = pfsclient.Delimiter_JSON
		case "sql":
			delimiter = pfsclient.Delimiter_SQL
		case "csv":
			delimiter = pfsclient.Delimiter_CSV
		default:
			return fmt.Errorf("unrecognized delimiter '%s'; only accepts one of "+
				"{json,line,sql,csv}", split)
		}
		_, err := pfc.PutFileSplit(repo, commit, path, delimiter, int64(targetFileDatums), int64(targetFileBytes), int64(headerRecords), overwrite, reader)
		return err
	}
	// Case 1: read data from stdin.
	if source == "-" {
		if recursive {
			return errors.New("cannot set -r and read from stdin (must also set -f or -i)")
		}
		limiter.Acquire()
		defer limiter.Release()
		fmt.Fprintln(os.Stderr, "Reading from stdin.")
		return putFile(os.Stdin)
	}
	// Case 2: the source parses as a URL with a scheme — let the server
	// fetch it directly (PutFileURL), instead of streaming it from here.
	if url, err := url.Parse(source); err == nil && url.Scheme != "" {
		limiter.Acquire()
		defer limiter.Release()
		return pfc.PutFileURL(repo, commit, path, url.String(), recursive, overwrite)
	}
	// Case 3: recursive local directory upload. Walk the tree and recurse
	// once (non-recursively) per regular file; the limiter is acquired
	// inside each recursive call, so the walk itself is not throttled.
	if recursive {
		var eg errgroup.Group
		if err := filepath.Walk(source, func(filePath string, info os.FileInfo, err error) error {
			// file doesn't exist
			if info == nil {
				return fmt.Errorf("%s doesn't exist", filePath)
			}
			if info.IsDir() {
				return nil
			}
			// Mirror the source-relative path under the destination path.
			childDest := filepath.Join(path, strings.TrimPrefix(filePath, source))
			eg.Go(func() error {
				// don't do a second recursive put-file, just put the one file at
				// filePath into childDest, and then this walk loop will go on to the
				// next one
				return putFileHelper(c, pfc, repo, commit, childDest, filePath, false,
					overwrite, limiter, split, targetFileDatums, targetFileBytes,
					headerRecords, filesPut)
			})
			return nil
		}); err != nil {
			return err
		}
		return eg.Wait()
	}
	// Case 4: single local file.
	limiter.Acquire()
	defer limiter.Release()
	f, err := os.Open(source)
	if err != nil {
		return err
	}
	defer func() {
		// Propagate Close errors only if nothing else already failed.
		if err := f.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return putFile(f)
}
func joinPaths(prefix, filePath string) string {
if url, err := url.Parse(filePath); err == nil && url.Scheme != "" {
if url.Scheme == "pfs" {
// pfs paths are of the form pfs://host/repo/branch/path we don't
// want to prefix every file with host/repo so we remove those
splitPath := strings.Split(strings.TrimPrefix(url.Path, "/"), "/")
if len(splitPath) < 3 {
return prefix
}
return filepath.Join(append([]string{prefix}, splitPath[2:]...)...)
}
return filepath.Join(prefix, strings.TrimPrefix(url.Path, "/"))
}
return filepath.Join(prefix, filePath)
}
|
package cmds
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
gosync "sync"
prompt "github.com/c-bata/go-prompt"
"github.com/gogo/protobuf/jsonpb"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/limit"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
"github.com/pachyderm/pachyderm/src/server/cmd/pachctl/shell"
"github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/errutil"
"github.com/pachyderm/pachyderm/src/server/pkg/pager"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
"github.com/pachyderm/pachyderm/src/server/pkg/sync"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
txncmds "github.com/pachyderm/pachyderm/src/server/transaction/cmds"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/sync/errgroup"
)
const (
	// DefaultParallelism is the default parallelism used by 'get file' and 'put file'.
	// It bounds the number of files transferred concurrently (see the
	// --parallelism / -p flag on those commands).
	DefaultParallelism = 10
)
// Cmds returns a slice containing pfs commands.
func Cmds() []*cobra.Command {
var commands []*cobra.Command
raw := false
rawFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
rawFlags.BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
fullTimestamps := false
fullTimestampsFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
fullTimestampsFlags.BoolVar(&fullTimestamps, "full-timestamps", false, "Return absolute timestamps (as opposed to the default, relative timestamps).")
noPager := false
noPagerFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
noPagerFlags.BoolVar(&noPager, "no-pager", false, "Don't pipe output into a pager (i.e. less).")
marshaller := &jsonpb.Marshaler{Indent: " "}
repoDocs := &cobra.Command{
Short: "Docs for repos.",
Long: `Repos, short for repository, are the top level data objects in Pachyderm.
Repos contain version-controlled directories and files. Files can be of any size
or type (e.g. csv, binary, images, etc).`,
}
commands = append(commands, cmdutil.CreateDocsAlias(repoDocs, "repo", " repo$"))
var description string
createRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Create a new repo.",
Long: "Create a new repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
createRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
commands = append(commands, cmdutil.CreateAlias(createRepo, "create repo"))
updateRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Update a repo.",
Long: "Update a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
Update: true,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
updateRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
shell.RegisterCompletionFunc(updateRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(updateRepo, "update repo"))
inspectRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Return info about a repo.",
Long: "Return info about a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
repoInfo, err := c.InspectRepo(args[0])
if err != nil {
return err
}
if repoInfo == nil {
return fmt.Errorf("repo %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, repoInfo)
}
ri := &pretty.PrintableRepoInfo{
RepoInfo: repoInfo,
FullTimestamps: fullTimestamps,
}
return pretty.PrintDetailedRepoInfo(ri)
}),
}
inspectRepo.Flags().AddFlagSet(rawFlags)
inspectRepo.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectRepo, "inspect repo"))
listRepo := &cobra.Command{
Short: "Return all repos.",
Long: "Return all repos.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
repoInfos, err := c.ListRepo()
if err != nil {
return err
}
if raw {
for _, repoInfo := range repoInfos {
if err := marshaller.Marshal(os.Stdout, repoInfo); err != nil {
return err
}
}
return nil
}
header := pretty.RepoHeader
if (len(repoInfos) > 0) && (repoInfos[0].AuthInfo != nil) {
header = pretty.RepoAuthHeader
}
writer := tabwriter.NewWriter(os.Stdout, header)
for _, repoInfo := range repoInfos {
pretty.PrintRepoInfo(writer, repoInfo, fullTimestamps)
}
return writer.Flush()
}),
}
listRepo.Flags().AddFlagSet(rawFlags)
listRepo.Flags().AddFlagSet(fullTimestampsFlags)
commands = append(commands, cmdutil.CreateAlias(listRepo, "list repo"))
var force bool
var all bool
deleteRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Delete a repo.",
Long: "Delete a repo.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
request := &pfsclient.DeleteRepoRequest{
Force: force,
All: all,
}
if len(args) > 0 {
if all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
request.Repo = client.NewRepo(args[0])
} else if !all {
return fmt.Errorf("either a repo name or the --all flag needs to be provided")
}
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.DeleteRepo(c.Ctx(), request)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
deleteRepo.Flags().BoolVarP(&force, "force", "f", false, "remove the repo regardless of errors; use with care")
deleteRepo.Flags().BoolVar(&all, "all", false, "remove all repos")
shell.RegisterCompletionFunc(deleteRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteRepo, "delete repo"))
commitDocs := &cobra.Command{
Short: "Docs for commits.",
Long: `Commits are atomic transactions on the content of a repo.
Creating a commit is a multistep process:
- start a new commit with 'start commit'
- write files to the commit via 'put file'
- finish the new commit with 'finish commit'
Commits that have been started but not finished are NOT durable storage.
Commits become reliable (and immutable) when they are finished.
Commits can be created with another commit as a parent.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(commitDocs, "commit", " commit$"))
var parent string
startCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Start a new commit.",
Long: "Start a new commit with parent-commit as the parent, or start a commit on the given branch; if the branch does not exist, it will be created.",
Example: `# Start a new commit in repo "test" that's not on any branch
$ {{alias}} test
# Start a commit in repo "test" on branch "master"
$ {{alias}} test@master
# Start a commit with "master" as the parent in repo "test", on a new branch "patch"; essentially a fork.
$ {{alias}} test@patch -p master
# Start a commit with XXX as the parent in repo "test", not on any branch
$ {{alias}} test -p XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
var commit *pfsclient.Commit
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
var err error
commit, err = c.PfsAPIClient.StartCommit(
c.Ctx(),
&pfsclient.StartCommitRequest{
Branch: branch.Name,
Parent: client.NewCommit(branch.Repo.Name, parent),
Description: description,
},
)
return err
})
if err == nil {
fmt.Println(commit.ID)
}
return grpcutil.ScrubGRPC(err)
}),
}
startCommit.Flags().StringVarP(&parent, "parent", "p", "", "The parent of the new commit, unneeded if branch is specified and you want to use the previous head of the branch as the parent.")
startCommit.MarkFlagCustom("parent", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
startCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents")
startCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
shell.RegisterCompletionFunc(startCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(startCommit, "start commit"))
finishCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Finish a started commit.",
Long: "Finish a started commit. Commit-id must be a writeable commit.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.FinishCommit(
c.Ctx(),
&pfsclient.FinishCommitRequest{
Commit: commit,
Description: description,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
finishCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents (overwrites any existing commit description)")
finishCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
shell.RegisterCompletionFunc(finishCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(finishCommit, "finish commit"))
inspectCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Return info about a commit.",
Long: "Return info about a commit.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
commitInfo, err := c.InspectCommit(commit.Repo.Name, commit.ID)
if err != nil {
return err
}
if commitInfo == nil {
return fmt.Errorf("commit %s not found", commit.ID)
}
if raw {
return marshaller.Marshal(os.Stdout, commitInfo)
}
ci := &pretty.PrintableCommitInfo{
CommitInfo: commitInfo,
FullTimestamps: fullTimestamps,
}
return pretty.PrintDetailedCommitInfo(ci)
}),
}
inspectCommit.Flags().AddFlagSet(rawFlags)
inspectCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectCommit, "inspect commit"))
var from string
var number int
listCommit := &cobra.Command{
Use: "{{alias}} <repo>[@<branch>]",
Short: "Return all commits on a repo.",
Long: "Return all commits on a repo.",
Example: `
# return commits in repo "foo"
$ {{alias}} foo
# return commits in repo "foo" on branch "master"
$ {{alias}} foo@master
# return the last 20 commits in repo "foo" on branch "master"
$ {{alias}} foo@master -n 20
# return commits in repo "foo" since commit XXX
$ {{alias}} foo@master --from XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
if raw {
return c.ListCommitF(branch.Repo.Name, branch.Name, from, uint64(number), false, func(ci *pfsclient.CommitInfo) error {
return marshaller.Marshal(os.Stdout, ci)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
if err := c.ListCommitF(branch.Repo.Name, branch.Name, from, uint64(number), false, func(ci *pfsclient.CommitInfo) error {
pretty.PrintCommitInfo(writer, ci, fullTimestamps)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listCommit.Flags().StringVarP(&from, "from", "f", "", "list all commits since this commit")
listCommit.Flags().IntVarP(&number, "number", "n", 0, "list only this many commits; if set to zero, list all commits")
listCommit.MarkFlagCustom("from", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
listCommit.Flags().AddFlagSet(rawFlags)
listCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(listCommit, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(listCommit, "list commit"))
printCommitIter := func(commitIter client.CommitInfoIterator) error {
if raw {
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
if err := marshaller.Marshal(os.Stdout, commitInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
pretty.PrintCommitInfo(writer, commitInfo, fullTimestamps)
}
return writer.Flush()
}
var repos cmdutil.RepeatedStringArg
flushCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit> ...",
Short: "Wait for all commits caused by the specified commits to finish and return them.",
Long: "Wait for all commits caused by the specified commits to finish and return them.",
Example: `
# return commits caused by foo@XXX and bar@YYY
$ {{alias}} foo@XXX bar@YYY
# return commits caused by foo@XXX leading to repos bar and baz
$ {{alias}} foo@XXX -r bar -r baz`,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
var toRepos []*pfsclient.Repo
for _, repoName := range repos {
toRepos = append(toRepos, client.NewRepo(repoName))
}
commitIter, err := c.FlushCommit(commits, toRepos)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
flushCommit.Flags().VarP(&repos, "repos", "r", "Wait only for commits leading to a specific set of repos")
flushCommit.MarkFlagCustom("repos", "__pachctl_get_repo")
flushCommit.Flags().AddFlagSet(rawFlags)
flushCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(flushCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(flushCommit, "flush commit"))
var newCommits bool
var pipeline string
subscribeCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch>",
Short: "Print commits as they are created (finished).",
Long: "Print commits as they are created in the specified repo and branch. By default, all existing commits on the specified branch are returned first. A commit is only considered 'created' when it's been finished.",
Example: `
# subscribe to commits in repo "test" on branch "master"
$ {{alias}} test@master
# subscribe to commits in repo "test" on branch "master", but only since commit XXX.
$ {{alias}} test@master --from XXX
# subscribe to commits in repo "test" on branch "master", but only for new commits created from now on.
$ {{alias}} test@master --new`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if newCommits && from != "" {
return fmt.Errorf("--new and --from cannot be used together")
}
if newCommits {
from = branch.Name
}
var prov *pfsclient.CommitProvenance
if pipeline != "" {
pipelineInfo, err := c.InspectPipeline(pipeline)
if err != nil {
return err
}
prov = client.NewCommitProvenance(ppsconsts.SpecRepo, pipeline, pipelineInfo.SpecCommit.ID)
}
commitIter, err := c.SubscribeCommit(branch.Repo.Name, branch.Name, prov, from, pfsclient.CommitState_STARTED)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
subscribeCommit.Flags().StringVar(&from, "from", "", "subscribe to all commits since this commit")
subscribeCommit.Flags().StringVar(&pipeline, "pipeline", "", "subscribe to all commits created by this pipeline")
subscribeCommit.MarkFlagCustom("from", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
subscribeCommit.Flags().BoolVar(&newCommits, "new", false, "subscribe to only new commits created from now on")
subscribeCommit.Flags().AddFlagSet(rawFlags)
subscribeCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(subscribeCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(subscribeCommit, "subscribe commit"))
deleteCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Delete an input commit.",
Long: "Delete an input commit. An input is a commit which is not the output of a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.DeleteCommit(commit.Repo.Name, commit.ID)
})
}),
}
shell.RegisterCompletionFunc(deleteCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteCommit, "delete commit"))
branchDocs := &cobra.Command{
Short: "Docs for branches.",
Long: `A branch in Pachyderm is an alias for a Commit ID.
The branch reference will "float" to always refer to the latest commit on the
branch, known as the HEAD commit. Not all commits must be on a branch and
multiple branches can refer to the same commit.
Any pachctl command that can take a Commit ID, can take a branch name instead.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(branchDocs, "branch", " branch$"))
var branchProvenance cmdutil.RepeatedStringArg
var head string
createBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Create a new branch, or update an existing branch, on a repo.",
Long: "Create a new branch, or update an existing branch, on a repo, starting a commit on the branch will also create it, so there's often no need to call this.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
provenance, err := cmdutil.ParseBranches(branchProvenance)
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.CreateBranch(branch.Repo.Name, branch.Name, head, provenance)
})
}),
}
createBranch.Flags().VarP(&branchProvenance, "provenance", "p", "The provenance for the branch. format: <repo>@<branch-or-commit>")
createBranch.MarkFlagCustom("provenance", "__pachctl_get_repo_commit")
createBranch.Flags().StringVarP(&head, "head", "", "", "The head of the newly created branch.")
createBranch.MarkFlagCustom("head", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
commands = append(commands, cmdutil.CreateAlias(createBranch, "create branch"))
inspectBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch>",
Short: "Return info about a branch.",
Long: "Return info about a branch.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
branchInfo, err := c.InspectBranch(branch.Repo.Name, branch.Name)
if err != nil {
return err
}
if branchInfo == nil {
return fmt.Errorf("branch %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, branchInfo)
}
return pretty.PrintDetailedBranchInfo(branchInfo)
}),
}
inspectBranch.Flags().AddFlagSet(rawFlags)
inspectBranch.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectBranch, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectBranch, "inspect branch"))
listBranch := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Return all branches on a repo.",
Long: "Return all branches on a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branches, err := c.ListBranch(args[0])
if err != nil {
return err
}
if raw {
for _, branch := range branches {
if err := marshaller.Marshal(os.Stdout, branch); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.BranchHeader)
for _, branch := range branches {
pretty.PrintBranch(writer, branch)
}
return writer.Flush()
}),
}
listBranch.Flags().AddFlagSet(rawFlags)
shell.RegisterCompletionFunc(listBranch, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(listBranch, "list branch"))
deleteBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Delete a branch",
Long: "Delete a branch, while leaving the commits intact",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.DeleteBranch(branch.Repo.Name, branch.Name, force)
})
}),
}
deleteBranch.Flags().BoolVarP(&force, "force", "f", false, "remove the branch regardless of errors; use with care")
shell.RegisterCompletionFunc(deleteBranch, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteBranch, "delete branch"))
fileDocs := &cobra.Command{
Short: "Docs for files.",
Long: `Files are the lowest level data objects in Pachyderm.
Files can be of any type (e.g. csv, binary, images, etc) or size and can be
written to started (but not finished) commits with 'put file'. Files can be read
from commits with 'get file'.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(fileDocs, "file", " file$"))
var filePaths []string
var recursive bool
var inputFile string
var parallelism int
var split string
var targetFileDatums uint
var targetFileBytes uint
var headerRecords uint
var putFileCommit bool
var overwrite bool
putFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>[:<path/in/pfs>]",
Short: "Put a file into the filesystem.",
Long: "Put a file into the filesystem. This supports a number of ways to insert data into pfs.",
Example: `
# Put data from stdin as repo/branch/path:
$ echo "data" | {{alias}} repo@branch:/path
# Put data from stdin as repo/branch/path and start / finish a new commit on the branch.
$ echo "data" | {{alias}} -c repo@branch:/path
# Put a file from the local filesystem as repo/branch/path:
$ {{alias}} repo@branch:/path -f file
# Put a file from the local filesystem as repo/branch/file:
$ {{alias}} repo@branch -f file
# Put the contents of a directory as repo/branch/path/dir/file:
$ {{alias}} -r repo@branch:/path -f dir
# Put the contents of a directory as repo/branch/dir/file:
$ {{alias}} -r repo@branch -f dir
# Put the contents of a directory as repo/branch/file, i.e. put files at the top level:
$ {{alias}} -r repo@branch:/ -f dir
# Put the data from a URL as repo/branch/path:
$ {{alias}} repo@branch:/path -f http://host/path
# Put the data from a URL as repo/branch/path:
$ {{alias}} repo@branch -f http://host/path
# Put the data from an S3 bucket as repo/branch/s3_object:
$ {{alias}} repo@branch -r -f s3://my_bucket
# Put several files or URLs that are listed in file.
# Files and URLs should be newline delimited.
$ {{alias}} repo@branch -i file
# Put several files or URLs that are listed at URL.
# NOTE this URL can reference local files, so it could cause you to put sensitive
# files into your Pachyderm cluster.
$ {{alias}} repo@branch -i http://host/path`,
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
defer c.Close()
// load data into pachyderm
pfc, err := c.NewPutFileClient()
if err != nil {
return err
}
defer func() {
if err := pfc.Close(); err != nil && retErr == nil {
retErr = err
}
}()
if putFileCommit {
fmt.Fprintf(os.Stderr, "flag --commit / -c is deprecated; as of 1.7.2, you will get the same behavior without it\n")
}
limiter := limit.New(int(parallelism))
var sources []string
if inputFile != "" {
// User has provided a file listing sources, one per line. Read sources
var r io.Reader
if inputFile == "-" {
r = os.Stdin
} else if url, err := url.Parse(inputFile); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = resp.Body
} else {
inputFile, err := os.Open(inputFile)
if err != nil {
return err
}
defer func() {
if err := inputFile.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = inputFile
}
// scan line by line
scanner := bufio.NewScanner(r)
for scanner.Scan() {
if filePath := scanner.Text(); filePath != "" {
sources = append(sources, filePath)
}
}
} else {
// User has provided a single source
sources = filePaths
}
// Arguments parsed; create putFileHelper and begin copying data
var eg errgroup.Group
filesPut := &gosync.Map{}
for _, source := range sources {
source := source
if file.Path == "" {
// The user has not specified a path so we use source as path.
if source == "-" {
return fmt.Errorf("must specify filename when reading data from stdin")
}
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, joinPaths("", source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) == 1 {
// We have a single source and the user has specified a path,
// we use the path and ignore source (in terms of naming the file).
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, file.Path, source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else {
// We have multiple sources and the user has specified a path,
// we use that path as a prefix for the filepaths.
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, joinPaths(file.Path, source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
}
}
return eg.Wait()
}),
}
putFile.Flags().StringSliceVarP(&filePaths, "file", "f", []string{"-"}, "The file to be put, it can be a local file or a URL.")
putFile.Flags().StringVarP(&inputFile, "input-file", "i", "", "Read filepaths or URLs from a file. If - is used, paths are read from the standard input.")
putFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively put the files in a directory.")
putFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be uploaded in parallel.")
putFile.Flags().StringVar(&split, "split", "", "Split the input file into smaller files, subject to the constraints of --target-file-datums and --target-file-bytes. Permissible values are `line`, `json`, `sql` and `csv`.")
putFile.Flags().UintVar(&targetFileDatums, "target-file-datums", 0, "The upper bound of the number of datums that each file contains, the last file will contain fewer if the datums don't divide evenly; needs to be used with --split.")
putFile.Flags().UintVar(&targetFileBytes, "target-file-bytes", 0, "The target upper bound of the number of bytes that each file contains; needs to be used with --split.")
putFile.Flags().UintVar(&headerRecords, "header-records", 0, "the number of records that will be converted to a PFS 'header', and prepended to future retrievals of any subset of data from PFS; needs to be used with --split=(json|line|csv)")
putFile.Flags().BoolVarP(&putFileCommit, "commit", "c", false, "DEPRECATED: Put file(s) in a new commit.")
putFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to 'put file' within this commit.")
shell.RegisterCompletionFunc(putFile,
func(flag, text string, maxCompletions int64) ([]prompt.Suggest, shell.CacheFunc) {
if flag == "-f" || flag == "--file" || flag == "-i" || flag == "input-file" {
cs, cf := shell.FilesystemCompletion(flag, text, maxCompletions)
return cs, shell.AndCacheFunc(cf, shell.SameFlag(flag))
} else if flag == "" || flag == "-c" || flag == "--commit" || flag == "-o" || flag == "--overwrite" {
cs, cf := shell.FileCompletion(flag, text, maxCompletions)
return cs, shell.AndCacheFunc(cf, shell.SameFlag(flag))
}
return nil, shell.SameFlag(flag)
})
commands = append(commands, cmdutil.CreateAlias(putFile, "put file"))
copyFile := &cobra.Command{
Use: "{{alias}} <src-repo>@<src-branch-or-commit>:<src-path> <dst-repo>@<dst-branch-or-commit>:<dst-path>",
Short: "Copy files between pfs paths.",
Long: "Copy files between pfs paths.",
Run: cmdutil.RunFixedArgs(2, func(args []string) (retErr error) {
srcFile, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
destFile, err := cmdutil.ParseFile(args[1])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
defer c.Close()
return c.CopyFile(
srcFile.Commit.Repo.Name, srcFile.Commit.ID, srcFile.Path,
destFile.Commit.Repo.Name, destFile.Commit.ID, destFile.Path,
overwrite,
)
}),
}
copyFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to 'put file' within this commit.")
shell.RegisterCompletionFunc(copyFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(copyFile, "copy file"))
var outputPath string
getFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Return the contents of a file.",
Long: "Return the contents of a file.",
Example: `
# get file "XXX" on branch "master" in repo "foo"
$ {{alias}} foo@master:XXX
# get file "XXX" in the parent of the current head of branch "master"
# in repo "foo"
$ {{alias}} foo@master^:XXX
# get file "XXX" in the grandparent of the current head of branch "master"
# in repo "foo"
$ {{alias}} foo@master^2:XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if recursive {
if outputPath == "" {
return fmt.Errorf("an output path needs to be specified when using the --recursive flag")
}
puller := sync.NewPuller()
return puller.Pull(c, outputPath, file.Commit.Repo.Name, file.Commit.ID, file.Path, false, false, parallelism, nil, "")
}
var w io.Writer
// If an output path is given, print the output to stdout
if outputPath == "" {
w = os.Stdout
} else {
f, err := os.Create(outputPath)
if err != nil {
return err
}
defer f.Close()
w = f
}
return c.GetFile(file.Commit.Repo.Name, file.Commit.ID, file.Path, 0, 0, w)
}),
}
getFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively download a directory.")
getFile.Flags().StringVarP(&outputPath, "output", "o", "", "The path where data will be downloaded.")
getFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be downloaded in parallel")
shell.RegisterCompletionFunc(getFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(getFile, "get file"))
inspectFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Return info about a file.",
Long: "Return info about a file.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
fileInfo, err := c.InspectFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
if err != nil {
return err
}
if fileInfo == nil {
return fmt.Errorf("file %s not found", file.Path)
}
if raw {
return marshaller.Marshal(os.Stdout, fileInfo)
}
return pretty.PrintDetailedFileInfo(fileInfo)
}),
}
inspectFile.Flags().AddFlagSet(rawFlags)
shell.RegisterCompletionFunc(inspectFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectFile, "inspect file"))
var history string
listFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>[:<path/in/pfs>]",
Short: "Return the files in a directory.",
Long: "Return the files in a directory.",
Example: `
# list top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master
# list files under directory "dir" on branch "master" in repo "foo"
$ {{alias}} foo@master:dir
# list top-level files in the parent commit of the current head of "master"
# in repo "foo"
$ {{alias}} foo@master^
# list top-level files in the grandparent of the current head of "master"
# in repo "foo"
$ {{alias}} foo@master^2
# list the last n versions of top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master --history n
# list all versions of top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master --history all`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
history, err := cmdutil.ParseHistory(history)
if err != nil {
return fmt.Errorf("error parsing history flag: %v", err)
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if raw {
return c.ListFileF(file.Commit.Repo.Name, file.Commit.ID, file.Path, history, func(fi *pfsclient.FileInfo) error {
return marshaller.Marshal(os.Stdout, fi)
})
}
header := pretty.FileHeader
if history != 0 {
header = pretty.FileHeaderWithCommit
}
writer := tabwriter.NewWriter(os.Stdout, header)
if err := c.ListFileF(file.Commit.Repo.Name, file.Commit.ID, file.Path, history, func(fi *pfsclient.FileInfo) error {
pretty.PrintFileInfo(writer, fi, fullTimestamps, history != 0)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listFile.Flags().AddFlagSet(rawFlags)
listFile.Flags().AddFlagSet(fullTimestampsFlags)
listFile.Flags().StringVar(&history, "history", "none", "Return revision history for files.")
shell.RegisterCompletionFunc(listFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(listFile, "list file"))
globFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<pattern>",
Short: "Return files that match a glob pattern in a commit.",
Long: "Return files that match a glob pattern in a commit (that is, match a glob pattern in a repo at the state represented by a commit). Glob patterns are documented [here](https://golang.org/pkg/path/filepath/#Match).",
Example: `
# Return files in repo "foo" on branch "master" that start
# with the character "A". Note how the double quotation marks around the
# parameter are necessary because otherwise your shell might interpret the "*".
$ {{alias}} "foo@master:A*"
# Return files in repo "foo" on branch "master" under directory "data".
$ {{alias}} "foo@master:data/*"`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
fileInfos, err := c.GlobFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
if err != nil {
return err
}
if raw {
for _, fileInfo := range fileInfos {
if err := marshaller.Marshal(os.Stdout, fileInfo); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range fileInfos {
pretty.PrintFileInfo(writer, fileInfo, fullTimestamps, false)
}
return writer.Flush()
}),
}
globFile.Flags().AddFlagSet(rawFlags)
globFile.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(globFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(globFile, "glob file"))
var shallow bool
var nameOnly bool
var diffCmdArg string
diffFile := &cobra.Command{
Use: "{{alias}} <new-repo>@<new-branch-or-commit>:<new-path> [<old-repo>@<old-branch-or-commit>:<old-path>]",
Short: "Return a diff of two file trees.",
Long: "Return a diff of two file trees.",
Example: `
# Return the diff of the file "path" of the repo "foo" between the head of the
# "master" branch and its parent.
$ {{alias}} foo@master:path
# Return the diff between the master branches of repos foo and bar at paths
# path1 and path2, respectively.
$ {{alias}} foo@master:path1 bar@master:path2`,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) error {
newFile, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
oldFile := client.NewFile("", "", "")
if len(args) == 2 {
oldFile, err = cmdutil.ParseFile(args[1])
if err != nil {
return err
}
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return pager.Page(noPager, os.Stdout, func(w io.Writer) (retErr error) {
var writer *tabwriter.Writer
if nameOnly {
writer = tabwriter.NewWriter(w, pretty.DiffFileHeader)
defer func() {
if err := writer.Flush(); err != nil && retErr == nil {
retErr = err
}
}()
}
newFiles, oldFiles, err := c.DiffFile(
newFile.Commit.Repo.Name, newFile.Commit.ID, newFile.Path,
oldFile.Commit.Repo.Name, oldFile.Commit.ID, oldFile.Path,
shallow,
)
if err != nil {
return err
}
diffCmd := diffCommand(diffCmdArg)
return forEachDiffFile(newFiles, oldFiles, func(nFI, oFI *pfsclient.FileInfo) error {
if nameOnly {
if nFI != nil {
pretty.PrintDiffFileInfo(writer, true, nFI, fullTimestamps)
}
if oFI != nil {
pretty.PrintDiffFileInfo(writer, false, oFI, fullTimestamps)
}
return nil
}
nPath, oPath := "/dev/null", "/dev/null"
if nFI != nil {
nPath, err = dlFile(c, nFI.File)
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(nPath); err != nil && retErr == nil {
retErr = err
}
}()
}
if oFI != nil {
oPath, err = dlFile(c, oFI.File)
defer func() {
if err := os.RemoveAll(oPath); err != nil && retErr == nil {
retErr = err
}
}()
}
cmd := exec.Command(diffCmd[0], append(diffCmd[1:], oPath, nPath)...)
cmd.Stdout = w
cmd.Stderr = os.Stderr
// Diff returns exit code 1 when it finds differences
// between the files, so we catch it.
if err := cmd.Run(); err != nil && cmd.ProcessState.ExitCode() != 1 {
return err
}
return nil
})
})
}),
}
diffFile.Flags().BoolVarP(&shallow, "shallow", "s", false, "Don't descend into sub directories.")
diffFile.Flags().BoolVar(&nameOnly, "name-only", false, "Show only the names of changed files.")
diffFile.Flags().StringVar(&diffCmdArg, "diff-command", "", "Use a program other than git to diff files.")
diffFile.Flags().AddFlagSet(fullTimestampsFlags)
diffFile.Flags().AddFlagSet(noPagerFlags)
shell.RegisterCompletionFunc(diffFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(diffFile, "diff file"))
deleteFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Delete a file.",
Long: "Delete a file.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.DeleteFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
}),
}
shell.RegisterCompletionFunc(deleteFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteFile, "delete file"))
objectDocs := &cobra.Command{
Short: "Docs for objects.",
Long: `Objects are content-addressed blobs of data that are directly stored in the backend object store.
Objects are a low-level resource and should not be accessed directly by most users.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(objectDocs, "object", " object$"))
getObject := &cobra.Command{
Use: "{{alias}} <hash>",
Short: "Print the contents of an object.",
Long: "Print the contents of an object.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.GetObject(args[0], os.Stdout)
}),
}
commands = append(commands, cmdutil.CreateAlias(getObject, "get object"))
tagDocs := &cobra.Command{
Short: "Docs for tags.",
Long: `Tags are aliases for objects. Many tags can refer to the same object.
Tags are a low-level resource and should not be accessed directly by most users.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(tagDocs, "tag", " tag$"))
getTag := &cobra.Command{
Use: "{{alias}} <tag>",
Short: "Print the contents of a tag.",
Long: "Print the contents of a tag.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.GetTag(args[0], os.Stdout)
}),
}
commands = append(commands, cmdutil.CreateAlias(getTag, "get tag"))
var fix bool
fsck := &cobra.Command{
Use: "{{alias}}",
Short: "Run a file system consistency check on pfs.",
Long: "Run a file system consistency check on the pachyderm file system, ensuring the correct provenance relationships are satisfied.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
errors := false
if err = c.Fsck(fix, func(resp *pfsclient.FsckResponse) error {
if resp.Error != "" {
errors = true
fmt.Printf("Error: %s\n", resp.Error)
} else {
fmt.Printf("Fix applied: %v", resp.Fix)
}
return nil
}); err != nil {
return err
}
if !errors {
fmt.Println("No errors found.")
}
return nil
}),
}
fsck.Flags().BoolVarP(&fix, "fix", "f", false, "Attempt to fix as many issues as possible.")
commands = append(commands, cmdutil.CreateAlias(fsck, "fsck"))
// Add the mount commands (which aren't available on Windows, so they're in
// their own file)
commands = append(commands, mountCmds()...)
return commands
}
// putFileHelper uploads the data at `source` — a local path, a URL, or "-"
// for stdin — to the PFS location repo/commit/path using the shared put-file
// client pfc. `recursive` walks a local directory and uploads each regular
// file; `overwrite` replaces existing content instead of appending; a
// non-empty `split` chunks the input into multiple PFS files bounded by
// targetFileDatums/targetFileBytes, with headerRecords records promoted to a
// PFS header. `limiter` bounds concurrent uploads, and `filesPut` records
// every destination path so the same path is never written twice within one
// invocation (shared across the recursive calls).
func putFileHelper(c *client.APIClient, pfc client.PutFileClient,
	repo, commit, path, source string, recursive, overwrite bool, // destination
	limiter limit.ConcurrencyLimiter,
	split string, targetFileDatums, targetFileBytes, headerRecords uint, // split
	filesPut *gosync.Map) (retErr error) {
	// Resolve the path, then trim any prefixed '../' to avoid sending bad paths
	// to the server
	path = filepath.Clean(path)
	for strings.HasPrefix(path, "../") {
		path = strings.TrimPrefix(path, "../")
	}
	// Reject duplicate destinations: LoadOrStore reports true when this path
	// was already claimed by an earlier (possibly concurrent) upload.
	if _, ok := filesPut.LoadOrStore(path, nil); ok {
		return fmt.Errorf("multiple files put with the path %s, aborting, "+
			"some files may already have been put and should be cleaned up with "+
			"'delete file' or 'delete commit'", path)
	}
	// putFile performs the actual upload of one reader, honoring the
	// overwrite/split settings captured from the enclosing call.
	putFile := func(reader io.ReadSeeker) error {
		if split == "" {
			pipe, err := isPipe(reader)
			if err != nil {
				return err
			}
			// sync.PushFile needs a seekable stream, so it is only used for
			// overwrites when the reader is not a named pipe.
			if overwrite && !pipe {
				return sync.PushFile(c, pfc, client.NewFile(repo, commit, path), reader)
			}
			if overwrite {
				_, err = pfc.PutFileOverwrite(repo, commit, path, reader, 0)
				return err
			}
			_, err = pfc.PutFile(repo, commit, path, reader)
			return err
		}
		// Map the --split flag value onto the PFS delimiter enum.
		var delimiter pfsclient.Delimiter
		switch split {
		case "line":
			delimiter = pfsclient.Delimiter_LINE
		case "json":
			delimiter = pfsclient.Delimiter_JSON
		case "sql":
			delimiter = pfsclient.Delimiter_SQL
		case "csv":
			delimiter = pfsclient.Delimiter_CSV
		default:
			return fmt.Errorf("unrecognized delimiter '%s'; only accepts one of "+
				"{json,line,sql,csv}", split)
		}
		_, err := pfc.PutFileSplit(repo, commit, path, delimiter, int64(targetFileDatums), int64(targetFileBytes), int64(headerRecords), overwrite, reader)
		return err
	}
	// Source "-": read the file body from stdin (incompatible with -r).
	if source == "-" {
		if recursive {
			return errors.New("cannot set -r and read from stdin (must also set -f or -i)")
		}
		limiter.Acquire()
		defer limiter.Release()
		fmt.Fprintln(os.Stderr, "Reading from stdin.")
		return putFile(os.Stdin)
	}
	// try parsing the filename as a url, if it is one do a PutFileURL
	if url, err := url.Parse(source); err == nil && url.Scheme != "" {
		limiter.Acquire()
		defer limiter.Release()
		return pfc.PutFileURL(repo, commit, path, url.String(), recursive, overwrite)
	}
	// Recursive local upload: walk the tree and re-invoke this helper once
	// (non-recursively) per regular file, preserving the relative layout.
	if recursive {
		var eg errgroup.Group
		if err := filepath.Walk(source, func(filePath string, info os.FileInfo, err error) error {
			// file doesn't exist
			if info == nil {
				return fmt.Errorf("%s doesn't exist", filePath)
			}
			if info.IsDir() {
				return nil
			}
			childDest := filepath.Join(path, strings.TrimPrefix(filePath, source))
			eg.Go(func() error {
				// don't do a second recursive 'put file', just put the one file at
				// filePath into childDest, and then this walk loop will go on to the
				// next one
				return putFileHelper(c, pfc, repo, commit, childDest, filePath, false,
					overwrite, limiter, split, targetFileDatums, targetFileBytes,
					headerRecords, filesPut)
			})
			return nil
		}); err != nil {
			return err
		}
		return eg.Wait()
	}
	// Plain local file: open it and upload under the concurrency limiter.
	limiter.Acquire()
	defer limiter.Release()
	f, err := os.Open(source)
	if err != nil {
		return err
	}
	defer func() {
		// Surface the Close error only if nothing else failed first.
		if err := f.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return putFile(f)
}
func joinPaths(prefix, filePath string) string {
if url, err := url.Parse(filePath); err == nil && url.Scheme != "" {
if url.Scheme == "pfs" {
// pfs paths are of the form pfs://host/repo/branch/path we don't
// want to prefix every file with host/repo so we remove those
splitPath := strings.Split(strings.TrimPrefix(url.Path, "/"), "/")
if len(splitPath) < 3 {
return prefix
}
return filepath.Join(append([]string{prefix}, splitPath[2:]...)...)
}
return filepath.Join(prefix, strings.TrimPrefix(url.Path, "/"))
}
return filepath.Join(prefix, filePath)
}
func isPipe(r io.ReadSeeker) (bool, error) {
file, ok := r.(*os.File)
if !ok {
return false, nil
}
fi, err := file.Stat()
if err != nil {
return false, err
}
return fi.Mode()&os.ModeNamedPipe != 0, nil
}
// dlFile downloads the PFS file f into a temporary file on the local
// filesystem, mirroring the PFS directory layout under os.TempDir() so
// identically-named files from different directories don't collide, and
// returns the local path. The caller is responsible for removing the file.
func dlFile(pachClient *client.APIClient, f *pfsclient.File) (_ string, retErr error) {
	dir := filepath.Join(os.TempDir(), filepath.Dir(f.Path))
	if err := os.MkdirAll(dir, 0777); err != nil {
		return "", err
	}
	// Create the temp file inside dir explicitly with a separator-free
	// pattern: passing a pattern containing path separators (f.Path) to
	// TempFile is rejected by modern Go (os.CreateTemp).
	file, err := ioutil.TempFile(dir, filepath.Base(f.Path)+"_")
	if err != nil {
		return "", err
	}
	defer func() {
		if err := file.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	// Offset/size of 0,0 means "the whole file".
	if err := pachClient.GetFile(f.Commit.Repo.Name, f.Commit.ID, f.Path, 0, 0, file); err != nil {
		return "", err
	}
	return file.Name(), nil
}
// diffCommand returns the argv used to render diffs. An explicit cmdArg
// wins; otherwise git's no-index colored diff is used when git is on the
// PATH, falling back to the plain system `diff`.
func diffCommand(cmdArg string) []string {
	if cmdArg != "" {
		return strings.Fields(cmdArg)
	}
	if _, err := exec.LookPath("git"); err != nil {
		// git is unavailable; use the system diff.
		return []string{"diff"}
	}
	return []string{"git", "-c", "color.ui=always", "--no-pager", "diff", "--no-index"}
}
// forEachDiffFile merge-walks two path-sorted lists of FileInfos and calls f
// once per distinct path. newFile/oldFile is nil when the path exists on
// only one side. Returning errutil.ErrBreak from f stops the walk without
// error.
//
// Fix: the previous switch evaluated newFiles[nI].File.Path in its first
// case even when nI == len(newFiles) (the oI == len(oldFiles) guard
// short-circuits only the other direction), panicking with an
// index-out-of-range whenever newFiles was exhausted before oldFiles. The
// exhaustion checks are now dedicated cases evaluated before any indexing.
func forEachDiffFile(newFiles, oldFiles []*pfsclient.FileInfo, f func(newFile, oldFile *pfsclient.FileInfo) error) error {
	nI, oI := 0, 0
	for nI < len(newFiles) || oI < len(oldFiles) {
		var nFI *pfsclient.FileInfo
		var oFI *pfsclient.FileInfo
		switch {
		case nI == len(newFiles):
			// Only old files remain.
			oFI = oldFiles[oI]
			oI++
		case oI == len(oldFiles):
			// Only new files remain.
			nFI = newFiles[nI]
			nI++
		case newFiles[nI].File.Path < oldFiles[oI].File.Path:
			// Path present only on the new side.
			nFI = newFiles[nI]
			nI++
		case oldFiles[oI].File.Path < newFiles[nI].File.Path:
			// Path present only on the old side.
			oFI = oldFiles[oI]
			oI++
		default:
			// Same path on both sides.
			nFI = newFiles[nI]
			nI++
			oFI = oldFiles[oI]
			oI++
		}
		if err := f(nFI, oFI); err != nil {
			if err == errutil.ErrBreak {
				return nil
			}
			return err
		}
	}
	return nil
}
Fixes a panic in `pachctl diff file` that occurred when one side's file list
was exhausted before the other while walking the diff results.
Fixes #4577.
package cmds
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
gosync "sync"
prompt "github.com/c-bata/go-prompt"
"github.com/gogo/protobuf/jsonpb"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/limit"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/grpcutil"
"github.com/pachyderm/pachyderm/src/server/cmd/pachctl/shell"
"github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/errutil"
"github.com/pachyderm/pachyderm/src/server/pkg/pager"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
"github.com/pachyderm/pachyderm/src/server/pkg/sync"
"github.com/pachyderm/pachyderm/src/server/pkg/tabwriter"
txncmds "github.com/pachyderm/pachyderm/src/server/transaction/cmds"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/sync/errgroup"
)
const (
	// DefaultParallelism is the default maximum number of concurrent file
	// transfers used by 'get file' and 'put file'.
	DefaultParallelism = 10
)
// Cmds returns a slice containing pfs commands.
func Cmds() []*cobra.Command {
var commands []*cobra.Command
raw := false
rawFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
rawFlags.BoolVar(&raw, "raw", false, "disable pretty printing, print raw json")
fullTimestamps := false
fullTimestampsFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
fullTimestampsFlags.BoolVar(&fullTimestamps, "full-timestamps", false, "Return absolute timestamps (as opposed to the default, relative timestamps).")
noPager := false
noPagerFlags := pflag.NewFlagSet("", pflag.ContinueOnError)
noPagerFlags.BoolVar(&noPager, "no-pager", false, "Don't pipe output into a pager (i.e. less).")
marshaller := &jsonpb.Marshaler{Indent: " "}
repoDocs := &cobra.Command{
Short: "Docs for repos.",
Long: `Repos, short for repository, are the top level data objects in Pachyderm.
Repos contain version-controlled directories and files. Files can be of any size
or type (e.g. csv, binary, images, etc).`,
}
commands = append(commands, cmdutil.CreateDocsAlias(repoDocs, "repo", " repo$"))
var description string
createRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Create a new repo.",
Long: "Create a new repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
createRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
commands = append(commands, cmdutil.CreateAlias(createRepo, "create repo"))
updateRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Update a repo.",
Long: "Update a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.CreateRepo(
c.Ctx(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(args[0]),
Description: description,
Update: true,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
updateRepo.Flags().StringVarP(&description, "description", "d", "", "A description of the repo.")
shell.RegisterCompletionFunc(updateRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(updateRepo, "update repo"))
inspectRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Return info about a repo.",
Long: "Return info about a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
repoInfo, err := c.InspectRepo(args[0])
if err != nil {
return err
}
if repoInfo == nil {
return fmt.Errorf("repo %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, repoInfo)
}
ri := &pretty.PrintableRepoInfo{
RepoInfo: repoInfo,
FullTimestamps: fullTimestamps,
}
return pretty.PrintDetailedRepoInfo(ri)
}),
}
inspectRepo.Flags().AddFlagSet(rawFlags)
inspectRepo.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectRepo, "inspect repo"))
listRepo := &cobra.Command{
Short: "Return all repos.",
Long: "Return all repos.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
repoInfos, err := c.ListRepo()
if err != nil {
return err
}
if raw {
for _, repoInfo := range repoInfos {
if err := marshaller.Marshal(os.Stdout, repoInfo); err != nil {
return err
}
}
return nil
}
header := pretty.RepoHeader
if (len(repoInfos) > 0) && (repoInfos[0].AuthInfo != nil) {
header = pretty.RepoAuthHeader
}
writer := tabwriter.NewWriter(os.Stdout, header)
for _, repoInfo := range repoInfos {
pretty.PrintRepoInfo(writer, repoInfo, fullTimestamps)
}
return writer.Flush()
}),
}
listRepo.Flags().AddFlagSet(rawFlags)
listRepo.Flags().AddFlagSet(fullTimestampsFlags)
commands = append(commands, cmdutil.CreateAlias(listRepo, "list repo"))
var force bool
var all bool
deleteRepo := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Delete a repo.",
Long: "Delete a repo.",
Run: cmdutil.RunBoundedArgs(0, 1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
request := &pfsclient.DeleteRepoRequest{
Force: force,
All: all,
}
if len(args) > 0 {
if all {
return fmt.Errorf("cannot use the --all flag with an argument")
}
request.Repo = client.NewRepo(args[0])
} else if !all {
return fmt.Errorf("either a repo name or the --all flag needs to be provided")
}
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.DeleteRepo(c.Ctx(), request)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
deleteRepo.Flags().BoolVarP(&force, "force", "f", false, "remove the repo regardless of errors; use with care")
deleteRepo.Flags().BoolVar(&all, "all", false, "remove all repos")
shell.RegisterCompletionFunc(deleteRepo, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteRepo, "delete repo"))
commitDocs := &cobra.Command{
Short: "Docs for commits.",
Long: `Commits are atomic transactions on the content of a repo.
Creating a commit is a multistep process:
- start a new commit with 'start commit'
- write files to the commit via 'put file'
- finish the new commit with 'finish commit'
Commits that have been started but not finished are NOT durable storage.
Commits become reliable (and immutable) when they are finished.
Commits can be created with another commit as a parent.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(commitDocs, "commit", " commit$"))
var parent string
startCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Start a new commit.",
Long: "Start a new commit with parent-commit as the parent, or start a commit on the given branch; if the branch does not exist, it will be created.",
Example: `# Start a new commit in repo "test" that's not on any branch
$ {{alias}} test
# Start a commit in repo "test" on branch "master"
$ {{alias}} test@master
# Start a commit with "master" as the parent in repo "test", on a new branch "patch"; essentially a fork.
$ {{alias}} test@patch -p master
# Start a commit with XXX as the parent in repo "test", not on any branch
$ {{alias}} test -p XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
var commit *pfsclient.Commit
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
var err error
commit, err = c.PfsAPIClient.StartCommit(
c.Ctx(),
&pfsclient.StartCommitRequest{
Branch: branch.Name,
Parent: client.NewCommit(branch.Repo.Name, parent),
Description: description,
},
)
return err
})
if err == nil {
fmt.Println(commit.ID)
}
return grpcutil.ScrubGRPC(err)
}),
}
startCommit.Flags().StringVarP(&parent, "parent", "p", "", "The parent of the new commit, unneeded if branch is specified and you want to use the previous head of the branch as the parent.")
startCommit.MarkFlagCustom("parent", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
startCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents")
startCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
shell.RegisterCompletionFunc(startCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(startCommit, "start commit"))
finishCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Finish a started commit.",
Long: "Finish a started commit. Commit-id must be a writeable commit.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
err = txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
_, err = c.PfsAPIClient.FinishCommit(
c.Ctx(),
&pfsclient.FinishCommitRequest{
Commit: commit,
Description: description,
},
)
return err
})
return grpcutil.ScrubGRPC(err)
}),
}
finishCommit.Flags().StringVarP(&description, "message", "m", "", "A description of this commit's contents (overwrites any existing commit description)")
finishCommit.Flags().StringVar(&description, "description", "", "A description of this commit's contents (synonym for --message)")
shell.RegisterCompletionFunc(finishCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(finishCommit, "finish commit"))
inspectCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Return info about a commit.",
Long: "Return info about a commit.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
commitInfo, err := c.InspectCommit(commit.Repo.Name, commit.ID)
if err != nil {
return err
}
if commitInfo == nil {
return fmt.Errorf("commit %s not found", commit.ID)
}
if raw {
return marshaller.Marshal(os.Stdout, commitInfo)
}
ci := &pretty.PrintableCommitInfo{
CommitInfo: commitInfo,
FullTimestamps: fullTimestamps,
}
return pretty.PrintDetailedCommitInfo(ci)
}),
}
inspectCommit.Flags().AddFlagSet(rawFlags)
inspectCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectCommit, "inspect commit"))
var from string
var number int
listCommit := &cobra.Command{
Use: "{{alias}} <repo>[@<branch>]",
Short: "Return all commits on a repo.",
Long: "Return all commits on a repo.",
Example: `
# return commits in repo "foo"
$ {{alias}} foo
# return commits in repo "foo" on branch "master"
$ {{alias}} foo@master
# return the last 20 commits in repo "foo" on branch "master"
$ {{alias}} foo@master -n 20
# return commits in repo "foo" since commit XXX
$ {{alias}} foo@master --from XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
if raw {
return c.ListCommitF(branch.Repo.Name, branch.Name, from, uint64(number), false, func(ci *pfsclient.CommitInfo) error {
return marshaller.Marshal(os.Stdout, ci)
})
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
if err := c.ListCommitF(branch.Repo.Name, branch.Name, from, uint64(number), false, func(ci *pfsclient.CommitInfo) error {
pretty.PrintCommitInfo(writer, ci, fullTimestamps)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listCommit.Flags().StringVarP(&from, "from", "f", "", "list all commits since this commit")
listCommit.Flags().IntVarP(&number, "number", "n", 0, "list only this many commits; if set to zero, list all commits")
listCommit.MarkFlagCustom("from", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
listCommit.Flags().AddFlagSet(rawFlags)
listCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(listCommit, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(listCommit, "list commit"))
printCommitIter := func(commitIter client.CommitInfoIterator) error {
if raw {
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
if err := marshaller.Marshal(os.Stdout, commitInfo); err != nil {
return err
}
}
}
writer := tabwriter.NewWriter(os.Stdout, pretty.CommitHeader)
for {
commitInfo, err := commitIter.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
pretty.PrintCommitInfo(writer, commitInfo, fullTimestamps)
}
return writer.Flush()
}
var repos cmdutil.RepeatedStringArg
flushCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit> ...",
Short: "Wait for all commits caused by the specified commits to finish and return them.",
Long: "Wait for all commits caused by the specified commits to finish and return them.",
Example: `
# return commits caused by foo@XXX and bar@YYY
$ {{alias}} foo@XXX bar@YYY
# return commits caused by foo@XXX leading to repos bar and baz
$ {{alias}} foo@XXX -r bar -r baz`,
Run: cmdutil.Run(func(args []string) error {
commits, err := cmdutil.ParseCommits(args)
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
var toRepos []*pfsclient.Repo
for _, repoName := range repos {
toRepos = append(toRepos, client.NewRepo(repoName))
}
commitIter, err := c.FlushCommit(commits, toRepos)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
flushCommit.Flags().VarP(&repos, "repos", "r", "Wait only for commits leading to a specific set of repos")
flushCommit.MarkFlagCustom("repos", "__pachctl_get_repo")
flushCommit.Flags().AddFlagSet(rawFlags)
flushCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(flushCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(flushCommit, "flush commit"))
var newCommits bool
var pipeline string
subscribeCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch>",
Short: "Print commits as they are created (finished).",
Long: "Print commits as they are created in the specified repo and branch. By default, all existing commits on the specified branch are returned first. A commit is only considered 'created' when it's been finished.",
Example: `
# subscribe to commits in repo "test" on branch "master"
$ {{alias}} test@master
# subscribe to commits in repo "test" on branch "master", but only since commit XXX.
$ {{alias}} test@master --from XXX
# subscribe to commits in repo "test" on branch "master", but only for new commits created from now on.
$ {{alias}} test@master --new`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if newCommits && from != "" {
return fmt.Errorf("--new and --from cannot be used together")
}
if newCommits {
from = branch.Name
}
var prov *pfsclient.CommitProvenance
if pipeline != "" {
pipelineInfo, err := c.InspectPipeline(pipeline)
if err != nil {
return err
}
prov = client.NewCommitProvenance(ppsconsts.SpecRepo, pipeline, pipelineInfo.SpecCommit.ID)
}
commitIter, err := c.SubscribeCommit(branch.Repo.Name, branch.Name, prov, from, pfsclient.CommitState_STARTED)
if err != nil {
return err
}
return printCommitIter(commitIter)
}),
}
subscribeCommit.Flags().StringVar(&from, "from", "", "subscribe to all commits since this commit")
subscribeCommit.Flags().StringVar(&pipeline, "pipeline", "", "subscribe to all commits created by this pipeline")
subscribeCommit.MarkFlagCustom("from", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
subscribeCommit.Flags().BoolVar(&newCommits, "new", false, "subscribe to only new commits created from now on")
subscribeCommit.Flags().AddFlagSet(rawFlags)
subscribeCommit.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(subscribeCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(subscribeCommit, "subscribe commit"))
deleteCommit := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Delete an input commit.",
Long: "Delete an input commit. An input is a commit which is not the output of a pipeline.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
commit, err := cmdutil.ParseCommit(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.DeleteCommit(commit.Repo.Name, commit.ID)
})
}),
}
shell.RegisterCompletionFunc(deleteCommit, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteCommit, "delete commit"))
branchDocs := &cobra.Command{
Short: "Docs for branches.",
Long: `A branch in Pachyderm is an alias for a Commit ID.
The branch reference will "float" to always refer to the latest commit on the
branch, known as the HEAD commit. Not all commits must be on a branch and
multiple branches can refer to the same commit.
Any pachctl command that can take a Commit ID, can take a branch name instead.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(branchDocs, "branch", " branch$"))
var branchProvenance cmdutil.RepeatedStringArg
var head string
createBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Create a new branch, or update an existing branch, on a repo.",
Long: "Create a new branch, or update an existing branch, on a repo, starting a commit on the branch will also create it, so there's often no need to call this.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
provenance, err := cmdutil.ParseBranches(branchProvenance)
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.CreateBranch(branch.Repo.Name, branch.Name, head, provenance)
})
}),
}
createBranch.Flags().VarP(&branchProvenance, "provenance", "p", "The provenance for the branch. format: <repo>@<branch-or-commit>")
createBranch.MarkFlagCustom("provenance", "__pachctl_get_repo_commit")
createBranch.Flags().StringVarP(&head, "head", "", "", "The head of the newly created branch.")
createBranch.MarkFlagCustom("head", "__pachctl_get_commit $(__parse_repo ${nouns[0]})")
commands = append(commands, cmdutil.CreateAlias(createBranch, "create branch"))
inspectBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch>",
Short: "Return info about a branch.",
Long: "Return info about a branch.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
branchInfo, err := c.InspectBranch(branch.Repo.Name, branch.Name)
if err != nil {
return err
}
if branchInfo == nil {
return fmt.Errorf("branch %s not found", args[0])
}
if raw {
return marshaller.Marshal(os.Stdout, branchInfo)
}
return pretty.PrintDetailedBranchInfo(branchInfo)
}),
}
inspectBranch.Flags().AddFlagSet(rawFlags)
inspectBranch.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(inspectBranch, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectBranch, "inspect branch"))
listBranch := &cobra.Command{
Use: "{{alias}} <repo>",
Short: "Return all branches on a repo.",
Long: "Return all branches on a repo.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
branches, err := c.ListBranch(args[0])
if err != nil {
return err
}
if raw {
for _, branch := range branches {
if err := marshaller.Marshal(os.Stdout, branch); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.BranchHeader)
for _, branch := range branches {
pretty.PrintBranch(writer, branch)
}
return writer.Flush()
}),
}
listBranch.Flags().AddFlagSet(rawFlags)
shell.RegisterCompletionFunc(listBranch, shell.RepoCompletion)
commands = append(commands, cmdutil.CreateAlias(listBranch, "list branch"))
deleteBranch := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>",
Short: "Delete a branch",
Long: "Delete a branch, while leaving the commits intact",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
branch, err := cmdutil.ParseBranch(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return txncmds.WithActiveTransaction(c, func(c *client.APIClient) error {
return c.DeleteBranch(branch.Repo.Name, branch.Name, force)
})
}),
}
deleteBranch.Flags().BoolVarP(&force, "force", "f", false, "remove the branch regardless of errors; use with care")
shell.RegisterCompletionFunc(deleteBranch, shell.BranchCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteBranch, "delete branch"))
fileDocs := &cobra.Command{
Short: "Docs for files.",
Long: `Files are the lowest level data objects in Pachyderm.
Files can be of any type (e.g. csv, binary, images, etc) or size and can be
written to started (but not finished) commits with 'put file'. Files can be read
from commits with 'get file'.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(fileDocs, "file", " file$"))
var filePaths []string
var recursive bool
var inputFile string
var parallelism int
var split string
var targetFileDatums uint
var targetFileBytes uint
var headerRecords uint
var putFileCommit bool
var overwrite bool
putFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>[:<path/in/pfs>]",
Short: "Put a file into the filesystem.",
Long: "Put a file into the filesystem. This supports a number of ways to insert data into pfs.",
Example: `
# Put data from stdin as repo/branch/path:
$ echo "data" | {{alias}} repo@branch:/path
# Put data from stdin as repo/branch/path and start / finish a new commit on the branch.
$ echo "data" | {{alias}} -c repo@branch:/path
# Put a file from the local filesystem as repo/branch/path:
$ {{alias}} repo@branch:/path -f file
# Put a file from the local filesystem as repo/branch/file:
$ {{alias}} repo@branch -f file
# Put the contents of a directory as repo/branch/path/dir/file:
$ {{alias}} -r repo@branch:/path -f dir
# Put the contents of a directory as repo/branch/dir/file:
$ {{alias}} -r repo@branch -f dir
# Put the contents of a directory as repo/branch/file, i.e. put files at the top level:
$ {{alias}} -r repo@branch:/ -f dir
# Put the data from a URL as repo/branch/path:
$ {{alias}} repo@branch:/path -f http://host/path
# Put the data from a URL as repo/branch/path:
$ {{alias}} repo@branch -f http://host/path
# Put the data from an S3 bucket as repo/branch/s3_object:
$ {{alias}} repo@branch -r -f s3://my_bucket
# Put several files or URLs that are listed in file.
# Files and URLs should be newline delimited.
$ {{alias}} repo@branch -i file
# Put several files or URLs that are listed at URL.
# NOTE this URL can reference local files, so it could cause you to put sensitive
# files into your Pachyderm cluster.
$ {{alias}} repo@branch -i http://host/path`,
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
defer c.Close()
// load data into pachyderm
pfc, err := c.NewPutFileClient()
if err != nil {
return err
}
defer func() {
if err := pfc.Close(); err != nil && retErr == nil {
retErr = err
}
}()
if putFileCommit {
fmt.Fprintf(os.Stderr, "flag --commit / -c is deprecated; as of 1.7.2, you will get the same behavior without it\n")
}
limiter := limit.New(int(parallelism))
var sources []string
if inputFile != "" {
// User has provided a file listing sources, one per line. Read sources
var r io.Reader
if inputFile == "-" {
r = os.Stdin
} else if url, err := url.Parse(inputFile); err == nil && url.Scheme != "" {
resp, err := http.Get(url.String())
if err != nil {
return err
}
defer func() {
if err := resp.Body.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = resp.Body
} else {
inputFile, err := os.Open(inputFile)
if err != nil {
return err
}
defer func() {
if err := inputFile.Close(); err != nil && retErr == nil {
retErr = err
}
}()
r = inputFile
}
// scan line by line
scanner := bufio.NewScanner(r)
for scanner.Scan() {
if filePath := scanner.Text(); filePath != "" {
sources = append(sources, filePath)
}
}
} else {
// User has provided a single source
sources = filePaths
}
// Arguments parsed; create putFileHelper and begin copying data
var eg errgroup.Group
filesPut := &gosync.Map{}
for _, source := range sources {
source := source
if file.Path == "" {
// The user has not specified a path so we use source as path.
if source == "-" {
return fmt.Errorf("must specify filename when reading data from stdin")
}
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, joinPaths("", source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else if len(sources) == 1 {
// We have a single source and the user has specified a path,
// we use the path and ignore source (in terms of naming the file).
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, file.Path, source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
} else {
// We have multiple sources and the user has specified a path,
// we use that path as a prefix for the filepaths.
eg.Go(func() error {
return putFileHelper(c, pfc, file.Commit.Repo.Name, file.Commit.ID, joinPaths(file.Path, source), source, recursive, overwrite, limiter, split, targetFileDatums, targetFileBytes, headerRecords, filesPut)
})
}
}
return eg.Wait()
}),
}
putFile.Flags().StringSliceVarP(&filePaths, "file", "f", []string{"-"}, "The file to be put, it can be a local file or a URL.")
putFile.Flags().StringVarP(&inputFile, "input-file", "i", "", "Read filepaths or URLs from a file. If - is used, paths are read from the standard input.")
putFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively put the files in a directory.")
putFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be uploaded in parallel.")
putFile.Flags().StringVar(&split, "split", "", "Split the input file into smaller files, subject to the constraints of --target-file-datums and --target-file-bytes. Permissible values are `line`, `json`, `sql` and `csv`.")
putFile.Flags().UintVar(&targetFileDatums, "target-file-datums", 0, "The upper bound of the number of datums that each file contains, the last file will contain fewer if the datums don't divide evenly; needs to be used with --split.")
putFile.Flags().UintVar(&targetFileBytes, "target-file-bytes", 0, "The target upper bound of the number of bytes that each file contains; needs to be used with --split.")
putFile.Flags().UintVar(&headerRecords, "header-records", 0, "the number of records that will be converted to a PFS 'header', and prepended to future retrievals of any subset of data from PFS; needs to be used with --split=(json|line|csv)")
putFile.Flags().BoolVarP(&putFileCommit, "commit", "c", false, "DEPRECATED: Put file(s) in a new commit.")
putFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to 'put file' within this commit.")
shell.RegisterCompletionFunc(putFile,
func(flag, text string, maxCompletions int64) ([]prompt.Suggest, shell.CacheFunc) {
if flag == "-f" || flag == "--file" || flag == "-i" || flag == "input-file" {
cs, cf := shell.FilesystemCompletion(flag, text, maxCompletions)
return cs, shell.AndCacheFunc(cf, shell.SameFlag(flag))
} else if flag == "" || flag == "-c" || flag == "--commit" || flag == "-o" || flag == "--overwrite" {
cs, cf := shell.FileCompletion(flag, text, maxCompletions)
return cs, shell.AndCacheFunc(cf, shell.SameFlag(flag))
}
return nil, shell.SameFlag(flag)
})
commands = append(commands, cmdutil.CreateAlias(putFile, "put file"))
copyFile := &cobra.Command{
Use: "{{alias}} <src-repo>@<src-branch-or-commit>:<src-path> <dst-repo>@<dst-branch-or-commit>:<dst-path>",
Short: "Copy files between pfs paths.",
Long: "Copy files between pfs paths.",
Run: cmdutil.RunFixedArgs(2, func(args []string) (retErr error) {
srcFile, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
destFile, err := cmdutil.ParseFile(args[1])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user", client.WithMaxConcurrentStreams(parallelism))
if err != nil {
return err
}
defer c.Close()
return c.CopyFile(
srcFile.Commit.Repo.Name, srcFile.Commit.ID, srcFile.Path,
destFile.Commit.Repo.Name, destFile.Commit.ID, destFile.Path,
overwrite,
)
}),
}
copyFile.Flags().BoolVarP(&overwrite, "overwrite", "o", false, "Overwrite the existing content of the file, either from previous commits or previous calls to 'put file' within this commit.")
shell.RegisterCompletionFunc(copyFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(copyFile, "copy file"))
var outputPath string
getFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Return the contents of a file.",
Long: "Return the contents of a file.",
Example: `
# get file "XXX" on branch "master" in repo "foo"
$ {{alias}} foo@master:XXX
# get file "XXX" in the parent of the current head of branch "master"
# in repo "foo"
$ {{alias}} foo@master^:XXX
# get file "XXX" in the grandparent of the current head of branch "master"
# in repo "foo"
$ {{alias}} foo@master^2:XXX`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if recursive {
if outputPath == "" {
return fmt.Errorf("an output path needs to be specified when using the --recursive flag")
}
puller := sync.NewPuller()
return puller.Pull(c, outputPath, file.Commit.Repo.Name, file.Commit.ID, file.Path, false, false, parallelism, nil, "")
}
var w io.Writer
// If an output path is given, print the output to stdout
if outputPath == "" {
w = os.Stdout
} else {
f, err := os.Create(outputPath)
if err != nil {
return err
}
defer f.Close()
w = f
}
return c.GetFile(file.Commit.Repo.Name, file.Commit.ID, file.Path, 0, 0, w)
}),
}
getFile.Flags().BoolVarP(&recursive, "recursive", "r", false, "Recursively download a directory.")
getFile.Flags().StringVarP(&outputPath, "output", "o", "", "The path where data will be downloaded.")
getFile.Flags().IntVarP(¶llelism, "parallelism", "p", DefaultParallelism, "The maximum number of files that can be downloaded in parallel")
shell.RegisterCompletionFunc(getFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(getFile, "get file"))
inspectFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Return info about a file.",
Long: "Return info about a file.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
fileInfo, err := c.InspectFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
if err != nil {
return err
}
if fileInfo == nil {
return fmt.Errorf("file %s not found", file.Path)
}
if raw {
return marshaller.Marshal(os.Stdout, fileInfo)
}
return pretty.PrintDetailedFileInfo(fileInfo)
}),
}
inspectFile.Flags().AddFlagSet(rawFlags)
shell.RegisterCompletionFunc(inspectFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(inspectFile, "inspect file"))
var history string
listFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>[:<path/in/pfs>]",
Short: "Return the files in a directory.",
Long: "Return the files in a directory.",
Example: `
# list top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master
# list files under directory "dir" on branch "master" in repo "foo"
$ {{alias}} foo@master:dir
# list top-level files in the parent commit of the current head of "master"
# in repo "foo"
$ {{alias}} foo@master^
# list top-level files in the grandparent of the current head of "master"
# in repo "foo"
$ {{alias}} foo@master^2
# list the last n versions of top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master --history n
# list all versions of top-level files on branch "master" in repo "foo"
$ {{alias}} foo@master --history all`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
history, err := cmdutil.ParseHistory(history)
if err != nil {
return fmt.Errorf("error parsing history flag: %v", err)
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
if raw {
return c.ListFileF(file.Commit.Repo.Name, file.Commit.ID, file.Path, history, func(fi *pfsclient.FileInfo) error {
return marshaller.Marshal(os.Stdout, fi)
})
}
header := pretty.FileHeader
if history != 0 {
header = pretty.FileHeaderWithCommit
}
writer := tabwriter.NewWriter(os.Stdout, header)
if err := c.ListFileF(file.Commit.Repo.Name, file.Commit.ID, file.Path, history, func(fi *pfsclient.FileInfo) error {
pretty.PrintFileInfo(writer, fi, fullTimestamps, history != 0)
return nil
}); err != nil {
return err
}
return writer.Flush()
}),
}
listFile.Flags().AddFlagSet(rawFlags)
listFile.Flags().AddFlagSet(fullTimestampsFlags)
listFile.Flags().StringVar(&history, "history", "none", "Return revision history for files.")
shell.RegisterCompletionFunc(listFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(listFile, "list file"))
globFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<pattern>",
Short: "Return files that match a glob pattern in a commit.",
Long: "Return files that match a glob pattern in a commit (that is, match a glob pattern in a repo at the state represented by a commit). Glob patterns are documented [here](https://golang.org/pkg/path/filepath/#Match).",
Example: `
# Return files in repo "foo" on branch "master" that start
# with the character "A". Note how the double quotation marks around the
# parameter are necessary because otherwise your shell might interpret the "*".
$ {{alias}} "foo@master:A*"
# Return files in repo "foo" on branch "master" under directory "data".
$ {{alias}} "foo@master:data/*"`,
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
fileInfos, err := c.GlobFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
if err != nil {
return err
}
if raw {
for _, fileInfo := range fileInfos {
if err := marshaller.Marshal(os.Stdout, fileInfo); err != nil {
return err
}
}
return nil
}
writer := tabwriter.NewWriter(os.Stdout, pretty.FileHeader)
for _, fileInfo := range fileInfos {
pretty.PrintFileInfo(writer, fileInfo, fullTimestamps, false)
}
return writer.Flush()
}),
}
globFile.Flags().AddFlagSet(rawFlags)
globFile.Flags().AddFlagSet(fullTimestampsFlags)
shell.RegisterCompletionFunc(globFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(globFile, "glob file"))
var shallow bool
var nameOnly bool
var diffCmdArg string
diffFile := &cobra.Command{
Use: "{{alias}} <new-repo>@<new-branch-or-commit>:<new-path> [<old-repo>@<old-branch-or-commit>:<old-path>]",
Short: "Return a diff of two file trees.",
Long: "Return a diff of two file trees.",
Example: `
# Return the diff of the file "path" of the repo "foo" between the head of the
# "master" branch and its parent.
$ {{alias}} foo@master:path
# Return the diff between the master branches of repos foo and bar at paths
# path1 and path2, respectively.
$ {{alias}} foo@master:path1 bar@master:path2`,
Run: cmdutil.RunBoundedArgs(1, 2, func(args []string) error {
newFile, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
oldFile := client.NewFile("", "", "")
if len(args) == 2 {
oldFile, err = cmdutil.ParseFile(args[1])
if err != nil {
return err
}
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return pager.Page(noPager, os.Stdout, func(w io.Writer) (retErr error) {
var writer *tabwriter.Writer
if nameOnly {
writer = tabwriter.NewWriter(w, pretty.DiffFileHeader)
defer func() {
if err := writer.Flush(); err != nil && retErr == nil {
retErr = err
}
}()
}
newFiles, oldFiles, err := c.DiffFile(
newFile.Commit.Repo.Name, newFile.Commit.ID, newFile.Path,
oldFile.Commit.Repo.Name, oldFile.Commit.ID, oldFile.Path,
shallow,
)
if err != nil {
return err
}
diffCmd := diffCommand(diffCmdArg)
return forEachDiffFile(newFiles, oldFiles, func(nFI, oFI *pfsclient.FileInfo) error {
if nameOnly {
if nFI != nil {
pretty.PrintDiffFileInfo(writer, true, nFI, fullTimestamps)
}
if oFI != nil {
pretty.PrintDiffFileInfo(writer, false, oFI, fullTimestamps)
}
return nil
}
nPath, oPath := "/dev/null", "/dev/null"
if nFI != nil {
nPath, err = dlFile(c, nFI.File)
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(nPath); err != nil && retErr == nil {
retErr = err
}
}()
}
if oFI != nil {
oPath, err = dlFile(c, oFI.File)
defer func() {
if err := os.RemoveAll(oPath); err != nil && retErr == nil {
retErr = err
}
}()
}
cmd := exec.Command(diffCmd[0], append(diffCmd[1:], oPath, nPath)...)
cmd.Stdout = w
cmd.Stderr = os.Stderr
// Diff returns exit code 1 when it finds differences
// between the files, so we catch it.
if err := cmd.Run(); err != nil && cmd.ProcessState.ExitCode() != 1 {
return err
}
return nil
})
})
}),
}
diffFile.Flags().BoolVarP(&shallow, "shallow", "s", false, "Don't descend into sub directories.")
diffFile.Flags().BoolVar(&nameOnly, "name-only", false, "Show only the names of changed files.")
diffFile.Flags().StringVar(&diffCmdArg, "diff-command", "", "Use a program other than git to diff files.")
diffFile.Flags().AddFlagSet(fullTimestampsFlags)
diffFile.Flags().AddFlagSet(noPagerFlags)
shell.RegisterCompletionFunc(diffFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(diffFile, "diff file"))
deleteFile := &cobra.Command{
Use: "{{alias}} <repo>@<branch-or-commit>:<path/in/pfs>",
Short: "Delete a file.",
Long: "Delete a file.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
file, err := cmdutil.ParseFile(args[0])
if err != nil {
return err
}
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.DeleteFile(file.Commit.Repo.Name, file.Commit.ID, file.Path)
}),
}
shell.RegisterCompletionFunc(deleteFile, shell.FileCompletion)
commands = append(commands, cmdutil.CreateAlias(deleteFile, "delete file"))
objectDocs := &cobra.Command{
Short: "Docs for objects.",
Long: `Objects are content-addressed blobs of data that are directly stored in the backend object store.
Objects are a low-level resource and should not be accessed directly by most users.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(objectDocs, "object", " object$"))
getObject := &cobra.Command{
Use: "{{alias}} <hash>",
Short: "Print the contents of an object.",
Long: "Print the contents of an object.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.GetObject(args[0], os.Stdout)
}),
}
commands = append(commands, cmdutil.CreateAlias(getObject, "get object"))
tagDocs := &cobra.Command{
Short: "Docs for tags.",
Long: `Tags are aliases for objects. Many tags can refer to the same object.
Tags are a low-level resource and should not be accessed directly by most users.`,
}
commands = append(commands, cmdutil.CreateDocsAlias(tagDocs, "tag", " tag$"))
getTag := &cobra.Command{
Use: "{{alias}} <tag>",
Short: "Print the contents of a tag.",
Long: "Print the contents of a tag.",
Run: cmdutil.RunFixedArgs(1, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
return c.GetTag(args[0], os.Stdout)
}),
}
commands = append(commands, cmdutil.CreateAlias(getTag, "get tag"))
var fix bool
fsck := &cobra.Command{
Use: "{{alias}}",
Short: "Run a file system consistency check on pfs.",
Long: "Run a file system consistency check on the pachyderm file system, ensuring the correct provenance relationships are satisfied.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
c, err := client.NewOnUserMachine("user")
if err != nil {
return err
}
defer c.Close()
errors := false
if err = c.Fsck(fix, func(resp *pfsclient.FsckResponse) error {
if resp.Error != "" {
errors = true
fmt.Printf("Error: %s\n", resp.Error)
} else {
fmt.Printf("Fix applied: %v", resp.Fix)
}
return nil
}); err != nil {
return err
}
if !errors {
fmt.Println("No errors found.")
}
return nil
}),
}
fsck.Flags().BoolVarP(&fix, "fix", "f", false, "Attempt to fix as many issues as possible.")
commands = append(commands, cmdutil.CreateAlias(fsck, "fsck"))
// Add the mount commands (which aren't available on Windows, so they're in
// their own file)
commands = append(commands, mountCmds()...)
return commands
}
// putFileHelper uploads data from source (a local path, "-" for stdin, or
// a URL) into PFS at repo@commit:path; with recursive it walks a local
// directory tree. Concurrency is bounded by limiter, and filesPut records
// every destination path so the same path is never written twice in one
// invocation. A non-empty split delimits the data server-side via
// PutFileSplit.
func putFileHelper(c *client.APIClient, pfc client.PutFileClient,
	repo, commit, path, source string, recursive, overwrite bool, // destination
	limiter limit.ConcurrencyLimiter,
	split string, targetFileDatums, targetFileBytes, headerRecords uint, // split
	filesPut *gosync.Map) (retErr error) {
	// Resolve the path, then trim any prefixed '../' to avoid sending bad paths
	// to the server
	path = filepath.Clean(path)
	for strings.HasPrefix(path, "../") {
		path = strings.TrimPrefix(path, "../")
	}
	// Refuse to write the same destination twice: partial work may already
	// have landed and must be cleaned up by the user.
	if _, ok := filesPut.LoadOrStore(path, nil); ok {
		return fmt.Errorf("multiple files put with the path %s, aborting, "+
			"some files may already have been put and should be cleaned up with "+
			"'delete file' or 'delete commit'", path)
	}
	// putFile sends one reader's worth of data using the strategy implied by
	// the overwrite/split flags.
	putFile := func(reader io.ReadSeeker) error {
		if split == "" {
			pipe, err := isPipe(reader)
			if err != nil {
				return err
			}
			// A seekable source can be diffed against existing content.
			if overwrite && !pipe {
				return sync.PushFile(c, pfc, client.NewFile(repo, commit, path), reader)
			}
			if overwrite {
				_, err = pfc.PutFileOverwrite(repo, commit, path, reader, 0)
				return err
			}
			_, err = pfc.PutFile(repo, commit, path, reader)
			return err
		}
		// Map the user-facing split name onto the wire delimiter.
		var delimiter pfsclient.Delimiter
		switch split {
		case "line":
			delimiter = pfsclient.Delimiter_LINE
		case "json":
			delimiter = pfsclient.Delimiter_JSON
		case "sql":
			delimiter = pfsclient.Delimiter_SQL
		case "csv":
			delimiter = pfsclient.Delimiter_CSV
		default:
			return fmt.Errorf("unrecognized delimiter '%s'; only accepts one of "+
				"{json,line,sql,csv}", split)
		}
		_, err := pfc.PutFileSplit(repo, commit, path, delimiter, int64(targetFileDatums), int64(targetFileBytes), int64(headerRecords), overwrite, reader)
		return err
	}
	if source == "-" {
		if recursive {
			return errors.New("cannot set -r and read from stdin (must also set -f or -i)")
		}
		limiter.Acquire()
		defer limiter.Release()
		fmt.Fprintln(os.Stderr, "Reading from stdin.")
		return putFile(os.Stdin)
	}
	// try parsing the filename as a url, if it is one do a PutFileURL
	if url, err := url.Parse(source); err == nil && url.Scheme != "" {
		limiter.Acquire()
		defer limiter.Release()
		return pfc.PutFileURL(repo, commit, path, url.String(), recursive, overwrite)
	}
	if recursive {
		var eg errgroup.Group
		if err := filepath.Walk(source, func(filePath string, info os.FileInfo, err error) error {
			// file doesn't exist
			if info == nil {
				return fmt.Errorf("%s doesn't exist", filePath)
			}
			if info.IsDir() {
				return nil
			}
			childDest := filepath.Join(path, strings.TrimPrefix(filePath, source))
			eg.Go(func() error {
				// don't do a second recursive 'put file', just put the one file at
				// filePath into childDest, and then this walk loop will go on to the
				// next one
				return putFileHelper(c, pfc, repo, commit, childDest, filePath, false,
					overwrite, limiter, split, targetFileDatums, targetFileBytes,
					headerRecords, filesPut)
			})
			return nil
		}); err != nil {
			return err
		}
		return eg.Wait()
	}
	limiter.Acquire()
	defer limiter.Release()
	f, err := os.Open(source)
	if err != nil {
		return err
	}
	defer func() {
		// Surface a Close failure only if nothing else already failed.
		if err := f.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return putFile(f)
}
func joinPaths(prefix, filePath string) string {
if url, err := url.Parse(filePath); err == nil && url.Scheme != "" {
if url.Scheme == "pfs" {
// pfs paths are of the form pfs://host/repo/branch/path we don't
// want to prefix every file with host/repo so we remove those
splitPath := strings.Split(strings.TrimPrefix(url.Path, "/"), "/")
if len(splitPath) < 3 {
return prefix
}
return filepath.Join(append([]string{prefix}, splitPath[2:]...)...)
}
return filepath.Join(prefix, strings.TrimPrefix(url.Path, "/"))
}
return filepath.Join(prefix, filePath)
}
func isPipe(r io.ReadSeeker) (bool, error) {
file, ok := r.(*os.File)
if !ok {
return false, nil
}
fi, err := file.Stat()
if err != nil {
return false, err
}
return fi.Mode()&os.ModeNamedPipe != 0, nil
}
// dlFile downloads PFS file f into a temporary file and returns its path.
// The caller owns the file and must remove it.
func dlFile(pachClient *client.APIClient, f *pfsclient.File) (_ string, retErr error) {
	// f.Path may contain separators; pre-create the matching directory tree
	// under the system temp dir.
	// NOTE(review): TempFile is called with dir "" (= os.TempDir()) and a
	// pattern embedding f.Path, relying on the pattern's separators landing
	// the file inside the directory made above. Newer Go versions reject
	// separators in the pattern — confirm against the toolchain in use.
	if err := os.MkdirAll(filepath.Join(os.TempDir(), filepath.Dir(f.Path)), 0777); err != nil {
		return "", err
	}
	file, err := ioutil.TempFile("", f.Path+"_")
	if err != nil {
		return "", err
	}
	defer func() {
		// Surface a Close failure only if nothing else already failed.
		if err := file.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	if err := pachClient.GetFile(f.Commit.Repo.Name, f.Commit.ID, f.Path, 0, 0, file); err != nil {
		return "", err
	}
	return file.Name(), nil
}
// diffCommand returns the argv used to diff two files. An explicit cmdArg
// wins; otherwise git's colorized, pager-less diff is preferred when git is
// on PATH, falling back to plain diff.
func diffCommand(cmdArg string) []string {
	if cmdArg != "" {
		return strings.Fields(cmdArg)
	}
	if _, err := exec.LookPath("git"); err == nil {
		return []string{"git", "-c", "color.ui=always", "--no-pager", "diff", "--no-index"}
	}
	return []string{"diff"}
}
// forEachDiffFile merge-walks two path-sorted FileInfo slices, invoking f
// once per distinct path with the new and/or old entry (nil when the path
// exists on only one side). f may return errutil.ErrBreak to stop early
// without error.
func forEachDiffFile(newFiles, oldFiles []*pfsclient.FileInfo, f func(newFile, oldFile *pfsclient.FileInfo) error) error {
	nI, oI := 0, 0
	for {
		if nI == len(newFiles) && oI == len(oldFiles) {
			return nil
		}
		var oFI *pfsclient.FileInfo
		var nFI *pfsclient.FileInfo
		switch {
		// New-side path sorts first (or old side exhausted): new-only entry.
		case oI == len(oldFiles) || (nI < len(newFiles) && newFiles[nI].File.Path < oldFiles[oI].File.Path):
			nFI = newFiles[nI]
			nI++
		// Old-side path sorts first (or new side exhausted): old-only entry.
		case nI == len(newFiles) || (oI < len(oldFiles) && oldFiles[oI].File.Path < newFiles[nI].File.Path):
			oFI = oldFiles[oI]
			oI++
		// Same path on both sides: pass both entries.
		case newFiles[nI].File.Path == oldFiles[oI].File.Path:
			nFI = newFiles[nI]
			nI++
			oFI = oldFiles[oI]
			oI++
		}
		if err := f(nFI, oFI); err != nil {
			if err == errutil.ErrBreak {
				return nil
			}
			return err
		}
	}
}
|
package native
import (
"bufio"
"github.com/ziutek/mymysql/mysql"
"io"
)
// pktReader streams the payload of a MySQL wire packet sequence,
// transparently reading the next packet header when the current payload is
// exhausted (a 0xffffff-byte packet signals a continuation). Failures are
// reported by panicking, per this package's convention.
type pktReader struct {
	rd     *bufio.Reader
	seq    *byte // shared connection sequence number, checked and bumped per packet
	remain int   // payload bytes left in the current packet
	last   bool  // current packet is the final one of the sequence
	buf    [8]byte
	ibuf   [3]byte // scratch for the 3-byte length field
}

// newPktReader returns a reader bound to the connection's stream and
// sequence counter.
func (my *Conn) newPktReader() *pktReader {
	return &pktReader{rd: my.rd, seq: &my.seq}
}
// readHeader reads the next 4-byte packet header (3-byte little-endian
// length plus 1-byte sequence number), validates the sequence number, and
// resets remain/last for the new packet.
func (pr *pktReader) readHeader() {
	// Read next packet header
	buf := pr.ibuf[:]
	for {
		n, err := pr.rd.Read(buf)
		if err != nil {
			panic(err)
		}
		buf = buf[n:]
		if len(buf) == 0 {
			break
		}
	}
	pr.remain = int(DecodeU24(pr.ibuf[:]))
	seq, err := pr.rd.ReadByte()
	if err != nil {
		panic(err)
	}
	// Check sequence number
	if *pr.seq != seq {
		panic(mysql.ErrSeq)
	}
	*pr.seq++
	// Last packet? A maximal (0xffffff) payload means a continuation follows.
	pr.last = (pr.remain != 0xffffff)
}
// readFull fills buf completely from the packet stream, crossing packet
// boundaries as needed; panics with io.EOF if the stream ends first.
func (pr *pktReader) readFull(buf []byte) {
	for len(buf) > 0 {
		if pr.remain == 0 {
			if pr.last {
				// No more packets
				panic(io.EOF)
			}
			pr.readHeader()
		}
		// Never read past the current packet's payload.
		n := len(buf)
		if n > pr.remain {
			n = pr.remain
		}
		n, err := pr.rd.Read(buf[:n])
		pr.remain -= n
		if err != nil {
			panic(err)
		}
		buf = buf[n:]
	}
	return
}

// readByte returns the next payload byte, advancing to the next packet if
// necessary; panics with io.EOF at end of stream.
func (pr *pktReader) readByte() byte {
	if pr.remain == 0 {
		if pr.last {
			// No more packets
			panic(io.EOF)
		}
		pr.readHeader()
	}
	b, err := pr.rd.ReadByte()
	if err != nil {
		panic(err)
	}
	pr.remain--
	return b
}
// readAll reads the remainder of the packet sequence into a buffer that is
// regrown per packet, and returns it.
func (pr *pktReader) readAll() (buf []byte) {
	m := 0 // bytes accumulated so far
	for {
		if pr.remain == 0 {
			if pr.last {
				break
			}
			pr.readHeader()
		}
		// Grow the buffer to also hold the rest of the current packet.
		new_buf := make([]byte, m+pr.remain)
		copy(new_buf, buf)
		buf = new_buf
		n, err := pr.rd.Read(buf[m:])
		pr.remain -= n
		m += n
		if err != nil {
			panic(err)
		}
	}
	return
}
// skipBuf is scratch storage for discarding unwanted payload bytes.
// Fix: the size was 4069, a clear typo for the intended 4 KiB (4096).
// NOTE(review): this is shared package state with no locking; connections
// used from different goroutines race on it. The bytes are discarded, so
// the result is unaffected, but it is still a data race under -race.
var skipBuf [4096]byte

// skipAll discards the remainder of the current packet sequence.
func (pr *pktReader) skipAll() {
	for {
		if pr.remain == 0 {
			if pr.last {
				break
			}
			pr.readHeader()
		}
		// Read (and ignore) at most one packet's remaining payload.
		n := len(skipBuf)
		if n > pr.remain {
			n = pr.remain
		}
		n, err := pr.rd.Read(skipBuf[:n])
		pr.remain -= n
		if err != nil {
			panic(err)
		}
	}
}
// skipN discards exactly n payload bytes, crossing packet boundaries;
// panics with io.EOF if the stream ends first. (The loop clamps each read
// to len(skipBuf), so any n works — not only n <= len(skipBuf) as the
// original note claimed.)
func (pr *pktReader) skipN(n int) {
	for n > 0 {
		if pr.remain == 0 {
			if pr.last {
				panic(io.EOF)
			}
			pr.readHeader()
		}
		// Clamp to both the scratch buffer and the packet's remaining payload.
		m := n
		if m > len(skipBuf) {
			m = len(skipBuf)
		}
		if m > pr.remain {
			m = pr.remain
		}
		m, err := pr.rd.Read(skipBuf[:m])
		pr.remain -= m
		n -= m
		if err != nil {
			panic(err)
		}
	}
	return
}
// unreadByte pushes the last byte read back onto the buffered reader and
// restores the payload count.
func (pr *pktReader) unreadByte() {
	if err := pr.rd.UnreadByte(); err != nil {
		panic(err)
	}
	pr.remain++
}

// eof reports whether the whole packet sequence has been consumed.
func (pr *pktReader) eof() bool {
	return pr.remain == 0 && pr.last
}

// checkEof panics with mysql.ErrPktLong if unread payload remains.
func (pr *pktReader) checkEof() {
	if !pr.eof() {
		panic(mysql.ErrPktLong)
	}
}
// pktWriter frames outgoing bytes into MySQL wire packets. to_write is the
// total payload still owed; payloads of 0xffffff bytes or more are split
// into maximal packets followed by continuations. Failures panic.
type pktWriter struct {
	wr       *bufio.Writer
	seq      *byte // shared connection sequence number
	remain   int   // bytes still to write in the current packet
	to_write int   // payload bytes not yet assigned to any packet
	last     bool  // the final packet's header has been written
	buf      [23]byte // scratch for writeByte/writeZeros
	ibuf     [3]byte  // scratch for the 3-byte length field
}

// newPktWriter returns a writer that will frame exactly to_write payload
// bytes on the connection.
func (my *Conn) newPktWriter(to_write int) *pktWriter {
	return &pktWriter{wr: my.wr, seq: &my.seq, to_write: to_write}
}

// writeHeader emits a packet header: 3-byte little-endian length l plus
// the sequence number, which is then incremented.
func (pw *pktWriter) writeHeader(l int) {
	buf := pw.ibuf[:]
	EncodeU24(buf, uint32(l))
	if _, err := pw.wr.Write(buf); err != nil {
		panic(err)
	}
	if err := pw.wr.WriteByte(*pw.seq); err != nil {
		panic(err)
	}
	// Update sequence number
	*pw.seq++
}
// write frames buf into packets, emitting a header whenever the current
// packet's budget is exhausted. Once the full to_write payload has been
// sent it appends a trailing empty packet if required and flushes.
// Writing more than to_write bytes panics.
func (pw *pktWriter) write(buf []byte) {
	if len(buf) == 0 {
		return
	}
	var nn int
	for len(buf) != 0 {
		if pw.remain == 0 {
			if pw.to_write == 0 {
				panic("too many data for write as packet")
			}
			// Start a new packet: maximal size unless the rest fits.
			if pw.to_write >= 0xffffff {
				pw.remain = 0xffffff
			} else {
				pw.remain = pw.to_write
				pw.last = true
			}
			pw.to_write -= pw.remain
			pw.writeHeader(pw.remain)
		}
		// Write at most the current packet's remaining budget.
		nn = len(buf)
		if nn > pw.remain {
			nn = pw.remain
		}
		var err error
		nn, err = pw.wr.Write(buf[0:nn])
		pw.remain -= nn
		if err != nil {
			panic(err)
		}
		buf = buf[nn:]
	}
	if pw.remain+pw.to_write == 0 {
		if !pw.last {
			// Write header for empty packet
			pw.writeHeader(0)
		}
		// Flush bufio buffers
		if err := pw.wr.Flush(); err != nil {
			panic(err)
		}
	}
	return
}
// writeByte frames a single payload byte.
func (pw *pktWriter) writeByte(b byte) {
	pw.buf[0] = b
	pw.write(pw.buf[:1])
}

// writeZeros frames n zero bytes; n must be <= len(pw.buf) (23).
func (pw *pktWriter) writeZeros(n int) {
	buf := pw.buf[:n]
	for i := range buf {
		buf[i] = 0
	}
	pw.write(buf)
}
Don't use a common buffer for multiple goroutines to skip data.
package native
import (
"bufio"
"github.com/ziutek/mymysql/mysql"
"io"
"io/ioutil"
)
// pktReader streams the payload of a MySQL wire packet sequence, reading
// the next packet header when the current payload is exhausted
// (a 0xffffff-byte packet signals a continuation). Failures panic.
type pktReader struct {
	rd     *bufio.Reader
	seq    *byte // shared connection sequence number
	remain int   // payload bytes left in the current packet
	last   bool  // current packet is the final one of the sequence
	buf    [8]byte
	ibuf   [3]byte // scratch for the 3-byte length field
}

// newPktReader returns a reader bound to the connection's stream and
// sequence counter.
func (my *Conn) newPktReader() *pktReader {
	return &pktReader{rd: my.rd, seq: &my.seq}
}
// readHeader reads the next 4-byte packet header (3-byte little-endian
// length plus 1-byte sequence number), validates the sequence number, and
// resets remain/last for the new packet.
func (pr *pktReader) readHeader() {
	// Read next packet header
	buf := pr.ibuf[:]
	for {
		n, err := pr.rd.Read(buf)
		if err != nil {
			panic(err)
		}
		buf = buf[n:]
		if len(buf) == 0 {
			break
		}
	}
	pr.remain = int(DecodeU24(pr.ibuf[:]))
	seq, err := pr.rd.ReadByte()
	if err != nil {
		panic(err)
	}
	// Check sequence number
	if *pr.seq != seq {
		panic(mysql.ErrSeq)
	}
	*pr.seq++
	// Last packet? A maximal (0xffffff) payload means a continuation follows.
	pr.last = (pr.remain != 0xffffff)
}
// readFull fills buf completely from the packet stream, crossing packet
// boundaries as needed; panics with io.EOF if the stream ends first.
func (pr *pktReader) readFull(buf []byte) {
	for len(buf) > 0 {
		if pr.remain == 0 {
			if pr.last {
				// No more packets
				panic(io.EOF)
			}
			pr.readHeader()
		}
		// Never read past the current packet's payload.
		n := len(buf)
		if n > pr.remain {
			n = pr.remain
		}
		n, err := pr.rd.Read(buf[:n])
		pr.remain -= n
		if err != nil {
			panic(err)
		}
		buf = buf[n:]
	}
	return
}

// readByte returns the next payload byte, advancing to the next packet if
// necessary; panics with io.EOF at end of stream.
func (pr *pktReader) readByte() byte {
	if pr.remain == 0 {
		if pr.last {
			// No more packets
			panic(io.EOF)
		}
		pr.readHeader()
	}
	b, err := pr.rd.ReadByte()
	if err != nil {
		panic(err)
	}
	pr.remain--
	return b
}
// readAll reads the remainder of the packet sequence into a buffer that is
// regrown per packet, and returns it.
func (pr *pktReader) readAll() (buf []byte) {
	m := 0 // bytes accumulated so far
	for {
		if pr.remain == 0 {
			if pr.last {
				break
			}
			pr.readHeader()
		}
		// Grow the buffer to also hold the rest of the current packet.
		new_buf := make([]byte, m+pr.remain)
		copy(new_buf, buf)
		buf = new_buf
		n, err := pr.rd.Read(buf[m:])
		pr.remain -= n
		m += n
		if err != nil {
			panic(err)
		}
	}
	return
}
// skipAll discards the remainder of the packet sequence via io.CopyN to
// ioutil.Discard, which needs no shared scratch buffer and is therefore
// safe when connections are used from different goroutines.
func (pr *pktReader) skipAll() {
	for {
		if pr.remain == 0 {
			if pr.last {
				break
			}
			pr.readHeader()
		}
		n, err := io.CopyN(ioutil.Discard, pr.rd, int64(pr.remain))
		pr.remain -= int(n)
		if err != nil {
			panic(err)
		}
	}
	return
}

// skipN discards exactly n payload bytes, crossing packet boundaries;
// panics with io.EOF if the stream ends first.
func (pr *pktReader) skipN(n int) {
	for n > 0 {
		if pr.remain == 0 {
			if pr.last {
				panic(io.EOF)
			}
			pr.readHeader()
		}
		// Discard at most the current packet's remaining payload.
		m := int64(n)
		if n > pr.remain {
			m = int64(pr.remain)
		}
		m, err := io.CopyN(ioutil.Discard, pr.rd, m)
		pr.remain -= int(m)
		n -= int(m)
		if err != nil {
			panic(err)
		}
	}
	return
}
// unreadByte pushes the last byte read back onto the buffered reader and
// restores the payload count.
func (pr *pktReader) unreadByte() {
	if err := pr.rd.UnreadByte(); err != nil {
		panic(err)
	}
	pr.remain++
}

// eof reports whether the whole packet sequence has been consumed.
func (pr *pktReader) eof() bool {
	return pr.remain == 0 && pr.last
}

// checkEof panics with mysql.ErrPktLong if unread payload remains.
func (pr *pktReader) checkEof() {
	if !pr.eof() {
		panic(mysql.ErrPktLong)
	}
}
// pktWriter frames outgoing bytes into MySQL wire packets. to_write is the
// total payload still owed; payloads of 0xffffff bytes or more are split
// into maximal packets followed by continuations. Failures panic.
type pktWriter struct {
	wr       *bufio.Writer
	seq      *byte // shared connection sequence number
	remain   int   // bytes still to write in the current packet
	to_write int   // payload bytes not yet assigned to any packet
	last     bool  // the final packet's header has been written
	buf      [23]byte // scratch for writeByte/writeZeros
	ibuf     [3]byte  // scratch for the 3-byte length field
}

// newPktWriter returns a writer that will frame exactly to_write payload
// bytes on the connection.
func (my *Conn) newPktWriter(to_write int) *pktWriter {
	return &pktWriter{wr: my.wr, seq: &my.seq, to_write: to_write}
}

// writeHeader emits a packet header: 3-byte little-endian length l plus
// the sequence number, which is then incremented.
func (pw *pktWriter) writeHeader(l int) {
	buf := pw.ibuf[:]
	EncodeU24(buf, uint32(l))
	if _, err := pw.wr.Write(buf); err != nil {
		panic(err)
	}
	if err := pw.wr.WriteByte(*pw.seq); err != nil {
		panic(err)
	}
	// Update sequence number
	*pw.seq++
}
// write frames buf into packets, emitting a header whenever the current
// packet's budget is exhausted. Once the full to_write payload has been
// sent it appends a trailing empty packet if required and flushes.
// Writing more than to_write bytes panics.
func (pw *pktWriter) write(buf []byte) {
	if len(buf) == 0 {
		return
	}
	var nn int
	for len(buf) != 0 {
		if pw.remain == 0 {
			if pw.to_write == 0 {
				panic("too many data for write as packet")
			}
			// Start a new packet: maximal size unless the rest fits.
			if pw.to_write >= 0xffffff {
				pw.remain = 0xffffff
			} else {
				pw.remain = pw.to_write
				pw.last = true
			}
			pw.to_write -= pw.remain
			pw.writeHeader(pw.remain)
		}
		// Write at most the current packet's remaining budget.
		nn = len(buf)
		if nn > pw.remain {
			nn = pw.remain
		}
		var err error
		nn, err = pw.wr.Write(buf[0:nn])
		pw.remain -= nn
		if err != nil {
			panic(err)
		}
		buf = buf[nn:]
	}
	if pw.remain+pw.to_write == 0 {
		if !pw.last {
			// Write header for empty packet
			pw.writeHeader(0)
		}
		// Flush bufio buffers
		if err := pw.wr.Flush(); err != nil {
			panic(err)
		}
	}
	return
}
// writeByte frames a single payload byte.
func (pw *pktWriter) writeByte(b byte) {
	pw.buf[0] = b
	pw.write(pw.buf[:1])
}

// writeZeros frames n zero bytes; n must be <= len(pw.buf) (23).
func (pw *pktWriter) writeZeros(n int) {
	buf := pw.buf[:n]
	for i := range buf {
		buf[i] = 0
	}
	pw.write(buf)
}
|
package s3
import (
"bytes"
"io"
"net/http"
"strings"
"fmt"
"crypto/md5"
"encoding/base64"
"github.com/gogo/protobuf/types"
"github.com/pachyderm/pachyderm/src/client"
)
// objectHandler serves S3-style object requests backed by a PFS client.
type objectHandler struct {
	pc *client.APIClient
}

// newObjectHandler builds an objectHandler around pc.
func newObjectHandler(pc *client.APIClient) *objectHandler {
	return &objectHandler{pc: pc}
}
// get serves GET object: resolves the branch head, rejects directory-style
// paths (trailing slash), stats the file for its ETag hash and committed
// timestamp, and streams the content via http.ServeContent (range requests
// work because the reader is seekable).
func (h *objectHandler) get(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	// A branch with no head commit has no content yet.
	if branchInfo.Head == nil {
		noSuchKeyError(w, r)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	fileInfo, err := h.pc.InspectFile(branchInfo.Branch.Repo.Name, branchInfo.Head.ID, file)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	timestamp, err := types.TimestampFromProto(fileInfo.Committed)
	if err != nil {
		internalError(w, r, err)
		return
	}
	w.Header().Set("ETag", fmt.Sprintf("\"%x\"", fileInfo.Hash))
	reader, err := h.pc.GetFileReadSeeker(branchInfo.Branch.Repo.Name, branchInfo.Head.ID, file)
	if err != nil {
		internalError(w, r, err)
		return
	}
	http.ServeContent(w, r, file, timestamp, reader)
}
// put serves PUT object: streams the request body into PFS while hashing
// it, then verifies the optional Content-MD5 digest after the upload. On
// any post-upload failure the just-written file is best-effort deleted.
func (h *objectHandler) put(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	// Optional Content-MD5 header: base64-encoded 16-byte digest.
	expectedHash, ok := r.Header["Content-Md5"]
	var expectedHashBytes []uint8
	if ok && len(expectedHash) == 1 {
		expectedHashBytes, err = base64.StdEncoding.DecodeString(expectedHash[0])
		if err != nil || len(expectedHashBytes) != 16 {
			invalidDigestError(w, r)
			return
		}
	}
	// Hash the body as it streams into PFS; the digest can only be checked
	// after the body has already been consumed by the upload.
	hasher := md5.New()
	reader := io.TeeReader(r.Body, hasher)
	success := false
	_, err = h.pc.PutFileOverwrite(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file, reader, 0)
	if err != nil {
		internalError(w, r, err)
		return
	}
	defer func() {
		// try to clean up the file if an error occurred
		if !success {
			if err = h.pc.DeleteFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file); err != nil {
				requestLogger(r).Errorf("could not cleanup file after an error: %v", err)
			}
		}
	}()
	actualHashBytes := hasher.Sum(nil)
	if expectedHashBytes != nil && !bytes.Equal(expectedHashBytes, actualHashBytes) {
		badDigestError(w, r)
		return
	}
	fileInfo, err := h.pc.InspectFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file)
	if err != nil {
		internalError(w, r, err)
		return
	}
	success = true
	w.Header().Set("ETag", fmt.Sprintf("\"%x\"", fileInfo.Hash))
	w.WriteHeader(http.StatusOK)
}
// del serves DELETE object: resolves the branch head, validates the path,
// removes the file, and answers 204 No Content on success.
func (h *objectHandler) del(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	// A branch with no head commit has nothing to delete.
	if branchInfo.Head == nil {
		noSuchKeyError(w, r)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	if err := h.pc.DeleteFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file); err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
Pull more accurate timestamp
package s3
import (
"bytes"
"io"
"net/http"
"strings"
"fmt"
"crypto/md5"
"encoding/base64"
"github.com/gogo/protobuf/types"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/pfs"
)
// objectHandler serves S3-style object requests backed by a PFS client.
type objectHandler struct {
	pc *client.APIClient
}

// newObjectHandler builds an objectHandler around pc.
func newObjectHandler(pc *client.APIClient) *objectHandler {
	return &objectHandler{pc: pc}
}
// get serves GET object: resolves the branch head, rejects directory-style
// paths, then fetches the newest FileInfo via ListFileHistory with limit 1
// — yielding the file's own committed timestamp — rejects non-regular
// files, and streams the content via http.ServeContent.
func (h *objectHandler) get(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	// A branch with no head commit has no content yet.
	if branchInfo.Head == nil {
		noSuchKeyError(w, r)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	fileInfos, err := h.pc.ListFileHistory(branchInfo.Branch.Repo.Name, branchInfo.Head.ID, file, 1)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	// Only a single regular file can be served as an S3 object.
	if len(fileInfos) != 1 || fileInfos[0].FileType != pfs.FileType_FILE {
		noSuchKeyError(w, r)
		return
	}
	timestamp, err := types.TimestampFromProto(fileInfos[0].Committed)
	if err != nil {
		internalError(w, r, err)
		return
	}
	w.Header().Set("ETag", fmt.Sprintf("\"%x\"", fileInfos[0].Hash))
	reader, err := h.pc.GetFileReadSeeker(branchInfo.Branch.Repo.Name, branchInfo.Head.ID, file)
	if err != nil {
		internalError(w, r, err)
		return
	}
	http.ServeContent(w, r, file, timestamp, reader)
}
// put serves PUT object: streams the request body into PFS while hashing
// it, then verifies the optional Content-MD5 digest after the upload. On
// any post-upload failure the just-written file is best-effort deleted.
func (h *objectHandler) put(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	// Optional Content-MD5 header: base64-encoded 16-byte digest.
	expectedHash, ok := r.Header["Content-Md5"]
	var expectedHashBytes []uint8
	if ok && len(expectedHash) == 1 {
		expectedHashBytes, err = base64.StdEncoding.DecodeString(expectedHash[0])
		if err != nil || len(expectedHashBytes) != 16 {
			invalidDigestError(w, r)
			return
		}
	}
	// Hash the body as it streams into PFS; the digest can only be checked
	// after the body has already been consumed by the upload.
	hasher := md5.New()
	reader := io.TeeReader(r.Body, hasher)
	success := false
	_, err = h.pc.PutFileOverwrite(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file, reader, 0)
	if err != nil {
		internalError(w, r, err)
		return
	}
	defer func() {
		// try to clean up the file if an error occurred
		if !success {
			if err = h.pc.DeleteFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file); err != nil {
				requestLogger(r).Errorf("could not cleanup file after an error: %v", err)
			}
		}
	}()
	actualHashBytes := hasher.Sum(nil)
	if expectedHashBytes != nil && !bytes.Equal(expectedHashBytes, actualHashBytes) {
		badDigestError(w, r)
		return
	}
	fileInfo, err := h.pc.InspectFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file)
	if err != nil {
		internalError(w, r, err)
		return
	}
	success = true
	w.Header().Set("ETag", fmt.Sprintf("\"%x\"", fileInfo.Hash))
	w.WriteHeader(http.StatusOK)
}
// del serves DELETE object: resolves the branch head, validates the path,
// removes the file, and answers 204 No Content on success.
func (h *objectHandler) del(w http.ResponseWriter, r *http.Request) {
	repo, branch, file := objectArgs(w, r)
	branchInfo, err := h.pc.InspectBranch(repo, branch)
	if err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	// A branch with no head commit has nothing to delete.
	if branchInfo.Head == nil {
		noSuchKeyError(w, r)
		return
	}
	if strings.HasSuffix(file, "/") {
		invalidFilePathError(w, r)
		return
	}
	if err := h.pc.DeleteFile(branchInfo.Branch.Repo.Name, branchInfo.Branch.Name, file); err != nil {
		maybeNotFoundError(w, r, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
|
package neko
// #include "redirect.h"
import "C"
// redirectFunc holds the callback that receives text captured from C.
// NOTE(review): it is not nil-checked — a callback arriving before
// assignment would panic; confirm initialization order.
var redirectFunc Printer

// goRedirect is the cgo-exported bridge called from C with a captured
// buffer; it forwards the text plus the opaque param to redirectFunc.
//export goRedirect
func goRedirect(data *C.char, size C.int, param interface{}) {
	redirectFunc(C.GoStringN(data, size), param)
}
redirect.h is not needed in redirect.go
package neko
import "C"
// redirectFunc holds the callback that receives text captured from C.
// NOTE(review): it is not nil-checked — a callback arriving before
// assignment would panic; confirm initialization order.
var redirectFunc Printer

// goRedirect is the cgo-exported bridge called from C with a captured
// buffer; it forwards the text plus the opaque param to redirectFunc.
//export goRedirect
func goRedirect(data *C.char, size C.int, param interface{}) {
	redirectFunc(C.GoStringN(data, size), param)
}
|
package yorm
import (
"database/sql"
"errors"
"reflect"
"strings"
"sync"
"time"
)
// tableSetter caches per-struct table metadata: the table name, its
// columns, the primary-key column, and reusable scan destinations.
type tableSetter struct {
	table    string
	dests    []interface{} // scan targets, one per column, reused across scans
	columns  []*column
	pkColumn *column
}

var (
	//TimeType time's reflect type.
	TimeType = reflect.TypeOf(time.Time{})
	// BoolType, Int64Type and StringType are cached reflect types.
	BoolType   = reflect.TypeOf(true)
	Int64Type  = reflect.TypeOf(int64(0))
	StringType = reflect.TypeOf("")
	// tableMap maps one struct pointer value to its cached table setter.
	tableMap = map[reflect.Value]*tableSetter{}
	// tableRWLock guards tableMap.
	tableRWLock sync.RWMutex
)
// newTableSetter returns the cached tableSetter for the struct pointer value
// ri, building and caching one on first use.
//
// ri must be a non-nil pointer whose pointee structToTable can map to at
// least one column; otherwise ErrNonPtr / ErrNotSupported is returned.
//
// Fix: the cache used to be read without holding tableRWLock, which is a
// data race against the locked write below (a concurrent map read and map
// write can crash the Go runtime). The fast path now takes the read lock.
func newTableSetter(ri reflect.Value) (*tableSetter, error) {
	tableRWLock.RLock()
	q, ok := tableMap[ri]
	tableRWLock.RUnlock()
	if ok {
		// if t, ok := ri.Interface().(YormTableStruct); ok {
		// 	returnValue := *q
		// 	returnValue.table = t.YormTableName()
		// 	return &returnValue, nil
		// }
		return q, nil
	}
	tableRWLock.Lock()
	defer tableRWLock.Unlock()
	// Re-check: another goroutine may have filled the entry while we were
	// waiting for the write lock.
	if q, ok := tableMap[ri]; ok {
		return q, nil
	}
	if ri.Kind() != reflect.Ptr {
		return nil, ErrNonPtr
	}
	if ri.IsNil() {
		return nil, ErrNotSupported
	}
	q = new(tableSetter)
	table, cs := structToTable(reflect.Indirect(ri).Interface())
	if len(cs) == 0 {
		return nil, ErrNotSupported
	}
	q.pkColumn, _ = findPkColumn(cs) // a primary key is optional here; the error is deliberately ignored
	q.table = table
	q.columns = cs
	q.dests = make([]interface{}, len(cs))
	for k, v := range cs {
		q.dests[k] = newPtrInterface(v.typ)
	}
	tableMap[ri] = q
	return q, nil
}
// findPkColumn selects the primary-key column out of cs.
//
// Rules:
//   - at most one column may be flagged isPK; a second flagged column
//     yields ErrDuplicatePkColumn together with the first match;
//   - if none is flagged but a column named "id" (case-insensitive) exists,
//     that column is promoted to an auto-increment primary key;
//   - otherwise ErrNonePkColumn is returned.
func findPkColumn(cs []*column) (*column, error) {
	var pk, byName *column
	for _, col := range cs {
		if strings.ToLower(col.name) == "id" {
			byName = col
		}
		if !col.isPK {
			continue
		}
		if pk != nil {
			return pk, ErrDuplicatePkColumn
		}
		pk = col
	}
	switch {
	case pk != nil:
		return pk, nil
	case byName != nil:
		// Promote the "id" column to an auto primary key.
		byName.isPK = true
		byName.isAuto = true
		return byName, nil
	default:
		return nil, ErrNonePkColumn
	}
}
// newPtrInterface allocates a fresh sql.Null* scan destination suitable for
// a struct field of type t: bool and all integer kinds go through
// NullInt64, strings (and time.Time, which is scanned as text) through
// NullString, floats through NullFloat64. Unsupported kinds yield nil.
func newPtrInterface(t reflect.Type) interface{} {
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return new(sql.NullInt64)
	case reflect.String:
		return new(sql.NullString)
	case reflect.Float32, reflect.Float64:
		return new(sql.NullFloat64)
	case reflect.Struct:
		if t == TimeType {
			return new(sql.NullString)
		}
	}
	return nil
}
// scanValue scans the current row of sc into q.dests, then copies each
// destination into the matching field of the struct value st. Per-field
// conversion failures are intentionally skipped (best effort); only Scan
// itself can fail the call.
func scanValue(sc sqlScanner, q *tableSetter, st reflect.Value) error {
	if err := sc.Scan(q.dests...); err != nil {
		return err
	}
	for i, col := range q.columns {
		// Best effort: a field that cannot be converted keeps its zero value.
		_ = setValue(st.Field(col.fieldNum), q.dests[i])
	}
	return nil
}
// setValue copies a scanned sql.Null* destination fi into the struct field fv.
//
// NULL database values leave the field at its zero value and return an
// error so the caller can tell the column was NULL. time.Time fields arrive
// as strings and are parsed with the date-only layout (length 10) or the
// datetime layout (length 19).
//
// Fixes over the original:
//   - unsigned integer fields used fv.SetInt, which panics on a Uint kind;
//     they now use fv.SetUint;
//   - the time branch checked timeTime.IsZero() instead of the parse error,
//     so malformed values returned a nil error and a legitimately zero time
//     was silently skipped; the parse error itself is now checked.
func setValue(fv reflect.Value, fi interface{}) error {
	switch fv.Type().Kind() {
	case reflect.Bool:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetBool(false)
			return errors.New("sqlValue is invalid")
		}
		fv.SetBool(nv.Int64 > 0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetInt(0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetInt(nv.Int64)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetUint(0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetUint(uint64(nv.Int64))
	case reflect.String:
		nv := *(fi.(*sql.NullString))
		if !nv.Valid {
			fv.SetString("")
			return errors.New("sqlValue is invalid")
		}
		fv.SetString(nv.String)
	case reflect.Float32, reflect.Float64:
		nv := *(fi.(*sql.NullFloat64))
		if !nv.Valid {
			fv.SetFloat(0.0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetFloat(nv.Float64)
	case reflect.Struct:
		if fv.Type() != TimeType {
			break
		}
		nv := *(fi.(*sql.NullString))
		if !nv.Valid {
			return errors.New("sqlValue is invalid")
		}
		// Choose the layout by string length; other lengths leave layout
		// empty and fail the parse below.
		var layout string
		switch len(nv.String) {
		case 10:
			layout = shortSimpleTimeFormat
		case 19:
			layout = longSimpleTimeFormat
		}
		parsed, err := time.ParseInLocation(layout, nv.String, time.Local)
		if err != nil {
			return err
		}
		fv.Set(reflect.ValueOf(parsed))
	}
	return nil
}
Commit note: possible bug — tableMap is read without holding tableRWLock before the locked section in newTableSetter, which is a data race against the locked write.
package yorm
import (
"database/sql"
"errors"
"reflect"
"strings"
"sync"
"time"
)
// tableSetter caches the per-struct metadata needed to scan query results
// into a Go struct: the mapped table name, reusable Scan destinations, the
// ordered column descriptors, and the resolved primary-key column.
type tableSetter struct {
	table    string        // database table name derived from the struct
	dests    []interface{} // reusable sql Scan targets, index-aligned with columns
	columns  []*column     // column metadata in scan order
	pkColumn *column       // primary-key column; may be nil (findPkColumn's error is ignored by newTableSetter)
}
var (
	//TimeType time's reflect type.
	TimeType = reflect.TypeOf(time.Time{})
	// BoolType, Int64Type and StringType are the reflect types used when
	// matching struct fields against scan destinations.
	BoolType   = reflect.TypeOf(true)
	Int64Type  = reflect.TypeOf(int64(0))
	StringType = reflect.TypeOf("")
	// one struct reflect to a table query setter
	// (cache of tableSetter instances keyed by the struct pointer value)
	tableMap = map[reflect.Value]*tableSetter{}
	//table lock
	// tableRWLock guards tableMap.
	tableRWLock sync.RWMutex
)
// newTableSetter returns the cached tableSetter for the struct pointer value
// ri, building and caching one on first use.
//
// ri must be a non-nil pointer whose pointee structToTable can map to at
// least one column; otherwise ErrNonPtr / ErrNotSupported is returned.
//
// Fix (resolves the "todo maybe a bug here" note): the cache used to be
// read without holding tableRWLock, which is a data race against the locked
// write below (a concurrent map read and map write can crash the Go
// runtime). The fast path now takes the read lock.
func newTableSetter(ri reflect.Value) (*tableSetter, error) {
	tableRWLock.RLock()
	q, ok := tableMap[ri]
	tableRWLock.RUnlock()
	if ok {
		// if t, ok := ri.Interface().(YormTableStruct); ok {
		// 	returnValue := *q
		// 	returnValue.table = t.YormTableName()
		// 	return &returnValue, nil
		// }
		return q, nil
	}
	tableRWLock.Lock()
	defer tableRWLock.Unlock()
	// Re-check: another goroutine may have filled the entry while we were
	// waiting for the write lock.
	if q, ok := tableMap[ri]; ok {
		return q, nil
	}
	if ri.Kind() != reflect.Ptr {
		return nil, ErrNonPtr
	}
	if ri.IsNil() {
		return nil, ErrNotSupported
	}
	q = new(tableSetter)
	table, cs := structToTable(reflect.Indirect(ri).Interface())
	if len(cs) == 0 {
		return nil, ErrNotSupported
	}
	q.pkColumn, _ = findPkColumn(cs) // a primary key is optional here; the error is deliberately ignored
	q.table = table
	q.columns = cs
	q.dests = make([]interface{}, len(cs))
	for k, v := range cs {
		q.dests[k] = newPtrInterface(v.typ)
	}
	tableMap[ri] = q
	return q, nil
}
// findPkColumn selects the primary-key column out of cs.
//
// Rules:
//   - at most one column may be flagged isPK; a second flagged column
//     yields ErrDuplicatePkColumn together with the first match;
//   - if none is flagged but a column named "id" (case-insensitive) exists,
//     that column is promoted to an auto-increment primary key;
//   - otherwise ErrNonePkColumn is returned.
func findPkColumn(cs []*column) (*column, error) {
	var pk, byName *column
	for _, col := range cs {
		if strings.ToLower(col.name) == "id" {
			byName = col
		}
		if !col.isPK {
			continue
		}
		if pk != nil {
			return pk, ErrDuplicatePkColumn
		}
		pk = col
	}
	switch {
	case pk != nil:
		return pk, nil
	case byName != nil:
		// Promote the "id" column to an auto primary key.
		byName.isPK = true
		byName.isAuto = true
		return byName, nil
	default:
		return nil, ErrNonePkColumn
	}
}
// newPtrInterface allocates a fresh sql.Null* scan destination suitable for
// a struct field of type t: bool and all integer kinds go through
// NullInt64, strings (and time.Time, which is scanned as text) through
// NullString, floats through NullFloat64. Unsupported kinds yield nil.
func newPtrInterface(t reflect.Type) interface{} {
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return new(sql.NullInt64)
	case reflect.String:
		return new(sql.NullString)
	case reflect.Float32, reflect.Float64:
		return new(sql.NullFloat64)
	case reflect.Struct:
		if t == TimeType {
			return new(sql.NullString)
		}
	}
	return nil
}
// scanValue scans the current row of sc into q.dests, then copies each
// destination into the matching field of the struct value st. Per-field
// conversion failures are intentionally skipped (best effort); only Scan
// itself can fail the call.
func scanValue(sc sqlScanner, q *tableSetter, st reflect.Value) error {
	if err := sc.Scan(q.dests...); err != nil {
		return err
	}
	for i, col := range q.columns {
		// Best effort: a field that cannot be converted keeps its zero value.
		_ = setValue(st.Field(col.fieldNum), q.dests[i])
	}
	return nil
}
// setValue copies a scanned sql.Null* destination fi into the struct field fv.
//
// NULL database values leave the field at its zero value and return an
// error so the caller can tell the column was NULL. time.Time fields arrive
// as strings and are parsed with the date-only layout (length 10) or the
// datetime layout (length 19).
//
// Fixes over the original:
//   - unsigned integer fields used fv.SetInt, which panics on a Uint kind;
//     they now use fv.SetUint;
//   - the time branch checked timeTime.IsZero() instead of the parse error,
//     so malformed values returned a nil error and a legitimately zero time
//     was silently skipped; the parse error itself is now checked.
func setValue(fv reflect.Value, fi interface{}) error {
	switch fv.Type().Kind() {
	case reflect.Bool:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetBool(false)
			return errors.New("sqlValue is invalid")
		}
		fv.SetBool(nv.Int64 > 0)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetInt(0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetInt(nv.Int64)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		nv := *(fi.(*sql.NullInt64))
		if !nv.Valid {
			fv.SetUint(0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetUint(uint64(nv.Int64))
	case reflect.String:
		nv := *(fi.(*sql.NullString))
		if !nv.Valid {
			fv.SetString("")
			return errors.New("sqlValue is invalid")
		}
		fv.SetString(nv.String)
	case reflect.Float32, reflect.Float64:
		nv := *(fi.(*sql.NullFloat64))
		if !nv.Valid {
			fv.SetFloat(0.0)
			return errors.New("sqlValue is invalid")
		}
		fv.SetFloat(nv.Float64)
	case reflect.Struct:
		if fv.Type() != TimeType {
			break
		}
		nv := *(fi.(*sql.NullString))
		if !nv.Valid {
			return errors.New("sqlValue is invalid")
		}
		// Choose the layout by string length; other lengths leave layout
		// empty and fail the parse below.
		var layout string
		switch len(nv.String) {
		case 10:
			layout = shortSimpleTimeFormat
		case 19:
			layout = longSimpleTimeFormat
		}
		parsed, err := time.ParseInLocation(layout, nv.String, time.Local)
		if err != nil {
			return err
		}
		fv.Set(reflect.ValueOf(parsed))
	}
	return nil
}
|
// Copyright 2018 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nftables
import (
"fmt"
"github.com/mdlayher/netlink"
"golang.org/x/sys/unix"
)
// tableHeaderType is the netlink header type announcing a new table
// (NFNL_SUBSYS_NFTABLES subsystem, NFT_MSG_NEWTABLE message).
var tableHeaderType = netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_NEWTABLE)

// TableFamily specifies the address family for this table.
type TableFamily byte

// Possible TableFamily values.
const (
	TableFamilyIPv4   TableFamily = unix.AF_INET
	TableFamilyIPv6   TableFamily = unix.AF_INET6
	TableFamilyBridge TableFamily = unix.AF_BRIDGE
)
// A Table contains Chains. See also
// https://wiki.nftables.org/wiki-nftables/index.php/Configuring_tables
type Table struct {
	Name   string      // NFTA_TABLE_NAME
	Use    uint32      // NFTA_TABLE_USE (Number of chains in table)
	Flags  uint32      // NFTA_TABLE_FLAGS
	Family TableFamily // address family (AF_INET, AF_INET6, AF_BRIDGE)
}
// DelTable deletes a specific table, along with all chains/rules it contains.
// The message is queued on the connection; it is not sent here.
func (cc *Conn) DelTable(t *Table) {
	attrs := []netlink.Attribute{
		{Type: unix.NFTA_TABLE_NAME, Data: []byte(t.Name + "\x00")},
		{Type: unix.NFTA_TABLE_FLAGS, Data: []byte{0, 0, 0, 0}},
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_DELTABLE),
			Flags: netlink.Request | netlink.Acknowledge,
		},
		Data: append(extraHeader(uint8(t.Family), 0), cc.marshalAttr(attrs)...),
	}
	cc.messages = append(cc.messages, msg)
}
// AddTable adds the specified Table. See also
// https://wiki.nftables.org/wiki-nftables/index.php/Configuring_tables
// The message is queued on the connection; it is not sent here.
func (cc *Conn) AddTable(t *Table) *Table {
	attrs := []netlink.Attribute{
		{Type: unix.NFTA_TABLE_NAME, Data: []byte(t.Name + "\x00")},
		{Type: unix.NFTA_TABLE_FLAGS, Data: []byte{0, 0, 0, 0}},
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_NEWTABLE),
			Flags: netlink.Request | netlink.Acknowledge | netlink.Create,
		},
		Data: append(extraHeader(uint8(t.Family), 0), cc.marshalAttr(attrs)...),
	}
	cc.messages = append(cc.messages, msg)
	return t
}
// ListTables returns currently configured tables in the kernel by issuing a
// GETTABLE dump over all address families and decoding each reply.
func (cc *Conn) ListTables() ([]*Table, error) {
	conn, err := cc.dialNetlink()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	req := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_GETTABLE),
			Flags: netlink.Request | netlink.Dump,
		},
		Data: extraHeader(uint8(unix.AF_UNSPEC), 0),
	}
	msgs, err := conn.Execute(req)
	if err != nil {
		return nil, err
	}

	var tables []*Table
	for _, m := range msgs {
		t, decodeErr := tableFromMsg(m)
		if decodeErr != nil {
			return nil, decodeErr
		}
		tables = append(tables, t)
	}
	return tables, nil
}
// tableFromMsg decodes a single NEWTABLE netlink message into a Table.
// The first byte of the payload is the address family; attributes follow
// after the 4-byte nfgenmsg header.
func tableFromMsg(msg netlink.Message) (*Table, error) {
	if msg.Header.Type != tableHeaderType {
		return nil, fmt.Errorf("unexpected header type: got %v, want %v", msg.Header.Type, tableHeaderType)
	}
	t := Table{Family: TableFamily(msg.Data[0])}
	ad, err := netlink.NewAttributeDecoder(msg.Data[4:])
	if err != nil {
		return nil, err
	}
	for ad.Next() {
		switch ad.Type() {
		case unix.NFTA_TABLE_NAME:
			t.Name = ad.String()
		case unix.NFTA_TABLE_USE:
			t.Use = ad.Uint32()
		case unix.NFTA_TABLE_FLAGS:
			t.Flags = ad.Uint32()
		}
	}
	return &t, nil
}
[table] Add FlushTable function.
// Copyright 2018 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nftables
import (
"fmt"
"github.com/mdlayher/netlink"
"golang.org/x/sys/unix"
)
// tableHeaderType is the netlink header type announcing a new table
// (NFNL_SUBSYS_NFTABLES subsystem, NFT_MSG_NEWTABLE message).
var tableHeaderType = netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_NEWTABLE)

// TableFamily specifies the address family for this table.
type TableFamily byte

// Possible TableFamily values.
const (
	TableFamilyIPv4   TableFamily = unix.AF_INET
	TableFamilyIPv6   TableFamily = unix.AF_INET6
	TableFamilyBridge TableFamily = unix.AF_BRIDGE
)
// A Table contains Chains. See also
// https://wiki.nftables.org/wiki-nftables/index.php/Configuring_tables
type Table struct {
	Name   string      // NFTA_TABLE_NAME
	Use    uint32      // NFTA_TABLE_USE (Number of chains in table)
	Flags  uint32      // NFTA_TABLE_FLAGS
	Family TableFamily // address family (AF_INET, AF_INET6, AF_BRIDGE)
}
// DelTable deletes a specific table, along with all chains/rules it contains.
// The message is queued on the connection; it is not sent here.
func (cc *Conn) DelTable(t *Table) {
	attrs := []netlink.Attribute{
		{Type: unix.NFTA_TABLE_NAME, Data: []byte(t.Name + "\x00")},
		{Type: unix.NFTA_TABLE_FLAGS, Data: []byte{0, 0, 0, 0}},
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_DELTABLE),
			Flags: netlink.Request | netlink.Acknowledge,
		},
		Data: append(extraHeader(uint8(t.Family), 0), cc.marshalAttr(attrs)...),
	}
	cc.messages = append(cc.messages, msg)
}
// AddTable adds the specified Table. See also
// https://wiki.nftables.org/wiki-nftables/index.php/Configuring_tables
// The message is queued on the connection; it is not sent here.
func (cc *Conn) AddTable(t *Table) *Table {
	attrs := []netlink.Attribute{
		{Type: unix.NFTA_TABLE_NAME, Data: []byte(t.Name + "\x00")},
		{Type: unix.NFTA_TABLE_FLAGS, Data: []byte{0, 0, 0, 0}},
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_NEWTABLE),
			Flags: netlink.Request | netlink.Acknowledge | netlink.Create,
		},
		Data: append(extraHeader(uint8(t.Family), 0), cc.marshalAttr(attrs)...),
	}
	cc.messages = append(cc.messages, msg)
	return t
}
// FlushTable removes all rules in all chains within the specified Table. See also
// https://wiki.nftables.org/wiki-nftables/index.php/Configuring_tables#Flushing_tables
func (cc *Conn) FlushTable(t *Table) {
	// A DELRULE message that names only the table flushes every rule in
	// every chain of that table.
	attrs := []netlink.Attribute{
		{Type: unix.NFTA_RULE_TABLE, Data: []byte(t.Name + "\x00")},
	}
	msg := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_DELRULE),
			Flags: netlink.Request | netlink.Acknowledge,
		},
		Data: append(extraHeader(uint8(t.Family), 0), cc.marshalAttr(attrs)...),
	}
	cc.messages = append(cc.messages, msg)
}
// ListTables returns currently configured tables in the kernel by issuing a
// GETTABLE dump over all address families and decoding each reply.
func (cc *Conn) ListTables() ([]*Table, error) {
	conn, err := cc.dialNetlink()
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	req := netlink.Message{
		Header: netlink.Header{
			Type:  netlink.HeaderType((unix.NFNL_SUBSYS_NFTABLES << 8) | unix.NFT_MSG_GETTABLE),
			Flags: netlink.Request | netlink.Dump,
		},
		Data: extraHeader(uint8(unix.AF_UNSPEC), 0),
	}
	msgs, err := conn.Execute(req)
	if err != nil {
		return nil, err
	}

	var tables []*Table
	for _, m := range msgs {
		t, decodeErr := tableFromMsg(m)
		if decodeErr != nil {
			return nil, decodeErr
		}
		tables = append(tables, t)
	}
	return tables, nil
}
// tableFromMsg decodes a single NEWTABLE netlink message into a Table.
// The first byte of the payload is the address family; attributes follow
// after the 4-byte nfgenmsg header.
func tableFromMsg(msg netlink.Message) (*Table, error) {
	if msg.Header.Type != tableHeaderType {
		return nil, fmt.Errorf("unexpected header type: got %v, want %v", msg.Header.Type, tableHeaderType)
	}
	t := Table{Family: TableFamily(msg.Data[0])}
	ad, err := netlink.NewAttributeDecoder(msg.Data[4:])
	if err != nil {
		return nil, err
	}
	for ad.Next() {
		switch ad.Type() {
		case unix.NFTA_TABLE_NAME:
			t.Name = ad.String()
		case unix.NFTA_TABLE_USE:
			t.Use = ad.Uint32()
		case unix.NFTA_TABLE_FLAGS:
			t.Flags = ad.Uint32()
		}
	}
	return &t, nil
}
|
package main
import (
"fmt"
"strings"
"time"
"github.com/jaracil/ei"
. "github.com/jaracil/nexus/log"
"github.com/sirupsen/logrus"
r "gopkg.in/rethinkdb/rethinkdb-go.v5"
)
type Task struct {
Id string `rethinkdb:"id" json:"id"`
Stat string `rethinkdb:"stat" json:"state""`
Path string `rethinkdb:"path" json:"path"`
Prio int `rethinkdb:"prio" json:"priority"`
Ttl int `rethinkdb:"ttl" json:"ttl"`
Detach bool `rethinkdb:"detach" json:"detached"`
User string `rethinkdb:"user" json:"user"`
Method string `rethinkdb:"method" json:"method"`
Params interface{} `rethinkdb:"params" json:"params"`
LocalId interface{} `rethinkdb:"localId" json:"-"`
Tses string `rethinkdb:"tses" json:"targetSession"`
Result interface{} `rethinkdb:"result,omitempty" json:"result"`
ErrCode *int `rethinkdb:"errCode,omitempty" json:"errCode"`
ErrStr string `rethinkdb:"errStr,omitempty" json:"errString"`
ErrObj interface{} `rethinkdb:"errObj,omitempty" json:"errObject"`
Tags interface{} `rethinkdb:"tags,omitempty" json:"tags"`
CreationTime interface{} `rethinkdb:"creationTime,omitempty" json:"creationTime"`
WorkingTime interface{} `rethinkdb:"workingTime,omitempty" json:"workingTime"`
DeadLine interface{} `rethinkdb:"deadLine,omitempty" json:"deadline"`
}
// TaskFeed is one element of the RethinkDB changefeed on the tasks table:
// the previous and new versions of a changed document.
type TaskFeed struct {
	Old *Task `rethinkdb:"old_val"` // document before the change; nil on insert
	New *Task `rethinkdb:"new_val"` // document after the change; nil on delete
}
// taskPurge runs on every node but acts only when this node is the master.
// Once a second it (1) marks tasks past their deadLine as done with
// ErrTimeout — firing a "timeout" hook for each non-pull task — and then
// (2) deletes already-done tasks whose deadLine has passed. It exits when
// mainContext is cancelled; the deferred exit() shuts the process down on
// abnormal return.
func taskPurge() {
	defer exit("purge goroutine error")
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if isMasterNode() {
				// Expire overdue tasks: anything indexed before "now" on
				// deadLine that is not yet done becomes done/ErrTimeout,
				// with a new deadLine 600 s out so it is purged later.
				wres, err := r.Table("tasks").
					Between(r.MinVal, r.Now(), r.BetweenOpts{Index: "deadLine"}).
					Update(r.Branch(r.Row.Field("stat").Ne("done"),
						ei.M{"stat": "done", "errCode": ErrTimeout, "errStr": ErrStr[ErrTimeout], "deadLine": r.Now().Add(600)},
						ei.M{}),
						r.UpdateOpts{ReturnChanges: true}).
					RunWrite(db, r.RunOpts{Durability: "soft"})
				if err == nil {
					for _, change := range wres.Changes {
						task := ei.N(change.OldValue)
						// Pull placeholders ("@pull." paths) do not fire hooks.
						if path := task.M("path").StringZ(); !strings.HasPrefix(path, "@pull.") {
							hook("task", path+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
								"action":    "timeout",
								"id":        task.M("id").StringZ(),
								"timestamp": time.Now().UTC(),
							})
						}
					}
				}
				// Second pass: physically delete done tasks whose deadLine
				// has passed. Errors are intentionally ignored (best effort;
				// the next tick retries).
				r.Table("tasks").
					Between(r.MinVal, r.Now(), r.BetweenOpts{Index: "deadLine"}).
					Filter(r.Row.Field("stat").Eq("done")).
					Delete().
					RunWrite(db, r.RunOpts{Durability: "soft"})
			}
		case <-mainContext.Done():
			return
		}
	}
}
// taskTrack follows the RethinkDB changefeed of tasks owned by this node
// (ids are prefixed with nodeId) and dispatches on the new task state:
//   - "done":    notify the owning session (unless detached), then delete;
//   - "working": a pull placeholder ("@pull." path) tries to grab a task;
//   - "waiting": expire the task if its ttl ran out, else wake a puller.
//
// The feed is re-opened up to 10 consecutive times on error; the counter
// resets after a successful open. If retries are exhausted the loop ends
// and the deferred exit() shuts the process down.
func taskTrack() {
	defer exit("task change-feed error")
	for retry := 0; retry < 10; retry++ {
		// Changefeed over this node's tasks, including the initial state,
		// keeping only documents that still exist (new_val != nil) and
		// plucking just the fields the dispatcher needs.
		iter, err := r.Table("tasks").
			Between(nodeId, nodeId+"\uffff").
			Changes(r.ChangesOpts{IncludeInitial: true, Squash: false}).
			Filter(r.Row.Field("new_val").Ne(nil)).
			Pluck(ei.M{"new_val": []string{
				"id",
				"stat",
				"localId",
				"detach",
				"user",
				"prio",
				"ttl",
				"path",
				"method",
				"result",
				"errCode",
				"errStr",
				"errObj",
				"tses",
				"creationTime",
				"workingTime"}}).
			Run(db)
		if err != nil {
			Log.WithFields(logrus.Fields{
				"error": err.Error(),
			}).Errorln("Error opening taskTrack iterator")
			time.Sleep(time.Second)
			continue
		}
		retry = 0 //Reset retrys
		for {
			tf := &TaskFeed{}
			if !iter.Next(tf) {
				// NOTE(review): iter.Err() can be nil when a cursor ends
				// without error, which would make .Error() panic here —
				// confirm against rethinkdb-go's changefeed behavior.
				Log.WithFields(logrus.Fields{
					"error": iter.Err().Error(),
				}).Errorln("Error processing taskTrack feed")
				iter.Close()
				break
			}
			task := tf.New
			switch task.Stat {
			case "done":
				// Deliver the result to the owning session (first 16 chars
				// of the task id are the session/connection id), then purge.
				if !task.Detach {
					sesNotify.Notify(task.Id[0:16], task)
				}
				go deleteTask(task.Id)
			case "working":
				// Only pull placeholders act on "working".
				if strings.HasPrefix(task.Path, "@pull.") {
					go taskPull(task)
				}
			case "waiting":
				if !strings.HasPrefix(task.Path, "@pull.") {
					if task.Ttl <= 0 {
						go taskExpireTtl(task.Id)
					} else {
						go taskWakeup(task)
					}
				}
			}
		}
	}
}
// taskPull tries to atomically hand one waiting task on the pull's prefix
// over to the pulling session. task is the "@pull.<prefix>" placeholder
// document; the session id is the first 16 chars of its id. Returns true
// when a task was delivered, false when none was available (in which case
// the placeholder is put back to "waiting").
func taskPull(task *Task) bool {
	prefix := task.Path
	if strings.HasPrefix(prefix, "@pull.") {
		prefix = prefix[6:]
	}
	for {
		// Claim the highest-priority waiting task on this prefix by
		// flipping it to "working" with our session as target.
		wres, err := r.Table("tasks").
			OrderBy(r.OrderByOpts{Index: "pspc"}).
			Between(ei.S{prefix, "waiting", r.MinVal, r.MinVal}, ei.S{prefix, "waiting", r.MaxVal, r.MaxVal}, r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
			Limit(1).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working", "tses": task.Id[0:16], "workingTime": r.Now()},
				ei.M{}),
				r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			break
		}
		if wres.Replaced > 0 {
			// We claimed a task: deliver it as the pull's result.
			newTask := ei.N(wres.Changes[0].NewValue)
			result := make(ei.M)
			result["taskid"] = newTask.M("id").StringZ()
			result["path"] = newTask.M("path").StringZ()
			result["method"] = newTask.M("method").StringZ()
			result["params"] = newTask.M("params").RawZ()
			result["tags"] = newTask.M("tags").MapStrZ()
			result["prio"] = -newTask.M("prio").IntZ()
			result["detach"] = newTask.M("detach").BoolZ()
			result["user"] = newTask.M("user").StringZ()
			pres, err := r.Table("tasks").
				Get(task.Id).
				Update(r.Branch(r.Row.Field("stat").Eq("working"),
					ei.M{"stat": "done", "result": result, "deadLine": r.Now().Add(600)},
					ei.M{})).
				RunWrite(db, r.RunOpts{Durability: "soft"})
			if err != nil || pres.Replaced != 1 {
				// The pull was cancelled/changed meanwhile: release the
				// claimed task back to "waiting".
				r.Table("tasks").
					Get(result["taskid"]).
					Update(ei.M{"stat": "waiting"}).
					RunWrite(db, r.RunOpts{Durability: "soft"})
				break
			}
			hook("task", newTask.M("path").StringZ()+newTask.M("method").StringZ(), newTask.M("user").StringZ(), ei.M{
				"action":    "pull",
				"id":        result["taskid"],
				"connid":    task.Id[0:16],
				"user":      task.User,
				"ttl":       newTask.M("ttl").IntZ(),
				"timestamp": time.Now().UTC(),
			})
			return true
		}
		if wres.Unchanged > 0 {
			// Lost a race: the candidate changed state first. Retry.
			continue
		}
		break
	}
	// No task delivered: park the pull placeholder back in "waiting".
	r.Table("tasks").
		Get(task.Id).
		Update(r.Branch(r.Row.Field("stat").Eq("working"),
			ei.M{"stat": "waiting"},
			ei.M{})).
		RunWrite(db, r.RunOpts{Durability: "soft"})
	// On the previous step where the pull transitions from working to waiting
	// there is a race condition where a push could enter and a single pull on that
	// path wouldnt be able to notice, and a deadlock would happen.
	// Here we check again for any task waiting that we could accept, and set ourselves
	// as working again to restart the loop on taskTrack()
	stuck, _ := r.Table("tasks").
		OrderBy(r.OrderByOpts{Index: "pspc"}).
		Between(ei.S{prefix, "waiting", r.MinVal, r.MinVal}, ei.S{prefix, "waiting", r.MaxVal, r.MaxVal}, r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
		Limit(1).
		Run(db, r.RunOpts{Durability: "soft"})
	if !stuck.IsNil() {
		r.Table("tasks").
			Get(task.Id).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working"},
				ei.M{})).
			RunWrite(db, r.RunOpts{Durability: "soft"})
	}
	return false
}
// taskWakeup moves one waiting pull placeholder on task's path into the
// "working" state, which makes taskTrack re-run that pull's claim loop.
// Sample(1) picks a random waiting puller; an Unchanged result means we
// lost a race and we retry. Returns true if a puller was woken.
func taskWakeup(task *Task) bool {
	for {
		wres, err := r.Table("tasks").
			Between(ei.S{"@pull." + task.Path, "waiting", r.MinVal, r.MinVal},
				ei.S{"@pull." + task.Path, "waiting", r.MaxVal, r.MaxVal},
				r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
			Sample(1).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working", "workingTime": r.Now()},
				ei.M{})).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			return false
		}
		if wres.Replaced > 0 {
			return true
		}
		if wres.Unchanged > 0 {
			// Candidate changed state concurrently; try another one.
			continue
		}
		break
	}
	return false
}
// deleteTask removes the task document with the given id. Best effort:
// the write error is intentionally ignored.
func deleteTask(id string) {
	r.Table("tasks").Get(id).Delete().RunWrite(db, r.RunOpts{Durability: "soft"})
}
// taskExpireTtl marks the task as done with ErrTtlExpired, sets its
// deadLine 600 s out so taskPurge removes it later, and fires a
// "ttlExpired" hook for each changed document.
func taskExpireTtl(taskid string) {
	wres, err := r.Table("tasks").
		Get(taskid).
		Update(ei.M{"stat": "done", "errCode": ErrTtlExpired, "errStr": ErrStr[ErrTtlExpired], "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
		RunWrite(db, r.RunOpts{Durability: "soft"})
	if err == nil {
		for _, change := range wres.Changes {
			task := ei.N(change.OldValue)
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "ttlExpired",
				"id":        task.M("id").StringZ(),
				"timestamp": time.Now().UTC(),
			})
		}
	}
}
func (nc *NexusConn) handleTaskReq(req *JsonRpcReq) {
switch req.Method {
case "task.push":
method, err := ei.N(req.Params).M("method").Lower().F(checkRegexp, _taskRegexp).F(checkNotEmptyLabels).String()
if err != nil {
req.Error(ErrInvalidParams, "method", nil)
return
}
params, err := ei.N(req.Params).M("params").Raw()
if err != nil {
req.Error(ErrInvalidParams, "params", nil)
return
}
prio := -ei.N(req.Params).M("prio").IntZ()
ttl := ei.N(req.Params).M("ttl").IntZ()
if ttl <= 0 {
ttl = 5
}
detach := ei.N(req.Params).M("detach").BoolZ()
tags := nc.getTags(method)
if !(ei.N(tags).M("@"+req.Method).BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
req.Error(ErrPermissionDenied, "", nil)
return
}
path, met := getPathMethod(method)
timeout := ei.N(req.Params).M("timeout").Float64Z()
if timeout <= 0 {
timeout = 60 * 60 * 24 * 10 // Ten days
}
task := &Task{
Id: nc.connId + safeId(10),
Stat: "waiting",
Path: path,
Prio: prio,
Ttl: ttl,
Detach: detach,
Method: met,
Params: params,
Tags: tags,
User: nc.user.User,
LocalId: req.Id,
CreationTime: r.Now(),
DeadLine: r.Now().Add(timeout),
}
nc.log.WithFields(logrus.Fields{
"connid": req.nc.connId,
"id": req.Id,
"taskid": task.Id,
}).Info("taskid generated")
_, err = r.Table("tasks").Insert(task, r.InsertOpts{}).RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
hook("task", task.Path+task.Method, task.User, ei.M{
"action": "push",
"id": task.Id,
"connid": nc.connId,
"user": nc.user.User,
"tags": nc.user.Tags,
"path": path,
"method": met,
"params": params,
"detach": detach,
"ttl": ttl,
"prio": prio,
"creationTime": time.Now().UTC(),
"timeout": timeout,
})
if detach {
req.Result(ei.M{"ok": true})
}
case "task.pull":
if req.Id == nil {
return
}
prefix := ei.N(req.Params).M("prefix").Lower().F(checkRegexp, _taskRegexp).F(checkNotEmptyLabels).StringZ()
if prefix == "" {
req.Error(ErrInvalidParams, "prefix", nil)
return
}
if !strings.HasSuffix(prefix, ".") {
prefix += "."
}
tags := nc.getTags(prefix)
if !(ei.N(tags).M("@"+req.Method).BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
req.Error(ErrPermissionDenied, "", nil)
return
}
timeout := ei.N(req.Params).M("timeout").Float64Z()
if timeout <= 0 {
timeout = 60 * 60 * 24 * 10 // Ten days
}
task := &Task{
Id: nc.connId + safeId(10),
Stat: "working",
Path: "@pull." + prefix,
Method: "",
Params: nil,
LocalId: req.Id,
CreationTime: r.Now(),
DeadLine: r.Now().Add(timeout),
User: nc.user.User,
}
_, err := r.Table("tasks").Insert(task).RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
case "task.result":
taskid := ei.N(req.Params).M("taskid").StringZ()
result := ei.N(req.Params).M("result").RawZ()
res, err := r.Table("tasks").
Get(taskid).
Update(ei.M{"stat": "done", "result": result, "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
if res.Replaced > 0 {
task := ei.N(res.Changes[0].OldValue)
hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
"action": "result",
"id": taskid,
"result": result,
"timestamp": time.Now().UTC(),
})
req.Result(ei.M{"ok": true})
} else {
req.Error(ErrInvalidTask, "", nil)
}
case "task.error":
taskid := ei.N(req.Params).M("taskid").StringZ()
code := ei.N(req.Params).M("code").IntZ()
message := ei.N(req.Params).M("message").StringZ()
data := ei.N(req.Params).M("data").RawZ()
res, err := r.Table("tasks").
Get(taskid).
Update(ei.M{"stat": "done", "errCode": code, "errStr": message, "errObj": data, "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
if res.Replaced > 0 {
task := ei.N(res.Changes[0].OldValue)
hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
"action": "error",
"id": taskid,
"code": code,
"message": message,
"data": data,
"timestamp": time.Now().UTC(),
})
req.Result(ei.M{"ok": true})
} else {
req.Error(ErrInvalidTask, "", nil)
}
case "task.reject":
taskid := ei.N(req.Params).M("taskid").StringZ()
res, err := r.Table("tasks").
Get(taskid).
Update(ei.M{"stat": "waiting", "tses": nil, "ttl": r.Row.Field("ttl").Add(-1)}, r.UpdateOpts{ReturnChanges: true}).
RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
if res.Replaced > 0 {
task := ei.N(res.Changes[0].OldValue)
hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
"action": "reject",
"id": taskid,
"timestamp": time.Now().UTC(),
})
req.Result(ei.M{"ok": true})
} else {
req.Error(ErrInvalidTask, "", nil)
}
case "task.cancel":
id := ei.N(req.Params).M("id").RawZ()
wres, err := r.Table("tasks").
Between(nc.connId, nc.connId+"\uffff").
Filter(r.Row.Field("localId").Eq(id)).
Update(r.Branch(r.Row.Field("stat").Ne("done"),
ei.M{"stat": "done", "errCode": ErrCancel, "errStr": ErrStr[ErrCancel], "deadLine": r.Now().Add(600)},
ei.M{}),
r.UpdateOpts{ReturnChanges: true}).
RunWrite(db, r.RunOpts{Durability: "soft"})
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
if wres.Replaced > 0 {
task := ei.N(wres.Changes[0].NewValue)
hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
"action": "cancel",
"id": task.M("taskid").StringZ(),
"timestamp": time.Now().UTC(),
})
req.Result(ei.M{"ok": true})
} else {
req.Error(ErrInvalidTask, "", nil)
}
case "task.list":
prefix, depth, filter, limit, skip := getListParams(req.Params)
tags := nc.getTags(prefix)
if !(ei.N(tags).M("@task.list").BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
req.Error(ErrPermissionDenied, "", nil)
return
}
var term r.Term
if prefix == "" {
if depth < 0 {
term = r.Table("tasks")
} else if depth == 0 {
term = r.Table("tasks").GetAllByIndex("path", ".", "@pull.")
} else {
term = r.Table("tasks").Filter(r.Row.Field("path").Match(fmt.Sprintf("^(?:@pull[.])??(?:[^.]*[.]){0,%d}$", depth)))
}
} else {
if depth != 0 {
term = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"}).Union(r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"}))
} else {
term = r.Table("tasks").GetAllByIndex("path", prefix+".", "@pull."+prefix+".")
}
if depth > 0 {
term = term.Filter(r.Row.Field("path").Match(fmt.Sprintf("^%s(?:[.][^.]*){0,%d}[.]$", prefix, depth)))
}
}
if filter != "" {
term = term.Filter(r.Row.Field("path").Match(filter))
}
if skip >= 0 {
term = term.Skip(skip)
}
if limit > 0 {
term = term.Limit(limit)
}
cur, err := term.Run(db)
if err != nil {
req.Error(ErrInternal, "", nil)
return
}
ret := make([]*Task, 0)
if err := cur.All(&ret); err != nil {
req.Error(ErrInternal, "", nil)
return
}
for _, task := range ret {
task.Path = strings.TrimPrefix(task.Path, "@pull.")
task.Params = truncateJson(task.Params)
task.ErrObj = truncateJson(task.ErrObj)
}
req.Result(ret)
case "task.count":
prefix := getPrefixParam(req.Params)
filter := ei.N(req.Params).M("filter").StringZ()
countSubprefixes := ei.N(req.Params).M("subprefixes").BoolZ()
tags := nc.getTags(prefix)
if !(ei.N(tags).M("@task.count").BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
req.Error(ErrPermissionDenied, "", nil)
return
}
var pushTerm, pullTerm, term r.Term
if countSubprefixes {
if prefix == "" {
pushTerm = r.Table("tasks")
pullTerm = r.Table("tasks").Between("@pull.", "@pull.\uffff", r.BetweenOpts{Index: "path"})
if filter != "" {
pushTerm = pushTerm.Filter(r.Row.Field("path").Match(filter))
pullTerm = pullTerm.Filter(r.Row.Field("path").Match(filter))
}
pushTerm = pushTerm.Group(r.Row.Field("path").Match("^([^@.][^.]*)[.](?:[^.]*[.])*$").Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
return t.HasFields("group")
}).Map(func(t r.Term) r.Term {
return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
})
pullTerm = pullTerm.Group(r.Row.Field("path").Match("^@pull[.]([^.]*)[.](?:[^.]*[.])*$").Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
return t.HasFields("group")
}).Map(func(t r.Term) r.Term {
return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
})
} else {
pushTerm = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"})
pullTerm = r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"})
if filter != "" {
pushTerm = pushTerm.Filter(r.Row.Field("path").Match(filter))
pullTerm = pullTerm.Filter(r.Row.Field("path").Match(filter))
}
pushTerm = pushTerm.Group(r.Row.Field("path").Match(fmt.Sprintf("^(%s(?:[.][^.]*)?)[.](?:[^.]*[.])*$", prefix)).Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
return t.HasFields("group")
}).Map(func(t r.Term) r.Term {
return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
})
pullTerm = pullTerm.Group(r.Row.Field("path").Match(fmt.Sprintf("^@pull[.](%s(?:[.][^.]*)?)[.](?:[^.]*[.])*$", prefix)).Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
return t.HasFields("group")
}).Map(func(t r.Term) r.Term {
return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
})
}
pushCur, err := pushTerm.Run(db)
if err != nil {
req.Error(ErrInternal, err.Error(), nil)
return
}
var pushAll []interface{}
if err := pushCur.All(&pushAll); err != nil {
req.Error(ErrInternal, "", nil)
return
}
pullCur, err := pullTerm.Run(db)
if err != nil {
req.Error(ErrInternal, err.Error(), nil)
return
}
var pullAll []interface{}
if err := pullCur.All(&pullAll); err != nil {
req.Error(ErrInternal, "", nil)
return
}
res := []interface{}{}
countPulls := map[string]int{}
for _, v := range pullAll {
countPulls[ei.N(v).M("prefix").StringZ()] = ei.N(v).M("count").IntZ()
}
for _, v := range pushAll {
p := ei.N(v).M("prefix").StringZ()
if !strings.HasPrefix(p, "@pull.") {
pullCount := countPulls[p]
delete(countPulls, p)
pushCount := ei.N(v).M("count").IntZ()
res = append(res, ei.M{"prefix": p, "count": pushCount + pullCount, "pullCount": pullCount, "pushCount": pushCount})
}
}
for p, v := range countPulls {
res = append(res, ei.M{"prefix": p, "count": v, "pullCount": v, "pushCount": 0})
}
req.Result(res)
} else {
if prefix == "" {
term = r.Table("tasks")
} else {
term = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"}).Union(r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"}))
}
if filter != "" {
term = term.Filter(r.Row.Field("path").Match(filter))
}
term = term.Count()
cur, err := term.Run(db)
if err != nil {
req.Error(ErrInternal, err.Error(), nil)
return
}
var count int
if err := cur.One(&count); err != nil {
req.Error(ErrInternal, "", nil)
return
}
if prefix == "" {
term = r.Table("tasks").Between("@pull.", "@pull.\uffff", r.BetweenOpts{Index: "path"})
} else {
term = r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"})
}
if filter != "" {
term = term.Filter(r.Row.Field("path").Match(filter))
}
term = term.Count()
cur, err = term.Run(db)
if err != nil {
req.Error(ErrInternal, err.Error(), nil)
return
}
var countPulls int
if err := cur.One(&countPulls); err != nil {
req.Error(ErrInternal, "", nil)
return
}
countPushes := count - countPulls
if countPushes < 0 {
countPushes = 0
}
req.Result(ei.M{"count": count, "pullCount": countPulls, "pushCount": countPushes})
}
default:
req.Error(ErrMethodNotFound, "", nil)
}
}
Add more task pull info
package main
import (
"fmt"
"strings"
"time"
"github.com/jaracil/ei"
. "github.com/jaracil/nexus/log"
"github.com/sirupsen/logrus"
r "gopkg.in/rethinkdb/rethinkdb-go.v5"
)
type Task struct {
Id string `rethinkdb:"id" json:"id"`
Stat string `rethinkdb:"stat" json:"state""`
Path string `rethinkdb:"path" json:"path"`
Prio int `rethinkdb:"prio" json:"priority"`
Ttl int `rethinkdb:"ttl" json:"ttl"`
Detach bool `rethinkdb:"detach" json:"detached"`
User string `rethinkdb:"user" json:"user"`
Method string `rethinkdb:"method" json:"method"`
Params interface{} `rethinkdb:"params" json:"params"`
LocalId interface{} `rethinkdb:"localId" json:"-"`
Tses string `rethinkdb:"tses" json:"targetSession"`
Result interface{} `rethinkdb:"result,omitempty" json:"result"`
ErrCode *int `rethinkdb:"errCode,omitempty" json:"errCode"`
ErrStr string `rethinkdb:"errStr,omitempty" json:"errString"`
ErrObj interface{} `rethinkdb:"errObj,omitempty" json:"errObject"`
Tags interface{} `rethinkdb:"tags,omitempty" json:"tags"`
CreationTime interface{} `rethinkdb:"creationTime,omitempty" json:"creationTime"`
WorkingTime interface{} `rethinkdb:"workingTime,omitempty" json:"workingTime"`
DeadLine interface{} `rethinkdb:"deadLine,omitempty" json:"deadline"`
}
// TaskFeed is one element of the rethinkdb changefeed over the "tasks"
// table: Old is the previous document (nil on insert), New is the new
// document (nil on delete).
type TaskFeed struct {
	Old *Task `rethinkdb:"old_val"`
	New *Task `rethinkdb:"new_val"`
}
// taskPurge is a background goroutine that, once per second and only on
// the master node, times out overdue tasks and physically deletes
// finished ones. It exits when mainContext is cancelled.
func taskPurge() {
	defer exit("purge goroutine error")
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			if isMasterNode() {
				// Mark every task past its deadLine as done with ErrTimeout.
				// The new deadLine (now + 600) keeps the errored task around
				// long enough for the owner to observe the error.
				wres, err := r.Table("tasks").
					Between(r.MinVal, r.Now(), r.BetweenOpts{Index: "deadLine"}).
					Update(r.Branch(r.Row.Field("stat").Ne("done"),
						ei.M{"stat": "done", "errCode": ErrTimeout, "errStr": ErrStr[ErrTimeout], "deadLine": r.Now().Add(600)},
						ei.M{}),
						r.UpdateOpts{ReturnChanges: true}).
					RunWrite(db, r.RunOpts{Durability: "soft"})
				if err == nil {
					for _, change := range wres.Changes {
						task := ei.N(change.OldValue)
						// Fire the timeout hook only for pushes; "@pull." rows
						// are pull markers, not real tasks.
						if path := task.M("path").StringZ(); !strings.HasPrefix(path, "@pull.") {
							hook("task", path+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
								"action":    "timeout",
								"id":        task.M("id").StringZ(),
								"timestamp": time.Now().UTC(),
							})
						}
					}
				}
				// Delete tasks that are already done and past their deadline.
				r.Table("tasks").
					Between(r.MinVal, r.Now(), r.BetweenOpts{Index: "deadLine"}).
					Filter(r.Row.Field("stat").Eq("done")).
					Delete().
					RunWrite(db, r.RunOpts{Durability: "soft"})
			}
		case <-mainContext.Done():
			return
		}
	}
}
// taskTrack follows the changefeed of tasks owned by this node (ids are
// prefixed with nodeId) and reacts to state transitions: notifies the
// owning session when a task is done, starts a pull attempt when a pull
// marker becomes "working", and wakes pullers / expires TTL when a push
// becomes "waiting". Gives up after 10 consecutive failed feed opens.
func taskTrack() {
	defer exit("task change-feed error")
	for retry := 0; retry < 10; retry++ {
		iter, err := r.Table("tasks").
			Between(nodeId, nodeId+"\uffff").
			Changes(r.ChangesOpts{IncludeInitial: true, Squash: false}).
			Filter(r.Row.Field("new_val").Ne(nil)).
			Pluck(ei.M{"new_val": []string{
				"id",
				"stat",
				"localId",
				"detach",
				"user",
				"prio",
				"ttl",
				"path",
				"method",
				"result",
				"errCode",
				"errStr",
				"errObj",
				"tses",
				"creationTime",
				"workingTime"}}).
			Run(db)
		if err != nil {
			Log.WithFields(logrus.Fields{
				"error": err.Error(),
			}).Errorln("Error opening taskTrack iterator")
			time.Sleep(time.Second)
			continue
		}
		retry = 0 // Reset retries: the feed opened successfully
		for {
			tf := &TaskFeed{}
			if !iter.Next(tf) {
				Log.WithFields(logrus.Fields{
					"error": iter.Err().Error(),
				}).Errorln("Error processing taskTrack feed")
				iter.Close()
				break
			}
			task := tf.New
			switch task.Stat {
			case "done":
				// Deliver the result to the owning session (first 16 chars of
				// the task id are the connection id), then delete the task.
				if !task.Detach {
					sesNotify.Notify(task.Id[0:16], task)
				}
				go deleteTask(task.Id)
			case "working":
				// A pull marker became active: try to grab a waiting push.
				if strings.HasPrefix(task.Path, "@pull.") {
					go taskPull(task)
				}
			case "waiting":
				// A push is (re-)waiting: expire it if out of retries,
				// otherwise wake up a matching puller.
				if !strings.HasPrefix(task.Path, "@pull.") {
					if task.Ttl <= 0 {
						go taskExpireTtl(task.Id)
					} else {
						go taskWakeup(task)
					}
				}
			}
		}
	}
}
// taskPull tries to match one waiting push task to the given pull marker
// task (whose path is "@pull.<prefix>"). On success the matched push is
// set to "working" with this puller as target session, the pull marker is
// completed with the push's data as its result, and true is returned.
// On failure the pull marker goes back to "waiting" (with a re-check to
// avoid a lost-wakeup race) and false is returned.
func taskPull(task *Task) bool {
	prefix := task.Path
	if strings.HasPrefix(prefix, "@pull.") {
		prefix = prefix[6:]
	}
	for {
		// Atomically claim the highest-priority waiting push on this prefix
		// (pspc index: path/stat/prio/creation). The Branch guards against
		// the row changing state between read and update.
		wres, err := r.Table("tasks").
			OrderBy(r.OrderByOpts{Index: "pspc"}).
			Between(ei.S{prefix, "waiting", r.MinVal, r.MinVal}, ei.S{prefix, "waiting", r.MaxVal, r.MaxVal}, r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
			Limit(1).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working", "tses": task.Id[0:16], "workingTime": r.Now()},
				ei.M{}),
				r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			break
		}
		if wres.Replaced > 0 {
			// We claimed a push: build the pull result from its fields.
			newTask := ei.N(wres.Changes[0].NewValue)
			result := make(ei.M)
			result["taskid"] = newTask.M("id").StringZ()
			result["path"] = newTask.M("path").StringZ()
			result["method"] = newTask.M("method").StringZ()
			result["params"] = newTask.M("params").RawZ()
			result["tags"] = newTask.M("tags").MapStrZ()
			result["prio"] = -newTask.M("prio").IntZ() // prio is stored negated; undo for the client
			result["detach"] = newTask.M("detach").BoolZ()
			result["user"] = newTask.M("user").StringZ()
			result["ttl"] = newTask.M("ttl").IntZ()
			result["creationTime"] = newTask.M("creationTime").TimeZ()
			result["deadLine"] = newTask.M("deadLine").TimeZ()
			// Complete the pull marker with that result. If this fails the
			// claimed push is released back to "waiting".
			pres, err := r.Table("tasks").
				Get(task.Id).
				Update(r.Branch(r.Row.Field("stat").Eq("working"),
					ei.M{"stat": "done", "result": result, "deadLine": r.Now().Add(600)},
					ei.M{})).
				RunWrite(db, r.RunOpts{Durability: "soft"})
			if err != nil || pres.Replaced != 1 {
				r.Table("tasks").
					Get(result["taskid"]).
					Update(ei.M{"stat": "waiting"}).
					RunWrite(db, r.RunOpts{Durability: "soft"})
				break
			}
			hook("task", newTask.M("path").StringZ()+newTask.M("method").StringZ(), newTask.M("user").StringZ(), ei.M{
				"action":    "pull",
				"id":        result["taskid"],
				"connid":    task.Id[0:16],
				"user":      task.User,
				"ttl":       newTask.M("ttl").IntZ(),
				"timestamp": time.Now().UTC(),
			})
			return true
		}
		if wres.Unchanged > 0 {
			// A candidate existed but the Branch saw a non-waiting state:
			// race with another puller — retry.
			continue
		}
		break
	}
	// No push matched: park the pull marker back in "waiting".
	r.Table("tasks").
		Get(task.Id).
		Update(r.Branch(r.Row.Field("stat").Eq("working"),
			ei.M{"stat": "waiting"},
			ei.M{})).
		RunWrite(db, r.RunOpts{Durability: "soft"})
	// On the previous step where the pull transitions from working to waiting
	// there is a race condition where a push could enter and a single pull on that
	// path wouldn't be able to notice, and a deadlock would happen.
	// Here we check again for any task waiting that we could accept, and set ourselves
	// as working again to restart the loop on taskTrack()
	// NOTE(review): the Run error is discarded; if Run fails, stuck may be a
	// nil cursor and the IsNil() call below depends on driver behavior — verify.
	stuck, _ := r.Table("tasks").
		OrderBy(r.OrderByOpts{Index: "pspc"}).
		Between(ei.S{prefix, "waiting", r.MinVal, r.MinVal}, ei.S{prefix, "waiting", r.MaxVal, r.MaxVal}, r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
		Limit(1).
		Run(db, r.RunOpts{Durability: "soft"})
	if !stuck.IsNil() {
		r.Table("tasks").
			Get(task.Id).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working"},
				ei.M{})).
			RunWrite(db, r.RunOpts{Durability: "soft"})
	}
	return false
}
// taskWakeup flips one random waiting pull marker on the pushed task's
// path to "working", which makes taskTrack (on the puller's node) run
// taskPull for it. Returns true if a puller was woken, false if none was
// available or the update failed.
func taskWakeup(task *Task) bool {
	for {
		wres, err := r.Table("tasks").
			Between(ei.S{"@pull." + task.Path, "waiting", r.MinVal, r.MinVal},
				ei.S{"@pull." + task.Path, "waiting", r.MaxVal, r.MaxVal},
				r.BetweenOpts{RightBound: "closed", Index: "pspc"}).
			Sample(1).
			Update(r.Branch(r.Row.Field("stat").Eq("waiting"),
				ei.M{"stat": "working", "workingTime": r.Now()},
				ei.M{})).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			return false
		}
		if wres.Replaced > 0 {
			return true
		}
		if wres.Unchanged > 0 {
			// The sampled marker changed state under us — retry with another.
			continue
		}
		break
	}
	return false
}
// deleteTask removes a task document by id (best-effort; errors ignored).
func deleteTask(id string) {
	r.Table("tasks").Get(id).Delete().RunWrite(db, r.RunOpts{Durability: "soft"})
}
// taskExpireTtl marks a task as done with ErrTtlExpired (its retry budget
// ran out) and fires the "ttlExpired" hook for each changed row.
func taskExpireTtl(taskid string) {
	wres, err := r.Table("tasks").
		Get(taskid).
		Update(ei.M{"stat": "done", "errCode": ErrTtlExpired, "errStr": ErrStr[ErrTtlExpired], "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
		RunWrite(db, r.RunOpts{Durability: "soft"})
	if err == nil {
		for _, change := range wres.Changes {
			task := ei.N(change.OldValue)
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "ttlExpired",
				"id":        task.M("id").StringZ(),
				"timestamp": time.Now().UTC(),
			})
		}
	}
}
// handleTaskReq dispatches a JSON-RPC "task.*" request from a client
// connection: push, pull, result, error, reject, cancel, list and count.
// Permission is checked against the caller's tags for the relevant prefix.
func (nc *NexusConn) handleTaskReq(req *JsonRpcReq) {
	switch req.Method {
	case "task.push":
		// Create a new push task. The response (unless detached) is delivered
		// later by taskTrack via sesNotify when the task reaches "done".
		method, err := ei.N(req.Params).M("method").Lower().F(checkRegexp, _taskRegexp).F(checkNotEmptyLabels).String()
		if err != nil {
			req.Error(ErrInvalidParams, "method", nil)
			return
		}
		params, err := ei.N(req.Params).M("params").Raw()
		if err != nil {
			req.Error(ErrInvalidParams, "params", nil)
			return
		}
		prio := -ei.N(req.Params).M("prio").IntZ() // stored negated so the pspc index orders high prio first
		ttl := ei.N(req.Params).M("ttl").IntZ()
		if ttl <= 0 {
			ttl = 5
		}
		detach := ei.N(req.Params).M("detach").BoolZ()
		tags := nc.getTags(method)
		if !(ei.N(tags).M("@"+req.Method).BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
			req.Error(ErrPermissionDenied, "", nil)
			return
		}
		path, met := getPathMethod(method)
		timeout := ei.N(req.Params).M("timeout").Float64Z()
		if timeout <= 0 {
			timeout = 60 * 60 * 24 * 10 // Ten days
		}
		// Task id is the connection id plus a random suffix, so taskTrack
		// can route completion back to this connection (id[0:16]).
		task := &Task{
			Id:           nc.connId + safeId(10),
			Stat:         "waiting",
			Path:         path,
			Prio:         prio,
			Ttl:          ttl,
			Detach:       detach,
			Method:       met,
			Params:       params,
			Tags:         tags,
			User:         nc.user.User,
			LocalId:      req.Id,
			CreationTime: r.Now(),
			DeadLine:     r.Now().Add(timeout),
		}
		nc.log.WithFields(logrus.Fields{
			"connid": req.nc.connId,
			"id":     req.Id,
			"taskid": task.Id,
		}).Info("taskid generated")
		_, err = r.Table("tasks").Insert(task, r.InsertOpts{}).RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		hook("task", task.Path+task.Method, task.User, ei.M{
			"action":       "push",
			"id":           task.Id,
			"connid":       nc.connId,
			"user":         nc.user.User,
			"tags":         nc.user.Tags,
			"path":         path,
			"method":       met,
			"params":       params,
			"detach":       detach,
			"ttl":          ttl,
			"prio":         prio,
			"creationTime": time.Now().UTC(),
			"timeout":      timeout,
		})
		if detach {
			req.Result(ei.M{"ok": true})
		}
	case "task.pull":
		// Register a pull marker ("@pull.<prefix>") in "working" state; the
		// matching itself happens asynchronously in taskPull. Notifications
		// (req.Id == nil) are ignored since there is nothing to answer.
		if req.Id == nil {
			return
		}
		prefix := ei.N(req.Params).M("prefix").Lower().F(checkRegexp, _taskRegexp).F(checkNotEmptyLabels).StringZ()
		if prefix == "" {
			req.Error(ErrInvalidParams, "prefix", nil)
			return
		}
		if !strings.HasSuffix(prefix, ".") {
			prefix += "."
		}
		tags := nc.getTags(prefix)
		if !(ei.N(tags).M("@"+req.Method).BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
			req.Error(ErrPermissionDenied, "", nil)
			return
		}
		timeout := ei.N(req.Params).M("timeout").Float64Z()
		if timeout <= 0 {
			timeout = 60 * 60 * 24 * 10 // Ten days
		}
		task := &Task{
			Id:           nc.connId + safeId(10),
			Stat:         "working",
			Path:         "@pull." + prefix,
			Method:       "",
			Params:       nil,
			LocalId:      req.Id,
			CreationTime: r.Now(),
			DeadLine:     r.Now().Add(timeout),
			User:         nc.user.User,
		}
		_, err := r.Table("tasks").Insert(task).RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
	case "task.result":
		// Worker reports a successful result for a pulled task.
		taskid := ei.N(req.Params).M("taskid").StringZ()
		result := ei.N(req.Params).M("result").RawZ()
		res, err := r.Table("tasks").
			Get(taskid).
			Update(ei.M{"stat": "done", "result": result, "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		if res.Replaced > 0 {
			task := ei.N(res.Changes[0].OldValue)
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "result",
				"id":        taskid,
				"result":    result,
				"timestamp": time.Now().UTC(),
			})
			req.Result(ei.M{"ok": true})
		} else {
			req.Error(ErrInvalidTask, "", nil)
		}
	case "task.error":
		// Worker reports an error result for a pulled task.
		taskid := ei.N(req.Params).M("taskid").StringZ()
		code := ei.N(req.Params).M("code").IntZ()
		message := ei.N(req.Params).M("message").StringZ()
		data := ei.N(req.Params).M("data").RawZ()
		res, err := r.Table("tasks").
			Get(taskid).
			Update(ei.M{"stat": "done", "errCode": code, "errStr": message, "errObj": data, "deadLine": r.Now().Add(600)}, r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		if res.Replaced > 0 {
			task := ei.N(res.Changes[0].OldValue)
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "error",
				"id":        taskid,
				"code":      code,
				"message":   message,
				"data":      data,
				"timestamp": time.Now().UTC(),
			})
			req.Result(ei.M{"ok": true})
		} else {
			req.Error(ErrInvalidTask, "", nil)
		}
	case "task.reject":
		// Worker gives the task back: requeue it as waiting with one less TTL.
		taskid := ei.N(req.Params).M("taskid").StringZ()
		res, err := r.Table("tasks").
			Get(taskid).
			Update(ei.M{"stat": "waiting", "tses": nil, "ttl": r.Row.Field("ttl").Add(-1)}, r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		if res.Replaced > 0 {
			task := ei.N(res.Changes[0].OldValue)
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "reject",
				"id":        taskid,
				"timestamp": time.Now().UTC(),
			})
			req.Result(ei.M{"ok": true})
		} else {
			req.Error(ErrInvalidTask, "", nil)
		}
	case "task.cancel":
		// Cancel the caller's own unfinished task(s) matching the original
		// JSON-RPC request id (localId), scoped to this connection's ids.
		id := ei.N(req.Params).M("id").RawZ()
		wres, err := r.Table("tasks").
			Between(nc.connId, nc.connId+"\uffff").
			Filter(r.Row.Field("localId").Eq(id)).
			Update(r.Branch(r.Row.Field("stat").Ne("done"),
				ei.M{"stat": "done", "errCode": ErrCancel, "errStr": ErrStr[ErrCancel], "deadLine": r.Now().Add(600)},
				ei.M{}),
				r.UpdateOpts{ReturnChanges: true}).
			RunWrite(db, r.RunOpts{Durability: "soft"})
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		if wres.Replaced > 0 {
			task := ei.N(wres.Changes[0].NewValue)
			// NOTE(review): the document field is "id", not "taskid" — this
			// hook "id" value may always be empty; verify against consumers.
			hook("task", task.M("path").StringZ()+task.M("method").StringZ(), task.M("user").StringZ(), ei.M{
				"action":    "cancel",
				"id":        task.M("taskid").StringZ(),
				"timestamp": time.Now().UTC(),
			})
			req.Result(ei.M{"ok": true})
		} else {
			req.Error(ErrInvalidTask, "", nil)
		}
	case "task.list":
		// List tasks under a prefix, optionally bounded by depth, regex
		// filter, skip and limit. "@pull." rows are included but their
		// prefix is stripped before returning.
		prefix, depth, filter, limit, skip := getListParams(req.Params)
		tags := nc.getTags(prefix)
		if !(ei.N(tags).M("@task.list").BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
			req.Error(ErrPermissionDenied, "", nil)
			return
		}
		var term r.Term
		if prefix == "" {
			if depth < 0 {
				term = r.Table("tasks")
			} else if depth == 0 {
				term = r.Table("tasks").GetAllByIndex("path", ".", "@pull.")
			} else {
				term = r.Table("tasks").Filter(r.Row.Field("path").Match(fmt.Sprintf("^(?:@pull[.])??(?:[^.]*[.]){0,%d}$", depth)))
			}
		} else {
			if depth != 0 {
				term = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"}).Union(r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"}))
			} else {
				term = r.Table("tasks").GetAllByIndex("path", prefix+".", "@pull."+prefix+".")
			}
			if depth > 0 {
				term = term.Filter(r.Row.Field("path").Match(fmt.Sprintf("^%s(?:[.][^.]*){0,%d}[.]$", prefix, depth)))
			}
		}
		if filter != "" {
			term = term.Filter(r.Row.Field("path").Match(filter))
		}
		if skip >= 0 {
			term = term.Skip(skip)
		}
		if limit > 0 {
			term = term.Limit(limit)
		}
		cur, err := term.Run(db)
		if err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		ret := make([]*Task, 0)
		if err := cur.All(&ret); err != nil {
			req.Error(ErrInternal, "", nil)
			return
		}
		for _, task := range ret {
			task.Path = strings.TrimPrefix(task.Path, "@pull.")
			task.Params = truncateJson(task.Params)
			task.ErrObj = truncateJson(task.ErrObj)
		}
		req.Result(ret)
	case "task.count":
		// Count tasks under a prefix. With subprefixes=true, returns one
		// entry per immediate sub-prefix with push/pull breakdown; otherwise
		// returns global count/pullCount/pushCount for the prefix.
		prefix := getPrefixParam(req.Params)
		filter := ei.N(req.Params).M("filter").StringZ()
		countSubprefixes := ei.N(req.Params).M("subprefixes").BoolZ()
		tags := nc.getTags(prefix)
		if !(ei.N(tags).M("@task.count").BoolZ() || ei.N(tags).M("@admin").BoolZ()) {
			req.Error(ErrPermissionDenied, "", nil)
			return
		}
		var pushTerm, pullTerm, term r.Term
		if countSubprefixes {
			if prefix == "" {
				// Group all pushes by their first path label and all pulls by
				// the label after "@pull.".
				pushTerm = r.Table("tasks")
				pullTerm = r.Table("tasks").Between("@pull.", "@pull.\uffff", r.BetweenOpts{Index: "path"})
				if filter != "" {
					pushTerm = pushTerm.Filter(r.Row.Field("path").Match(filter))
					pullTerm = pullTerm.Filter(r.Row.Field("path").Match(filter))
				}
				pushTerm = pushTerm.Group(r.Row.Field("path").Match("^([^@.][^.]*)[.](?:[^.]*[.])*$").Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
					return t.HasFields("group")
				}).Map(func(t r.Term) r.Term {
					return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
				})
				pullTerm = pullTerm.Group(r.Row.Field("path").Match("^@pull[.]([^.]*)[.](?:[^.]*[.])*$").Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
					return t.HasFields("group")
				}).Map(func(t r.Term) r.Term {
					return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
				})
			} else {
				// Same grouping, but restricted to the given prefix and keyed
				// by prefix plus one extra label.
				pushTerm = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"})
				pullTerm = r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"})
				if filter != "" {
					pushTerm = pushTerm.Filter(r.Row.Field("path").Match(filter))
					pullTerm = pullTerm.Filter(r.Row.Field("path").Match(filter))
				}
				pushTerm = pushTerm.Group(r.Row.Field("path").Match(fmt.Sprintf("^(%s(?:[.][^.]*)?)[.](?:[^.]*[.])*$", prefix)).Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
					return t.HasFields("group")
				}).Map(func(t r.Term) r.Term {
					return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
				})
				pullTerm = pullTerm.Group(r.Row.Field("path").Match(fmt.Sprintf("^@pull[.](%s(?:[.][^.]*)?)[.](?:[^.]*[.])*$", prefix)).Field("groups").Nth(0).Field("str")).Count().Ungroup().Filter(func(t r.Term) r.Term {
					return t.HasFields("group")
				}).Map(func(t r.Term) r.Term {
					return r.Object("prefix", t.Field("group"), "count", t.Field("reduction"))
				})
			}
			pushCur, err := pushTerm.Run(db)
			if err != nil {
				req.Error(ErrInternal, err.Error(), nil)
				return
			}
			var pushAll []interface{}
			if err := pushCur.All(&pushAll); err != nil {
				req.Error(ErrInternal, "", nil)
				return
			}
			pullCur, err := pullTerm.Run(db)
			if err != nil {
				req.Error(ErrInternal, err.Error(), nil)
				return
			}
			var pullAll []interface{}
			if err := pullCur.All(&pullAll); err != nil {
				req.Error(ErrInternal, "", nil)
				return
			}
			// Merge push and pull groups into one entry per prefix.
			res := []interface{}{}
			countPulls := map[string]int{}
			for _, v := range pullAll {
				countPulls[ei.N(v).M("prefix").StringZ()] = ei.N(v).M("count").IntZ()
			}
			for _, v := range pushAll {
				p := ei.N(v).M("prefix").StringZ()
				if !strings.HasPrefix(p, "@pull.") {
					pullCount := countPulls[p]
					delete(countPulls, p)
					pushCount := ei.N(v).M("count").IntZ()
					res = append(res, ei.M{"prefix": p, "count": pushCount + pullCount, "pullCount": pullCount, "pushCount": pushCount})
				}
			}
			// Prefixes with pulls but no pushes.
			for p, v := range countPulls {
				res = append(res, ei.M{"prefix": p, "count": v, "pullCount": v, "pushCount": 0})
			}
			req.Result(res)
		} else {
			// Total count (pushes + pulls) for the prefix...
			if prefix == "" {
				term = r.Table("tasks")
			} else {
				term = r.Table("tasks").Between(prefix+".", prefix+".\uffff", r.BetweenOpts{Index: "path"}).Union(r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"}))
			}
			if filter != "" {
				term = term.Filter(r.Row.Field("path").Match(filter))
			}
			term = term.Count()
			cur, err := term.Run(db)
			if err != nil {
				req.Error(ErrInternal, err.Error(), nil)
				return
			}
			var count int
			if err := cur.One(&count); err != nil {
				req.Error(ErrInternal, "", nil)
				return
			}
			// ...then the pull-only count; pushes are derived by subtraction.
			if prefix == "" {
				term = r.Table("tasks").Between("@pull.", "@pull.\uffff", r.BetweenOpts{Index: "path"})
			} else {
				term = r.Table("tasks").Between("@pull."+prefix+".", "@pull."+prefix+".\uffff", r.BetweenOpts{Index: "path"})
			}
			if filter != "" {
				term = term.Filter(r.Row.Field("path").Match(filter))
			}
			term = term.Count()
			cur, err = term.Run(db)
			if err != nil {
				req.Error(ErrInternal, err.Error(), nil)
				return
			}
			var countPulls int
			if err := cur.One(&countPulls); err != nil {
				req.Error(ErrInternal, "", nil)
				return
			}
			countPushes := count - countPulls
			if countPushes < 0 {
				countPushes = 0
			}
			req.Result(ei.M{"count": count, "pullCount": countPulls, "pushCount": countPushes})
		}
	default:
		req.Error(ErrMethodNotFound, "", nil)
	}
}
|
package node
import (
"GoOnchain/common"
. "GoOnchain/config"
"GoOnchain/core/ledger"
"GoOnchain/core/transaction"
. "GoOnchain/net/message"
. "GoOnchain/net/protocol"
//"bytes"
//"crypto/sha256"
//"encoding/binary"
"errors"
"fmt"
"math/rand"
"net"
"runtime"
"sync/atomic"
"time"
)
// The node capability flag
const (
	RELAY        = 0x01 // node relays inventory to its peers
	SERVER       = 0x02 // node accepts inbound connections
	NODESERVICES = 0x01 // default services bitmask advertised by this node
)
// node represents a peer (or the local node) in the P2P network, holding
// its protocol state, connection, capabilities and bookkeeping structures.
type node struct {
	state          uint      // node status
	id             string    // The node's id, MAC or IP?
	addr           string    // The address of the node
	conn           net.Conn  // Connect socket with the peer node
	nonce          uint32    // Random number to identify different entity from the same IP
	cap            uint32    // The node capability set
	version        uint32    // The network protocol the node used
	services       uint64    // The services the node supplied
	port           uint16    // The server port of the node
	relay          bool      // The relay capability of the node (merge into capability flag)
	handshakeRetry uint32    // Handshake retry times
	handshakeTime  time.Time // Last handshake trigger time
	height         uint64    // The node latest block height
	time           time.Time // The latest time of node activity
	// TODO does this channel should be a buffer channel
	chF   chan func() error // Channel used to operate the node without lock
	rxBuf struct {          // The RX buffer of this node to solve multiple packets problem
		p   []byte
		len int
	}
	link         // The link status and information
	local *node  // The pointer to local node
	neighb nodeMap // The neighbor nodes connected with the current node, except itself
	//neighborNodes *nodeMAP // The node connect with it except the local node
	eventQueue // The event queue to notify other modules
	TXNPool    // Unconfirmed transaction pool
	idCache    // The buffer to store the id of the items which already been processed
	ledger  *ledger.Ledger // The local ledger
	private *uint          // Reserved for future use
}
// DumpInfo prints the node's main fields to stdout (debug helper).
func (node node) DumpInfo() {
	fmt.Printf("Node info:\n")
	fmt.Printf("\t state = %d\n", node.state)
	fmt.Printf("\t id = %s\n", node.id)
	fmt.Printf("\t addr = %s\n", node.addr)
	fmt.Printf("\t conn = %v\n", node.conn)
	fmt.Printf("\t nonce = %d\n", node.nonce)
	fmt.Printf("\t cap = %d\n", node.cap)
	fmt.Printf("\t version = %d\n", node.version)
	fmt.Printf("\t services = %d\n", node.services)
	fmt.Printf("\t port = %d\n", node.port)
	fmt.Printf("\t relay = %v\n", node.relay)
	fmt.Printf("\t height = %v\n", node.height)
}
// UpdateInfo refreshes the peer-advertised fields after a version
// exchange: activity time, nonce, protocol version, services, port,
// relay capability and current block height.
// TODO need lock
func (node *node) UpdateInfo(t time.Time, version uint32, services uint64,
	port uint16, nonce uint32, relay uint8, height uint32) {
	node.UpdateTime(t)
	node.nonce = nonce
	node.version = version
	node.services = services
	node.port = port
	// The wire encodes relay as a byte flag: any non-zero value means true.
	node.relay = relay != 0
	node.height = uint64(height)
}
// NewNode allocates a node in INIT state, registers a finalizer for it,
// and starts its backend goroutine that serializes operations via chF.
func NewNode() *node {
	n := node{
		state: INIT,
		chF:   make(chan func() error),
	}
	// Update nonce
	runtime.SetFinalizer(&n, rmNode)
	go n.backend()
	return &n
}
// InitNode creates and initializes the local node singleton: protocol
// version, advertised services, listen port, nonce, neighbor map,
// transaction pool and event queue; then starts the connection and
// node-info-update goroutines.
// NOTE(review): on ledger-lookup failure the node is still returned with
// a nil ledger; the interface does not allow returning an error here.
func InitNode() Tmper {
	var err error
	n := NewNode()
	n.version = PROTOCOLVERSION
	n.services = NODESERVICES
	n.port = uint16(Parameters.NodePort)
	n.relay = true
	rand.Seed(time.Now().UTC().UnixNano())
	// Fixme replace with the real random number
	n.nonce = rand.Uint32()
	fmt.Printf("Init node ID to %d \n", n.nonce)
	n.neighb.init()
	n.local = n
	n.TXNPool.init()
	n.eventQueue.init()
	n.ledger, err = ledger.GetDefaultLedger()
	if err != nil {
		// Fixed: previous code built an errors.New value here and discarded
		// it (a no-op); keep only the diagnostic output.
		fmt.Printf("Get Default Ledger error\n")
	}
	go n.initConnection()
	go n.updateNodeInfo()
	return n
}
// rmNode is the finalizer installed by NewNode; it only logs the removal.
func rmNode(node *node) {
	fmt.Printf("Remove node %s\n", node.addr)
}
// TODO pass pointer to method only need modify it
// backend drains chF, executing queued node operations one at a time so
// callers can mutate the node without explicit locking.
func (node *node) backend() {
	common.Trace()
	for f := range node.chF {
		f()
	}
}
// GetID returns the node's identifier.
func (node node) GetID() string {
	return node.id
}

// GetState returns the node's protocol state (e.g. INIT, ESTABLISH).
func (node node) GetState() uint {
	return node.state
}

// getConn returns the underlying connection socket.
func (node node) getConn() net.Conn {
	return node.conn
}

// GetPort returns the node's advertised server port.
func (node node) GetPort() uint16 {
	return node.port
}

// GetNonce returns the random nonce identifying this node instance.
func (node node) GetNonce() uint32 {
	return node.nonce
}

// GetRelay reports whether the node relays inventory.
func (node node) GetRelay() bool {
	return node.relay
}

// Version returns the node's network protocol version.
func (node node) Version() uint32 {
	return node.version
}

// Services returns the node's advertised services bitmask.
func (node node) Services() uint64 {
	return node.services
}
// SetState sets the node's protocol state.
func (node *node) SetState(state uint) {
	node.state = state
}

// GetHandshakeTime returns when the last handshake was triggered.
func (node node) GetHandshakeTime() time.Time {
	return node.handshakeTime
}

// SetHandshakeTime records the last handshake trigger time.
func (node *node) SetHandshakeTime(t time.Time) {
	node.handshakeTime = t
}

// LocalNode returns the local node this peer entry belongs to.
func (node *node) LocalNode() Noder {
	return node.local
}

// GetHandshakeRetry atomically reads the handshake retry counter.
func (node node) GetHandshakeRetry() uint32 {
	return atomic.LoadUint32(&(node.handshakeRetry))
}
// SetHandshakeRetry atomically records the handshake retry count.
// Fixed: the previous implementation also performed a plain (non-atomic)
// write of the same field first, which raced with the atomic load in
// GetHandshakeRetry; the redundant write is removed.
func (node *node) SetHandshakeRetry(r uint32) {
	atomic.StoreUint32(&(node.handshakeRetry), r)
}
// GetHeight returns the node's latest known block height.
func (node node) GetHeight() uint64 {
	return node.height
}

// GetLedger returns the local ledger handle.
func (node node) GetLedger() *ledger.Ledger {
	return node.ledger
}

// UpdateTime records the latest activity time for this node.
func (node *node) UpdateTime(t time.Time) {
	node.time = t
}

// GetMemoryPool returns the unconfirmed transaction pool contents.
func (node node) GetMemoryPool() map[common.Uint256]*transaction.Transaction {
	return node.GetTxnPool()
	// TODO refresh the pending transaction pool
}
// SynchronizeMemoryPool requests the memory pool from established
// neighbors.
// NOTE(review): the value receiver means &node is the address of a copy,
// and ReqMemoryPool is invoked once per established neighbor with the
// same argument — confirm this is intended.
func (node node) SynchronizeMemoryPool() {
	// Fixme need lock
	for _, n := range node.neighb.List {
		if n.state == ESTABLISH {
			ReqMemoryPool(&node)
		}
	}
}
// Xmit serializes an inventory item (transaction, block or consensus
// payload) into its wire message and broadcasts it to all neighbors.
// Returns a message-construction error, or nil (including for unknown
// inventory types, in which case an empty buffer is broadcast).
func (node node) Xmit(inv common.Inventory) error {
	fmt.Println("****** node Xmit ********")
	var buffer []byte
	var err error
	if inv.Type() == common.TRANSACTION {
		fmt.Printf("****TX transaction message*****\n")
		transaction, isTransaction := inv.(*transaction.Transaction)
		if isTransaction {
			//transaction.Serialize(tmpBuffer)
			buffer, err = NewTx(transaction)
			if err != nil {
				fmt.Println("Error New Tx message ", err.Error())
				return err
			}
		}
	} else if inv.Type() == common.BLOCK {
		fmt.Printf("****TX block message****\n")
		block, isBlock := inv.(*ledger.Block)
		if isBlock {
			buffer, err = NewBlock(block)
			if err != nil {
				fmt.Println("Error New Block message ", err.Error())
				return err
			}
		}
	} else if inv.Type() == common.CONSENSUS {
		fmt.Printf("*****TX consensus message****\n")
		payload, isConsensusPayload := inv.(*ConsensusPayload)
		if isConsensusPayload {
			buffer, err = NewConsensus(payload)
			if err != nil {
				// Fixed: this branch previously printed the copy-pasted
				// "Error New Block message" text for a consensus failure.
				fmt.Println("Error New Consensus message ", err.Error())
				return err
			}
		}
	}
	node.neighb.Broadcast(buffer)
	return nil
}
// GetAddr returns the node's address string.
func (node node) GetAddr() string {
	return node.addr
}

// GetAddr16 returns the node's address as a 16-byte IPv6-mapped value,
// or an error if the stored address string cannot be parsed.
func (node node) GetAddr16() ([16]byte, error) {
	common.Trace()
	var result [16]byte
	ip := net.ParseIP(node.addr).To16()
	if ip == nil {
		fmt.Printf("Parse IP address error\n")
		return result, errors.New("Parse IP address error")
	}
	copy(result[:], ip[:16])
	return result, nil
}
// GetTime returns the current wall-clock time in nanoseconds since the
// Unix epoch, used to timestamp address entries sent to peers.
func (node node) GetTime() int64 {
	return time.Now().UnixNano()
}
// getNbrNum counts the local node's neighbors in ESTABLISH state,
// printing each established neighbor's address as a side effect.
func (node node) getNbrNum() uint {
	var i uint
	for _, n := range node.local.neighb.List {
		if n.GetState() == ESTABLISH {
			fmt.Printf("The establish node address is %s\n", n.GetAddr())
			i++
		}
	}
	return i
}
// GetNeighborAddrs returns the address records of all established
// neighbors plus the count of records returned.
func (node node) GetNeighborAddrs() ([]NodeAddr, uint64) {
	var i uint64
	var addrs []NodeAddr
	// TODO read lock
	for _, n := range node.local.neighb.List {
		if n.GetState() != ESTABLISH {
			continue
		}
		var addr NodeAddr
		addr.IpAddr, _ = n.GetAddr16() // parse failure leaves a zero address
		addr.Time = n.GetTime()
		addr.Services = n.Services()
		addr.Port = n.GetPort()
		addr.Uid = n.GetNonce()
		addrs = append(addrs, addr)
		i++
	}
	return addrs, i
}
Change node Xmit function
The node Xmit function can send three types of inventory: block,
tx and consensus. So the construction of each new inventory message
is moved into its own message file: block.go, transaction.go and
consensus.go.
Signed-off-by: Jin Qing <82160768a1b0174bd1d090c239be37508c32549f@qq.com>
package node
import (
"GoOnchain/common"
. "GoOnchain/config"
"GoOnchain/core/ledger"
"GoOnchain/core/transaction"
. "GoOnchain/net/message"
. "GoOnchain/net/protocol"
//"bytes"
//"crypto/sha256"
//"encoding/binary"
"errors"
"fmt"
"math/rand"
"net"
"runtime"
"sync/atomic"
"time"
)
// The node capability flag
const (
	RELAY        = 0x01 // node relays inventory to its peers
	SERVER       = 0x02 // node accepts inbound connections
	NODESERVICES = 0x01 // default services bitmask advertised by this node
)
// node represents a peer (or the local node) in the P2P network, holding
// its protocol state, connection, capabilities and bookkeeping structures.
type node struct {
	state          uint      // node status
	id             string    // The node's id, MAC or IP?
	addr           string    // The address of the node
	conn           net.Conn  // Connect socket with the peer node
	nonce          uint32    // Random number to identify different entity from the same IP
	cap            uint32    // The node capability set
	version        uint32    // The network protocol the node used
	services       uint64    // The services the node supplied
	port           uint16    // The server port of the node
	relay          bool      // The relay capability of the node (merge into capability flag)
	handshakeRetry uint32    // Handshake retry times
	handshakeTime  time.Time // Last handshake trigger time
	height         uint64    // The node latest block height
	time           time.Time // The latest time of node activity
	// TODO does this channel should be a buffer channel
	chF   chan func() error // Channel used to operate the node without lock
	rxBuf struct {          // The RX buffer of this node to solve multiple packets problem
		p   []byte
		len int
	}
	link         // The link status and information
	local *node  // The pointer to local node
	neighb nodeMap // The neighbor nodes connected with the current node, except itself
	//neighborNodes *nodeMAP // The node connect with it except the local node
	eventQueue // The event queue to notify other modules
	TXNPool    // Unconfirmed transaction pool
	idCache    // The buffer to store the id of the items which already been processed
	ledger  *ledger.Ledger // The local ledger
	private *uint          // Reserved for future use
}
// DumpInfo prints the node's main fields to stdout (debug helper).
func (node node) DumpInfo() {
	fmt.Printf("Node info:\n")
	fmt.Printf("\t state = %d\n", node.state)
	fmt.Printf("\t id = %s\n", node.id)
	fmt.Printf("\t addr = %s\n", node.addr)
	fmt.Printf("\t conn = %v\n", node.conn)
	fmt.Printf("\t nonce = %d\n", node.nonce)
	fmt.Printf("\t cap = %d\n", node.cap)
	fmt.Printf("\t version = %d\n", node.version)
	fmt.Printf("\t services = %d\n", node.services)
	fmt.Printf("\t port = %d\n", node.port)
	fmt.Printf("\t relay = %v\n", node.relay)
	fmt.Printf("\t height = %v\n", node.height)
}
// UpdateInfo refreshes the peer-advertised fields after a version
// exchange: activity time, nonce, protocol version, services, port,
// relay capability and current block height.
// TODO need lock
func (node *node) UpdateInfo(t time.Time, version uint32, services uint64,
	port uint16, nonce uint32, relay uint8, height uint32) {
	node.UpdateTime(t)
	node.nonce = nonce
	node.version = version
	node.services = services
	node.port = port
	// The wire encodes relay as a byte flag: any non-zero value means true.
	node.relay = relay != 0
	node.height = uint64(height)
}
// NewNode allocates a node in INIT state, registers a finalizer for it,
// and starts its backend goroutine that serializes operations via chF.
func NewNode() *node {
	n := node{
		state: INIT,
		chF:   make(chan func() error),
	}
	// Update nonce
	runtime.SetFinalizer(&n, rmNode)
	go n.backend()
	return &n
}
// InitNode creates and initializes the local node singleton: protocol
// version, advertised services, listen port, nonce, neighbor map,
// transaction pool and event queue; then starts the connection and
// node-info-update goroutines.
// NOTE(review): on ledger-lookup failure the node is still returned with
// a nil ledger; the interface does not allow returning an error here.
func InitNode() Tmper {
	var err error
	n := NewNode()
	n.version = PROTOCOLVERSION
	n.services = NODESERVICES
	n.port = uint16(Parameters.NodePort)
	n.relay = true
	rand.Seed(time.Now().UTC().UnixNano())
	// Fixme replace with the real random number
	n.nonce = rand.Uint32()
	fmt.Printf("Init node ID to %d \n", n.nonce)
	n.neighb.init()
	n.local = n
	n.TXNPool.init()
	n.eventQueue.init()
	n.ledger, err = ledger.GetDefaultLedger()
	if err != nil {
		// Fixed: previous code built an errors.New value here and discarded
		// it (a no-op); keep only the diagnostic output.
		fmt.Printf("Get Default Ledger error\n")
	}
	go n.initConnection()
	go n.updateNodeInfo()
	return n
}
// rmNode is the finalizer installed by NewNode; it logs when a node is
// garbage-collected.
func rmNode(node *node) {
	fmt.Printf("Remove node %s\n", node.addr)
}
// TODO pass pointer to method only need modify it
// backend serializes operations on the node: it runs every function
// submitted on chF, one at a time, until the channel is closed.
// NOTE(review): the error returned by f() is silently discarded —
// confirm whether callers rely on that.
func (node *node) backend() {
	common.Trace()
	for f := range node.chF {
		f()
	}
}
// GetID returns the node's identifier string.
func (node node) GetID() string {
	return node.id
}
// GetState returns the node's connection state (e.g. INIT, ESTABLISH).
func (node node) GetState() uint {
	return node.state
}
// getConn returns the underlying network connection of this node.
func (node node) getConn() net.Conn {
	return node.conn
}
// GetPort returns the node's advertised listening port.
func (node node) GetPort() uint16 {
	return node.port
}
// GetNonce returns the random nonce used as this node's wire identity.
func (node node) GetNonce() uint32 {
	return node.nonce
}
// GetRelay reports whether this node relays messages.
func (node node) GetRelay() bool {
	return node.relay
}
// Version returns the protocol version the node advertised.
func (node node) Version() uint32 {
	return node.version
}
// Services returns the node's advertised service flag bitmask.
func (node node) Services() uint64 {
	return node.services
}
// SetState sets the node's connection state.
// NOTE(review): no synchronization — confirm callers serialize this.
func (node *node) SetState(state uint) {
	node.state = state
}
// GetHandshakeTime returns when the protocol handshake took place.
func (node node) GetHandshakeTime() time.Time {
	return node.handshakeTime
}
// SetHandshakeTime records when the protocol handshake took place.
func (node *node) SetHandshakeTime(t time.Time) {
	node.handshakeTime = t
}
// LocalNode returns the local node this node is attached to.
func (node *node) LocalNode() Noder {
	return node.local
}
// GetHandshakeRetry returns the handshake retry counter.
// NOTE(review): the value receiver copies the whole node first, so the
// atomic load operates on the copy's field — the copy itself is not
// made atomically. Confirm whether a pointer receiver was intended.
func (node node) GetHandshakeRetry() uint32 {
	return atomic.LoadUint32(&(node.handshakeRetry))
}
// SetHandshakeRetry atomically records the handshake retry counter.
// BUG FIX: the original performed a plain (non-atomic) assignment to
// handshakeRetry immediately before the atomic store — a redundant
// write that races with atomic readers. Only the atomic store is kept.
func (node *node) SetHandshakeRetry(r uint32) {
	atomic.StoreUint32(&(node.handshakeRetry), r)
}
// GetHeight returns the node's last known block height.
func (node node) GetHeight() uint64 {
	return node.height
}
// GetLedger returns the local ledger attached to this node.
func (node node) GetLedger() *ledger.Ledger {
	return node.ledger
}
// UpdateTime records the node's latest activity time.
func (node *node) UpdateTime(t time.Time) {
	node.time = t
}
// GetMemoryPool returns the unconfirmed transactions currently held in
// the node's transaction pool, keyed by transaction hash.
func (node node) GetMemoryPool() map[common.Uint256]*transaction.Transaction {
	return node.GetTxnPool()
	// TODO refresh the pending transaction pool
}
// SynchronizeMemoryPool requests the memory pool from established
// neighbor nodes.
// NOTE(review): the loop variable n is only consulted for its state;
// ReqMemoryPool is called with &node (the receiver copy) on every
// iteration — it looks like ReqMemoryPool(n) was intended. Confirm
// against ReqMemoryPool's contract before changing.
func (node node) SynchronizeMemoryPool() {
	// Fixme need lock
	for _, n := range node.neighb.List {
		if n.state == ESTABLISH {
			ReqMemoryPool(&node)
		}
	}
}
// Xmit serializes an inventory item (transaction, block or consensus
// payload) into its wire message and broadcasts it to all neighbors.
// Unknown inventory types and failed type assertions are ignored.
func (node node) Xmit(inv common.Inventory) error {
	fmt.Println("****** node Xmit ********")
	var buffer []byte
	var err error
	switch inv.Type() {
	case common.TRANSACTION:
		fmt.Printf("****TX transaction message*****\n")
		txn, ok := inv.(*transaction.Transaction)
		if ok {
			buffer, err = NewTx(txn)
			if err != nil {
				fmt.Println("Error New Tx message ", err.Error())
				return err
			}
		}
	case common.BLOCK:
		fmt.Printf("****TX block message****\n")
		blk, ok := inv.(*ledger.Block)
		if ok {
			buffer, err = NewBlock(blk)
			if err != nil {
				fmt.Println("Error New Block message ", err.Error())
				return err
			}
		}
	case common.CONSENSUS:
		fmt.Printf("*****TX consensus message****\n")
		payload, ok := inv.(*ConsensusPayload)
		if ok {
			buffer, err = NewConsensus(payload)
			if err != nil {
				// BUG FIX: this branch previously logged "New Block".
				fmt.Println("Error New Consensus message ", err.Error())
				return err
			}
		}
	}
	// Guard: don't broadcast an empty message when the inventory type
	// was unknown or the type assertion failed.
	if buffer == nil {
		return nil
	}
	node.neighb.Broadcast(buffer)
	return nil
}
// GetAddr returns the node's address as a string.
func (node node) GetAddr() string {
	return node.addr
}
// GetAddr16 parses the node's address into its 16-byte (IPv6-mapped)
// form. On parse failure it returns a zeroed array and an error.
func (node node) GetAddr16() ([16]byte, error) {
	common.Trace()
	var result [16]byte
	if ip := net.ParseIP(node.addr).To16(); ip != nil {
		copy(result[:], ip)
		return result, nil
	}
	fmt.Printf("Parse IP address error\n")
	return result, errors.New("Parse IP address error")
}
// GetTime returns the current wall-clock time in nanoseconds since the
// Unix epoch (note: it does not return the node's stored time field).
func (node node) GetTime() int64 {
	t := time.Now()
	return t.UnixNano()
}
// getNbrNum counts the neighbors of the local node that are in the
// ESTABLISH state, logging each one's address.
func (node node) getNbrNum() uint {
	var count uint
	for _, nbr := range node.local.neighb.List {
		if nbr.GetState() != ESTABLISH {
			continue
		}
		fmt.Printf("The establish node address is %s\n", nbr.GetAddr())
		count++
	}
	return count
}
// GetNeighborAddrs collects the wire-format addresses of all
// established neighbors, returning the slice and its element count.
func (node node) GetNeighborAddrs() ([]NodeAddr, uint64) {
	var count uint64
	var addrs []NodeAddr
	// TODO read lock
	for _, nbr := range node.local.neighb.List {
		if nbr.GetState() != ESTABLISH {
			continue
		}
		entry := NodeAddr{
			Time:     nbr.GetTime(),
			Services: nbr.Services(),
			Port:     nbr.GetPort(),
			Uid:      nbr.GetNonce(),
		}
		entry.IpAddr, _ = nbr.GetAddr16()
		addrs = append(addrs, entry)
		count++
	}
	return addrs, count
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package network
import (
"encoding/json"
"errors"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/ernestio/ernestaws"
"github.com/ernestio/ernestaws/credentials"
)
var (
// ErrDatacenterIDInvalid ...
ErrDatacenterIDInvalid = errors.New("Datacenter VPC ID invalid")
// ErrDatacenterRegionInvalid ...
ErrDatacenterRegionInvalid = errors.New("Datacenter Region invalid")
// ErrDatacenterCredentialsInvalid ...
ErrDatacenterCredentialsInvalid = errors.New("Datacenter credentials invalid")
// ErrNetworkSubnetInvalid ...
ErrNetworkSubnetInvalid = errors.New("Network subnet invalid")
// ErrNetworkAWSIDInvalid ...
ErrNetworkAWSIDInvalid = errors.New("Network aws id invalid")
)
// Event stores the data for a single network (AWS subnet) message.
// Pointer-typed fields distinguish "absent from the JSON payload" from
// a zero value; Subject, Body and CryptoKey are transport metadata and
// are excluded from (de)serialization via the "-" tag.
type Event struct {
	ProviderType     string            `json:"_provider"`
	ComponentType    string            `json:"_component"`
	ComponentID      string            `json:"_component_id"`
	State            string            `json:"_state"`
	Action           string            `json:"_action"`
	NetworkAWSID     *string           `json:"network_aws_id"`
	Name             *string           `json:"name"`
	Subnet           *string           `json:"range"`
	IsPublic         *bool             `json:"is_public"`
	AvailabilityZone *string           `json:"availability_zone"`
	Tags             map[string]string `json:"tags"`
	DatacenterType   string            `json:"datacenter_type"`
	DatacenterName   string            `json:"datacenter_name"`
	DatacenterRegion string            `json:"datacenter_region"`
	AccessKeyID      string            `json:"aws_access_key_id"`
	SecretAccessKey  string            `json:"aws_secret_access_key"`
	Vpc              string            `json:"vpc"`
	VpcID            string            `json:"vpc_id"`
	Service          string            `json:"service"`
	ErrorMessage     string            `json:"error,omitempty"`
	Subject          string            `json:"-"`
	Body             []byte            `json:"-"`
	CryptoKey        string            `json:"-"`
}
// New constructs the event handler for a message: "find" subjects get a
// Collection, everything else a single Event.
// BUG FIX: the original indexed strings.Split(subject, ".")[1]
// unconditionally, which panics on a subject without a dot; the length
// is now checked first.
func New(subject string, body []byte, cryptoKey string) ernestaws.Event {
	parts := strings.Split(subject, ".")
	if len(parts) > 1 && parts[1] == "find" {
		return &Collection{Subject: subject, Body: body, CryptoKey: cryptoKey}
	}
	return &Event{Subject: subject, Body: body, CryptoKey: cryptoKey}
}
// Validate checks that the event carries everything needed to act on a
// network: a VPC ID, a region and credentials; deletes additionally
// require the AWS subnet ID, all other actions require a subnet range.
func (ev *Event) Validate() error {
	switch {
	case ev.VpcID == "":
		return ErrDatacenterIDInvalid
	case ev.DatacenterRegion == "":
		return ErrDatacenterRegionInvalid
	case ev.AccessKeyID == "" || ev.SecretAccessKey == "":
		return ErrDatacenterCredentialsInvalid
	}
	if ev.Subject == "network.delete.aws" {
		if ev.NetworkAWSID == nil {
			return ErrNetworkAWSIDInvalid
		}
		return nil
	}
	if ev.Subnet == nil {
		return ErrNetworkSubnetInvalid
	}
	return nil
}
// Process decodes the raw message body into the event and validates it,
// reporting any failure back to the caller via Error.
func (ev *Event) Process() (err error) {
	if err := json.Unmarshal(ev.Body, &ev); err != nil {
		ev.Error(err)
		return err
	}
	if err := ev.Validate(); err != nil {
		ev.Error(err)
		return err
	}
	return nil
}
// Error marks the event as errored, records the error message and
// re-serializes the event into Body for the error response.
// BUG FIX: the original assigned the json.Marshal error into the err
// parameter, silently discarding marshalling failures; they are now
// logged and the previous Body is kept.
func (ev *Event) Error(err error) {
	log.Printf("Error: %s", err.Error())
	ev.ErrorMessage = err.Error()
	ev.State = "errored"
	body, merr := json.Marshal(ev)
	if merr != nil {
		log.Printf("Error: %s", merr.Error())
		return
	}
	ev.Body = body
}
// Complete marks the event as successfully processed.
func (ev *Event) Complete() {
	ev.State = "completed"
}
// Find is not supported for single network events (finds are handled by
// Collection); it always returns an error.
func (ev *Event) Find() error {
	return errors.New(ev.Subject + " not supported")
}
// Create creates the subnet (network) on AWS. For public networks it
// additionally ensures the VPC has an internet gateway, wires a route
// table with a default route through it, and enables public IP
// assignment on launch. On success the new subnet ID and availability
// zone are stored on the event and the configured tags are applied.
// NOTE(review): *ev.IsPublic is dereferenced without a nil check — a
// payload lacking "is_public" would panic here; confirm upstream always
// sets it.
func (ev *Event) Create() error {
	svc := ev.getEC2Client()
	req := ec2.CreateSubnetInput{
		VpcId:            aws.String(ev.VpcID),
		CidrBlock:        ev.Subnet,
		AvailabilityZone: ev.AvailabilityZone,
	}
	resp, err := svc.CreateSubnet(&req)
	if err != nil {
		return err
	}
	if *ev.IsPublic {
		// Create Internet Gateway
		gateway, err := ev.createInternetGateway(svc, ev.VpcID)
		if err != nil {
			return err
		}
		// Create Route Table and direct traffic to Internet Gateway
		rt, err := ev.createRouteTable(svc, ev.VpcID, *resp.Subnet.SubnetId)
		if err != nil {
			return err
		}
		err = ev.createGatewayRoutes(svc, rt, gateway)
		if err != nil {
			return err
		}
		// Modify subnet to assign public IP's on launch
		mod := ec2.ModifySubnetAttributeInput{
			SubnetId:            resp.Subnet.SubnetId,
			MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
		}
		_, err = svc.ModifySubnetAttribute(&mod)
		if err != nil {
			return err
		}
	}
	ev.NetworkAWSID = resp.Subnet.SubnetId
	ev.AvailabilityZone = resp.Subnet.AvailabilityZone
	return ev.setTags()
}
// Update is not supported for networks; it always returns an error.
func (ev *Event) Update() error {
	return errors.New(ev.Subject + " not supported")
}
// Delete removes the subnet from AWS, first waiting until no network
// interfaces remain attached (otherwise DeleteSubnet would fail with a
// dependency violation).
func (ev *Event) Delete() error {
	svc := ev.getEC2Client()
	err := ev.waitForInterfaceRemoval(svc, ev.NetworkAWSID)
	if err != nil {
		return err
	}
	req := ec2.DeleteSubnetInput{
		SubnetId: ev.NetworkAWSID,
	}
	_, err = svc.DeleteSubnet(&req)
	return err
}
// Get is not supported for networks; it always returns an error.
func (ev *Event) Get() error {
	return errors.New(ev.Subject + " not supported")
}
// GetBody re-serializes the event to JSON and returns it; a marshal
// failure is logged and the previous Body (possibly nil) is returned.
func (ev *Event) GetBody() []byte {
	var err error
	if ev.Body, err = json.Marshal(ev); err != nil {
		log.Println(err.Error())
	}
	return ev.Body
}
// GetSubject returns the message subject this event was created for.
func (ev *Event) GetSubject() string {
	return ev.Subject
}
// getEC2Client builds an EC2 client for the event's region using the
// (encrypted) static credentials carried on the event.
// NOTE(review): the error from credentials.NewStaticCredentials is
// discarded — a decryption failure would only surface later as an AWS
// authentication error.
func (ev *Event) getEC2Client() *ec2.EC2 {
	creds, _ := credentials.NewStaticCredentials(ev.AccessKeyID, ev.SecretAccessKey, ev.CryptoKey)
	return ec2.New(session.New(), &aws.Config{
		Region:      aws.String(ev.DatacenterRegion),
		Credentials: creds,
	})
}
// internetGatewayByVPCID looks up the internet gateway attached to the
// given VPC. It returns (nil, nil) when the VPC has no gateway.
func (ev *Event) internetGatewayByVPCID(svc *ec2.EC2, vpc string) (*ec2.InternetGateway, error) {
	req := ec2.DescribeInternetGatewaysInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("attachment.vpc-id"),
				Values: []*string{aws.String(vpc)},
			},
		},
	}
	resp, err := svc.DescribeInternetGateways(&req)
	if err != nil {
		return nil, err
	}
	if len(resp.InternetGateways) > 0 {
		return resp.InternetGateways[0], nil
	}
	return nil, nil
}
// routingTableBySubnetID looks up the route table associated with the
// given subnet. It returns (nil, nil) when none is associated.
func (ev *Event) routingTableBySubnetID(svc *ec2.EC2, subnet string) (*ec2.RouteTable, error) {
	req := ec2.DescribeRouteTablesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("association.subnet-id"),
				Values: []*string{aws.String(subnet)},
			},
		},
	}
	resp, err := svc.DescribeRouteTables(&req)
	if err != nil {
		return nil, err
	}
	if len(resp.RouteTables) > 0 {
		return resp.RouteTables[0], nil
	}
	return nil, nil
}
// createInternetGateway returns the VPC's existing internet gateway if
// one is already attached; otherwise it creates a new gateway and
// attaches it to the VPC.
func (ev *Event) createInternetGateway(svc *ec2.EC2, vpc string) (*ec2.InternetGateway, error) {
	ig, err := ev.internetGatewayByVPCID(svc, vpc)
	if err != nil {
		return nil, err
	}
	if ig != nil {
		return ig, nil
	}
	resp, err := svc.CreateInternetGateway(nil)
	if err != nil {
		return nil, err
	}
	req := ec2.AttachInternetGatewayInput{
		InternetGatewayId: resp.InternetGateway.InternetGatewayId,
		VpcId:             aws.String(vpc),
	}
	_, err = svc.AttachInternetGateway(&req)
	if err != nil {
		return nil, err
	}
	return resp.InternetGateway, nil
}
// createRouteTable returns the route table already associated with the
// subnet if present; otherwise it creates a new route table on the VPC
// and associates it with the subnet.
func (ev *Event) createRouteTable(svc *ec2.EC2, vpc, subnet string) (*ec2.RouteTable, error) {
	rt, err := ev.routingTableBySubnetID(svc, subnet)
	if err != nil {
		return nil, err
	}
	if rt != nil {
		return rt, nil
	}
	req := ec2.CreateRouteTableInput{
		VpcId: aws.String(vpc),
	}
	resp, err := svc.CreateRouteTable(&req)
	if err != nil {
		return nil, err
	}
	acreq := ec2.AssociateRouteTableInput{
		RouteTableId: resp.RouteTable.RouteTableId,
		SubnetId:     aws.String(subnet),
	}
	_, err = svc.AssociateRouteTable(&acreq)
	if err != nil {
		return nil, err
	}
	return resp.RouteTable, nil
}
// createGatewayRoutes installs a default route (0.0.0.0/0) on rt that
// sends all traffic through the given internet gateway.
func (ev *Event) createGatewayRoutes(svc *ec2.EC2, rt *ec2.RouteTable, gw *ec2.InternetGateway) error {
	route := ec2.CreateRouteInput{
		RouteTableId:         rt.RouteTableId,
		DestinationCidrBlock: aws.String("0.0.0.0/0"),
		GatewayId:            gw.InternetGatewayId,
	}
	_, err := svc.CreateRoute(&route)
	return err
}
// waitForInterfaceRemoval polls once per second until no network
// interfaces remain attached to the subnet, so that a subsequent
// DeleteSubnet does not fail with a dependency violation.
// NOTE(review): this loops forever if an interface is never detached —
// consider adding a timeout/retry budget.
func (ev *Event) waitForInterfaceRemoval(svc *ec2.EC2, networkID *string) error {
	for {
		resp, err := ev.getNetworkInterfaces(svc, networkID)
		if err != nil {
			return err
		}
		if len(resp.NetworkInterfaces) == 0 {
			return nil
		}
		time.Sleep(time.Second)
	}
}
// getNetworkInterfaces lists all network interfaces that live in the
// given subnet.
func (ev *Event) getNetworkInterfaces(svc *ec2.EC2, networkID *string) (*ec2.DescribeNetworkInterfacesOutput, error) {
	req := ec2.DescribeNetworkInterfacesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("subnet-id"),
				Values: []*string{networkID},
			},
		},
	}
	return svc.DescribeNetworkInterfaces(&req)
}
// setTags applies ev.Tags to the created subnet.
// BUG FIX: the original took the address of the range loop variables
// (&key, &val) — fragile with the loop-variable reuse semantics — and
// issued one CreateTags API call per tag. Copies are now taken and all
// tags are sent in a single request.
func (ev *Event) setTags() error {
	if len(ev.Tags) == 0 {
		return nil
	}
	svc := ev.getEC2Client()
	req := &ec2.CreateTagsInput{
		Resources: []*string{ev.NetworkAWSID},
	}
	for key, val := range ev.Tags {
		k, v := key, val // copy: never take the address of loop variables
		req.Tags = append(req.Tags, &ec2.Tag{
			Key:   &k,
			Value: &v,
		})
	}
	_, err := svc.CreateTags(req)
	return err
}
// adding missing internet gateway fields on network
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package network
import (
"encoding/json"
"errors"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/ernestio/ernestaws"
"github.com/ernestio/ernestaws/credentials"
)
var (
// ErrDatacenterIDInvalid ...
ErrDatacenterIDInvalid = errors.New("Datacenter VPC ID invalid")
// ErrDatacenterRegionInvalid ...
ErrDatacenterRegionInvalid = errors.New("Datacenter Region invalid")
// ErrDatacenterCredentialsInvalid ...
ErrDatacenterCredentialsInvalid = errors.New("Datacenter credentials invalid")
// ErrNetworkSubnetInvalid ...
ErrNetworkSubnetInvalid = errors.New("Network subnet invalid")
// ErrNetworkAWSIDInvalid ...
ErrNetworkAWSIDInvalid = errors.New("Network aws id invalid")
)
// Event stores the data for a single network (AWS subnet) message.
// Pointer-typed fields distinguish "absent from the JSON payload" from
// a zero value; Subject, Body and CryptoKey are transport metadata and
// are excluded from (de)serialization via the "-" tag.
type Event struct {
	ProviderType         string            `json:"_provider"`
	ComponentType        string            `json:"_component"`
	ComponentID          string            `json:"_component_id"`
	State                string            `json:"_state"`
	Action               string            `json:"_action"`
	NetworkAWSID         *string           `json:"network_aws_id"`
	Name                 *string           `json:"name"`
	Subnet               *string           `json:"range"`
	IsPublic             *bool             `json:"is_public"`
	InternetGateway      string            `json:"internet_gateway"`
	InternetGatewayAWSID string            `json:"internet_gateway_aws_id"`
	AvailabilityZone     *string           `json:"availability_zone"`
	Tags                 map[string]string `json:"tags"`
	DatacenterType       string            `json:"datacenter_type"`
	DatacenterName       string            `json:"datacenter_name"`
	DatacenterRegion     string            `json:"datacenter_region"`
	AccessKeyID          string            `json:"aws_access_key_id"`
	SecretAccessKey      string            `json:"aws_secret_access_key"`
	Vpc                  string            `json:"vpc"`
	VpcID                string            `json:"vpc_id"`
	Service              string            `json:"service"`
	ErrorMessage         string            `json:"error,omitempty"`
	Subject              string            `json:"-"`
	Body                 []byte            `json:"-"`
	CryptoKey            string            `json:"-"`
}
// New constructs the event handler for a message: "find" subjects get a
// Collection, everything else a single Event.
// BUG FIX: the original indexed strings.Split(subject, ".")[1]
// unconditionally, which panics on a subject without a dot; the length
// is now checked first.
func New(subject string, body []byte, cryptoKey string) ernestaws.Event {
	parts := strings.Split(subject, ".")
	if len(parts) > 1 && parts[1] == "find" {
		return &Collection{Subject: subject, Body: body, CryptoKey: cryptoKey}
	}
	return &Event{Subject: subject, Body: body, CryptoKey: cryptoKey}
}
// Validate checks that the event carries everything needed to act on a
// network: a VPC ID, a region and credentials; deletes additionally
// require the AWS subnet ID, all other actions require a subnet range.
func (ev *Event) Validate() error {
	switch {
	case ev.VpcID == "":
		return ErrDatacenterIDInvalid
	case ev.DatacenterRegion == "":
		return ErrDatacenterRegionInvalid
	case ev.AccessKeyID == "" || ev.SecretAccessKey == "":
		return ErrDatacenterCredentialsInvalid
	}
	if ev.Subject == "network.delete.aws" {
		if ev.NetworkAWSID == nil {
			return ErrNetworkAWSIDInvalid
		}
		return nil
	}
	if ev.Subnet == nil {
		return ErrNetworkSubnetInvalid
	}
	return nil
}
// Process decodes the raw message body into the event and validates it,
// reporting any failure back to the caller via Error.
func (ev *Event) Process() (err error) {
	if err := json.Unmarshal(ev.Body, &ev); err != nil {
		ev.Error(err)
		return err
	}
	if err := ev.Validate(); err != nil {
		ev.Error(err)
		return err
	}
	return nil
}
// Error marks the event as errored, records the error message and
// re-serializes the event into Body for the error response.
// BUG FIX: the original assigned the json.Marshal error into the err
// parameter, silently discarding marshalling failures; they are now
// logged and the previous Body is kept.
func (ev *Event) Error(err error) {
	log.Printf("Error: %s", err.Error())
	ev.ErrorMessage = err.Error()
	ev.State = "errored"
	body, merr := json.Marshal(ev)
	if merr != nil {
		log.Printf("Error: %s", merr.Error())
		return
	}
	ev.Body = body
}
// Complete marks the event as successfully processed.
func (ev *Event) Complete() {
	ev.State = "completed"
}
// Find is not supported for single network events (finds are handled by
// Collection); it always returns an error.
func (ev *Event) Find() error {
	return errors.New(ev.Subject + " not supported")
}
// Create creates the subnet (network) on AWS. For public networks it
// additionally ensures the VPC has an internet gateway, wires a route
// table with a default route through it, and enables public IP
// assignment on launch. On success the new subnet ID and availability
// zone are stored on the event and the configured tags are applied.
// NOTE(review): *ev.IsPublic is dereferenced without a nil check — a
// payload lacking "is_public" would panic here; confirm upstream always
// sets it. The InternetGateway/InternetGatewayAWSID fields are not yet
// populated by this flow.
func (ev *Event) Create() error {
	svc := ev.getEC2Client()
	req := ec2.CreateSubnetInput{
		VpcId:            aws.String(ev.VpcID),
		CidrBlock:        ev.Subnet,
		AvailabilityZone: ev.AvailabilityZone,
	}
	resp, err := svc.CreateSubnet(&req)
	if err != nil {
		return err
	}
	if *ev.IsPublic {
		// Create Internet Gateway
		gateway, err := ev.createInternetGateway(svc, ev.VpcID)
		if err != nil {
			return err
		}
		// Create Route Table and direct traffic to Internet Gateway
		rt, err := ev.createRouteTable(svc, ev.VpcID, *resp.Subnet.SubnetId)
		if err != nil {
			return err
		}
		err = ev.createGatewayRoutes(svc, rt, gateway)
		if err != nil {
			return err
		}
		// Modify subnet to assign public IP's on launch
		mod := ec2.ModifySubnetAttributeInput{
			SubnetId:            resp.Subnet.SubnetId,
			MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
		}
		_, err = svc.ModifySubnetAttribute(&mod)
		if err != nil {
			return err
		}
	}
	ev.NetworkAWSID = resp.Subnet.SubnetId
	ev.AvailabilityZone = resp.Subnet.AvailabilityZone
	return ev.setTags()
}
// Update is not supported for networks; it always returns an error.
func (ev *Event) Update() error {
	return errors.New(ev.Subject + " not supported")
}
// Delete removes the subnet from AWS, first waiting until no network
// interfaces remain attached (otherwise DeleteSubnet would fail with a
// dependency violation).
func (ev *Event) Delete() error {
	svc := ev.getEC2Client()
	err := ev.waitForInterfaceRemoval(svc, ev.NetworkAWSID)
	if err != nil {
		return err
	}
	req := ec2.DeleteSubnetInput{
		SubnetId: ev.NetworkAWSID,
	}
	_, err = svc.DeleteSubnet(&req)
	return err
}
// Get is not supported for networks; it always returns an error.
func (ev *Event) Get() error {
	return errors.New(ev.Subject + " not supported")
}
// GetBody re-serializes the event to JSON and returns it; a marshal
// failure is logged and the previous Body (possibly nil) is returned.
func (ev *Event) GetBody() []byte {
	var err error
	if ev.Body, err = json.Marshal(ev); err != nil {
		log.Println(err.Error())
	}
	return ev.Body
}
// GetSubject returns the message subject this event was created for.
func (ev *Event) GetSubject() string {
	return ev.Subject
}
// getEC2Client builds an EC2 client for the event's region using the
// (encrypted) static credentials carried on the event.
// NOTE(review): the error from credentials.NewStaticCredentials is
// discarded — a decryption failure would only surface later as an AWS
// authentication error.
func (ev *Event) getEC2Client() *ec2.EC2 {
	creds, _ := credentials.NewStaticCredentials(ev.AccessKeyID, ev.SecretAccessKey, ev.CryptoKey)
	return ec2.New(session.New(), &aws.Config{
		Region:      aws.String(ev.DatacenterRegion),
		Credentials: creds,
	})
}
// internetGatewayByVPCID looks up the internet gateway attached to the
// given VPC. It returns (nil, nil) when the VPC has no gateway.
func (ev *Event) internetGatewayByVPCID(svc *ec2.EC2, vpc string) (*ec2.InternetGateway, error) {
	req := ec2.DescribeInternetGatewaysInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("attachment.vpc-id"),
				Values: []*string{aws.String(vpc)},
			},
		},
	}
	resp, err := svc.DescribeInternetGateways(&req)
	if err != nil {
		return nil, err
	}
	if len(resp.InternetGateways) > 0 {
		return resp.InternetGateways[0], nil
	}
	return nil, nil
}
// routingTableBySubnetID looks up the route table associated with the
// given subnet. It returns (nil, nil) when none is associated.
func (ev *Event) routingTableBySubnetID(svc *ec2.EC2, subnet string) (*ec2.RouteTable, error) {
	req := ec2.DescribeRouteTablesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("association.subnet-id"),
				Values: []*string{aws.String(subnet)},
			},
		},
	}
	resp, err := svc.DescribeRouteTables(&req)
	if err != nil {
		return nil, err
	}
	if len(resp.RouteTables) > 0 {
		return resp.RouteTables[0], nil
	}
	return nil, nil
}
// createInternetGateway returns the VPC's existing internet gateway if
// one is already attached; otherwise it creates a new gateway and
// attaches it to the VPC.
func (ev *Event) createInternetGateway(svc *ec2.EC2, vpc string) (*ec2.InternetGateway, error) {
	ig, err := ev.internetGatewayByVPCID(svc, vpc)
	if err != nil {
		return nil, err
	}
	if ig != nil {
		return ig, nil
	}
	resp, err := svc.CreateInternetGateway(nil)
	if err != nil {
		return nil, err
	}
	req := ec2.AttachInternetGatewayInput{
		InternetGatewayId: resp.InternetGateway.InternetGatewayId,
		VpcId:             aws.String(vpc),
	}
	_, err = svc.AttachInternetGateway(&req)
	if err != nil {
		return nil, err
	}
	return resp.InternetGateway, nil
}
// createRouteTable returns the route table already associated with the
// subnet if present; otherwise it creates a new route table on the VPC
// and associates it with the subnet.
func (ev *Event) createRouteTable(svc *ec2.EC2, vpc, subnet string) (*ec2.RouteTable, error) {
	rt, err := ev.routingTableBySubnetID(svc, subnet)
	if err != nil {
		return nil, err
	}
	if rt != nil {
		return rt, nil
	}
	req := ec2.CreateRouteTableInput{
		VpcId: aws.String(vpc),
	}
	resp, err := svc.CreateRouteTable(&req)
	if err != nil {
		return nil, err
	}
	acreq := ec2.AssociateRouteTableInput{
		RouteTableId: resp.RouteTable.RouteTableId,
		SubnetId:     aws.String(subnet),
	}
	_, err = svc.AssociateRouteTable(&acreq)
	if err != nil {
		return nil, err
	}
	return resp.RouteTable, nil
}
// createGatewayRoutes installs a default route (0.0.0.0/0) on rt that
// sends all traffic through the given internet gateway.
func (ev *Event) createGatewayRoutes(svc *ec2.EC2, rt *ec2.RouteTable, gw *ec2.InternetGateway) error {
	route := ec2.CreateRouteInput{
		RouteTableId:         rt.RouteTableId,
		DestinationCidrBlock: aws.String("0.0.0.0/0"),
		GatewayId:            gw.InternetGatewayId,
	}
	_, err := svc.CreateRoute(&route)
	return err
}
// waitForInterfaceRemoval polls once per second until no network
// interfaces remain attached to the subnet, so that a subsequent
// DeleteSubnet does not fail with a dependency violation.
// NOTE(review): this loops forever if an interface is never detached —
// consider adding a timeout/retry budget.
func (ev *Event) waitForInterfaceRemoval(svc *ec2.EC2, networkID *string) error {
	for {
		resp, err := ev.getNetworkInterfaces(svc, networkID)
		if err != nil {
			return err
		}
		if len(resp.NetworkInterfaces) == 0 {
			return nil
		}
		time.Sleep(time.Second)
	}
}
// getNetworkInterfaces lists all network interfaces that live in the
// given subnet.
func (ev *Event) getNetworkInterfaces(svc *ec2.EC2, networkID *string) (*ec2.DescribeNetworkInterfacesOutput, error) {
	req := ec2.DescribeNetworkInterfacesInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("subnet-id"),
				Values: []*string{networkID},
			},
		},
	}
	return svc.DescribeNetworkInterfaces(&req)
}
// setTags applies ev.Tags to the created subnet.
// BUG FIX: the original took the address of the range loop variables
// (&key, &val) — fragile with the loop-variable reuse semantics — and
// issued one CreateTags API call per tag. Copies are now taken and all
// tags are sent in a single request.
func (ev *Event) setTags() error {
	if len(ev.Tags) == 0 {
		return nil
	}
	svc := ev.getEC2Client()
	req := &ec2.CreateTagsInput{
		Resources: []*string{ev.NetworkAWSID},
	}
	for key, val := range ev.Tags {
		k, v := key, val // copy: never take the address of loop variables
		req.Tags = append(req.Tags, &ec2.Tag{
			Key:   &k,
			Value: &v,
		})
	}
	_, err := svc.CreateTags(req)
	return err
}
|
//
// telgo
//
//
// Copyright (C) 2015 Christian Pointner <equinox@helsinki.at>
//
// This file is part of telgo.
//
// telgo is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// telgo is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with telgo. If not, see <http://www.gnu.org/licenses/>.
//
// Package telgo contains a simple telnet server which can be used as a
// control/debug interface for applications.
// The telgo telnet server does all the client handling and runs configurable
// commands as go routines. It also supports handling of basic inline telnet
// commands used by variaus telnet clients to configure the connection.
// For now every negotiable telnet option will be discarded but the telnet
// command IP (interrupt process) is understood and can be used to terminate
// long running user commands.
package telgo
import (
"bufio"
"bytes"
"fmt"
"log"
"net"
"os"
"regexp"
"strings"
"unicode"
)
var (
tl = log.New(os.Stderr, "[telnet]\t", log.LstdFlags)
)
// Telnet protocol command codes (RFC 854), plus EOT (Ctrl-D).
const (
	EOT  = byte(4)   // end of transmission (Ctrl-D)
	IAC  = byte(255) // interpret as command
	DONT = byte(254) // don't use option
	DO   = byte(253) // do use option
	WONT = byte(252) // won't use option
	WILL = byte(251) // will use option
	SB   = byte(250) // begin subnegotiation
	GA   = byte(249) // go ahead
	EL   = byte(248) // erase line
	EC   = byte(247) // erase character
	AYT  = byte(246) // are you there
	AO   = byte(245) // abort output
	IP   = byte(244) // interrupt process
	BREA = byte(243) // break
	DM   = byte(242) // data mark
	NOP  = byte(241) // no operation
	SE   = byte(240) // end of subnegotiation
)
// telnet_command describes a telnet command byte: its total wire length
// (including the leading IAC), its mnemonic and a human description.
type telnet_command struct {
	length      int    // total bytes on the wire, including IAC
	name        string // RFC 854 mnemonic
	description string // human-readable explanation, used for logging
}
var (
telnet_commands = map[byte]telnet_command{
DONT: telnet_command{3, "DONT", "don't use option"},
DO: telnet_command{3, "DO", "do use option"},
WONT: telnet_command{3, "WONT", "won't use option"},
WILL: telnet_command{3, "WILL", "will use option"},
SB: telnet_command{2, "SB", "Begin of subnegotiation parameters"},
GA: telnet_command{2, "GA", "go ahead signal"},
EL: telnet_command{2, "EL", "erase line"},
EC: telnet_command{2, "EC", "erase character"},
AYT: telnet_command{2, "AYT", "are you there"},
AO: telnet_command{2, "AO", "abort output"},
IP: telnet_command{2, "IP", "interrupt process"},
BREA: telnet_command{2, "BREA", "break"},
DM: telnet_command{2, "DM", "data mark"},
NOP: telnet_command{2, "NOP", "no operation"},
SE: telnet_command{2, "SE", "End of subnegotiation parameters"},
}
)
// This is the signature of telgo command functions. It receives a pointer to
// the telgo client struct and a slice of strings containing the arguments the
// user has supplied. The first argument is always the command name itself.
// If this function returns true the client connection will be terminated.
type TelgoCmd func(c *TelnetClient, args []string) bool
type TelgoCmdList map[string]TelgoCmd
// This struct is used to export the raw tcp connection to the client as well as
// the UserData which got supplied to NewTelnetServer.
// The Cancel channel will get ready for reading when the user hits Ctrl-C or
// the connection got terminated. This can be used for long running telgo commands
// to be aborted.
type TelnetClient struct {
	Conn     net.Conn      // raw TCP connection, exported for command handlers
	UserData interface{}   // opaque data supplied to NewTelnetServer
	Cancel   chan bool     // becomes readable on Ctrl-C / connection close
	scanner  *bufio.Scanner // line scanner with telnet-aware split function
	writer   *bufio.Writer  // buffered writer for responses
	prompt   string         // prompt string sent before each command
	commands *TelgoCmdList  // command table shared with the server
	iacout   chan []byte    // out-of-band telnet command responses
	stdout   chan []byte    // ordinary output bytes destined for the client
}
// newTelnetClient wires up a TelnetClient around an accepted connection:
// buffered scanner/writer, prompt, command table, user data, and the
// channels used for output and out-of-band IAC responses. The custom
// split function remembers the last scanned IAC position through the
// lastiiac closure variable so partial telnet commands are re-examined.
func newTelnetClient(conn net.Conn, prompt string, commands *TelgoCmdList, userdata interface{}) (c *TelnetClient) {
	tl.Println("telgo: new client from:", conn.RemoteAddr())
	c = &TelnetClient{}
	c.Conn = conn
	c.scanner = bufio.NewScanner(conn)
	c.writer = bufio.NewWriter(conn)
	c.prompt = prompt
	c.commands = commands
	c.UserData = userdata
	c.stdout = make(chan []byte)
	c.Cancel = make(chan bool, 1)
	// the telnet split function needs some closures to handle inline telnet commands
	c.iacout = make(chan []byte)
	lastiiac := 0
	c.scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		return scanLines(data, atEOF, c.iacout, &lastiiac)
	})
	return c
}
// WriteString writes a 'raw' string to the client. For the most part the
// usage of Say and Sayln is recommended. WriteString takes care of
// escaping IAC bytes inside the string (doubling them, per RFC 854).
func (c *TelnetClient) WriteString(text string) {
	c.stdout <- bytes.Replace([]byte(text), []byte{IAC}, []byte{IAC, IAC}, -1)
}
// Say is a simple Printf-like interface which sends responses to the client.
func (c *TelnetClient) Say(format string, a ...interface{}) {
	c.WriteString(fmt.Sprintf(format, a...))
}
// Sayln is the same as Say but appends a telnet line ending (CRLF).
func (c *TelnetClient) Sayln(format string, a ...interface{}) {
	c.WriteString(fmt.Sprintf(format, a...) + "\r\n")
}
var (
escapeRe = regexp.MustCompile("\\\\.")
)
// replEscapeChars maps a two-character backslash escape (as matched by
// escapeRe) to its replacement: the usual C-style control characters,
// or — for any unknown escape — the escaped character itself.
func replEscapeChars(m string) (r string) {
	known := map[string]string{
		"\\a": "\a",
		"\\b": "\b",
		"\\t": "\t",
		"\\n": "\n",
		"\\v": "\v",
		"\\f": "\f",
		"\\r": "\r",
	}
	if repl, ok := known[m]; ok {
		return repl
	}
	return string(m[1])
}
// stripEscapeChars resolves all backslash escapes in s using
// replEscapeChars (via the package-level escapeRe pattern).
func stripEscapeChars(s string) string {
	return escapeRe.ReplaceAllStringFunc(s, replEscapeChars)
}
// spacesAndQuotes reports whether r is a double quote or any Unicode
// whitespace; used as the separator predicate outside quoted arguments.
func spacesAndQuotes(r rune) bool {
	return r == '"' || unicode.IsSpace(r)
}
// backslashAndQuotes reports whether r is a backslash or a double quote;
// used as the separator predicate inside quoted arguments.
func backslashAndQuotes(r rune) bool {
	switch r {
	case '\\', '"':
		return true
	}
	return false
}
// TODO: handle quotes which are not surrounded by spaces
// splitCmdArguments tokenizes a command line into arguments. Unquoted
// runs are split on whitespace; a double quote switches the separator
// predicate to backslashAndQuotes so that quoted arguments may contain
// spaces and backslash escapes (resolved via stripEscapeChars).
// lastesc tracks the position just past the last backslash escape so the
// escaped character is not treated as a separator.
// Returns the argument slice and an error for an unterminated quote or
// a trailing sole backslash.
func splitCmdArguments(cmdstr string) (cmds []string, err error) {
	sepFunc := spacesAndQuotes
	foundQuote := false
	lastesc := 0
	for {
		i := strings.IndexFunc(cmdstr[lastesc:], sepFunc)
		if i < 0 {
			// no further separator: flush the remainder (if any) and stop
			if foundQuote {
				err = fmt.Errorf("closing \" is missing")
			}
			if len(cmdstr) > 0 {
				cmds = append(cmds, cmdstr)
			}
			return
		}
		i += lastesc
		tl.Printf("found %q at %d", cmdstr[i], i)
		switch cmdstr[i] {
		case '\t', ' ':
			// whitespace outside quotes terminates the current argument
			if i > 0 {
				cmds = append(cmds, cmdstr[0:i])
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '"':
			if foundQuote {
				// closing quote: emit the argument with escapes resolved
				cmds = append(cmds, stripEscapeChars(cmdstr[0:i]))
				foundQuote = false
				sepFunc = spacesAndQuotes
			} else {
				foundQuote = true
				sepFunc = backslashAndQuotes
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '\\':
			// skip the escaped character on the next separator search
			if len(cmdstr[lastesc:]) < 2 {
				err = fmt.Errorf("sole \\ at the end and no closing \"")
			}
			lastesc = i + 2
		}
	}
}
// handleCmd parses and dispatches one command line. It always reports
// completion on done; the reported value is true when the command asks
// for the connection to be closed. A pending cancel request left over
// from a previous command is consumed before dispatching.
func (c *TelnetClient) handleCmd(cmdstr string, done chan<- bool) {
	quit := false
	defer func() { done <- quit }()
	tl.Printf("cmdstr = %q", cmdstr)
	cmdslice, err := splitCmdArguments(cmdstr)
	if err != nil {
		c.Sayln("can't parse command: %s", err)
		return
	}
	tl.Printf("cmdslice = %q", cmdslice)
	if len(cmdslice) == 0 || cmdslice[0] == "" {
		return
	}
	select {
	case <-c.Cancel: // consume potentially pending cancel request
	default:
	}
	for cmd, cmdfunc := range *c.commands {
		if cmdslice[0] == cmd {
			quit = cmdfunc(c, cmdslice)
			return
		}
	}
	c.Sayln("unknown command '%s'", cmdslice[0])
}
// handleIac parses one inline telnet command and sends the out-of-band
// response on iacout: option negotiations are rewritten in place to a
// refusal (DONT/WONT), IP is forwarded so the client handler can cancel
// the running command, escaped IACs are left for dropIAC, and any other
// command is logged and ignored.
func handleIac(iac []byte, iacout chan<- []byte) {
	switch iac[1] {
	case WILL, WONT:
		iac[1] = DONT // deny the client to use any proposed options
	case DO, DONT:
		iac[1] = WONT // refuse the usage of any requested options
	case IP:
		// pass this through to client.handle which will cancel the process
	case IAC:
		return // just an escaped IAC, this will be dealt with by dropIAC
	default:
		tl.Printf("ignoring unimplemented telnet command: %s (%s)", telnet_commands[iac[1]].name, telnet_commands[iac[1]].description)
		return
	}
	iacout <- iac
}
// dropCR strips a single trailing carriage return from data, if present.
func dropCR(data []byte) []byte {
	n := len(data)
	if n == 0 || data[n-1] != '\r' {
		return data
	}
	return data[:n-1]
}
// dropIAC removes all telnet commands that are still in the read buffer
// and were already handled out-of-band by handleIac. Escaped IACs
// (IAC IAC) are collapsed to a single literal IAC byte. Invariant: iiac
// is the index of the next unprocessed byte; everything before it has
// been copied into token.
func dropIAC(data []byte) []byte {
	token := []byte("")
	iiac := 0
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC)
		if niiac >= 0 {
			// copy the plain bytes in front of the IAC
			token = append(token, data[iiac:iiac+niiac]...)
			iiac += niiac
			if (len(data) - iiac) < 2 { // check if the data at least contains a command code
				return token // something is fishy.. found an IAC but this is the last byte of the token...
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnet_commands[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l { // check if the command is complete
				return token // something is fishy.. found an IAC but the command is too short...
			}
			if data[iiac+1] == IAC { // escaped IAC found
				token = append(token, IAC)
			}
			iiac += l
		} else {
			// no more IACs: copy the rest and stop
			token = append(token, data[iiac:]...)
			break
		}
	}
	return token
}
// compareIdx compares two indexes as returned by bytes.IndexByte,
// treating -1 ("not found") as the highest possible index. The result
// is negative when a sorts before b, zero when equal, positive otherwise.
func compareIdx(a, b int) int {
	const maxInt = int(^uint(0) >> 1)
	if a < 0 {
		a = maxInt
	}
	if b < 0 {
		b = maxInt
	}
	return a - b
}
// scanLines is the bufio.SplitFunc used by the client scanner. It emits
// one token per completed line (CR stripped, handled IACs removed) or
// per EOT byte (Ctrl-D, emitted as a 1-byte token). Inline telnet
// commands found before the next line/EOT are handled immediately via
// handleIac; *lastiiac persists the scan position across invocations so
// already-handled commands are not reprocessed when more data arrives.
func scanLines(data []byte, atEOF bool, iacout chan<- []byte, lastiiac *int) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	inl := bytes.IndexByte(data, '\n') // index of first newline character
	ieot := bytes.IndexByte(data, EOT) // index of first End of Transmission
	iiac := *lastiiac
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC) // index of first/next telnet IAC
		if niiac >= 0 {
			iiac += niiac
		} else {
			iiac = niiac
		}
		if inl >= 0 && compareIdx(inl, ieot) < 0 && compareIdx(inl, iiac) < 0 {
			*lastiiac = 0
			return inl + 1, dropIAC(dropCR(data[0:inl])), nil // found a complete line and no EOT or IAC
		}
		if ieot >= 0 && compareIdx(ieot, iiac) < 0 {
			*lastiiac = 0
			return ieot + 1, data[ieot : ieot+1], nil // found an EOT (aka Ctrl-D was hit) and no IAC
		}
		if iiac >= 0 { // found an IAC
			if (len(data) - iiac) < 2 {
				return 0, nil, nil // data does not yet contain the telnet command code -> need more data
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnet_commands[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l {
				return 0, nil, nil // data does not yet contain the complete telnet command -> need more data
			}
			handleIac(data[iiac:iiac+l], iacout)
			iiac += l
			*lastiiac = iiac
		} else {
			break
		}
	}
	if atEOF {
		return len(data), dropCR(data), nil // allow last line to have no new line
	}
	return 0, nil, nil // we have found none of the escape codes -> need more data
}
// recv pumps scanned lines from the connection into in until the scanner
// stops or an EOT (Ctrl-D) token arrives; it closes in on exit so the
// handle loop knows the connection is gone.
func (c *TelnetClient) recv(in chan<- string) {
	defer close(in)
	for c.scanner.Scan() {
		b := c.scanner.Bytes()
		if len(b) > 0 && b[0] == EOT {
			tl.Printf("telgo(%s): Ctrl-D received, closing", c.Conn.RemoteAddr())
			return
		}
		in <- string(b)
	}
	if err := c.scanner.Err(); err != nil {
		tl.Printf("telgo(%s): recv() error: %s", c.Conn.RemoteAddr(), err)
	} else {
		tl.Printf("telgo(%s): Connection closed by foreign host", c.Conn.RemoteAddr())
	}
}
// cancel signals a running command to abort via the buffered Cancel
// channel; the non-blocking send makes it a no-op if a cancel is already
// pending.
func (c *TelnetClient) cancel() {
	select {
	case c.Cancel <- true:
	default: // process got canceled already
	}
}
// send is the single writer goroutine for this client: it serializes
// regular output (stdout) and out-of-band telnet replies (iacout) onto the
// connection until quit becomes readable.
func (c *TelnetClient) send(quit <-chan bool) {
	flush := func(b []byte) {
		c.writer.Write(b)
		c.writer.Flush()
	}
	for {
		select {
		case data := <-c.stdout:
			flush(data)
		case iac := <-c.iacout:
			if iac[1] == IP {
				// interrupt process: cancel the running command
				// instead of writing anything back
				c.cancel()
			} else {
				flush(iac)
			}
		case <-quit:
			return
		}
	}
}
// handle drives one client session: it spawns the reader (recv) and writer
// (send) goroutines, shows the prompt and runs at most one command at a
// time. Input arriving while a command is busy is ignored; a possibly
// running command is cancelled when the connection goes away.
func (c *TelnetClient) handle() {
	defer c.Conn.Close()
	in := make(chan string)
	go c.recv(in)
	quit_send := make(chan bool)
	go c.send(quit_send)
	defer func() { quit_send <- true }()
	defer c.cancel() // make sure to cancel possible running job when closing connection
	done := make(chan bool)
	busy := false
	c.WriteString(c.prompt)
	for {
		select {
		case cmd, ok := <-in:
			if !ok { // Ctrl-D or recv error (connection closed...)
				return
			}
			if !busy { // ignore everything except Ctrl-D while executing a command
				if len(cmd) > 0 {
					go c.handleCmd(cmd, done)
					busy = true
				} else {
					c.WriteString(c.prompt)
				}
			}
		case exit := <-done:
			if exit {
				return
			}
			c.WriteString(c.prompt)
			busy = false
		}
	}
}
// TelnetServer holds the listen address, the client prompt, the command
// table and the opaque user data handed to every client.
type TelnetServer struct {
	addr string
	prompt string
	commands TelgoCmdList
	userdata interface{}
}
// This creates a new telnet server. addr is the address to bind/listen to on and will be passed through
// to net.Listen(). The prompt will be sent to the client whenever the telgo server is ready for a new command.
// TelgoCmdList is a list of available commands and userdata will be made available to called telgo
// commands through the client struct.
func NewTelnetServer(addr, prompt string, commands TelgoCmdList, userdata interface{}) (s *TelnetServer) {
	s = &TelnetServer{
		addr:     addr,
		prompt:   prompt,
		commands: commands,
		userdata: userdata,
	}
	return
}
// This runs the telnet server and spawns go routines for every connecting client.
func (self *TelnetServer) Run() error {
	tl.Println("telgo: listening on", self.addr)
	server, err := net.Listen("tcp", self.addr)
	if err != nil {
		tl.Println("telgo: Listen() Error:", err)
		return err
	}
	// Release the listening socket when Accept fails and we bail out;
	// previously the listener leaked on the error return below.
	defer server.Close()
	for {
		conn, err := server.Accept()
		if err != nil {
			tl.Println("telgo: Accept() Error:", err)
			return err
		}
		c := newTelnetClient(conn, self.prompt, &self.commands, self.userdata)
		go c.handle()
	}
}
Quotes need to be separated with spaces.
//
// telgo
//
//
// Copyright (C) 2015 Christian Pointner <equinox@helsinki.at>
//
// This file is part of telgo.
//
// telgo is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// telgo is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with telgo. If not, see <http://www.gnu.org/licenses/>.
//
// Package telgo contains a simple telnet server which can be used as a
// control/debug interface for applications.
// The telgo telnet server does all the client handling and runs configurable
// commands as go routines. It also supports handling of basic inline telnet
// commands used by variaus telnet clients to configure the connection.
// For now every negotiable telnet option will be discarded but the telnet
// command IP (interrupt process) is understood and can be used to terminate
// long running user commands.
package telgo
import (
"bufio"
"bytes"
"fmt"
"log"
"net"
"os"
"regexp"
"strings"
"unicode"
)
var (
	// tl is the package-wide logger; it writes to stderr.
	tl = log.New(os.Stderr, "[telnet]\t", log.LstdFlags)
)
// Telnet control bytes (RFC 854): IAC introduces an inline command, the
// bytes below it are command verbs / option-negotiation codes; EOT is the
// End of Transmission character (Ctrl-D).
const (
	EOT = byte(4)
	IAC = byte(255)
	DONT = byte(254)
	DO = byte(253)
	WONT = byte(252)
	WILL = byte(251)
	SB = byte(250)
	GA = byte(249)
	EL = byte(248)
	EC = byte(247)
	AYT = byte(246)
	AO = byte(245)
	IP = byte(244)
	BREA = byte(243)
	DM = byte(242)
	NOP = byte(241)
	SE = byte(240)
)
// telnet_command describes one inline telnet command: its total encoded
// length (including the leading IAC byte) and a name/description used for
// logging.
type telnet_command struct {
	length int
	name string
	description string
}
var (
	// telnet_commands maps a command verb byte to its metadata; the option
	// negotiation verbs (DONT/DO/WONT/WILL) carry an extra option byte and
	// are therefore 3 bytes long, all others 2.
	telnet_commands = map[byte]telnet_command{
		DONT: telnet_command{3, "DONT", "don't use option"},
		DO: telnet_command{3, "DO", "do use option"},
		WONT: telnet_command{3, "WONT", "won't use option"},
		WILL: telnet_command{3, "WILL", "will use option"},
		SB: telnet_command{2, "SB", "Begin of subnegotiation parameters"},
		GA: telnet_command{2, "GA", "go ahead signal"},
		EL: telnet_command{2, "EL", "erase line"},
		EC: telnet_command{2, "EC", "erase character"},
		AYT: telnet_command{2, "AYT", "are you there"},
		AO: telnet_command{2, "AO", "abort output"},
		IP: telnet_command{2, "IP", "interrupt process"},
		BREA: telnet_command{2, "BREA", "break"},
		DM: telnet_command{2, "DM", "data mark"},
		NOP: telnet_command{2, "NOP", "no operation"},
		SE: telnet_command{2, "SE", "End of subnegotiation parameters"},
	}
)
// This is the signature of telgo command functions. It receives a pointer to
// the telgo client struct and a slice of strings containing the arguments the
// user has supplied. The first argument is always the command name itself.
// If this function returns true the client connection will be terminated.
type TelgoCmd func(c *TelnetClient, args []string) bool
// TelgoCmdList maps a command name to the function that implements it.
type TelgoCmdList map[string]TelgoCmd
// This struct is used to export the raw tcp connection to the client as well as
// the UserData which got supplied to NewTelnetServer.
// The Cancel channel will get ready for reading when the user hits Ctrl-C or
// the connection got terminated. This can be used for long running telgo commands
// to be aborted.
type TelnetClient struct {
	Conn net.Conn
	UserData interface{}
	Cancel chan bool
	scanner *bufio.Scanner // telnet-aware tokenizer (split function set in newTelnetClient)
	writer *bufio.Writer
	prompt string
	commands *TelgoCmdList
	iacout chan []byte // out-of-band telnet replies, drained by send()
	stdout chan []byte // regular client output, drained by send()
}
// newTelnetClient wires up a client for the given connection: buffered
// reader/writer, output channels and a telnet-aware split function that
// keeps its IAC scan position (lastiiac) in a closure.
func newTelnetClient(conn net.Conn, prompt string, commands *TelgoCmdList, userdata interface{}) (c *TelnetClient) {
	tl.Println("telgo: new client from:", conn.RemoteAddr())
	c = &TelnetClient{}
	c.Conn = conn
	c.scanner = bufio.NewScanner(conn)
	c.writer = bufio.NewWriter(conn)
	c.prompt = prompt
	c.commands = commands
	c.UserData = userdata
	c.stdout = make(chan []byte)
	c.Cancel = make(chan bool, 1)
	// the telnet split function needs some closures to handle inline telnet commands
	c.iacout = make(chan []byte)
	lastiiac := 0
	c.scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		return scanLines(data, atEOF, c.iacout, &lastiiac)
	})
	return c
}
// This writes a 'raw' string to the client. For the most part the usage of Say
// and Sayln is recommended. WriteString will take care of escaping IAC bytes
// inside your string.
func (c *TelnetClient) WriteString(text string) {
	// double every IAC (0xFF) byte so the peer does not treat it as a
	// telnet command introducer
	escaped := bytes.Replace([]byte(text), []byte{IAC}, []byte{IAC, IAC}, -1)
	c.stdout <- escaped
}
// This is a simple Printf like interface which sends responses to the client.
func (c *TelnetClient) Say(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	c.WriteString(msg)
}
// This is the same as Say but also adds a new-line at the end of the string.
func (c *TelnetClient) Sayln(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...) + "\r\n"
	c.WriteString(msg)
}
var (
	// escapeRe matches one two-character escape sequence: a backslash
	// followed by any single character.
	escapeRe = regexp.MustCompile(`\\.`)
)
// replEscapeChars maps a two-character escape sequence (e.g. `\n`) to the
// character it denotes; unknown escapes resolve to the escaped character
// itself (so `\x` becomes "x" and `\"` becomes `"`).
func replEscapeChars(m string) (r string) {
	known := map[string]string{
		`\a`: "\a", `\b`: "\b", `\t`: "\t", `\n`: "\n",
		`\v`: "\v", `\f`: "\f", `\r`: "\r",
	}
	if repl, ok := known[m]; ok {
		return repl
	}
	return string(m[1])
}
// stripEscapeChars replaces every two-character escape sequence in s with
// the character it denotes (see replEscapeChars).
func stripEscapeChars(s string) string {
	return escapeRe.ReplaceAllStringFunc(s, replEscapeChars)
}
// spacesAndQuotes reports whether r is whitespace or a double quote; it is
// the separator set used outside quoted arguments.
func spacesAndQuotes(r rune) bool {
	if r == '"' {
		return true
	}
	return unicode.IsSpace(r)
}
// backslashAndQuotes reports whether r is a backslash or a double quote;
// it is the separator set used inside quoted arguments.
func backslashAndQuotes(r rune) bool {
	switch r {
	case '\\', '"':
		return true
	}
	return false
}
// splitCmdArguments splits a command line into its arguments. Outside of
// quotes, spaces and tabs separate arguments; a double-quoted argument may
// contain whitespace and backslash escapes (resolved via stripEscapeChars)
// and must be followed by a space. lastesc is the offset behind the most
// recently consumed escape sequence, so the separator search resumes after
// it.
func splitCmdArguments(cmdstr string) (cmds []string, err error) {
	sepFunc := spacesAndQuotes
	foundQuote := false
	lastesc := 0
	for {
		i := strings.IndexFunc(cmdstr[lastesc:], sepFunc)
		if i < 0 {
			if foundQuote {
				err = fmt.Errorf("closing \" is missing")
			}
			if len(cmdstr) > 0 {
				cmds = append(cmds, cmdstr)
			}
			return
		}
		i += lastesc
		switch cmdstr[i] {
		case '\t', ' ':
			if i > 0 {
				cmds = append(cmds, cmdstr[0:i])
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '"':
			if foundQuote {
				cmds = append(cmds, stripEscapeChars(cmdstr[0:i]))
				foundQuote = false
				sepFunc = spacesAndQuotes
				if len(cmdstr) == i+1 { // is this the end?
					return
				}
				if !unicode.IsSpace(rune(cmdstr[i+1])) {
					err = fmt.Errorf("there must be a space after a closing \"")
					return
				}
			} else {
				foundQuote = true
				sepFunc = backslashAndQuotes
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '\\':
			if len(cmdstr[i:]) < 2 {
				err = fmt.Errorf("sole \\ at the end and no closing \"")
				return
			}
			lastesc = i + 2
		}
	}
}
// handleCmd parses cmdstr, looks the command up in the client's command
// table and runs it; the result (true = close the connection) is reported
// on done. A stale cancel request is drained before the command starts.
func (c *TelnetClient) handleCmd(cmdstr string, done chan<- bool) {
	quit := false
	defer func() { done <- quit }()
	cmdslice, err := splitCmdArguments(cmdstr)
	if err != nil {
		c.Sayln("can't parse command: %s", err)
		return
	}
	if len(cmdslice) == 0 || cmdslice[0] == "" {
		return
	}
	select {
	case <-c.Cancel: // consume potentially pending cancel request
	default:
	}
	for cmd, cmdfunc := range *c.commands {
		if cmdslice[0] == cmd {
			quit = cmdfunc(c, cmdslice)
			return
		}
	}
	c.Sayln("unknown command '%s'", cmdslice[0])
}
// parse the telnet command and send out out-of-band responses to them
func handleIac(iac []byte, iacout chan<- []byte) {
	// Respond using a private copy: iac aliases the bufio.Scanner read
	// buffer, which may be overwritten by a later Scan while the send
	// goroutine is still writing the reply to the connection.
	resp := make([]byte, len(iac))
	copy(resp, iac)
	switch resp[1] {
	case WILL, WONT:
		resp[1] = DONT // deny the client to use any proposed options
	case DO, DONT:
		resp[1] = WONT // refuse the usage of any requested options
	case IP:
		// pass this through to client.handle which will cancel the process
	case IAC:
		return // just an escaped IAC, this will be dealt with by dropIAC
	default:
		tl.Printf("ignoring unimplemented telnet command: %s (%s)", telnet_commands[iac[1]].name, telnet_commands[iac[1]].description)
		return
	}
	iacout <- resp
}
// remove the carriage return at the end of the line
// dropCR strips a single trailing carriage return from data, if present.
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// remove all telnet commands which are still on the read buffer and were
// handled already using out-of-band messages
// dropIAC returns data with every telnet command sequence removed; an
// escaped IAC (IAC IAC) collapses to one literal IAC byte. The commands
// themselves were already answered out-of-band by handleIac.
func dropIAC(data []byte) []byte {
	token := []byte("")
	iiac := 0
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC)
		if niiac >= 0 {
			token = append(token, data[iiac:iiac+niiac]...)
			iiac += niiac
			if (len(data) - iiac) < 2 { // check if the data at least contains a command code
				return token // something is fishy.. found an IAC but this is the last byte of the token...
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnet_commands[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l { // check if the command is complete
				return token // something is fishy.. found an IAC but the command is too short...
			}
			if data[iiac+1] == IAC { // escaped IAC found
				token = append(token, IAC)
			}
			iiac += l
		} else {
			token = append(token, data[iiac:]...)
			break
		}
	}
	return token
}
// This compares two indexes as returned by bytes.IndexByte treating -1 as the
// highest possible index.
func compareIdx(a, b int) int {
	const notFound = int(^uint(0) >> 1) // max int stands in for "not found" (-1)
	x, y := a, b
	if x < 0 {
		x = notFound
	}
	if y < 0 {
		y = notFound
	}
	return x - y
}
// scanLines is the telnet-aware bufio.SplitFunc (bound via closure in
// newTelnetClient). A token is either one complete line — with trailing CR
// and inline telnet commands stripped — or a single EOT byte (Ctrl-D).
// Telnet IAC sequences found before the end of the line are answered
// out-of-band through handleIac/iacout. *lastiiac remembers how far data
// was already scanned for IACs so commands are not handled twice when the
// Scanner calls again with more data.
func scanLines(data []byte, atEOF bool, iacout chan<- []byte, lastiiac *int) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	inl := bytes.IndexByte(data, '\n') // index of first newline character
	ieot := bytes.IndexByte(data, EOT) // index of first End of Transmission
	iiac := *lastiiac
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC) // index of first/next telnet IAC
		if niiac >= 0 {
			iiac += niiac
		} else {
			iiac = niiac
		}
		if inl >= 0 && compareIdx(inl, ieot) < 0 && compareIdx(inl, iiac) < 0 {
			*lastiiac = 0
			return inl + 1, dropIAC(dropCR(data[0:inl])), nil // found a complete line and no EOT or IAC
		}
		if ieot >= 0 && compareIdx(ieot, iiac) < 0 {
			*lastiiac = 0
			return ieot + 1, data[ieot : ieot+1], nil // found an EOT (aka Ctrl-D was hit) and no IAC
		}
		if iiac >= 0 { // found an IAC
			if (len(data) - iiac) < 2 {
				return 0, nil, nil // data does not yet contain the telnet command code -> need more data
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnet_commands[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l {
				return 0, nil, nil // data does not yet contain the complete telnet command -> need more data
			}
			handleIac(data[iiac:iiac+l], iacout)
			iiac += l
			*lastiiac = iiac
		} else {
			break
		}
	}
	if atEOF {
		return len(data), dropCR(data), nil // allow last line to have no new line
	}
	return 0, nil, nil // we have found none of the escape codes -> need more data
}
// recv reads tokens (lines, or a single EOT byte) from the client and
// forwards them as strings on in. Closing in when the scanner stops is the
// signal to handle() that the connection is gone.
func (c *TelnetClient) recv(in chan<- string) {
	defer close(in)
	for c.scanner.Scan() {
		b := c.scanner.Bytes()
		if len(b) > 0 && b[0] == EOT {
			tl.Printf("telgo(%s): Ctrl-D received, closing", c.Conn.RemoteAddr())
			return
		}
		in <- string(b)
	}
	if err := c.scanner.Err(); err != nil {
		tl.Printf("telgo(%s): recv() error: %s", c.Conn.RemoteAddr(), err)
	} else {
		tl.Printf("telgo(%s): Connection closed by foreign host", c.Conn.RemoteAddr())
	}
}
// cancel signals a running command to abort via the buffered Cancel
// channel; the non-blocking send makes it a no-op if a cancel is already
// pending.
func (c *TelnetClient) cancel() {
	select {
	case c.Cancel <- true:
	default: // process got canceled already
	}
}
// send is the single writer goroutine for this client: it serializes
// regular output (stdout) and out-of-band telnet replies (iacout) onto the
// connection until quit becomes readable.
func (c *TelnetClient) send(quit <-chan bool) {
	flush := func(b []byte) {
		c.writer.Write(b)
		c.writer.Flush()
	}
	for {
		select {
		case data := <-c.stdout:
			flush(data)
		case iac := <-c.iacout:
			if iac[1] == IP {
				// interrupt process: cancel the running command
				// instead of writing anything back
				c.cancel()
			} else {
				flush(iac)
			}
		case <-quit:
			return
		}
	}
}
// handle drives one client session: it spawns the reader (recv) and writer
// (send) goroutines, shows the prompt and runs at most one command at a
// time. Input arriving while a command is busy is ignored; a possibly
// running command is cancelled when the connection goes away.
func (c *TelnetClient) handle() {
	defer c.Conn.Close()
	in := make(chan string)
	go c.recv(in)
	quit_send := make(chan bool)
	go c.send(quit_send)
	defer func() { quit_send <- true }()
	defer c.cancel() // make sure to cancel possible running job when closing connection
	done := make(chan bool)
	busy := false
	c.WriteString(c.prompt)
	for {
		select {
		case cmd, ok := <-in:
			if !ok { // Ctrl-D or recv error (connection closed...)
				return
			}
			if !busy { // ignore everything except Ctrl-D while executing a command
				if len(cmd) > 0 {
					go c.handleCmd(cmd, done)
					busy = true
				} else {
					c.WriteString(c.prompt)
				}
			}
		case exit := <-done:
			if exit {
				return
			}
			c.WriteString(c.prompt)
			busy = false
		}
	}
}
// TelnetServer holds the listen address, the client prompt, the command
// table and the opaque user data handed to every client.
type TelnetServer struct {
	addr string
	prompt string
	commands TelgoCmdList
	userdata interface{}
}
// This creates a new telnet server. addr is the address to bind/listen to on and will be passed through
// to net.Listen(). The prompt will be sent to the client whenever the telgo server is ready for a new command.
// TelgoCmdList is a list of available commands and userdata will be made available to called telgo
// commands through the client struct.
func NewTelnetServer(addr, prompt string, commands TelgoCmdList, userdata interface{}) (s *TelnetServer) {
	s = &TelnetServer{
		addr:     addr,
		prompt:   prompt,
		commands: commands,
		userdata: userdata,
	}
	return
}
// This runs the telnet server and spawns go routines for every connecting client.
func (self *TelnetServer) Run() error {
	tl.Println("telgo: listening on", self.addr)
	server, err := net.Listen("tcp", self.addr)
	if err != nil {
		tl.Println("telgo: Listen() Error:", err)
		return err
	}
	// Release the listening socket when Accept fails and we bail out;
	// previously the listener leaked on the error return below.
	defer server.Close()
	for {
		conn, err := server.Accept()
		if err != nil {
			tl.Println("telgo: Accept() Error:", err)
			return err
		}
		c := newTelnetClient(conn, self.prompt, &self.commands, self.userdata)
		go c.handle()
	}
}
|
//
// telgo
//
//
// Copyright (C) 2015 Christian Pointner <equinox@helsinki.at>
//
// This file is part of telgo.
//
// telgo is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// telgo is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with telgo. If not, see <http://www.gnu.org/licenses/>.
//
// Package telgo contains a simple telnet server which can be used as a
// control/debug interface for applications.
// The telgo telnet server does all the client handling and runs configurable
// commands as go routines. It also supports handling of basic inline telnet
// commands used by variaus telnet clients to configure the connection.
// For now every negotiable telnet option will be discarded but the telnet
// command IP (interrupt process) is understood and can be used to terminate
// long running user commands.
package telgo
import (
"bufio"
"bytes"
"fmt"
"log"
"net"
"os"
"regexp"
"strings"
"unicode"
)
var (
	// tl is the package-wide logger; it writes to stderr.
	tl = log.New(os.Stderr, "[telnet]\t", log.LstdFlags)
)
// Telnet control bytes (RFC 854): IAC introduces an inline command, the
// bytes below it are command verbs / option-negotiation codes; EOT is the
// End of Transmission character (Ctrl-D).
const (
	EOT = byte(4)
	IAC = byte(255)
	DONT = byte(254)
	DO = byte(253)
	WONT = byte(252)
	WILL = byte(251)
	SB = byte(250)
	GA = byte(249)
	EL = byte(248)
	EC = byte(247)
	AYT = byte(246)
	AO = byte(245)
	IP = byte(244)
	BREA = byte(243)
	DM = byte(242)
	NOP = byte(241)
	SE = byte(240)
)
// telnetCmd describes one inline telnet command: its total encoded length
// (including the leading IAC byte) and a name/description used for logging.
type telnetCmd struct {
	length int
	name string
	description string
}
var (
	// telnetCmds maps a command verb byte to its metadata; the option
	// negotiation verbs (DONT/DO/WONT/WILL) carry an extra option byte and
	// are therefore 3 bytes long, all others 2.
	telnetCmds = map[byte]telnetCmd{
		DONT: telnetCmd{3, "DONT", "don't use option"},
		DO: telnetCmd{3, "DO", "do use option"},
		WONT: telnetCmd{3, "WONT", "won't use option"},
		WILL: telnetCmd{3, "WILL", "will use option"},
		SB: telnetCmd{2, "SB", "Begin of subnegotiation parameters"},
		GA: telnetCmd{2, "GA", "go ahead signal"},
		EL: telnetCmd{2, "EL", "erase line"},
		EC: telnetCmd{2, "EC", "erase character"},
		AYT: telnetCmd{2, "AYT", "are you there"},
		AO: telnetCmd{2, "AO", "abort output"},
		IP: telnetCmd{2, "IP", "interrupt process"},
		BREA: telnetCmd{2, "BREA", "break"},
		DM: telnetCmd{2, "DM", "data mark"},
		NOP: telnetCmd{2, "NOP", "no operation"},
		SE: telnetCmd{2, "SE", "End of subnegotiation parameters"},
	}
)
// This is the signature of telgo command functions. It receives a pointer to
// the telgo client struct and a slice of strings containing the arguments the
// user has supplied. The first argument is always the command name itself.
// If this function returns true the client connection will be terminated.
type TelgoCmd func(c *TelnetClient, args []string) bool
// TelgoCmdList maps a command name to the function that implements it.
type TelgoCmdList map[string]TelgoCmd
// This struct is used to export the raw tcp connection to the client as well as
// the UserData which got supplied to NewTelnetServer.
// The Cancel channel will get ready for reading when the user hits Ctrl-C or
// the connection got terminated. This can be used for long running telgo commands
// to be aborted.
type TelnetClient struct {
	Conn net.Conn
	UserData interface{}
	Cancel chan bool
	scanner *bufio.Scanner // telnet-aware tokenizer (split function set in newTelnetClient)
	writer *bufio.Writer
	prompt string
	commands *TelgoCmdList
	iacout chan []byte // out-of-band telnet replies, drained by send()
	stdout chan []byte // regular client output, drained by send()
}
// newTelnetClient wires up a client for the given connection: buffered
// reader/writer, output channels and a telnet-aware split function that
// keeps its IAC scan position (lastiiac) in a closure.
func newTelnetClient(conn net.Conn, prompt string, commands *TelgoCmdList, userdata interface{}) (c *TelnetClient) {
	tl.Println("telgo: new client from:", conn.RemoteAddr())
	c = &TelnetClient{}
	c.Conn = conn
	c.scanner = bufio.NewScanner(conn)
	c.writer = bufio.NewWriter(conn)
	c.prompt = prompt
	c.commands = commands
	c.UserData = userdata
	c.stdout = make(chan []byte)
	c.Cancel = make(chan bool, 1)
	// the telnet split function needs some closures to handle inline telnet commands
	c.iacout = make(chan []byte)
	lastiiac := 0
	c.scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		return scanLines(data, atEOF, c.iacout, &lastiiac)
	})
	return c
}
// This writes a 'raw' string to the client. For the most part the usage of Say
// and Sayln is recommended. WriteString will take care of escaping IAC bytes
// inside your string.
func (c *TelnetClient) WriteString(text string) {
	// double every IAC (0xFF) byte so the peer does not treat it as a
	// telnet command introducer
	escaped := bytes.Replace([]byte(text), []byte{IAC}, []byte{IAC, IAC}, -1)
	c.stdout <- escaped
}
// This is a simple Printf like interface which sends responses to the client.
func (c *TelnetClient) Say(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	c.WriteString(msg)
}
// This is the same as Say but also adds a new-line at the end of the string.
func (c *TelnetClient) Sayln(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...) + "\r\n"
	c.WriteString(msg)
}
var (
	// escapeRe matches one two-character escape sequence: a backslash
	// followed by any single character.
	escapeRe = regexp.MustCompile(`\\.`)
)
// replEscapeChars maps a two-character escape sequence (e.g. `\n`) to the
// character it denotes; unknown escapes resolve to the escaped character
// itself (so `\x` becomes "x" and `\"` becomes `"`).
func replEscapeChars(m string) (r string) {
	known := map[string]string{
		`\a`: "\a", `\b`: "\b", `\t`: "\t", `\n`: "\n",
		`\v`: "\v", `\f`: "\f", `\r`: "\r",
	}
	if repl, ok := known[m]; ok {
		return repl
	}
	return string(m[1])
}
// stripEscapeChars replaces every two-character escape sequence in s with
// the character it denotes (see replEscapeChars).
func stripEscapeChars(s string) string {
	return escapeRe.ReplaceAllStringFunc(s, replEscapeChars)
}
// spacesAndQuotes reports whether r is whitespace or a double quote; it is
// the separator set used outside quoted arguments.
func spacesAndQuotes(r rune) bool {
	if r == '"' {
		return true
	}
	return unicode.IsSpace(r)
}
// backslashAndQuotes reports whether r is a backslash or a double quote;
// it is the separator set used inside quoted arguments.
func backslashAndQuotes(r rune) bool {
	switch r {
	case '\\', '"':
		return true
	}
	return false
}
// splitCmdArguments splits a command line into its arguments. Outside of
// quotes, spaces and tabs separate arguments; a double-quoted argument may
// contain whitespace and backslash escapes (resolved via stripEscapeChars)
// and must be followed by a space. lastesc is the offset behind the most
// recently consumed escape sequence, so the separator search resumes after
// it.
func splitCmdArguments(cmdstr string) (cmds []string, err error) {
	sepFunc := spacesAndQuotes
	foundQuote := false
	lastesc := 0
	for {
		i := strings.IndexFunc(cmdstr[lastesc:], sepFunc)
		if i < 0 {
			if foundQuote {
				err = fmt.Errorf("closing \" is missing")
			}
			if len(cmdstr) > 0 {
				cmds = append(cmds, cmdstr)
			}
			return
		}
		i += lastesc
		switch cmdstr[i] {
		case '\t', ' ':
			if i > 0 {
				cmds = append(cmds, cmdstr[0:i])
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '"':
			if foundQuote {
				cmds = append(cmds, stripEscapeChars(cmdstr[0:i]))
				foundQuote = false
				sepFunc = spacesAndQuotes
				if len(cmdstr) == i+1 { // is this the end?
					return
				}
				if !unicode.IsSpace(rune(cmdstr[i+1])) {
					err = fmt.Errorf("there must be a space after a closing \"")
					return
				}
			} else {
				foundQuote = true
				sepFunc = backslashAndQuotes
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '\\':
			if len(cmdstr[i:]) < 2 {
				err = fmt.Errorf("sole \\ at the end and no closing \"")
				return
			}
			lastesc = i + 2
		}
	}
}
// handleCmd parses cmdstr, looks the command up in the client's command
// table and runs it; the result (true = close the connection) is reported
// on done. A stale cancel request is drained before the command starts.
func (c *TelnetClient) handleCmd(cmdstr string, done chan<- bool) {
	quit := false
	defer func() { done <- quit }()
	cmdslice, err := splitCmdArguments(cmdstr)
	if err != nil {
		c.Sayln("can't parse command: %s", err)
		return
	}
	if len(cmdslice) == 0 || cmdslice[0] == "" {
		return
	}
	select {
	case <-c.Cancel: // consume potentially pending cancel request
	default:
	}
	for cmd, cmdfunc := range *c.commands {
		if cmdslice[0] == cmd {
			quit = cmdfunc(c, cmdslice)
			return
		}
	}
	c.Sayln("unknown command '%s'", cmdslice[0])
}
// parse the telnet command and send out out-of-band responses to them
func handleIac(iac []byte, iacout chan<- []byte) {
	// Respond using a private copy: iac aliases the bufio.Scanner read
	// buffer, which may be overwritten by a later Scan while the send
	// goroutine is still writing the reply to the connection.
	resp := make([]byte, len(iac))
	copy(resp, iac)
	switch resp[1] {
	case WILL, WONT:
		resp[1] = DONT // deny the client to use any proposed options
	case DO, DONT:
		resp[1] = WONT // refuse the usage of any requested options
	case IP:
		// pass this through to client.handle which will cancel the process
	case IAC:
		return // just an escaped IAC, this will be dealt with by dropIAC
	default:
		tl.Printf("ignoring unimplemented telnet command: %s (%s)", telnetCmds[iac[1]].name, telnetCmds[iac[1]].description)
		return
	}
	iacout <- resp
}
// remove the carriage return at the end of the line
// dropCR strips a single trailing carriage return from data, if present.
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// remove all telnet commands which are still on the read buffer and were
// handled already using out-of-band messages
// dropIAC returns data with every telnet command sequence removed; an
// escaped IAC (IAC IAC) collapses to one literal IAC byte. The commands
// themselves were already answered out-of-band by handleIac.
func dropIAC(data []byte) []byte {
	token := []byte("")
	iiac := 0
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC)
		if niiac >= 0 {
			token = append(token, data[iiac:iiac+niiac]...)
			iiac += niiac
			if (len(data) - iiac) < 2 { // check if the data at least contains a command code
				return token // something is fishy.. found an IAC but this is the last byte of the token...
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnetCmds[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l { // check if the command is complete
				return token // something is fishy.. found an IAC but the command is too short...
			}
			if data[iiac+1] == IAC { // escaped IAC found
				token = append(token, IAC)
			}
			iiac += l
		} else {
			token = append(token, data[iiac:]...)
			break
		}
	}
	return token
}
// This compares two indexes as returned by bytes.IndexByte treating -1 as the
// highest possible index.
func compareIdx(a, b int) int {
	const notFound = int(^uint(0) >> 1) // max int stands in for "not found" (-1)
	x, y := a, b
	if x < 0 {
		x = notFound
	}
	if y < 0 {
		y = notFound
	}
	return x - y
}
// scanLines is the telnet-aware bufio.SplitFunc (bound via closure in
// newTelnetClient). A token is either one complete line — with trailing CR
// and inline telnet commands stripped — or a single EOT byte (Ctrl-D).
// Telnet IAC sequences found before the end of the line are answered
// out-of-band through handleIac/iacout. *lastiiac remembers how far data
// was already scanned for IACs so commands are not handled twice when the
// Scanner calls again with more data.
func scanLines(data []byte, atEOF bool, iacout chan<- []byte, lastiiac *int) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	inl := bytes.IndexByte(data, '\n') // index of first newline character
	ieot := bytes.IndexByte(data, EOT) // index of first End of Transmission
	iiac := *lastiiac
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC) // index of first/next telnet IAC
		if niiac >= 0 {
			iiac += niiac
		} else {
			iiac = niiac
		}
		if inl >= 0 && compareIdx(inl, ieot) < 0 && compareIdx(inl, iiac) < 0 {
			*lastiiac = 0
			return inl + 1, dropIAC(dropCR(data[0:inl])), nil // found a complete line and no EOT or IAC
		}
		if ieot >= 0 && compareIdx(ieot, iiac) < 0 {
			*lastiiac = 0
			return ieot + 1, data[ieot : ieot+1], nil // found an EOT (aka Ctrl-D was hit) and no IAC
		}
		if iiac >= 0 { // found an IAC
			if (len(data) - iiac) < 2 {
				return 0, nil, nil // data does not yet contain the telnet command code -> need more data
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnetCmds[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l {
				return 0, nil, nil // data does not yet contain the complete telnet command -> need more data
			}
			handleIac(data[iiac:iiac+l], iacout)
			iiac += l
			*lastiiac = iiac
		} else {
			break
		}
	}
	if atEOF {
		return len(data), dropCR(data), nil // allow last line to have no new line
	}
	return 0, nil, nil // we have found none of the escape codes -> need more data
}
// recv reads tokens (lines, or a single EOT byte) from the client and
// forwards them as strings on in. Closing in when the scanner stops is the
// signal to handle() that the connection is gone.
func (c *TelnetClient) recv(in chan<- string) {
	defer close(in)
	for c.scanner.Scan() {
		b := c.scanner.Bytes()
		if len(b) > 0 && b[0] == EOT {
			tl.Printf("telgo(%s): Ctrl-D received, closing", c.Conn.RemoteAddr())
			return
		}
		in <- string(b)
	}
	if err := c.scanner.Err(); err != nil {
		tl.Printf("telgo(%s): recv() error: %s", c.Conn.RemoteAddr(), err)
	} else {
		tl.Printf("telgo(%s): Connection closed by foreign host", c.Conn.RemoteAddr())
	}
}
// cancel signals a running command to abort via the buffered Cancel
// channel; the non-blocking send makes it a no-op if a cancel is already
// pending.
func (c *TelnetClient) cancel() {
	select {
	case c.Cancel <- true:
	default: // process got canceled already
	}
}
// send is the single writer goroutine for this client: it serializes
// regular output (stdout) and out-of-band telnet replies (iacout) onto the
// connection until quit becomes readable.
func (c *TelnetClient) send(quit <-chan bool) {
	flush := func(b []byte) {
		c.writer.Write(b)
		c.writer.Flush()
	}
	for {
		select {
		case data := <-c.stdout:
			flush(data)
		case iac := <-c.iacout:
			if iac[1] == IP {
				// interrupt process: cancel the running command
				// instead of writing anything back
				c.cancel()
			} else {
				flush(iac)
			}
		case <-quit:
			return
		}
	}
}
// handle drives one client session: it spawns the reader (recv) and writer
// (send) goroutines, shows the prompt and runs at most one command at a
// time. Input arriving while a command is busy is ignored; a possibly
// running command is cancelled when the connection goes away.
func (c *TelnetClient) handle() {
	defer c.Conn.Close()
	in := make(chan string)
	go c.recv(in)
	quitSend := make(chan bool)
	go c.send(quitSend)
	defer func() { quitSend <- true }()
	defer c.cancel() // make sure to cancel possible running job when closing connection
	done := make(chan bool)
	busy := false
	c.WriteString(c.prompt)
	for {
		select {
		case cmd, ok := <-in:
			if !ok { // Ctrl-D or recv error (connection closed...)
				return
			}
			if !busy { // ignore everything except Ctrl-D while executing a command
				if len(cmd) > 0 {
					go c.handleCmd(cmd, done)
					busy = true
				} else {
					c.WriteString(c.prompt)
				}
			}
		case exit := <-done:
			if exit {
				return
			}
			c.WriteString(c.prompt)
			busy = false
		}
	}
}
// TelnetServer holds the listen address, the client prompt, the command
// table and the opaque user data handed to every client.
type TelnetServer struct {
	addr string
	prompt string
	commands TelgoCmdList
	userdata interface{}
}
// This creates a new telnet server. addr is the address to bind/listen to on and will be passed through
// to net.Listen(). The prompt will be sent to the client whenever the telgo server is ready for a new command.
// TelgoCmdList is a list of available commands and userdata will be made available to called telgo
// commands through the client struct.
func NewTelnetServer(addr, prompt string, commands TelgoCmdList, userdata interface{}) (s *TelnetServer) {
	s = &TelnetServer{
		addr:     addr,
		prompt:   prompt,
		commands: commands,
		userdata: userdata,
	}
	return
}
// This runs the telnet server and spawns go routines for every connecting client.
func (self *TelnetServer) Run() error {
	tl.Println("telgo: listening on", self.addr)
	server, err := net.Listen("tcp", self.addr)
	if err != nil {
		tl.Println("telgo: Listen() Error:", err)
		return err
	}
	// Release the listening socket when Accept fails and we bail out;
	// previously the listener leaked on the error return below.
	defer server.Close()
	for {
		conn, err := server.Accept()
		if err != nil {
			tl.Println("telgo: Accept() Error:", err)
			return err
		}
		c := newTelnetClient(conn, self.prompt, &self.commands, self.userdata)
		go c.handle()
	}
}
Improved logging.
//
// telgo
//
//
// Copyright (C) 2015 Christian Pointner <equinox@helsinki.at>
//
// This file is part of telgo.
//
// telgo is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// telgo is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with telgo. If not, see <http://www.gnu.org/licenses/>.
//
// Package telgo contains a simple telnet server which can be used as a
// control/debug interface for applications.
// The telgo telnet server does all the client handling and runs configurable
// commands as go routines. It also supports handling of basic inline telnet
// commands used by variaus telnet clients to configure the connection.
// For now every negotiable telnet option will be discarded but the telnet
// command IP (interrupt process) is understood and can be used to terminate
// long running user commands.
// If the environment contains the variable TELGO_DEBUG logging will be enabled.
// By default telgo doesn't log anything.
package telgo
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"regexp"
"strings"
"unicode"
)
var (
	// tl is the package-wide logger; it discards everything by default.
	tl = log.New(ioutil.Discard, "[telgo]\t", log.LstdFlags)
)
// init enables logging to stderr when the TELGO_DEBUG environment variable
// is set (see the package comment).
func init() {
	if _, exists := os.LookupEnv("TELGO_DEBUG"); exists {
		tl = log.New(os.Stderr, "[telgo]\t", log.LstdFlags)
	}
}
// Telnet control bytes (RFC 854): IAC introduces an inline command, the
// bytes below it are command verbs / option-negotiation codes; EOT is the
// End of Transmission character (Ctrl-D).
const (
	EOT = byte(4)
	IAC = byte(255)
	DONT = byte(254)
	DO = byte(253)
	WONT = byte(252)
	WILL = byte(251)
	SB = byte(250)
	GA = byte(249)
	EL = byte(248)
	EC = byte(247)
	AYT = byte(246)
	AO = byte(245)
	IP = byte(244)
	BREA = byte(243)
	DM = byte(242)
	NOP = byte(241)
	SE = byte(240)
)
// telnetCmd describes one inline telnet command: its total encoded length
// (including the leading IAC byte) and a name/description used for logging.
type telnetCmd struct {
	length int
	name string
	description string
}
var (
	// telnetCmds maps a command verb byte to its metadata; the option
	// negotiation verbs (DONT/DO/WONT/WILL) carry an extra option byte and
	// are therefore 3 bytes long, all others 2.
	telnetCmds = map[byte]telnetCmd{
		DONT: telnetCmd{3, "DONT", "don't use option"},
		DO: telnetCmd{3, "DO", "do use option"},
		WONT: telnetCmd{3, "WONT", "won't use option"},
		WILL: telnetCmd{3, "WILL", "will use option"},
		SB: telnetCmd{2, "SB", "Begin of subnegotiation parameters"},
		GA: telnetCmd{2, "GA", "go ahead signal"},
		EL: telnetCmd{2, "EL", "erase line"},
		EC: telnetCmd{2, "EC", "erase character"},
		AYT: telnetCmd{2, "AYT", "are you there"},
		AO: telnetCmd{2, "AO", "abort output"},
		IP: telnetCmd{2, "IP", "interrupt process"},
		BREA: telnetCmd{2, "BREA", "break"},
		DM: telnetCmd{2, "DM", "data mark"},
		NOP: telnetCmd{2, "NOP", "no operation"},
		SE: telnetCmd{2, "SE", "End of subnegotiation parameters"},
	}
)
// This is the signature of telgo command functions. It receives a pointer to
// the telgo client struct and a slice of strings containing the arguments the
// user has supplied. The first argument is always the command name itself.
// If this function returns true the client connection will be terminated.
type TelgoCmd func(c *TelnetClient, args []string) bool
// TelgoCmdList maps a command name to the function that implements it.
type TelgoCmdList map[string]TelgoCmd
// This struct is used to export the raw tcp connection to the client as well as
// the UserData which got supplied to NewTelnetServer.
// The Cancel channel will get ready for reading when the user hits Ctrl-C or
// the connection got terminated. This can be used for long running telgo commands
// to be aborted.
type TelnetClient struct {
	Conn net.Conn
	UserData interface{}
	Cancel chan bool
	scanner *bufio.Scanner // telnet-aware tokenizer (split function set in newTelnetClient)
	writer *bufio.Writer
	prompt string
	commands *TelgoCmdList
	iacout chan []byte // out-of-band telnet replies, drained by send()
	stdout chan []byte // regular client output, drained by send()
}
// newTelnetClient wires up a client for the given connection: buffered
// reader/writer, output channels and a telnet-aware split function that
// keeps its IAC scan position (lastiiac) in a closure.
func newTelnetClient(conn net.Conn, prompt string, commands *TelgoCmdList, userdata interface{}) (c *TelnetClient) {
	tl.Println("new client from:", conn.RemoteAddr())
	c = &TelnetClient{}
	c.Conn = conn
	c.scanner = bufio.NewScanner(conn)
	c.writer = bufio.NewWriter(conn)
	c.prompt = prompt
	c.commands = commands
	c.UserData = userdata
	c.stdout = make(chan []byte)
	c.Cancel = make(chan bool, 1)
	// the telnet split function needs some closures to handle inline telnet commands
	c.iacout = make(chan []byte)
	lastiiac := 0
	c.scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		return scanLines(data, atEOF, c.iacout, &lastiiac)
	})
	return c
}
// This writes a 'raw' string to the client. For the most part the usage of Say
// and Sayln is recommended. WriteString will take care of escaping IAC bytes
// inside your string.
func (c *TelnetClient) WriteString(text string) {
	// Double every IAC byte (telnet escaping) before handing the data to
	// the send goroutine via the stdout channel.
	escaped := bytes.Replace([]byte(text), []byte{IAC}, []byte{IAC, IAC}, -1)
	c.stdout <- escaped
}
// This is a simple Printf like interface which sends responses to the client.
func (c *TelnetClient) Say(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	c.WriteString(msg)
}
// This is the same as Say but also adds a new-line at the end of the string.
func (c *TelnetClient) Sayln(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	c.WriteString(msg + "\r\n")
}
var (
	// escapeRe matches a backslash followed by any single character; used by
	// stripEscapeChars to resolve escape sequences inside quoted arguments.
	escapeRe = regexp.MustCompile("\\\\.")
)
// replEscapeChars maps a two-character escape sequence (as matched by
// escapeRe) to its replacement: the usual C-style escapes become their
// control character, anything else is replaced by the escaped character
// itself (e.g. `\"` -> `"`).
func replEscapeChars(m string) (r string) {
	known := map[string]string{
		`\a`: "\a",
		`\b`: "\b",
		`\t`: "\t",
		`\n`: "\n",
		`\v`: "\v",
		`\f`: "\f",
		`\r`: "\r",
	}
	if repl, ok := known[m]; ok {
		return repl
	}
	return string(m[1])
}
// stripEscapeChars resolves all backslash escape sequences in s using
// replEscapeChars; it is applied to the contents of quoted arguments.
func stripEscapeChars(s string) string {
	return escapeRe.ReplaceAllStringFunc(s, replEscapeChars)
}
// spacesAndQuotes reports whether r separates arguments outside of a quoted
// string: any whitespace rune or a double quote.
func spacesAndQuotes(r rune) bool {
	return r == '"' || unicode.IsSpace(r)
}
// backslashAndQuotes reports whether r is significant inside a quoted
// string: a backslash (escape) or a double quote (terminator).
func backslashAndQuotes(r rune) bool {
	switch r {
	case '\\', '"':
		return true
	}
	return false
}
// splitCmdArguments tokenizes a command line into arguments. Arguments are
// separated by whitespace; double-quoted arguments may contain whitespace and
// backslash escapes (resolved via stripEscapeChars). It returns an error for
// an unterminated quote, a missing space after a closing quote, or a sole
// trailing backslash inside a quote.
func splitCmdArguments(cmdstr string) (cmds []string, err error) {
	// sepFunc switches between "outside quote" and "inside quote" separator
	// sets; lastesc remembers where to resume scanning after an escape.
	sepFunc := spacesAndQuotes
	foundQuote := false
	lastesc := 0
	for {
		i := strings.IndexFunc(cmdstr[lastesc:], sepFunc)
		if i < 0 {
			// no more separators: flush the remainder (if any)
			if foundQuote {
				err = fmt.Errorf("closing \" is missing")
			}
			if len(cmdstr) > 0 {
				cmds = append(cmds, cmdstr)
			}
			return
		}
		i += lastesc
		switch cmdstr[i] {
		case '\t', ' ':
			// whitespace separator: emit the token accumulated so far
			if i > 0 {
				cmds = append(cmds, cmdstr[0:i])
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '"':
			if foundQuote {
				// closing quote: emit the quoted token with escapes resolved
				cmds = append(cmds, stripEscapeChars(cmdstr[0:i]))
				foundQuote = false
				sepFunc = spacesAndQuotes
				if len(cmdstr) == i+1 { // is this the end?
					return
				}
				if !unicode.IsSpace(rune(cmdstr[i+1])) {
					err = fmt.Errorf("there must be a space after a closing \"")
					return
				}
			} else {
				// opening quote: switch to in-quote scanning
				foundQuote = true
				sepFunc = backslashAndQuotes
			}
			lastesc = 0
			cmdstr = cmdstr[i+1:]
		case '\\':
			// escape inside a quote: skip the escaped character
			if len(cmdstr[i:]) < 2 {
				err = fmt.Errorf("sole \\ at the end and no closing \"")
				return
			}
			lastesc = i + 2
		}
	}
}
// handleCmd parses one command line, dispatches it to the matching TelgoCmd
// and reports via done whether the connection should be closed (the command
// returned true). It runs in its own goroutine per command.
func (c *TelnetClient) handleCmd(cmdstr string, done chan<- bool) {
	quit := false
	// always signal completion, even on parse errors or unknown commands
	defer func() { done <- quit }()
	cmdslice, err := splitCmdArguments(cmdstr)
	if err != nil {
		c.Sayln("can't parse command: %s", err)
		return
	}
	if len(cmdslice) == 0 || cmdslice[0] == "" {
		return
	}
	select {
	case <-c.Cancel: // consume potentially pending cancel request
	default:
	}
	for cmd, cmdfunc := range *c.commands {
		if cmdslice[0] == cmd {
			quit = cmdfunc(c, cmdslice)
			return
		}
	}
	c.Sayln("unknown command '%s'", cmdslice[0])
}
// handleIac parses one in-band telnet command (starting with IAC) and sends
// the out-of-band response via iacout. Option negotiation is always refused;
// IP (interrupt process) is forwarded so client.handle can cancel the
// running command; escaped IACs are left for dropIAC to resolve.
func handleIac(iac []byte, iacout chan<- []byte) {
	switch iac[1] {
	case WILL, WONT:
		iac[1] = DONT // deny the client to use any proposed options
	case DO, DONT:
		iac[1] = WONT // refuse the usage of any requested options
	case IP:
		// pass this through to client.handle which will cancel the process
	case IAC:
		return // just an escaped IAC, this will be dealt with by dropIAC
	default:
		tl.Printf("ignoring unimplemented telnet command: %s (%s)", telnetCmds[iac[1]].name, telnetCmds[iac[1]].description)
		return
	}
	iacout <- iac
}
// dropCR removes a single trailing carriage return from data, if present.
func dropCR(data []byte) []byte {
	n := len(data)
	if n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// remove all telnet commands which are still on the read buffer and were
// handled already using out-of-band messages; escaped IACs (IAC IAC) are
// unescaped to a single IAC data byte. Returns the cleaned payload.
func dropIAC(data []byte) []byte {
	token := []byte("")
	iiac := 0 // index of the current/next IAC under inspection
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC)
		if niiac >= 0 {
			// copy the plain data up to the IAC, then skip the command
			token = append(token, data[iiac:iiac+niiac]...)
			iiac += niiac
			if (len(data) - iiac) < 2 { // check if the data at least contains a command code
				return token // something is fishy.. found an IAC but this is the last byte of the token...
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnetCmds[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l { // check if the command is complete
				return token // something is fishy.. found an IAC but the command is too short...
			}
			if data[iiac+1] == IAC { // escaped IAC found
				token = append(token, IAC)
			}
			iiac += l
		} else {
			// no further IAC: copy the rest verbatim
			token = append(token, data[iiac:]...)
			break
		}
	}
	return token
}
// compareIdx compares two indexes as returned by bytes.IndexByte, treating -1
// ("not found") as the highest possible index. Result is <0, 0 or >0 like a
// classic comparator.
func compareIdx(a, b int) int {
	const maxInt = int(^uint(0) >> 1) // platform-sized math.MaxInt
	if a < 0 {
		a = maxInt
	}
	if b < 0 {
		b = maxInt
	}
	return a - b
}
// scanLines is the bufio.SplitFunc for telnet connections. It yields either a
// complete line (CR and in-band telnet commands stripped) or a single EOT
// byte (Ctrl-D). Telnet commands (IAC sequences) found before the line end
// are handled out-of-band via handleIac/iacout; lastiiac carries the scan
// position across calls so commands are not handled twice.
func scanLines(data []byte, atEOF bool, iacout chan<- []byte, lastiiac *int) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	inl := bytes.IndexByte(data, '\n') // index of first newline character
	ieot := bytes.IndexByte(data, EOT) // index of first End of Transmission
	iiac := *lastiiac
	for {
		niiac := bytes.IndexByte(data[iiac:], IAC) // index of first/next telnet IAC
		if niiac >= 0 {
			iiac += niiac
		} else {
			iiac = niiac
		}
		if inl >= 0 && compareIdx(inl, ieot) < 0 && compareIdx(inl, iiac) < 0 {
			*lastiiac = 0
			return inl + 1, dropIAC(dropCR(data[0:inl])), nil // found a complete line and no EOT or IAC
		}
		if ieot >= 0 && compareIdx(ieot, iiac) < 0 {
			*lastiiac = 0
			return ieot + 1, data[ieot : ieot+1], nil // found an EOT (aka Ctrl-D was hit) and no IAC
		}
		if iiac >= 0 { // found an IAC
			if (len(data) - iiac) < 2 {
				return 0, nil, nil // data does not yet contain the telnet command code -> need more data
			}
			l := 2 // if we don't know this command - assume it has a length of 2
			if cmd, found := telnetCmds[data[iiac+1]]; found {
				l = cmd.length
			}
			if (len(data) - iiac) < l {
				return 0, nil, nil // data does not yet contain the complete telnet command -> need more data
			}
			// handle the command out-of-band and continue scanning after it
			handleIac(data[iiac:iiac+l], iacout)
			iiac += l
			*lastiiac = iiac
		} else {
			break
		}
	}
	if atEOF {
		return len(data), dropCR(data), nil // allow last line to have no new line
	}
	return 0, nil, nil // we have found none of the escape codes -> need more data
}
// recv reads complete lines from the connection and forwards them to the
// handle loop via in. It returns (closing in) on Ctrl-D, a scanner error or
// a closed connection.
func (c *TelnetClient) recv(in chan<- string) {
	defer close(in)
	for c.scanner.Scan() {
		b := c.scanner.Bytes()
		// a lone EOT token means Ctrl-D: terminate the session
		if len(b) > 0 && b[0] == EOT {
			tl.Printf("client(%s): Ctrl-D received, closing", c.Conn.RemoteAddr())
			return
		}
		in <- string(b)
	}
	if err := c.scanner.Err(); err != nil {
		tl.Printf("client(%s): recv() error: %s", c.Conn.RemoteAddr(), err)
	} else {
		tl.Printf("client(%s): Connection closed by foreign host", c.Conn.RemoteAddr())
	}
}
// cancel requests cancellation of a running command. The non-blocking send
// works because Cancel is buffered with capacity 1; if a request is already
// pending it is simply dropped.
func (c *TelnetClient) cancel() {
	select {
	case c.Cancel <- true:
	default: // process got canceled already
	}
}
// send is the single writer goroutine for the connection. It multiplexes
// out-of-band telnet responses (iacout) and regular output (stdout) onto the
// buffered writer until quit is signalled. An IP command is translated into
// a cancel request instead of being written out.
func (c *TelnetClient) send(quit <-chan bool) {
	for {
		select {
		case <-quit:
			return
		case iac := <-c.iacout:
			if iac[1] == IP {
				c.cancel()
			} else {
				c.writer.Write(iac)
				c.writer.Flush()
			}
		case data := <-c.stdout:
			c.writer.Write(data)
			c.writer.Flush()
		}
	}
}
// handle is the per-connection main loop: it starts the recv and send
// goroutines, shows the prompt and dispatches entered commands one at a time.
// While a command runs (busy), further input is ignored (only Ctrl-D still
// closes the session). It returns when recv closes in or a command requests
// termination, cleaning up via the deferred calls.
func (c *TelnetClient) handle() {
	defer c.Conn.Close()
	in := make(chan string)
	go c.recv(in)
	quitSend := make(chan bool)
	go c.send(quitSend)
	defer func() { quitSend <- true }()
	defer c.cancel() // make sure to cancel possible running job when closing connection
	done := make(chan bool)
	busy := false
	c.WriteString(c.prompt)
	for {
		select {
		case cmd, ok := <-in:
			if !ok { // Ctrl-D or recv error (connection closed...)
				return
			}
			if !busy { // ignore everything except Ctrl-D while executing a command
				if len(cmd) > 0 {
					go c.handleCmd(cmd, done)
					busy = true
				} else {
					c.WriteString(c.prompt)
				}
			}
		case exit := <-done:
			if exit {
				return
			}
			c.WriteString(c.prompt)
			busy = false
		}
	}
}
// TelnetServer holds the configuration shared by all client connections.
type TelnetServer struct {
	addr     string       // listen address, passed to net.Listen
	prompt   string       // prompt shown to every client
	commands TelgoCmdList // available commands
	userdata interface{}  // opaque data exposed via TelnetClient.UserData
}
// This creates a new telnet server. addr is the address to bind/listen to on and will be passed through
// to net.Listen(). The prompt will be sent to the client whenever the telgo server is ready for a new command.
// TelgoCmdList is a list of available commands and userdata will be made available to called telgo
// commands through the client struct.
func NewTelnetServer(addr, prompt string, commands TelgoCmdList, userdata interface{}) (s *TelnetServer) {
	return &TelnetServer{
		addr:     addr,
		prompt:   prompt,
		commands: commands,
		userdata: userdata,
	}
}
// This runs the telnet server and spawns go routines for every connecting client.
// It only returns on a Listen or Accept error; the error is also logged.
func (self *TelnetServer) Run() error {
	tl.Println("listening on", self.addr)
	server, err := net.Listen("tcp", self.addr)
	if err != nil {
		tl.Println("Listen() Error:", err)
		return err
	}
	// Close the listener on every return path (the loop only exits on an
	// Accept error) so the socket is not leaked.
	defer server.Close()
	for {
		conn, err := server.Accept()
		if err != nil {
			tl.Println("Accept() Error:", err)
			return err
		}
		// one goroutine per client connection
		c := newTelnetClient(conn, self.prompt, &self.commands, self.userdata)
		go c.handle()
	}
}
|
/*
client_events.go provides constants for making VNC keyboard and mouse events.
Sample usage:
// Move mouse to x=100, y=200.
x, y := 100, 200
conn.PointerEvent(vnc.Mouse_none, x, y)
// Give mouse some time to "settle."
time.Sleep(10*time.Millisecond)
// Left click.
conn.PointerEvent(vnc.Mouse_left, x, y)
conn.PointerEvent(vnc.Mouse_none, x, y)
// Press return key
conn.KeyEvent(vnc.Key_return, true)
// Release the key.
conn.KeyEvent(vnc.Key_return, false)
*/
package vnc
// Latin 1 (byte 3 = 0)
// ISO/IEC 8859-1 = Unicode U+0020..U+00FF
// Keysym values for the printable ASCII range; each keysym equals the
// character's code point, starting at 0x0020 (space).
const (
	Key_space = iota + 0x0020
	Key_exclam
	Key_quotedbl
	Key_numbersign
	Key_dollar
	Key_percent
	Key_ampersand
	Key_apostrophe
	Key_parenleft
	Key_parenright
	Key_asterisk
	Key_plus
	Key_comma
	Key_minus
	Key_period
	Key_slash
	Key_0
	Key_1
	Key_2
	Key_3
	Key_4
	Key_5
	Key_6
	Key_7
	Key_8
	Key_9
	Key_colon
	Key_semicolon
	Key_less
	Key_equal
	Key_greater
	Key_question
	Key_at
	Key_A
	Key_B
	Key_C
	Key_D
	Key_E
	Key_F
	Key_G
	Key_H
	Key_I
	Key_J
	Key_K
	Key_L
	Key_M
	Key_N
	Key_O
	Key_P
	Key_Q
	Key_R
	Key_S
	Key_T
	Key_U
	Key_V
	Key_W
	Key_X
	Key_Y
	Key_Z
	Key_bracketleft
	Key_backslash
	Key_bracketright
	Key_asciicircum
	Key_underscore
	Key_grave
	Key_a
	Key_b
	Key_c
	Key_d
	Key_e
	Key_f
	Key_g
	Key_h
	Key_i
	Key_j
	Key_k
	Key_l
	Key_m
	Key_n
	Key_o
	Key_p
	Key_q
	Key_r
	Key_s
	Key_t
	Key_u
	Key_v
	Key_w
	Key_x
	Key_y
	Key_z
	Key_braceleft
	Key_bar
	Key_braceright
	Key_asciitilde
)

// Editing keys (X11 keysym range 0xff08..); the blank identifier skips the
// unused 0xff0c slot so Key_return lands on 0xff0d.
const (
	Key_backspace = iota + 0xff08
	Key_tab
	Key_linefeed
	Key_clear
	_
	Key_return
)

// Miscellaneous control keys with non-contiguous keysym values.
const (
	Key_pause       = 0xff13
	Key_scroll_lock = 0xff14
	Key_sys_req     = 0xff15
	Key_escape      = 0xff1b
)

// Function keys F1..F12 (contiguous from 0xffbe).
const (
	Key_f1 = iota + 0xffbe
	Key_f2
	Key_f3
	Key_f4
	Key_f5
	Key_f6
	Key_f7
	Key_f8
	Key_f9
	Key_f10
	Key_f11
	Key_f12
)

// Modifier keys (0xffe1..); the blank identifiers skip the shift-lock and
// meta keysym slots so Key_alt_l lands on 0xffe9.
const (
	Key_shift_l = iota + 0xffe1
	Key_shift_r
	Key_control_l
	Key_control_r
	Key_caps_lock
	_
	_
	_
	Key_alt_l
	Key_alt_r
	Key_delete = 0xffff
)

// Mouse button masks for PointerEvent; buttons are bit flags and may be
// combined, Mouse_none (0) releases all buttons.
const (
	// Mouse buttons
	Mouse_left = 1 << iota
	Mouse_middle
	Mouse_right
	Mouse_none = 0
)
Fixed constant naming to follow Effective Go conventions (MixedCaps instead of underscore-separated names).
/*
client_events.go provides constants for making VNC keyboard and mouse events.
Sample usage:
// Move mouse to x=100, y=200.
x, y := 100, 200
conn.PointerEvent(vnc.MouseNone, x, y)
// Give mouse some time to "settle."
time.Sleep(10*time.Millisecond)
// Left click.
conn.PointerEvent(vnc.MouseLeft, x, y)
conn.PointerEvent(vnc.MouseNone, x, y)
// Press return key
conn.KeyEvent(vnc.KeyReturn, true)
// Release the key.
conn.KeyEvent(vnc.KeyReturn, false)
*/
package vnc
// Latin 1 (byte 3 = 0)
// ISO/IEC 8859-1 = Unicode U+0020..U+00FF
// Keysym values for the printable ASCII range; each keysym equals the
// character's code point, starting at 0x0020 (space).
const (
	KeySpace = iota + 0x0020
	KeyExclam
	KeyQuoteDbl
	KeyNumberSign
	KeyDollar
	KeyPercent
	KeyAmpersand
	KeyApostrophe
	KeyParenLeft
	KeyParenRight
	KeyAsterisk
	KeyPlus
	KeyComma
	KeyMinus
	KeyPeriod
	KeySlash
	Key0
	Key1
	Key2
	Key3
	Key4
	Key5
	Key6
	Key7
	Key8
	Key9
	KeyColon
	KeySemicolon
	KeyLess
	KeyEqual
	KeyGreater
	KeyQuestion
	KeyAt
	KeyA
	KeyB
	KeyC
	KeyD
	KeyE
	KeyF
	KeyG
	KeyH
	KeyI
	KeyJ
	KeyK
	KeyL
	KeyM
	KeyN
	KeyO
	KeyP
	KeyQ
	KeyR
	KeyS
	KeyT
	KeyU
	KeyV
	KeyW
	KeyX
	KeyY
	KeyZ
	KeyBracketLeft
	KeyBackslash
	KeyBracketRight
	KeyAsciiCircum
	KeyUnderscore
	KeyGrave
	Keya
	Keyb
	Keyc
	Keyd
	Keye
	Keyf
	Keyg
	Keyh
	Keyi
	Keyj
	Keyk
	Keyl
	Keym
	Keyn
	Keyo
	Keyp
	Keyq
	Keyr
	Keys
	Keyt
	Keyu
	Keyv
	Keyw
	Keyx
	Keyy
	Keyz
	KeyBraceLeft
	KeyBar
	KeyBraceRight
	KeyAsciiTilde
)

// Editing keys (X11 keysym range 0xff08..); the blank identifier skips the
// unused 0xff0c slot so KeyReturn lands on 0xff0d.
const (
	KeyBackspace = iota + 0xff08
	KeyTab
	KeyLinefeed
	KeyClear
	_
	KeyReturn
)

// Miscellaneous control keys with non-contiguous keysym values.
const (
	KeyPause      = 0xff13
	KeyScrollLock = 0xff14
	KeySysReq     = 0xff15
	KeyEscape     = 0xff1b
)

// Function keys F1..F12 (contiguous from 0xffbe).
const (
	KeyF1 = iota + 0xffbe
	KeyF2
	KeyF3
	KeyF4
	KeyF5
	KeyF6
	KeyF7
	KeyF8
	KeyF9
	KeyF10
	KeyF11
	KeyF12
)

// Modifier keys (0xffe1..); the blank identifiers skip the shift-lock and
// meta keysym slots so KeyAltL lands on 0xffe9.
const (
	KeyShiftL = iota + 0xffe1
	KeyShiftR
	KeyControlL
	KeyControlR
	KeyCapsLock
	_
	_
	_
	KeyAltL
	KeyAltR
	KeyDelete = 0xffff
)

// Mouse button masks for PointerEvent; buttons are bit flags and may be
// combined, MouseNone (0) releases all buttons.
const (
	// Mouse buttons
	MouseLeft = 1 << iota
	MouseMiddle
	MouseRight
	MouseNone = 0
)
|
package restorer
import (
"bufio"
"context"
"io"
"math"
"path/filepath"
"sort"
"sync"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
)
// TODO if a blob is corrupt, there may be good blob copies in other packs
// TODO evaluate if it makes sense to split download and processing workers
// pro: can (slowly) read network and decrypt/write files concurrently
// con: each worker needs to keep one pack in memory
const (
	// number of concurrent pack download/processing workers
	workerCount = 8
	// files with more than this many blobs get a per-pack blob index
	// (see restoreFiles) instead of re-scanning the full blob list
	largeFileBlobCount = 25
)
// information about regular file being restored
type fileInfo struct {
	lock       sync.Mutex  // guards inProgress
	inProgress bool        // true once the first blob write created the file
	size       int64       // target file size
	location   string      // file on local filesystem relative to restorer basedir
	blobs      interface{} // blobs of the file: restic.IDs, or map[restic.ID][]fileBlobInfo for large files
}

type fileBlobInfo struct {
	id     restic.ID // the blob id
	offset int64     // blob offset in the file
}

// information about a data pack required to restore one or more files
type packInfo struct {
	id    restic.ID              // the pack id
	files map[*fileInfo]struct{} // set of files that use blobs from this pack
}

// fileRestorer restores set of files
type fileRestorer struct {
	key         *crypto.Key // repository master key used to decrypt blobs
	idx         func(restic.BlobHandle) []restic.PackedBlob // blob -> pack lookup
	packLoader  func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error // loads a byte range of a pack file
	filesWriter *filesWriter // concurrent file writer
	dst         string       // restore destination directory
	files       []*fileInfo  // files to restore
	Error       func(string, error) error // per-file error callback; may swallow errors
}
// newFileRestorer creates a fileRestorer writing below dst, loading pack
// bytes via packLoader, decrypting with key and resolving blobs via idx.
// By default every per-file error aborts the whole restore.
func newFileRestorer(dst string,
	packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error,
	key *crypto.Key,
	idx func(restic.BlobHandle) []restic.PackedBlob) *fileRestorer {
	return &fileRestorer{
		key:         key,
		idx:         idx,
		packLoader:  packLoader,
		filesWriter: newFilesWriter(workerCount),
		dst:         dst,
		Error:       restorerAbortOnAllErrors,
	}
}
// addFile registers a file (restorer-relative location, content blob IDs and
// target size) for restoreFiles to process.
func (r *fileRestorer) addFile(location string, content restic.IDs, size int64) {
	r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size})
}

// targetPath maps a restorer-relative location to its absolute path below dst.
func (r *fileRestorer) targetPath(location string) string {
	return filepath.Join(r.dst, location)
}
// forEachBlob invokes fn with the first pack containing each blob, in blob
// order. It fails if any blob is missing from the index.
func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob)) error {
	if len(blobIDs) == 0 {
		return nil
	}
	for _, blobID := range blobIDs {
		packs := r.idx(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
		if len(packs) == 0 {
			return errors.Errorf("Unknown blob %s", blobID.String())
		}
		// a blob may be stored in several packs; the first copy is used
		fn(packs[0].PackID, packs[0].Blob)
	}
	return nil
}
// restoreFiles builds the pack -> files mapping for all registered files and
// then downloads the packs with workerCount concurrent workers. For large
// files the blob list is converted to a per-pack index so downloadPack does
// not have to rescan all blobs. Returns the first error encountered.
func (r *fileRestorer) restoreFiles(ctx context.Context) error {
	packs := make(map[restic.ID]*packInfo) // all packs
	// Process packs in order of first access. While this cannot guarantee
	// that file chunks are restored sequentially, it offers a good enough
	// approximation to shorten restore times by up to 19% in some test.
	var packOrder restic.IDs
	// create packInfo from fileInfo
	for _, file := range r.files {
		fileBlobs := file.blobs.(restic.IDs)
		largeFile := len(fileBlobs) > largeFileBlobCount
		var packsMap map[restic.ID][]fileBlobInfo
		if largeFile {
			packsMap = make(map[restic.ID][]fileBlobInfo)
		}
		fileOffset := int64(0)
		err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) {
			if largeFile {
				// remember where in the file each blob of this pack goes
				packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset})
				fileOffset += int64(blob.Length) - crypto.Extension
			}
			pack, ok := packs[packID]
			if !ok {
				pack = &packInfo{
					id:    packID,
					files: make(map[*fileInfo]struct{}),
				}
				packs[packID] = pack
				packOrder = append(packOrder, packID)
			}
			pack.files[file] = struct{}{}
		})
		if err != nil {
			// repository index is messed up, can't do anything
			return err
		}
		if largeFile {
			// switch the file to the per-pack representation
			file.blobs = packsMap
		}
	}
	wg, ctx := errgroup.WithContext(ctx)
	downloadCh := make(chan *packInfo)
	worker := func() error {
		for pack := range downloadCh {
			if err := r.downloadPack(ctx, pack); err != nil {
				return err
			}
		}
		return nil
	}
	for i := 0; i < workerCount; i++ {
		wg.Go(worker)
	}
	// the main restore loop
	wg.Go(func() error {
		for _, id := range packOrder {
			pack := packs[id]
			select {
			case <-ctx.Done():
				return ctx.Err()
			case downloadCh <- pack:
				debug.Log("Scheduled download pack %s", pack.id.Str())
			}
		}
		close(downloadCh)
		return nil
	})
	return wg.Wait()
}
// maximum size of the read buffer used while streaming a pack
const maxBufferSize = 4 * 1024 * 1024

// downloadPack loads the byte range of pack covering all needed blobs,
// decrypts each blob once and writes it to every file/offset referencing it.
// Blob-level errors are routed through r.Error (sanitizeError) so a restore
// can optionally continue past damaged blobs.
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
	// calculate pack byte range and blob->[]files->[]offsets mappings
	start, end := int64(math.MaxInt64), int64(0)
	blobs := make(map[restic.ID]struct {
		offset int64                 // offset of the blob in the pack
		length int                   // length of the blob
		files  map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
	})
	for file := range pack.files {
		addBlob := func(blob restic.Blob, fileOffset int64) {
			if start > int64(blob.Offset) {
				start = int64(blob.Offset)
			}
			if end < int64(blob.Offset+blob.Length) {
				end = int64(blob.Offset + blob.Length)
			}
			blobInfo, ok := blobs[blob.ID]
			if !ok {
				blobInfo.offset = int64(blob.Offset)
				blobInfo.length = int(blob.Length)
				blobInfo.files = make(map[*fileInfo][]int64)
				blobs[blob.ID] = blobInfo
			}
			blobInfo.files[file] = append(blobInfo.files[file], fileOffset)
		}
		if fileBlobs, ok := file.blobs.(restic.IDs); ok {
			fileOffset := int64(0)
			// BUGFIX: the returned error was silently dropped before; an
			// index lookup failure must not be ignored here.
			err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) {
				if packID.Equal(pack.id) {
					addBlob(blob, fileOffset)
				}
				fileOffset += int64(blob.Length) - crypto.Extension
			})
			if err != nil {
				// restoreFiles should have caught this error before
				panic(err)
			}
		} else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok {
			// large file: only this pack's blobs were recorded
			for _, blob := range packsMap[pack.id] {
				idxPacks := r.idx(restic.BlobHandle{ID: blob.id, Type: restic.DataBlob})
				for _, idxPack := range idxPacks {
					if idxPack.PackID.Equal(pack.id) {
						addBlob(idxPack.Blob, blob.offset)
						break
					}
				}
			}
		}
	}
	// process blobs in pack order so the stream can be read sequentially
	sortedBlobs := make([]restic.ID, 0, len(blobs))
	for blobID := range blobs {
		sortedBlobs = append(sortedBlobs, blobID)
	}
	sort.Slice(sortedBlobs, func(i, j int) bool {
		return blobs[sortedBlobs[i]].offset < blobs[sortedBlobs[j]].offset
	})
	// route an error through the per-file error callback
	sanitizeError := func(file *fileInfo, err error) error {
		if err != nil {
			err = r.Error(file.location, err)
		}
		return err
	}
	h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
	err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
		bufferSize := int(end - start)
		if bufferSize > maxBufferSize {
			bufferSize = maxBufferSize
		}
		bufRd := bufio.NewReaderSize(rd, bufferSize)
		currentBlobEnd := start
		var blobData, buf []byte
		for _, blobID := range sortedBlobs {
			blob := blobs[blobID]
			// skip the gap between the previous blob and this one
			_, err := bufRd.Discard(int(blob.offset - currentBlobEnd))
			if err != nil {
				return err
			}
			blobData, buf, err = r.loadBlob(bufRd, blobID, blob.length, buf)
			if err != nil {
				for file := range blob.files {
					if errFile := sanitizeError(file, err); errFile != nil {
						return errFile
					}
				}
				continue
			}
			currentBlobEnd = blob.offset + int64(blob.length)
			for file, offsets := range blob.files {
				for _, offset := range offsets {
					writeToFile := func() error {
						// this looks overly complicated and needs explanation
						// two competing requirements:
						// - must create the file once and only once
						// - should allow concurrent writes to the file
						// so write the first blob while holding file lock
						// write other blobs after releasing the lock
						createSize := int64(-1)
						file.lock.Lock()
						if file.inProgress {
							file.lock.Unlock()
						} else {
							defer file.lock.Unlock()
							file.inProgress = true
							createSize = file.size
						}
						return r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize)
					}
					err := sanitizeError(file, writeToFile())
					if err != nil {
						return err
					}
				}
			}
		}
		return nil
	})
	if err != nil {
		// a pack-level error affects every file using this pack
		for file := range pack.files {
			if errFile := sanitizeError(file, err); errFile != nil {
				return errFile
			}
		}
	}
	return nil
}
// loadBlob reads one encrypted blob of the given length from rd, decrypts it
// and verifies its content hash against blobID. buf is reused across calls;
// the (possibly reallocated) buffer is returned alongside the plaintext,
// which aliases buf.
func (r *fileRestorer) loadBlob(rd io.Reader, blobID restic.ID, length int, buf []byte) ([]byte, []byte, error) {
	// TODO reconcile with Repository#loadBlob implementation
	if cap(buf) < length {
		buf = make([]byte, length)
	} else {
		buf = buf[:length]
	}
	n, err := io.ReadFull(rd, buf)
	if err != nil {
		return nil, nil, err
	}
	if n != length {
		return nil, nil, errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", blobID.Str(), length, n)
	}
	// decrypt
	nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
	plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
	if err != nil {
		return nil, nil, errors.Errorf("decrypting blob %v failed: %v", blobID, err)
	}
	// check hash
	if !restic.Hash(plaintext).Equal(blobID) {
		return nil, nil, errors.Errorf("blob %v returned invalid hash", blobID)
	}
	return plaintext, buf, nil
}
restorer: check the previously dropped error returned by forEachBlob in downloadPack
package restorer
import (
"bufio"
"context"
"io"
"math"
"path/filepath"
"sort"
"sync"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
)
// TODO if a blob is corrupt, there may be good blob copies in other packs
// TODO evaluate if it makes sense to split download and processing workers
// pro: can (slowly) read network and decrypt/write files concurrently
// con: each worker needs to keep one pack in memory
const (
	// number of concurrent pack download/processing workers
	workerCount = 8
	// files with more than this many blobs get a per-pack blob index
	// (see restoreFiles) instead of re-scanning the full blob list
	largeFileBlobCount = 25
)
// information about regular file being restored
type fileInfo struct {
	lock       sync.Mutex  // guards inProgress
	inProgress bool        // true once the first blob write created the file
	size       int64       // target file size
	location   string      // file on local filesystem relative to restorer basedir
	blobs      interface{} // blobs of the file: restic.IDs, or map[restic.ID][]fileBlobInfo for large files
}

type fileBlobInfo struct {
	id     restic.ID // the blob id
	offset int64     // blob offset in the file
}

// information about a data pack required to restore one or more files
type packInfo struct {
	id    restic.ID              // the pack id
	files map[*fileInfo]struct{} // set of files that use blobs from this pack
}

// fileRestorer restores set of files
type fileRestorer struct {
	key         *crypto.Key // repository master key used to decrypt blobs
	idx         func(restic.BlobHandle) []restic.PackedBlob // blob -> pack lookup
	packLoader  func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error // loads a byte range of a pack file
	filesWriter *filesWriter // concurrent file writer
	dst         string       // restore destination directory
	files       []*fileInfo  // files to restore
	Error       func(string, error) error // per-file error callback; may swallow errors
}
// newFileRestorer creates a fileRestorer writing below dst, loading pack
// bytes via packLoader, decrypting with key and resolving blobs via idx.
// By default every per-file error aborts the whole restore.
func newFileRestorer(dst string,
	packLoader func(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error,
	key *crypto.Key,
	idx func(restic.BlobHandle) []restic.PackedBlob) *fileRestorer {
	return &fileRestorer{
		key:         key,
		idx:         idx,
		packLoader:  packLoader,
		filesWriter: newFilesWriter(workerCount),
		dst:         dst,
		Error:       restorerAbortOnAllErrors,
	}
}
// addFile registers a file (restorer-relative location, content blob IDs and
// target size) for restoreFiles to process.
func (r *fileRestorer) addFile(location string, content restic.IDs, size int64) {
	r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size})
}

// targetPath maps a restorer-relative location to its absolute path below dst.
func (r *fileRestorer) targetPath(location string) string {
	return filepath.Join(r.dst, location)
}
// forEachBlob invokes fn with the first pack containing each blob, in blob
// order. It fails if any blob is missing from the index.
func (r *fileRestorer) forEachBlob(blobIDs []restic.ID, fn func(packID restic.ID, packBlob restic.Blob)) error {
	if len(blobIDs) == 0 {
		return nil
	}
	for _, blobID := range blobIDs {
		packs := r.idx(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
		if len(packs) == 0 {
			return errors.Errorf("Unknown blob %s", blobID.String())
		}
		// a blob may be stored in several packs; the first copy is used
		fn(packs[0].PackID, packs[0].Blob)
	}
	return nil
}
// restoreFiles builds the pack -> files mapping for all registered files and
// then downloads the packs with workerCount concurrent workers. For large
// files the blob list is converted to a per-pack index so downloadPack does
// not have to rescan all blobs. Returns the first error encountered.
func (r *fileRestorer) restoreFiles(ctx context.Context) error {
	packs := make(map[restic.ID]*packInfo) // all packs
	// Process packs in order of first access. While this cannot guarantee
	// that file chunks are restored sequentially, it offers a good enough
	// approximation to shorten restore times by up to 19% in some test.
	var packOrder restic.IDs
	// create packInfo from fileInfo
	for _, file := range r.files {
		fileBlobs := file.blobs.(restic.IDs)
		largeFile := len(fileBlobs) > largeFileBlobCount
		var packsMap map[restic.ID][]fileBlobInfo
		if largeFile {
			packsMap = make(map[restic.ID][]fileBlobInfo)
		}
		fileOffset := int64(0)
		err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) {
			if largeFile {
				// remember where in the file each blob of this pack goes
				packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset})
				fileOffset += int64(blob.Length) - crypto.Extension
			}
			pack, ok := packs[packID]
			if !ok {
				pack = &packInfo{
					id:    packID,
					files: make(map[*fileInfo]struct{}),
				}
				packs[packID] = pack
				packOrder = append(packOrder, packID)
			}
			pack.files[file] = struct{}{}
		})
		if err != nil {
			// repository index is messed up, can't do anything
			return err
		}
		if largeFile {
			// switch the file to the per-pack representation
			file.blobs = packsMap
		}
	}
	wg, ctx := errgroup.WithContext(ctx)
	downloadCh := make(chan *packInfo)
	worker := func() error {
		for pack := range downloadCh {
			if err := r.downloadPack(ctx, pack); err != nil {
				return err
			}
		}
		return nil
	}
	for i := 0; i < workerCount; i++ {
		wg.Go(worker)
	}
	// the main restore loop
	wg.Go(func() error {
		for _, id := range packOrder {
			pack := packs[id]
			select {
			case <-ctx.Done():
				return ctx.Err()
			case downloadCh <- pack:
				debug.Log("Scheduled download pack %s", pack.id.Str())
			}
		}
		close(downloadCh)
		return nil
	})
	return wg.Wait()
}
// maximum size of the read buffer used while streaming a pack
const maxBufferSize = 4 * 1024 * 1024

// downloadPack loads the byte range of pack covering all needed blobs,
// decrypts each blob once and writes it to every file/offset referencing it.
// Blob-level errors are routed through r.Error (sanitizeError) so a restore
// can optionally continue past damaged blobs.
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
	// calculate pack byte range and blob->[]files->[]offsets mappings
	start, end := int64(math.MaxInt64), int64(0)
	blobs := make(map[restic.ID]struct {
		offset int64                 // offset of the blob in the pack
		length int                   // length of the blob
		files  map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
	})
	for file := range pack.files {
		addBlob := func(blob restic.Blob, fileOffset int64) {
			if start > int64(blob.Offset) {
				start = int64(blob.Offset)
			}
			if end < int64(blob.Offset+blob.Length) {
				end = int64(blob.Offset + blob.Length)
			}
			blobInfo, ok := blobs[blob.ID]
			if !ok {
				blobInfo.offset = int64(blob.Offset)
				blobInfo.length = int(blob.Length)
				blobInfo.files = make(map[*fileInfo][]int64)
				blobs[blob.ID] = blobInfo
			}
			blobInfo.files[file] = append(blobInfo.files[file], fileOffset)
		}
		if fileBlobs, ok := file.blobs.(restic.IDs); ok {
			fileOffset := int64(0)
			err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob) {
				if packID.Equal(pack.id) {
					addBlob(blob, fileOffset)
				}
				fileOffset += int64(blob.Length) - crypto.Extension
			})
			if err != nil {
				// restoreFiles should have caught this error before
				panic(err)
			}
		} else if packsMap, ok := file.blobs.(map[restic.ID][]fileBlobInfo); ok {
			// large file: only this pack's blobs were recorded
			for _, blob := range packsMap[pack.id] {
				idxPacks := r.idx(restic.BlobHandle{ID: blob.id, Type: restic.DataBlob})
				for _, idxPack := range idxPacks {
					if idxPack.PackID.Equal(pack.id) {
						addBlob(idxPack.Blob, blob.offset)
						break
					}
				}
			}
		}
	}
	// process blobs in pack order so the stream can be read sequentially
	sortedBlobs := make([]restic.ID, 0, len(blobs))
	for blobID := range blobs {
		sortedBlobs = append(sortedBlobs, blobID)
	}
	sort.Slice(sortedBlobs, func(i, j int) bool {
		return blobs[sortedBlobs[i]].offset < blobs[sortedBlobs[j]].offset
	})
	// route an error through the per-file error callback
	sanitizeError := func(file *fileInfo, err error) error {
		if err != nil {
			err = r.Error(file.location, err)
		}
		return err
	}
	h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
	err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
		bufferSize := int(end - start)
		if bufferSize > maxBufferSize {
			bufferSize = maxBufferSize
		}
		bufRd := bufio.NewReaderSize(rd, bufferSize)
		currentBlobEnd := start
		var blobData, buf []byte
		for _, blobID := range sortedBlobs {
			blob := blobs[blobID]
			// skip the gap between the previous blob and this one
			_, err := bufRd.Discard(int(blob.offset - currentBlobEnd))
			if err != nil {
				return err
			}
			blobData, buf, err = r.loadBlob(bufRd, blobID, blob.length, buf)
			if err != nil {
				for file := range blob.files {
					if errFile := sanitizeError(file, err); errFile != nil {
						return errFile
					}
				}
				continue
			}
			currentBlobEnd = blob.offset + int64(blob.length)
			for file, offsets := range blob.files {
				for _, offset := range offsets {
					writeToFile := func() error {
						// this looks overly complicated and needs explanation
						// two competing requirements:
						// - must create the file once and only once
						// - should allow concurrent writes to the file
						// so write the first blob while holding file lock
						// write other blobs after releasing the lock
						createSize := int64(-1)
						file.lock.Lock()
						if file.inProgress {
							file.lock.Unlock()
						} else {
							defer file.lock.Unlock()
							file.inProgress = true
							createSize = file.size
						}
						return r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize)
					}
					err := sanitizeError(file, writeToFile())
					if err != nil {
						return err
					}
				}
			}
		}
		return nil
	})
	if err != nil {
		// a pack-level error affects every file using this pack
		for file := range pack.files {
			if errFile := sanitizeError(file, err); errFile != nil {
				return errFile
			}
		}
	}
	return nil
}
// loadBlob reads one encrypted blob of the given length from rd, decrypts it
// and verifies its content hash against blobID. buf is reused across calls;
// the (possibly reallocated) buffer is returned alongside the plaintext,
// which aliases buf.
func (r *fileRestorer) loadBlob(rd io.Reader, blobID restic.ID, length int, buf []byte) ([]byte, []byte, error) {
	// TODO reconcile with Repository#loadBlob implementation
	if cap(buf) < length {
		buf = make([]byte, length)
	} else {
		buf = buf[:length]
	}
	n, err := io.ReadFull(rd, buf)
	if err != nil {
		return nil, nil, err
	}
	if n != length {
		return nil, nil, errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", blobID.Str(), length, n)
	}
	// decrypt
	nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
	plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
	if err != nil {
		return nil, nil, errors.Errorf("decrypting blob %v failed: %v", blobID, err)
	}
	// check hash
	if !restic.Hash(plaintext).Equal(blobID) {
		return nil, nil, errors.Errorf("blob %v returned invalid hash", blobID)
	}
	return plaintext, buf, nil
}
|
//go:build integration
// +build integration
package tntengine
import (
"context"
"log"
"strconv"
"testing"
"time"
"github.com/centrifugal/centrifuge"
"github.com/centrifugal/protocol"
"github.com/stretchr/testify/require"
)
// newTestTarantoolEngine builds a centrifuge node backed by a Tarantool
// broker and presence manager (single shard on 127.0.0.1:3301), runs the
// node and returns both components. Any setup failure aborts the test.
func newTestTarantoolEngine(tb testing.TB) (*Broker, *PresenceManager) {
	// BUGFIX: the error from centrifuge.New was previously discarded with
	// `_`; fail the test explicitly like every other setup step does.
	n, err := centrifuge.New(centrifuge.DefaultConfig)
	if err != nil {
		tb.Fatal(err)
	}
	var shards []*Shard
	for _, port := range []string{"3301"} {
		shard, err := NewShard(ShardConfig{Addresses: []string{"127.0.0.1:" + port}})
		if err != nil {
			log.Fatal(err)
		}
		shards = append(shards, shard)
	}
	broker, err := NewBroker(n, BrokerConfig{
		UsePolling: false,
		Shards:     shards,
	})
	if err != nil {
		tb.Fatal(err)
	}
	presenceManager, err := NewPresenceManager(n, PresenceManagerConfig{
		Shards: shards,
	})
	if err != nil {
		tb.Fatal(err)
	}
	n.SetBroker(broker)
	n.SetPresenceManager(presenceManager)
	err = n.Run()
	if err != nil {
		tb.Fatal(err)
	}
	return broker, presenceManager
}
// recoverTest describes one history-recovery scenario.
type recoverTest struct {
	Name            string // sub-test name
	HistorySize     int    // channel history size used when publishing
	HistoryLifetime int    // history TTL in seconds
	NumPublications int    // number of messages published before recovery
	SinceOffset     uint64 // offset the client claims to have seen last
	NumRecovered    int    // expected number of recovered publications
	Sleep           int    // seconds to sleep before recovering (lets history expire)
	Limit           int    // max publications the server returns (0 = no limit)
	Recovered       bool   // expected recovery success flag
}
// recoverTests enumerates the recovery scenarios; field order matches
// recoverTest (name, size, lifetime, numPubs, since, numRecovered, sleep,
// limit, recovered).
var recoverTests = []recoverTest{
	{"empty_stream", 10, 60, 0, 0, 0, 0, 0, true},
	{"from_position", 10, 60, 10, 8, 2, 0, 0, true},
	{"from_position_limited", 10, 60, 10, 5, 2, 0, 2, false},
	{"from_position_with_server_limit", 10, 60, 10, 5, 1, 0, 1, false},
	{"from_position_that_already_gone", 10, 60, 20, 8, 10, 0, 0, false},
	{"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false},
	{"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true},
	{"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true},
	{"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false},
	{"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true},
}
// TestTarantoolClientSubscribeRecover runs every recovery scenario as its
// own sub-test against a live Tarantool broker.
func TestTarantoolClientSubscribeRecover(t *testing.T) {
	for _, tt := range recoverTests {
		t.Run(tt.Name, func(t *testing.T) {
			testTarantoolClientSubscribeRecover(t, tt)
		})
	}
}
func nodeWithTarantoolBroker(tb testing.TB) *centrifuge.Node {
c := centrifuge.DefaultConfig
return nodeWithTarantoolBrokerWithConfig(tb, c)
}
// nodeWithTarantoolBrokerWithConfig builds a Node with config c, attaches the
// Tarantool broker from newTestTarantoolEngine, and starts it.
// NOTE(review): newTestTarantoolEngine already creates and runs its own Node,
// so two Nodes end up running; presumably harmless for these tests — confirm.
func nodeWithTarantoolBrokerWithConfig(tb testing.TB, c centrifuge.Config) *centrifuge.Node {
n, err := centrifuge.New(c)
if err != nil {
tb.Fatal(err)
}
e, _ := newTestTarantoolEngine(tb)
n.SetBroker(e)
err = n.Run()
if err != nil {
tb.Fatal(err)
}
return n
}
// pubToProto converts a centrifuge.Publication to its protocol representation,
// passing nil through unchanged. Client info is converted via infoToProto.
func pubToProto(pub *centrifuge.Publication) *protocol.Publication {
if pub == nil {
return nil
}
return &protocol.Publication{
Offset: pub.Offset,
Data:   pub.Data,
Info:   infoToProto(pub.Info),
}
}
// isRecovered re-implements the client-side recovery check: recovery succeeds
// when the returned publications form a contiguous run from cmdOffset+1 up to
// the stream top (or, with no publications, when the client was already at the
// top) and the epoch matches. Returns the converted publications and the flag.
func isRecovered(historyResult centrifuge.HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) {
latestOffset := historyResult.Offset
latestEpoch := historyResult.Epoch
recoveredPubs := make([]*protocol.Publication, 0, len(historyResult.Publications))
for _, pub := range historyResult.Publications {
protoPub := pubToProto(pub)
recoveredPubs = append(recoveredPubs, protoPub)
}
nextOffset := cmdOffset + 1
var recovered bool
if len(recoveredPubs) == 0 {
// Nothing to replay: recovered only if client was already at stream top.
recovered = latestOffset == cmdOffset && latestEpoch == cmdEpoch
} else {
// Replay must start right after the client position and end at the top.
recovered = recoveredPubs[0].Offset == nextOffset &&
recoveredPubs[len(recoveredPubs)-1].Offset == latestOffset &&
latestEpoch == cmdEpoch
}
return recoveredPubs, recovered
}
// recoverHistory recovers publications since StreamPosition last seen by client.
// A maxPublicationLimit <= 0 means no cap (centrifuge.NoLimit).
func recoverHistory(node *centrifuge.Node, ch string, since centrifuge.StreamPosition, maxPublicationLimit int) (centrifuge.HistoryResult, error) {
limit := centrifuge.NoLimit
if maxPublicationLimit > 0 {
limit = maxPublicationLimit
}
return node.History(ch, centrifuge.WithLimit(limit), centrifuge.WithSince(&since))
}
// testTarantoolClientSubscribeRecover publishes tt.NumPublications messages
// into a per-scenario channel, optionally sleeps so the history TTL expires,
// then attempts recovery from tt.SinceOffset and checks count and flag.
func testTarantoolClientSubscribeRecover(t *testing.T, tt recoverTest) {
node := nodeWithTarantoolBroker(t)
defer func() { _ = node.Shutdown(context.Background()) }()
channel := "test_recovery_tarantool_" + tt.Name
for i := 1; i <= tt.NumPublications; i++ {
_, err := node.Publish(channel, []byte(`{"n": `+strconv.Itoa(i)+`}`), centrifuge.WithHistory(tt.HistorySize, time.Duration(tt.HistoryLifetime)*time.Second))
require.NoError(t, err)
}
// Let the stream expire when the scenario asks for it.
time.Sleep(time.Duration(tt.Sleep) * time.Second)
res, err := node.History(channel)
require.NoError(t, err)
streamTop := res.StreamPosition
historyResult, err := recoverHistory(node, channel, centrifuge.StreamPosition{Offset: tt.SinceOffset, Epoch: streamTop.Epoch}, tt.Limit)
require.NoError(t, err)
recoveredPubs, recovered := isRecovered(historyResult, tt.SinceOffset, streamTop.Epoch)
require.Equal(t, tt.NumRecovered, len(recoveredPubs))
require.Equal(t, tt.Recovered, recovered)
}
// historyIterationChannel is the shared channel used by the iteration tests.
const historyIterationChannel = "test"

// historyIterationTest parameterizes paging through channel history.
type historyIterationTest struct {
NumMessages int // total publications to write and expect back
IterateBy   int // page size used while iterating
}
// prepareHistoryIteration records the current stream position, publishes
// NumMessages entries (history sized to hold them all), verifies they are all
// present, and returns the pre-publish position as the iteration start.
func (it *historyIterationTest) prepareHistoryIteration(t testing.TB, node *centrifuge.Node) centrifuge.StreamPosition {
numMessages := it.NumMessages
channel := historyIterationChannel
historyResult, err := node.History(channel)
require.NoError(t, err)
startPosition := historyResult.StreamPosition
for i := 1; i <= numMessages; i++ {
_, err := node.Publish(channel, []byte(`{}`), centrifuge.WithHistory(numMessages, time.Hour))
require.NoError(t, err)
}
historyResult, err = node.History(channel, centrifuge.WithLimit(centrifuge.NoLimit))
require.NoError(t, err)
require.Equal(t, numMessages, len(historyResult.Publications))
return startPosition
}
func (it *historyIterationTest) testHistoryIteration(t testing.TB, node *centrifuge.Node, startPosition centrifuge.StreamPosition) {
var (
n int
offset = startPosition.Offset
epoch = startPosition.Epoch
iterateBy = it.IterateBy
)
for {
res, err := node.History(
historyIterationChannel,
centrifuge.WithSince(¢rifuge.StreamPosition{Offset: offset, Epoch: epoch}),
centrifuge.WithLimit(iterateBy),
)
if err != nil {
t.Fatal(err)
}
offset += uint64(iterateBy)
if len(res.Publications) == 0 {
break
}
n += len(res.Publications)
}
if n != it.NumMessages {
t.Fatal("num messages mismatch")
}
}
func (it *historyIterationTest) testHistoryIterationReverse(t testing.TB, node *centrifuge.Node, startPosition centrifuge.StreamPosition) {
var (
n int
offset = startPosition.Offset
epoch = startPosition.Epoch
iterateBy = it.IterateBy
)
var since *centrifuge.StreamPosition
outer:
for {
res, err := node.History(
historyIterationChannel,
centrifuge.WithSince(since),
centrifuge.WithLimit(iterateBy),
centrifuge.WithReverse(true),
)
if err != nil {
t.Fatal(err)
}
var checkOffset uint64
loop:
for _, pub := range res.Publications {
n += 1
if pub.Offset == startPosition.Offset+1 {
break outer
}
if checkOffset == 0 {
checkOffset = pub.Offset
continue loop
}
if pub.Offset > checkOffset {
t.Fatal("incorrect order")
}
checkOffset = pub.Offset
}
if len(res.Publications) < iterateBy {
break
}
earliestPub := res.Publications[len(res.Publications)-1]
offset = earliestPub.Offset
since = ¢rifuge.StreamPosition{Offset: offset, Epoch: epoch}
}
if n != it.NumMessages {
t.Fatalf("num messages mismatch, expected %d, got %d", it.NumMessages, n)
}
}
// TestTarantoolBrokerHistoryIteration checks forward paging over 100 messages
// in pages of 10 against a live Tarantool broker.
func TestTarantoolBrokerHistoryIteration(t *testing.T) {
e, _ := newTestTarantoolEngine(t)
it := historyIterationTest{100, 10}
startPosition := it.prepareHistoryIteration(t, e.node)
it.testHistoryIteration(t, e.node, startPosition)
}
// TestTarantoolBrokerHistoryIterationReverse checks reverse paging over 100
// messages in pages of 10 against a live Tarantool broker.
func TestTarantoolBrokerHistoryIterationReverse(t *testing.T) {
e, _ := newTestTarantoolEngine(t)
it := historyIterationTest{100, 10}
startPosition := it.prepareHistoryIteration(t, e.node)
it.testHistoryIterationReverse(t, e.node, startPosition)
}
// BenchmarkTarantoolPublish_1Ch measures publish throughput into a single
// channel with 128-way parallelism and no history retention.
func BenchmarkTarantoolPublish_1Ch(b *testing.B) {
broker, _ := newTestTarantoolEngine(b)
rawData := []byte(`{"bench": true}`)
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := broker.Publish("channel", rawData, centrifuge.PublishOptions{})
if err != nil {
b.Fatal(err)
}
}
})
}
// BenchmarkTarantoolSubscribe measures subscribe throughput, each iteration
// subscribing to a distinct channel name.
func BenchmarkTarantoolSubscribe(b *testing.B) {
	broker, _ := newTestTarantoolEngine(b)
	// The counter is shared by all RunParallel goroutines, so it must be
	// incremented atomically (the original bare i++ was a data race and could
	// yield duplicate channel names).
	var i int64
	b.SetParallelism(128)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			id := atomic.AddInt64(&i, 1)
			err := broker.Subscribe("subscribe" + strconv.FormatInt(id, 10))
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
func BenchmarkTarantoolRecover_1Ch(b *testing.B) {
broker, _ := newTestTarantoolEngine(b)
rawData := []byte("{}")
numMessages := 1000
numMissing := 5
for i := 1; i <= numMessages; i++ {
_, err := broker.Publish("channel", rawData, centrifuge.PublishOptions{HistorySize: numMessages, HistoryTTL: 300 * time.Second})
require.NoError(b, err)
}
_, sp, err := broker.History("channel", centrifuge.HistoryFilter{})
require.NoError(b, err)
b.ResetTimer()
b.SetParallelism(128)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
pubs, _, err := broker.History("channel", centrifuge.HistoryFilter{
Limit: -1,
Since: ¢rifuge.StreamPosition{Offset: sp.Offset - uint64(numMissing), Epoch: ""},
})
if err != nil {
b.Fatal(err)
}
if len(pubs) != numMissing {
b.Fatalf("len pubs: %d, expected: %d", len(pubs), numMissing)
}
}
})
}
func BenchmarkTarantoolPresence_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
b.SetParallelism(128)
_ = pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
p, err := pm.Presence("channel")
if err != nil {
b.Fatal(err)
}
if len(p) != 1 {
b.Fatal("wrong presence len")
}
}
})
}
func BenchmarkTarantoolAddPresence_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err := pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkTarantoolPresenceStats_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
_ = pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
p, err := pm.PresenceStats("channel")
if err != nil {
b.Fatal(err)
}
if p.NumClients != 1 {
b.Fatal("wrong presence stats")
}
}
})
}
fix tnt broker tests
//go:build integration
// +build integration
package tntengine
import (
"context"
"log"
"strconv"
"testing"
"time"
"github.com/centrifugal/centrifuge"
"github.com/centrifugal/protocol"
"github.com/stretchr/testify/require"
)
func newTestTarantoolEngine(tb testing.TB) (*Broker, *PresenceManager) {
n, _ := centrifuge.New(centrifuge.Config{})
var shards []*Shard
for _, port := range []string{"3301"} {
shard, err := NewShard(ShardConfig{Addresses: []string{"127.0.0.1:" + port}})
if err != nil {
log.Fatal(err)
}
shards = append(shards, shard)
}
broker, err := NewBroker(n, BrokerConfig{
UsePolling: false,
Shards: shards,
})
if err != nil {
tb.Fatal(err)
}
presenceManager, err := NewPresenceManager(n, PresenceManagerConfig{
Shards: shards,
})
if err != nil {
tb.Fatal(err)
}
n.SetBroker(broker)
n.SetPresenceManager(presenceManager)
err = n.Run()
if err != nil {
tb.Fatal(err)
}
return broker, presenceManager
}
type recoverTest struct {
Name string
HistorySize int
HistoryLifetime int
NumPublications int
SinceOffset uint64
NumRecovered int
Sleep int
Limit int
Recovered bool
}
var recoverTests = []recoverTest{
{"empty_stream", 10, 60, 0, 0, 0, 0, 0, true},
{"from_position", 10, 60, 10, 8, 2, 0, 0, true},
{"from_position_limited", 10, 60, 10, 5, 2, 0, 2, false},
{"from_position_with_server_limit", 10, 60, 10, 5, 1, 0, 1, false},
{"from_position_that_already_gone", 10, 60, 20, 8, 10, 0, 0, false},
{"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false},
{"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true},
{"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true},
{"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false},
{"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true},
}
func TestTarantoolClientSubscribeRecover(t *testing.T) {
for _, tt := range recoverTests {
t.Run(tt.Name, func(t *testing.T) {
testTarantoolClientSubscribeRecover(t, tt)
})
}
}
func nodeWithTarantoolBroker(tb testing.TB) *centrifuge.Node {
c := centrifuge.Config{}
return nodeWithTarantoolBrokerWithConfig(tb, c)
}
func nodeWithTarantoolBrokerWithConfig(tb testing.TB, c centrifuge.Config) *centrifuge.Node {
n, err := centrifuge.New(c)
if err != nil {
tb.Fatal(err)
}
e, _ := newTestTarantoolEngine(tb)
n.SetBroker(e)
err = n.Run()
if err != nil {
tb.Fatal(err)
}
return n
}
func pubToProto(pub *centrifuge.Publication) *protocol.Publication {
if pub == nil {
return nil
}
return &protocol.Publication{
Offset: pub.Offset,
Data: pub.Data,
Info: infoToProto(pub.Info),
}
}
func isRecovered(historyResult centrifuge.HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) {
latestOffset := historyResult.Offset
latestEpoch := historyResult.Epoch
recoveredPubs := make([]*protocol.Publication, 0, len(historyResult.Publications))
for _, pub := range historyResult.Publications {
protoPub := pubToProto(pub)
recoveredPubs = append(recoveredPubs, protoPub)
}
nextOffset := cmdOffset + 1
var recovered bool
if len(recoveredPubs) == 0 {
recovered = latestOffset == cmdOffset && latestEpoch == cmdEpoch
} else {
recovered = recoveredPubs[0].Offset == nextOffset &&
recoveredPubs[len(recoveredPubs)-1].Offset == latestOffset &&
latestEpoch == cmdEpoch
}
return recoveredPubs, recovered
}
// recoverHistory recovers publications since StreamPosition last seen by client.
func recoverHistory(node *centrifuge.Node, ch string, since centrifuge.StreamPosition, maxPublicationLimit int) (centrifuge.HistoryResult, error) {
limit := centrifuge.NoLimit
if maxPublicationLimit > 0 {
limit = maxPublicationLimit
}
return node.History(ch, centrifuge.WithLimit(limit), centrifuge.WithSince(&since))
}
func testTarantoolClientSubscribeRecover(t *testing.T, tt recoverTest) {
node := nodeWithTarantoolBroker(t)
defer func() { _ = node.Shutdown(context.Background()) }()
channel := "test_recovery_tarantool_" + tt.Name
for i := 1; i <= tt.NumPublications; i++ {
_, err := node.Publish(channel, []byte(`{"n": `+strconv.Itoa(i)+`}`), centrifuge.WithHistory(tt.HistorySize, time.Duration(tt.HistoryLifetime)*time.Second))
require.NoError(t, err)
}
time.Sleep(time.Duration(tt.Sleep) * time.Second)
res, err := node.History(channel)
require.NoError(t, err)
streamTop := res.StreamPosition
historyResult, err := recoverHistory(node, channel, centrifuge.StreamPosition{Offset: tt.SinceOffset, Epoch: streamTop.Epoch}, tt.Limit)
require.NoError(t, err)
recoveredPubs, recovered := isRecovered(historyResult, tt.SinceOffset, streamTop.Epoch)
require.Equal(t, tt.NumRecovered, len(recoveredPubs))
require.Equal(t, tt.Recovered, recovered)
}
const historyIterationChannel = "test"
type historyIterationTest struct {
NumMessages int
IterateBy int
}
func (it *historyIterationTest) prepareHistoryIteration(t testing.TB, node *centrifuge.Node) centrifuge.StreamPosition {
numMessages := it.NumMessages
channel := historyIterationChannel
historyResult, err := node.History(channel)
require.NoError(t, err)
startPosition := historyResult.StreamPosition
for i := 1; i <= numMessages; i++ {
_, err := node.Publish(channel, []byte(`{}`), centrifuge.WithHistory(numMessages, time.Hour))
require.NoError(t, err)
}
historyResult, err = node.History(channel, centrifuge.WithLimit(centrifuge.NoLimit))
require.NoError(t, err)
require.Equal(t, numMessages, len(historyResult.Publications))
return startPosition
}
func (it *historyIterationTest) testHistoryIteration(t testing.TB, node *centrifuge.Node, startPosition centrifuge.StreamPosition) {
var (
n int
offset = startPosition.Offset
epoch = startPosition.Epoch
iterateBy = it.IterateBy
)
for {
res, err := node.History(
historyIterationChannel,
centrifuge.WithSince(¢rifuge.StreamPosition{Offset: offset, Epoch: epoch}),
centrifuge.WithLimit(iterateBy),
)
if err != nil {
t.Fatal(err)
}
offset += uint64(iterateBy)
if len(res.Publications) == 0 {
break
}
n += len(res.Publications)
}
if n != it.NumMessages {
t.Fatal("num messages mismatch")
}
}
func (it *historyIterationTest) testHistoryIterationReverse(t testing.TB, node *centrifuge.Node, startPosition centrifuge.StreamPosition) {
var (
n int
offset = startPosition.Offset
epoch = startPosition.Epoch
iterateBy = it.IterateBy
)
var since *centrifuge.StreamPosition
outer:
for {
res, err := node.History(
historyIterationChannel,
centrifuge.WithSince(since),
centrifuge.WithLimit(iterateBy),
centrifuge.WithReverse(true),
)
if err != nil {
t.Fatal(err)
}
var checkOffset uint64
loop:
for _, pub := range res.Publications {
n += 1
if pub.Offset == startPosition.Offset+1 {
break outer
}
if checkOffset == 0 {
checkOffset = pub.Offset
continue loop
}
if pub.Offset > checkOffset {
t.Fatal("incorrect order")
}
checkOffset = pub.Offset
}
if len(res.Publications) < iterateBy {
break
}
earliestPub := res.Publications[len(res.Publications)-1]
offset = earliestPub.Offset
since = ¢rifuge.StreamPosition{Offset: offset, Epoch: epoch}
}
if n != it.NumMessages {
t.Fatalf("num messages mismatch, expected %d, got %d", it.NumMessages, n)
}
}
func TestTarantoolBrokerHistoryIteration(t *testing.T) {
e, _ := newTestTarantoolEngine(t)
it := historyIterationTest{100, 10}
startPosition := it.prepareHistoryIteration(t, e.node)
it.testHistoryIteration(t, e.node, startPosition)
}
func TestTarantoolBrokerHistoryIterationReverse(t *testing.T) {
e, _ := newTestTarantoolEngine(t)
it := historyIterationTest{100, 10}
startPosition := it.prepareHistoryIteration(t, e.node)
it.testHistoryIterationReverse(t, e.node, startPosition)
}
func BenchmarkTarantoolPublish_1Ch(b *testing.B) {
broker, _ := newTestTarantoolEngine(b)
rawData := []byte(`{"bench": true}`)
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := broker.Publish("channel", rawData, centrifuge.PublishOptions{})
if err != nil {
b.Fatal(err)
}
}
})
}
// BenchmarkTarantoolSubscribe measures subscribe throughput, each iteration
// subscribing to a distinct channel name.
// NOTE(review): i++ runs concurrently in RunParallel goroutines without
// synchronization — this is a data race and can produce duplicate channel
// names; should use sync/atomic.
func BenchmarkTarantoolSubscribe(b *testing.B) {
broker, _ := newTestTarantoolEngine(b)
i := 0
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
i++
err := broker.Subscribe("subscribe" + strconv.Itoa(i))
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkTarantoolRecover_1Ch(b *testing.B) {
broker, _ := newTestTarantoolEngine(b)
rawData := []byte("{}")
numMessages := 1000
numMissing := 5
for i := 1; i <= numMessages; i++ {
_, err := broker.Publish("channel", rawData, centrifuge.PublishOptions{HistorySize: numMessages, HistoryTTL: 300 * time.Second})
require.NoError(b, err)
}
_, sp, err := broker.History("channel", centrifuge.HistoryFilter{})
require.NoError(b, err)
b.ResetTimer()
b.SetParallelism(128)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
pubs, _, err := broker.History("channel", centrifuge.HistoryFilter{
Limit: -1,
Since: ¢rifuge.StreamPosition{Offset: sp.Offset - uint64(numMissing), Epoch: ""},
})
if err != nil {
b.Fatal(err)
}
if len(pubs) != numMissing {
b.Fatalf("len pubs: %d, expected: %d", len(pubs), numMissing)
}
}
})
}
func BenchmarkTarantoolPresence_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
b.SetParallelism(128)
_ = pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
p, err := pm.Presence("channel")
if err != nil {
b.Fatal(err)
}
if len(p) != 1 {
b.Fatal("wrong presence len")
}
}
})
}
func BenchmarkTarantoolAddPresence_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err := pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkTarantoolPresenceStats_1Ch(b *testing.B) {
_, pm := newTestTarantoolEngine(b)
_ = pm.AddPresence("channel", "uid", ¢rifuge.ClientInfo{})
b.SetParallelism(128)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
p, err := pm.PresenceStats("channel")
if err != nil {
b.Fatal(err)
}
if p.NumClients != 1 {
b.Fatal("wrong presence stats")
}
}
})
}
|
package vecbackup
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"strings"
"time"
)
// rcloneBinary is the executable name (or path) used to invoke rclone.
var rcloneBinary = "rclone"

// SetRcloneBinary overrides which rclone executable subsequent commands run.
func SetRcloneBinary(p string) {
	rcloneBinary = p
}
// StorageMgrLsDir2Func is invoked once per (subdirectory, file) pair found by
// a two-level directory listing.
type StorageMgrLsDir2Func func(dir, file string)

// StorageMgr abstracts the backing store (local filesystem or rclone remote)
// used for repository files.
type StorageMgr interface {
JoinPath(d, f string) string
// IsDirFast reports whether recursive listing (LsDir2) is cheap enough to
// prefer over per-file checks.
IsDirFast() bool
LsDir(p string) ([]string, error)
LsDir2(p string, f StorageMgrLsDir2Func) error
FileExists(f string) (bool, error)
MkdirAll(p string) error
// ReadFile reads p into out; errOut receives stderr for command-backed stores.
ReadFile(p string, out, errOut *bytes.Buffer) ([]byte, error)
WriteFile(p string, d []byte) error
DeleteFile(p string) error
WriteLockFile(p string) error
RemoveLockFile(p string) error
}
// rcloneSMgr stores files on an rclone remote by shelling out to rclone.
type rcloneSMgr struct{}

// localSMgr stores files on the local filesystem.
type localSMgr struct{}

var TheRcloneSMgr = rcloneSMgr{}
var TheLocalSMgr = localSMgr{}

// GetStorageMgr selects a backend from the path scheme: "rclone:<target>"
// (with a non-empty target) selects the rclone manager and returns the target
// with the prefix stripped; everything else is a local filesystem path.
func GetStorageMgr(p string) (StorageMgr, string) {
	// len(p) > 7 keeps a bare "rclone:" on the local path, as before.
	if strings.HasPrefix(p, "rclone:") && len(p) > 7 {
		return TheRcloneSMgr, strings.TrimPrefix(p, "rclone:")
	}
	return TheLocalSMgr, p
}
// runCmd executes cmd, capturing stdout into out and stderr into errOut.
// Both buffers are cleared first so only this run's output is present.
func runCmd(cmd *exec.Cmd, out, errOut *bytes.Buffer) error {
	for _, buf := range []*bytes.Buffer{out, errOut} {
		buf.Reset()
	}
	cmd.Stdout, cmd.Stderr = out, errOut
	return cmd.Run()
}
// JoinPath joins with a literal "/" — rclone remote paths always use forward
// slashes regardless of host OS.
func (sm rcloneSMgr) JoinPath(d, f string) string {
return d + "/" + f
}
// JoinPath joins using the platform-independent path package (cleans the result).
func (sm localSMgr) JoinPath(d, f string) string {
return path.Join(d, f)
}
// IsDirFast: rclone can list two levels in one remote call, so bulk listing
// is preferred for this backend.
func (sm rcloneSMgr) IsDirFast() bool {
return true
}
// IsDirFast: local stat calls are cheap, so bulk pre-listing is not preferred.
func (sm localSMgr) IsDirFast() bool {
return false
}
// LsDir lists the files (not directories) directly under p on the rclone
// remote via "rclone lsjson --max-depth 1 --files-only".
func (sm rcloneSMgr) LsDir(p string) ([]string, error) {
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "1", "--files-only", p)
catOut, err := catCmd.Output()
if err != nil {
return nil, err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return nil, err
}
var files []string
for _, r := range recs {
// At depth 1 Path is just the file name.
files = append(files, r.Path)
}
return files, nil
}
// LsDir returns the names of the regular files directly under p, in the
// sorted order produced by ioutil.ReadDir. Directories, symlinks and other
// special entries are skipped, mirroring rclone's --files-only listing.
func (sm localSMgr) LsDir(p string) ([]string, error) {
	infos, err := ioutil.ReadDir(p)
	if err != nil {
		return nil, err
	}
	var regular []string
	for _, fi := range infos {
		if !fi.Mode().IsRegular() {
			continue
		}
		regular = append(regular, fi.Name())
	}
	return regular, nil
}
// rcloneLsRecord is the subset of an "rclone lsjson" record we decode; the
// remaining JSON fields are intentionally ignored.
type rcloneLsRecord struct {
Path string
//Name string
//Size int
//ModTime string
//IsDir bool
//Tier string
}
// LsDir2 lists files exactly two levels below p on the rclone remote in a
// single call and invokes f with each (subdirectory, file) pair.
func (sm rcloneSMgr) LsDir2(p string, f StorageMgrLsDir2Func) error {
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "2", "--files-only", p)
catOut, err := catCmd.Output()
if err != nil {
return err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return err
}
for _, r := range recs {
// Only "subdir/file" entries count; depth-1 files are skipped.
ss := strings.Split(r.Path, "/")
if len(ss) == 2 {
f(ss[0], ss[1])
}
}
return nil
}
// LsDir2 walks each subdirectory of p and calls f for every regular file found
// one level inside it. Errors reading an individual subdirectory are silently
// skipped; only failure to read p itself is reported.
func (sm localSMgr) LsDir2(p string, f StorageMgrLsDir2Func) error {
l1, err := ioutil.ReadDir(p)
if err != nil {
return err
}
for _, d := range l1 {
if d.Mode().IsDir() {
l2, err := ioutil.ReadDir(path.Join(p, d.Name()))
if err == nil {
for _, x := range l2 {
if x.Mode().IsRegular() {
f(d.Name(), x.Name())
}
}
}
}
}
return nil
}
// FileExists reports whether the file f exists on the rclone remote by listing
// its parent entry and matching the base name in the output.
func (sm rcloneSMgr) FileExists(f string) (bool, error) {
filename := path.Base(f)
if filename == "/" || filename == "." {
return false, fmt.Errorf("Invalid path: %s", f)
}
// lsjson on the file path itself returns a record iff the file exists.
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "1", "--files-only", f)
catOut, err := catCmd.Output()
if err != nil {
return false, err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return false, err
}
for _, r := range recs {
if r.Path == filename {
return true, nil
}
}
return false, nil
}
// FileExists reports whether path f exists locally. Lstat (not Stat) is used,
// so a dangling symlink still counts as existing; stat errors other than
// "not exist" are returned to the caller.
func (sm localSMgr) FileExists(f string) (bool, error) {
	switch _, err := os.Lstat(f); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// MkdirAll is a no-op: rclone remotes create intermediate directories
// implicitly when files are written.
func (sm rcloneSMgr) MkdirAll(p string) error {
return nil
}
// MkdirAll creates p and any missing parents with the repo's default dir mode.
func (sm localSMgr) MkdirAll(p string) error {
return os.MkdirAll(p, DEFAULT_DIR_PERM)
}
// ReadFile reads p from the rclone remote via "rclone cat" into out.
// Workaround: "rclone cat" exits successfully with empty output both for an
// empty file and for a missing file, so zero bytes is treated as not-exist
// (repository files written by this program are never empty).
func (sm rcloneSMgr) ReadFile(p string, out, errOut *bytes.Buffer) ([]byte, error) {
	catCmd := exec.Command(rcloneBinary, "cat", p)
	if err := runCmd(catCmd, out, errOut); err != nil {
		return nil, err
	}
	if out.Len() == 0 {
		return nil, os.ErrNotExist
	}
	return out.Bytes(), nil
}
// ReadFile loads the whole local file p into the caller-supplied buffer out
// (cleared first) and returns its bytes; the errOut buffer is unused locally.
func (sm localSMgr) ReadFile(p string, out, _ *bytes.Buffer) ([]byte, error) {
	fh, err := os.Open(p)
	if err != nil {
		return nil, err
	}
	out.Reset()
	_, err = out.ReadFrom(fh)
	fh.Close()
	return out.Bytes(), err
}
// WriteFile streams d to the remote path p via "rclone rcat".
// The child process is always reaped with Wait, even when writing to or
// closing the stdin pipe fails (the original returned early on pipe errors and
// leaked the process); the StdinPipe error is also no longer ignored.
func (sm rcloneSMgr) WriteFile(p string, d []byte) error {
	cmd := exec.Command(rcloneBinary, "rcat", p)
	cmdIn, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	_, werr := cmdIn.Write(d)
	cerr := cmdIn.Close()
	if err := cmd.Wait(); err != nil {
		return err
	}
	if werr != nil {
		return werr
	}
	return cerr
}
// WriteFile writes d atomically: the data goes to a "-temp" sibling first and
// is renamed over p only after a successful close, so readers never observe a
// partially written file. The temp file is removed on any failure.
func (sm localSMgr) WriteFile(p string, d []byte) error {
tp := p + "-temp"
out, err := os.OpenFile(tp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DEFAULT_FILE_PERM)
if err != nil {
return err
}
_, err = out.Write(d)
if err != nil {
out.Close()
os.Remove(tp)
return err
}
// Close before rename so buffered data is flushed and errors surface.
err = out.Close()
if err != nil {
os.Remove(tp)
return err
}
err = os.Rename(tp, p)
if err != nil {
os.Remove(tp)
return err
}
return nil
}
// DeleteFile removes the remote file p via "rclone deletefile".
func (sm rcloneSMgr) DeleteFile(p string) error {
cmd := exec.Command(rcloneBinary, "deletefile", p)
return cmd.Run()
}
// DeleteFile removes the local file p.
func (sm localSMgr) DeleteFile(p string) error {
return os.Remove(p)
}
// WriteLockFile takes a best-effort advisory lock on the remote: fail if the
// file already exists, write a unique timestamp+random payload, then read it
// back — if the content changed, another writer raced us and os.ErrExist is
// returned. (Cleanups vs. original: the redundant []byte conversion of an
// already-[]byte value is gone, and bytes.Equal replaces bytes.Compare != 0.)
func (sm rcloneSMgr) WriteLockFile(p string) error {
	exists, err := sm.FileExists(p)
	if err != nil {
		return err
	}
	if exists {
		return os.ErrExist
	}
	d := []byte(fmt.Sprintf("%s\n%d\n", time.Now().UTC().Format(time.RFC3339Nano), rand.Int63()))
	err = sm.WriteFile(p, d)
	if err != nil {
		return err
	}
	var buf, buf2 bytes.Buffer
	d2, err := sm.ReadFile(p, &buf, &buf2)
	if err != nil {
		return err
	}
	if !bytes.Equal(d, d2) {
		// Someone else overwrote the lock between our write and read-back.
		return os.ErrExist
	}
	return nil
}
// WriteLockFile creates an empty lock file at p, failing with os.ErrExist if
// one is already present. The pre-check lets us return os.ErrExist exactly;
// O_EXCL still guards the actual creation against races.
func (sm localSMgr) WriteLockFile(p string) error {
exists, err := TheLocalSMgr.FileExists(p)
if err != nil {
return err
}
if exists {
return os.ErrExist
}
lockFile, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_EXCL, DEFAULT_FILE_PERM)
if err != nil {
return err
}
lockFile.Close()
return nil
}
// RemoveLockFile deletes the remote lock file p, returning os.ErrNotExist if
// it is absent. Deletion failures are now propagated to the caller (the
// original discarded the DeleteFile error).
func (sm rcloneSMgr) RemoveLockFile(p string) error {
	exists, err := sm.FileExists(p)
	if err != nil {
		return err
	}
	if !exists {
		return os.ErrNotExist
	}
	return sm.DeleteFile(p)
}
// RemoveLockFile deletes the local lock file p (os.Remove reports absence).
func (sm localSMgr) RemoveLockFile(p string) error {
return os.Remove(p)
}
Workaround for "rclone" not differentiating between missing file and empty file.
package vecbackup
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"strings"
"time"
)
var rcloneBinary string = "rclone"
func SetRcloneBinary(p string) {
rcloneBinary = p
}
type StorageMgrLsDir2Func func(dir, file string)
type StorageMgr interface {
JoinPath(d, f string) string
IsDirFast() bool
LsDir(p string) ([]string, error)
LsDir2(p string, f StorageMgrLsDir2Func) error
FileExists(f string) (bool, error)
MkdirAll(p string) error
ReadFile(p string, out, errOut *bytes.Buffer) ([]byte, error)
WriteFile(p string, d []byte) error
DeleteFile(p string) error
WriteLockFile(p string) error
RemoveLockFile(p string) error
}
type rcloneSMgr struct{}
type localSMgr struct{}
var TheRcloneSMgr = rcloneSMgr{}
var TheLocalSMgr = localSMgr{}
func GetStorageMgr(p string) (StorageMgr, string) {
if len(p) > 7 && p[:7] == "rclone:" {
return TheRcloneSMgr, p[7:]
}
return TheLocalSMgr, p
}
func runCmd(cmd *exec.Cmd, out, errOut *bytes.Buffer) error {
out.Reset()
errOut.Reset()
cmd.Stdout = out
cmd.Stderr = errOut
return cmd.Run()
}
func (sm rcloneSMgr) JoinPath(d, f string) string {
return d + "/" + f
}
func (sm localSMgr) JoinPath(d, f string) string {
return path.Join(d, f)
}
func (sm rcloneSMgr) IsDirFast() bool {
return true
}
func (sm localSMgr) IsDirFast() bool {
return false
}
func (sm rcloneSMgr) LsDir(p string) ([]string, error) {
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "1", "--files-only", p)
catOut, err := catCmd.Output()
if err != nil {
return nil, err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return nil, err
}
var files []string
for _, r := range recs {
files = append(files, r.Path)
}
return files, nil
}
func (sm localSMgr) LsDir(p string) ([]string, error) {
files, err := ioutil.ReadDir(p)
if err != nil {
return nil, err
}
var names []string
for _, f := range files {
if f.Mode().IsRegular() {
names = append(names, f.Name())
}
}
return names, nil
}
type rcloneLsRecord struct {
Path string
//Name string
//Size int
//ModTime string
//IsDir bool
//Tier string
}
func (sm rcloneSMgr) LsDir2(p string, f StorageMgrLsDir2Func) error {
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "2", "--files-only", p)
catOut, err := catCmd.Output()
if err != nil {
return err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return err
}
for _, r := range recs {
ss := strings.Split(r.Path, "/")
if len(ss) == 2 {
f(ss[0], ss[1])
}
}
return nil
}
func (sm localSMgr) LsDir2(p string, f StorageMgrLsDir2Func) error {
l1, err := ioutil.ReadDir(p)
if err != nil {
return err
}
for _, d := range l1 {
if d.Mode().IsDir() {
l2, err := ioutil.ReadDir(path.Join(p, d.Name()))
if err == nil {
for _, x := range l2 {
if x.Mode().IsRegular() {
f(d.Name(), x.Name())
}
}
}
}
}
return nil
}
func (sm rcloneSMgr) FileExists(f string) (bool, error) {
filename := path.Base(f)
if filename == "/" || filename == "." {
return false, fmt.Errorf("Invalid path: %s", f)
}
catCmd := exec.Command(rcloneBinary, "lsjson", "--no-modtime", "--no-mimetype", "--fast-list", "--max-depth", "1", "--files-only", f)
catOut, err := catCmd.Output()
if err != nil {
return false, err
}
var recs []rcloneLsRecord
if err := json.Unmarshal(catOut, &recs); err != nil {
return false, err
}
for _, r := range recs {
if r.Path == filename {
return true, nil
}
}
return false, nil
}
func (sm localSMgr) FileExists(f string) (bool, error) {
_, err := os.Lstat(f)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (sm rcloneSMgr) MkdirAll(p string) error {
return nil
}
func (sm localSMgr) MkdirAll(p string) error {
return os.MkdirAll(p, DEFAULT_DIR_PERM)
}
func (sm rcloneSMgr) ReadFile(p string, out, errOut *bytes.Buffer) ([]byte, error) {
catCmd := exec.Command(rcloneBinary, "cat", p)
err := runCmd(catCmd, out, errOut)
if err != nil {
return nil, err
}
if len(out.Bytes()) == 0 {
return nil, os.ErrNotExist
}
return out.Bytes(), nil
}
func (sm localSMgr) ReadFile(p string, out, _ *bytes.Buffer) ([]byte, error) {
f, err := os.Open(p)
if err != nil {
return nil, err
}
defer f.Close()
out.Reset()
_, err = out.ReadFrom(f)
return out.Bytes(), err
}
// WriteFile streams d to the remote path p via "rclone rcat".
// The child process is always reaped with Wait, even when writing to or
// closing the stdin pipe fails (the original returned early on pipe errors and
// leaked the process); the StdinPipe error is also no longer ignored.
func (sm rcloneSMgr) WriteFile(p string, d []byte) error {
	cmd := exec.Command(rcloneBinary, "rcat", p)
	cmdIn, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	_, werr := cmdIn.Write(d)
	cerr := cmdIn.Close()
	if err := cmd.Wait(); err != nil {
		return err
	}
	if werr != nil {
		return werr
	}
	return cerr
}
func (sm localSMgr) WriteFile(p string, d []byte) error {
tp := p + "-temp"
out, err := os.OpenFile(tp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, DEFAULT_FILE_PERM)
if err != nil {
return err
}
_, err = out.Write(d)
if err != nil {
out.Close()
os.Remove(tp)
return err
}
err = out.Close()
if err != nil {
os.Remove(tp)
return err
}
err = os.Rename(tp, p)
if err != nil {
os.Remove(tp)
return err
}
return nil
}
func (sm rcloneSMgr) DeleteFile(p string) error {
cmd := exec.Command(rcloneBinary, "deletefile", p)
return cmd.Run()
}
func (sm localSMgr) DeleteFile(p string) error {
return os.Remove(p)
}
// WriteLockFile takes a best-effort advisory lock on the remote: fail if the
// file already exists, write a unique timestamp+random payload, then read it
// back — if the content changed, another writer raced us and os.ErrExist is
// returned. (Cleanups vs. original: the redundant []byte conversion of an
// already-[]byte value is gone, and bytes.Equal replaces bytes.Compare != 0.)
func (sm rcloneSMgr) WriteLockFile(p string) error {
	exists, err := sm.FileExists(p)
	if err != nil {
		return err
	}
	if exists {
		return os.ErrExist
	}
	d := []byte(fmt.Sprintf("%s\n%d\n", time.Now().UTC().Format(time.RFC3339Nano), rand.Int63()))
	err = sm.WriteFile(p, d)
	if err != nil {
		return err
	}
	var buf, buf2 bytes.Buffer
	d2, err := sm.ReadFile(p, &buf, &buf2)
	if err != nil {
		return err
	}
	if !bytes.Equal(d, d2) {
		// Someone else overwrote the lock between our write and read-back.
		return os.ErrExist
	}
	return nil
}
func (sm localSMgr) WriteLockFile(p string) error {
exists, err := TheLocalSMgr.FileExists(p)
if err != nil {
return err
}
if exists {
return os.ErrExist
}
lockFile, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_EXCL, DEFAULT_FILE_PERM)
if err != nil {
return err
}
lockFile.Close()
return nil
}
// RemoveLockFile deletes the remote lock file p, returning os.ErrNotExist if
// it is absent. Deletion failures are now propagated to the caller (the
// original discarded the DeleteFile error).
func (sm rcloneSMgr) RemoveLockFile(p string) error {
	exists, err := sm.FileExists(p)
	if err != nil {
		return err
	}
	if !exists {
		return os.ErrNotExist
	}
	return sm.DeleteFile(p)
}
func (sm localSMgr) RemoveLockFile(p string) error {
return os.Remove(p)
}
|
package zk
import (
"encoding/json"
"github.com/golang/glog"
"github.com/qorio/maestro/pkg/registry"
"strconv"
"strings"
)
// Pointer-value prefixes: a node whose value starts with one of these is a
// reference to another path that should be dereferenced.
const (
PrefixEnv = "env://"
PrefixZk  = "zk://"
)
// Node value
func Follow(zc ZK, key registry.Path) (*Node, error) {
n, err := zc.Get(key.Path())
if err != nil {
return nil, err
}
switch {
case strings.Index(n.GetValueString(), PrefixEnv) == 0:
next := n.GetValueString()[len(PrefixEnv):]
return Follow(zc, registry.Path(next))
case strings.Index(n.GetValueString(), PrefixZk) == 0:
next := n.GetValueString()[len(PrefixZk):]
return Follow(zc, registry.Path(next))
default:
return n, nil
}
// if strings.Index(n.GetValueString(), PrefixEnv) == 0 {
// next := n.GetValueString()[len(PrefixEnv):]
// return Follow(zc, registry.Path(next))
// } else {
// return n, nil
// }
}
// Resolve de-references value recursively while it begins with env://.
// Returns (key, resolvedValue, error); a dangling pointer resolves to the
// empty string without error.
func Resolve(zc ZK, key registry.Path, value string) (registry.Path, string, error) {
	// Plain values pass through untouched.
	if !strings.HasPrefix(value, PrefixEnv) {
		return key, value, nil
	}
	p := value[len(PrefixEnv):]
	n, err := zc.Get(p)
	switch {
	case err == ErrNotExist:
		return key, "", nil
	case err != nil:
		return key, "", err
	}
	glog.Infoln("Resolving", key, "=", value, "==>", n.GetValueString())
	return Resolve(zc, key, n.GetValueString())
}
// PathExists reports whether a node exists at key.
// NOTE(review): any error other than ErrNotExist also reports true — i.e.
// "cannot prove absent" counts as existing. Looks deliberate (conservative),
// but confirm callers expect that on e.g. connection failures.
func PathExists(zc ZK, key registry.Path) bool {
_, err := zc.Get(key.Path())
switch {
case err == ErrNotExist:
return false
case err != nil:
return true
}
return true
}
// GetObject reads the node at key and JSON-decodes its value into value.
// A missing node is not an error: value is left untouched and nil returned.
func GetObject(zc ZK, key registry.Path, value interface{}) error {
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		return nil
	case err != nil:
		// Bug fix: previously any read error was swallowed and nil returned,
		// signalling success while leaving value unpopulated.
		return err
	}
	return json.Unmarshal(n.GetValue(), value)
}
// GetString returns the value at key as a string pointer, or nil when the
// node is missing, unreadable, or holds an empty value.
func GetString(zc ZK, key registry.Path) *string {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil
	}
	if v := n.GetValueString(); v != "" {
		return &v
	}
	return nil
}
// GetBytes returns the raw value stored at key, or nil when the node is
// missing or cannot be read.
func GetBytes(zc ZK, key registry.Path) []byte {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil
	}
	return n.GetValue()
}
// GetInt parses the value at key as a base-10 integer, returning nil when
// the node is missing/unreadable, empty, or not a number.
func GetInt(zc ZK, key registry.Path) *int {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil
	}
	v := n.GetValueString()
	if v == "" {
		return nil
	}
	if i, perr := strconv.Atoi(v); perr == nil {
		return &i
	}
	return nil
}
// CreateOrSet stores value at key: strings and []byte are written as-is,
// anything else is JSON-serialized first. ephemeral[0] selects an
// ephemeral node.
func CreateOrSet(zc ZK, key registry.Path, value interface{}, ephemeral ...bool) error {
	switch v := value.(type) {
	case string:
		return CreateOrSetString(zc, key, v, ephemeral...)
	case []byte:
		return CreateOrSetBytes(zc, key, v, ephemeral...)
	}
	serialized, err := json.Marshal(value)
	if err != nil {
		return err
	}
	return CreateOrSetBytes(zc, key, serialized, ephemeral...)
}
// CreateOrSetInt stores the decimal string form of value at key.
func CreateOrSetInt(zc ZK, key registry.Path, value int, ephemeral ...bool) error {
	return CreateOrSetBytes(zc, key, []byte(strconv.Itoa(value)), ephemeral...)
}
// CreateOrSetString stores the raw bytes of value at key.
func CreateOrSetString(zc ZK, key registry.Path, value string, ephemeral ...bool) error {
	b := []byte(value)
	return CreateOrSetBytes(zc, key, b, ephemeral...)
}
// CreateOrSetBytes writes value at key, creating the node if necessary.
// With ephemeral[0] == true the node is created ephemeral and no further
// Set is attempted.
func CreateOrSetBytes(zc ZK, key registry.Path, value []byte, ephemeral ...bool) error {
	if len(ephemeral) > 0 && ephemeral[0] {
		_, err := zc.CreateEphemeral(key.Path(), value)
		return err
	}
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		n, err = zc.Create(key.Path(), value)
		if err != nil {
			return err
		}
	case err != nil:
		return err
	}
	// NOTE(review): after a fresh Create the value is written again via Set —
	// presumably harmless, but it issues a second write; confirm intended.
	err = n.Set(value)
	if err != nil {
		return err
	}
	return nil
}
// Increment adds increment to the counter stored at key, creating the node
// with value "0" first when it does not exist yet.
func Increment(zc ZK, key registry.Path, increment int) error {
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		n, err = zc.Create(key.Path(), []byte(strconv.Itoa(0)))
		if err != nil {
			return err
		}
	case err != nil:
		return err
	}
	_, err = n.Increment(increment)
	return err
}
// CheckAndIncrement performs a compare-and-increment on the counter at key:
// the node's CheckAndIncrement applies the increment only if its current
// value matches current. A missing or empty node is initialized to 0 and
// (0, nil) is returned without incrementing.
func CheckAndIncrement(zc ZK, key registry.Path, current, increment int) (int, error) {
	n, err := zc.Get(key.Path())
	switch {
	// Go evaluates case expressions left-to-right, so for ErrNotExist the
	// len(...) operand is never reached.
	// NOTE(review): for other non-nil errors n.GetValue() IS evaluated; if
	// Get can return a nil node alongside an error this would panic — confirm.
	case err == ErrNotExist, len(n.GetValue()) == 0:
		val := 0
		n, err = zc.Create(key.Path(), []byte(strconv.Itoa(val)))
		if err != nil {
			return -1, err
		}
		return val, nil
	case err != nil:
		return -1, err
	}
	return n.CheckAndIncrement(current, increment)
}
// DeleteObject removes the node at key; deleting a non-existent node is
// not an error.
func DeleteObject(zc ZK, key registry.Path) error {
	if err := zc.Delete(key.Path()); err != ErrNotExist {
		return err
	}
	return nil
}
// Visit calls v for every child of the node at key, stopping early when v
// returns false.
func Visit(zc ZK, key registry.Path, v func(registry.Path, []byte) bool) error {
	zn, err := zc.Get(key.Path())
	if err != nil {
		return err
	}
	children, err := zn.Children()
	if err != nil {
		return err
	}
	for _, child := range children {
		if !v(registry.NewPath(child.GetPath()), child.GetValue()) {
			break
		}
	}
	return nil
}
// A simple non-ephemeral lock held at key and we use simply by incrementing
// and using it like a compare and swap. Any failure to acquire is reported
// as ErrConflict; on success f runs and the revision is bumped once more.
func VersionLockAndExecute(zc ZK, key registry.Path, rev int, f func() error) (int, error) {
	acquired, err := CheckAndIncrement(zc, key, rev, 1)
	if err != nil {
		// Acquisition errors are collapsed to ErrConflict by design.
		return -1, ErrConflict
	}
	if err = f(); err != nil {
		return -1, err
	}
	return CheckAndIncrement(zc, key, acquired, 1)
}
Support for zk:// or env:// prefixes when resolving references.
package zk
import (
"encoding/json"
"github.com/golang/glog"
"github.com/qorio/maestro/pkg/registry"
"strconv"
"strings"
)
const (
PrefixEnv = "env://"
PrefixZk = "zk://"
)
// Follow fetches the node at key; if its value is an env:// or zk://
// reference it recursively follows the pointer until a plain value node
// is found.
func Follow(zc ZK, key registry.Path) (*Node, error) {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil, err
	}
	v := n.GetValueString()
	if strings.HasPrefix(v, PrefixEnv) {
		return Follow(zc, registry.Path(v[len(PrefixEnv):]))
	}
	if strings.HasPrefix(v, PrefixZk) {
		return Follow(zc, registry.Path(v[len(PrefixZk):]))
	}
	return n, nil
}
// If value begins with env:// or zk:// then automatically resolve the
// pointer recursively.
// Returns key, value, error
func Resolve(zc ZK, key registry.Path, value string) (registry.Path, string, error) {
	// de-reference the pointer... env:// is checked before zk://, matching
	// the original case order (a value can only carry one prefix anyway).
	for _, prefix := range []string{PrefixEnv, PrefixZk} {
		if !strings.HasPrefix(value, prefix) {
			continue
		}
		n, err := zc.Get(value[len(prefix):])
		switch {
		case err == ErrNotExist:
			return key, "", nil
		case err != nil:
			return key, "", err
		}
		glog.Infoln("Resolving", key, "=", value, "==>", n.GetValueString())
		return Resolve(zc, key, n.GetValueString())
	}
	return key, value, nil
}
// PathExists reports whether the node at key can be read.
// NOTE(review): read errors other than ErrNotExist count as "exists" —
// presumably intentional (fail open); confirm with callers.
func PathExists(zc ZK, key registry.Path) bool {
	_, err := zc.Get(key.Path())
	return err != ErrNotExist
}
// GetObject reads the node at key and JSON-decodes its value into value.
// A missing node is not an error: value is left untouched and nil returned.
func GetObject(zc ZK, key registry.Path, value interface{}) error {
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		return nil
	case err != nil:
		// Bug fix: previously any read error was swallowed and nil returned,
		// signalling success while leaving value unpopulated.
		return err
	}
	return json.Unmarshal(n.GetValue(), value)
}
// GetString returns the value at key as a *string, or nil when the node is
// missing, unreadable, or empty.
func GetString(zc ZK, key registry.Path) *string {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil
	}
	s := n.GetValueString()
	if len(s) == 0 {
		return nil
	}
	return &s
}
// GetBytes returns the raw bytes stored at key, or nil on any read failure.
func GetBytes(zc ZK, key registry.Path) []byte {
	if n, err := zc.Get(key.Path()); err == nil {
		return n.GetValue()
	}
	return nil
}
// GetInt parses the value at key as a base-10 integer; nil is returned for
// a missing/unreadable node, an empty value, or a non-numeric value.
func GetInt(zc ZK, key registry.Path) *int {
	n, err := zc.Get(key.Path())
	if err != nil {
		return nil
	}
	s := n.GetValueString()
	if len(s) == 0 {
		return nil
	}
	parsed, perr := strconv.Atoi(s)
	if perr != nil {
		return nil
	}
	return &parsed
}
// CreateOrSet stores value at key: strings and []byte pass through as raw
// bytes, everything else is JSON-serialized. ephemeral[0] selects an
// ephemeral node.
func CreateOrSet(zc ZK, key registry.Path, value interface{}, ephemeral ...bool) error {
	if s, ok := value.(string); ok {
		return CreateOrSetString(zc, key, s, ephemeral...)
	}
	if b, ok := value.([]byte); ok {
		return CreateOrSetBytes(zc, key, b, ephemeral...)
	}
	serialized, err := json.Marshal(value)
	if err != nil {
		return err
	}
	return CreateOrSetBytes(zc, key, serialized, ephemeral...)
}
// CreateOrSetInt stores value at key in its decimal string form.
func CreateOrSetInt(zc ZK, key registry.Path, value int, ephemeral ...bool) error {
	encoded := strconv.Itoa(value)
	return CreateOrSetBytes(zc, key, []byte(encoded), ephemeral...)
}
// CreateOrSetString stores the UTF-8 bytes of value at key.
func CreateOrSetString(zc ZK, key registry.Path, value string, ephemeral ...bool) error {
	return CreateOrSetBytes(zc, key, []byte(value), ephemeral...)
}
// CreateOrSetBytes writes value at key, creating the node on first use.
// With ephemeral[0] == true an ephemeral node is created and no Set follows.
func CreateOrSetBytes(zc ZK, key registry.Path, value []byte, ephemeral ...bool) error {
	if len(ephemeral) > 0 && ephemeral[0] {
		_, err := zc.CreateEphemeral(key.Path(), value)
		return err
	}
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		n, err = zc.Create(key.Path(), value)
		if err != nil {
			return err
		}
	case err != nil:
		return err
	}
	// NOTE(review): a freshly created node is immediately Set again with the
	// same value — extra write; presumably harmless, confirm intended.
	err = n.Set(value)
	if err != nil {
		return err
	}
	return nil
}
// Increment adds increment to the counter at key, creating the node with
// value "0" first when it does not exist.
func Increment(zc ZK, key registry.Path, increment int) error {
	n, err := zc.Get(key.Path())
	switch {
	case err == ErrNotExist:
		n, err = zc.Create(key.Path(), []byte(strconv.Itoa(0)))
		if err != nil {
			return err
		}
	case err != nil:
		return err
	}
	_, err = n.Increment(increment)
	return err
}
// CheckAndIncrement performs a compare-and-increment on the counter at key.
// A missing or empty node is initialized to 0 and (0, nil) returned without
// incrementing; otherwise the node's own CheckAndIncrement does the CAS.
func CheckAndIncrement(zc ZK, key registry.Path, current, increment int) (int, error) {
	n, err := zc.Get(key.Path())
	switch {
	// Case expressions evaluate left-to-right: for ErrNotExist the len(...)
	// operand is skipped.
	// NOTE(review): for other non-nil errors n.GetValue() IS evaluated; if
	// Get returns a nil node with an error this could panic — confirm.
	case err == ErrNotExist, len(n.GetValue()) == 0:
		val := 0
		n, err = zc.Create(key.Path(), []byte(strconv.Itoa(val)))
		if err != nil {
			return -1, err
		}
		return val, nil
	case err != nil:
		return -1, err
	}
	return n.CheckAndIncrement(current, increment)
}
// DeleteObject removes the node at key. A node that is already gone is
// treated as success.
func DeleteObject(zc ZK, key registry.Path) error {
	err := zc.Delete(key.Path())
	if err == ErrNotExist {
		return nil
	}
	return err
}
// Visit invokes v for each child of the node at key; returning false from
// v stops the iteration early.
func Visit(zc ZK, key registry.Path, v func(registry.Path, []byte) bool) error {
	parent, err := zc.Get(key.Path())
	if err != nil {
		return err
	}
	kids, err := parent.Children()
	if err != nil {
		return err
	}
	for _, kid := range kids {
		if !v(registry.NewPath(kid.GetPath()), kid.GetValue()) {
			break
		}
	}
	return nil
}
// A simple non-ephemeral lock held at key and we use simply by incrementing
// and using it like a compare and swap. Failure to acquire is reported as
// ErrConflict; after f succeeds the revision is incremented once more.
func VersionLockAndExecute(zc ZK, key registry.Path, rev int, f func() error) (int, error) {
	next, err := CheckAndIncrement(zc, key, rev, 1)
	if err != nil {
		return -1, ErrConflict // acquisition errors collapse to ErrConflict
	}
	if err = f(); err != nil {
		return -1, err
	}
	return CheckAndIncrement(zc, key, next, 1)
}
|
// Copyright 2019 Martin Holst Swende. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the COPYING file.
//
// Package math provides integer math utilities.
package uint256
import (
"fmt"
"math"
"math/big"
"math/bits"
)
var (
	// SignedMax is the largest value that is positive in two's complement:
	// 2**255 - 1 (top bit clear, all other bits set).
	SignedMax = &Int{
		0xffffffffffffffff,
		0xffffffffffffffff,
		0xffffffffffffffff,
		0x7fffffffffffffff,
	}
	// SignedMin is the most negative two's-complement value: 2**255
	// (only bit 255 set).
	SignedMin = &Int{
		0x0000000000000000,
		0x0000000000000000,
		0x0000000000000000,
		0x8000000000000000,
	}
	// zero is a shared all-zero constant; it must never be mutated.
	zero = &Int{}
)

// Int is represented as an array of 4 uint64, in little-endian order,
// so that Int[3] is the most significant, and Int[0] is the least significant
type Int [4]uint64
// NewInt returns a new zero-valued Int.
func NewInt() *Int {
	return new(Int)
}
// SetBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
// Input longer than 32 bytes has its extra high-order bytes ignored.
func (z *Int) SetBytes(buf []byte) *Int {
	var d uint64   // 64-bit word currently being assembled
	k := 0         // index of the word being filled
	s := uint64(0) // bit offset within the current word
	i := len(buf)
	z[0], z[1], z[2], z[3] = 0, 0, 0, 0
	// Walk buf from its least significant (last) byte, packing 8 bytes/word.
	for ; i > 0; i-- {
		d |= uint64(buf[i-1]) << s
		if s += 8; s == 64 {
			z[k] = d
			k++
			s, d = 0, 0
			if k >= len(z) {
				break // all 4 words filled; remaining bytes are dropped
			}
		}
	}
	// Flush the final, partially filled word.
	if k < len(z) {
		z[k] = d
	}
	return z
}
// Bytes32 returns z as a 32 byte big-endian array.
func (z *Int) Bytes32() [32]byte {
	var b [32]byte
	// b[31] receives the least significant byte of z[0].
	for i := 0; i < 32; i++ {
		b[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
	return b
}

// Bytes20 returns the low 20 bytes of z as a big-endian array.
func (z *Int) Bytes20() [20]byte {
	var b [20]byte
	for i := 0; i < 20; i++ {
		b[19-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
	return b
}

// Bytes returns the value of z as a big-endian byte slice, trimmed to the
// minimal length (an empty slice for z == 0).
func (z *Int) Bytes() []byte {
	length := z.ByteLen()
	buf := make([]byte, length)
	for i := 0; i < length; i++ {
		buf[length-1-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
	return buf
}
// WriteToSlice writes the content of z into the given byteslice.
// If dest is larger than 32 bytes, z will fill the first parts, and leave
// the end untouched.
// OBS! If dest is smaller than 32 bytes, only the end parts of z will be used
// for filling the array, making it useful for filling an Address object
func (z *Int) WriteToSlice(dest []byte) {
	// ensure 32 bytes
	// A too large buffer. Fill last 32 bytes
	end := len(dest) - 1
	if end > 31 {
		end = 31
	}
	// An empty dest yields end == -1 and the loop body never runs.
	for i := 0; i <= end; i++ {
		dest[end-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
}
// WriteToArray32 writes all 32 bytes of z to the destination array, including zero-bytes
func (z *Int) WriteToArray32(dest *[32]byte) {
	for i := 0; i < 32; i++ {
		dest[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
}

// WriteToArray20 writes the last 20 bytes of z to the destination array, including zero-bytes
func (z *Int) WriteToArray20(dest *[20]byte) {
	for i := 0; i < 20; i++ {
		dest[19-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
}
//func (z *Int) WriteToArr32(dest [32]bytes){
// for i := 0; i < 32; i++ {
// dest[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
// }
//}
// Uint64 returns the lower 64-bits of z
func (z *Int) Uint64() uint64 {
	return z[0]
}

// Uint64WithOverflow returns the lower 64-bits of z and bool whether overflow occurred
func (z *Int) Uint64WithOverflow() (uint64, bool) {
	return z[0], z[1] != 0 || z[2] != 0 || z[3] != 0
}

// Int64 returns the lower 63-bits of z as int64 (the top bit of z[0] is
// masked off, so the result is always non-negative).
func (z *Int) Int64() int64 {
	return int64(z[0] & 0x7fffffffffffffff)
}
// Clone create a new Int identical to z
func (z *Int) Clone() *Int {
	c := *z
	return &c
}
// Add sets z to the sum x+y
func (z *Int) Add(x, y *Int) {
	z.AddOverflow(x, y) // Inlined.
}

// AddOverflow sets z to the sum x+y, and returns whether overflow occurred
func (z *Int) AddOverflow(x, y *Int) bool {
	var carry uint64
	// Ripple the carry from the least to the most significant word.
	z[0], carry = bits.Add64(x[0], y[0], 0)
	z[1], carry = bits.Add64(x[1], y[1], carry)
	z[2], carry = bits.Add64(x[2], y[2], carry)
	z[3], carry = bits.Add64(x[3], y[3], carry)
	return carry != 0
}
// AddMod sets z to the sum ( x+y ) mod m
func (z *Int) AddMod(x, y, m *Int) {
	if z == m { //z is an alias for m
		m = m.Clone()
	}
	if overflow := z.AddOverflow(x, y); overflow {
		// It overflowed. the actual value is
		// 0x10 00..0 + 0x???..??
		//
		// We can split it into
		// 0xffff...f + 0x1 + 0x???..??
		// And mod each item individually
		a := NewInt().SetAllOne()
		a.Mod(a, m)
		z.Mod(z, m)
		z.Add(z, a)
		// reuse a
		a.SetOne()
		z.Add(z, a)
	}
	z.Mod(z, m)
}
// addTo128 adds the 128-bit quantity (x1 << 64 | x0) into the two-word
// slice z in place, discarding any carry out of z[1].
func addTo128(z []uint64, x0, x1 uint64) {
	lo, c := bits.Add64(z[0], x0, 0)
	hi, _ := bits.Add64(z[1], x1, c)
	z[0], z[1] = lo, hi
}
// PaddedBytes encodes a Int as a 0-padded byte slice. The length
// of the slice is at least n bytes.
// Example, z = 1, n = 20 => [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
// Note: at most 32 bytes of z are emitted; for n < 32 high bytes are cut.
func (z *Int) PaddedBytes(n int) []byte {
	b := make([]byte, n)
	for i := 0; i < 32 && i < n; i++ {
		b[n-1-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
	return b
}
// Sub64 set z to the difference x - y, where y is a 64 bit uint
func (z *Int) Sub64(x *Int, y uint64) {
	var carry uint64
	// Early-out as soon as the borrow stops propagating.
	if z[0], carry = bits.Sub64(x[0], y, carry); carry == 0 {
		return
	}
	if z[1], carry = bits.Sub64(x[1], 0, carry); carry == 0 {
		return
	}
	if z[2], carry = bits.Sub64(x[2], 0, carry); carry == 0 {
		return
	}
	z[3]--
}
// SubOverflow sets z to the difference x-y and returns true if the operation underflowed
func (z *Int) SubOverflow(x, y *Int) bool {
	var carry uint64
	// Ripple the borrow from the least to the most significant word.
	z[0], carry = bits.Sub64(x[0], y[0], carry)
	z[1], carry = bits.Sub64(x[1], y[1], carry)
	z[2], carry = bits.Sub64(x[2], y[2], carry)
	z[3], carry = bits.Sub64(x[3], y[3], carry)
	return carry != 0
}

// Sub sets z to the difference x-y
func (z *Int) Sub(x, y *Int) {
	z.SubOverflow(x, y) // Inlined.
}
// umulStep computes (carry, z) = z + (x * y) + carry, i.e. one word-sized
// multiply-accumulate step; returns (low word, carry-out word).
func umulStep(z, x, y, carry uint64) (uint64, uint64) {
	hi, lo := bits.Mul64(x, y)
	lo, c := bits.Add64(lo, carry, 0)
	hi, _ = bits.Add64(hi, 0, c)
	lo, c = bits.Add64(lo, z, 0)
	hi, _ = bits.Add64(hi, 0, c)
	return lo, hi
}
// umul computes full 256 x 256 -> 512 multiplication.
// The result is little-endian: res[0] least significant, res[7] most.
func umul(x, y *Int) [8]uint64 {
	var res [8]uint64
	// Schoolbook multiplication: one row of x per word of y, carried into
	// the next higher word of res.
	for j := 0; j < len(y); j++ {
		var carry uint64
		res[j+0], carry = umulStep(res[j+0], x[0], y[j], carry)
		res[j+1], carry = umulStep(res[j+1], x[1], y[j], carry)
		res[j+2], carry = umulStep(res[j+2], x[2], y[j], carry)
		res[j+3], carry = umulStep(res[j+3], x[3], y[j], carry)
		res[j+4] = carry
	}
	return res
}
// Mul sets z to the product x*y (mod 2**256).
func (z *Int) Mul(x, y *Int) {
	var (
		alfa = &Int{} // Aggregate results
		beta = &Int{} // Calculate intermediate
	)
	// The numbers are internally represented as [ a, b, c, d ]
	// We do the following operations
	//
	// d1 * d2
	// d1 * c2 (upshift 64)
	// d1 * b2 (upshift 128)
	// d1 * a2 (upshift 192)
	//
	// c1 * d2 (upshift 64)
	// c1 * c2 (upshift 128)
	// c1 * b2 (upshift 192)
	//
	// b1 * d2 (upshift 128)
	// b1 * c2 (upshift 192)
	//
	// a1 * d2 (upshift 192)
	//
	// And we aggregate results into 'alfa'
	// One optimization, however, is reordering.
	// For these ones, we don't care about if they overflow, thus we can use native multiplication
	// and set the result immediately into `a` of the result.
	// b1 * c2 (upshift 192)
	// a1 * d2 (upshift 192)
	// d1 * a2 (upshift 192)
	// c1 * b2 (upshift 192)
	// Remaining ops:
	//
	// d1 * d2
	// d1 * c2 (upshift 64)
	// d1 * b2 (upshift 128)
	//
	// c1 * d2 (upshift 64)
	// c1 * c2 (upshift 128)
	//
	// b1 * d2 (upshift 128)
	alfa[1], alfa[0] = bits.Mul64(x[0], y[0])
	alfa[3], alfa[2] = bits.Mul64(x[0], y[2])
	alfa[3] += x[0]*y[3] + x[1]*y[2] + x[2]*y[1] + x[3]*y[0] // Top ones, ignore overflow
	beta[2], beta[1] = bits.Mul64(x[0], y[1])
	alfa.Add(alfa, beta)
	beta[2], beta[1] = bits.Mul64(x[1], y[0])
	alfa.Add(alfa, beta)
	beta[3], beta[2] = bits.Mul64(x[1], y[1])
	addTo128(alfa[2:], beta[2], beta[3])
	beta[3], beta[2] = bits.Mul64(x[2], y[0])
	addTo128(alfa[2:], beta[2], beta[3])
	z.Copy(alfa)
}
// Squared sets z to z*z (mod 2**256), in place.
func (z *Int) Squared() {
	var (
		alfa = &Int{} // Aggregate results
		beta = &Int{} // Calculate intermediate
	)
	// This algo is based on Mul, but since it's squaring, we know that
	// e.g. z.b*y.c + z.c*y.c == 2 * z.b * z.c, and can save some calculations
	// 2 * d * b
	alfa[3], alfa[2] = bits.Mul64(z[0], z[2])
	alfa.lshOne()
	alfa[1], alfa[0] = bits.Mul64(z[0], z[0])
	// 2 * a * d + 2 * b * c
	alfa[3] += (z[0]*z[3] + z[1]*z[2]) << 1
	// 2 * d * c
	beta[2], beta[1] = bits.Mul64(z[0], z[1])
	beta.lshOne()
	alfa.Add(alfa, beta)
	// c * c
	beta[3], beta[2] = bits.Mul64(z[1], z[1])
	addTo128(alfa[2:], beta[2], beta[3])
	z.Copy(alfa)
}
// setBit sets bit n of z (no-op for n >= 256) and returns z.
func (z *Int) setBit(n uint) *Int {
	// n == 0 -> LSB
	// n == 255 -> MSB
	if n < 256 {
		z[n>>6] |= 1 << (n & 0x3f) // word = n/64, bit = n%64
	}
	return z
}

// isBitSet returns true if bit n is set, where n = 0 eq LSB
func (z *Int) isBitSet(n uint) bool {
	if n > 255 {
		return false
	}
	// z [ n / 64] & 1 << (n % 64)
	return (z[n>>6] & (1 << (n & 0x3f))) != 0
}
// nlz returns the leading-zero count of d's highest non-zero word, reduced
// mod 32 — i.e. the shift that normalizes d when viewed as 32-bit limbs
// (see shl / divKnuth). Returns 0 when d is zero.
func nlz(d *Int) uint {
	for i := 3; i >= 0; i-- {
		if d[i] != 0 {
			return uint(bits.LeadingZeros64(d[i]) % 32)
		}
	}
	return 0
}
// shl returns d, split into 32-bit limbs and shifted left by s bits
// (the normalized form of d for Knuth division). When isdividend is true,
// one extra limb is appended to hold the bits shifted out of the top
// (dn has 9 slots for exactly this reason).
// Note: for s == 0, `t >> (32 - s)` is a 32-bit shift by 32, which Go
// defines as 0 — so the no-shift case is still correct.
func shl(d *Int, s uint, isdividend bool) []uint32 {
	dn := make([]uint32, 9)
	for i := 0; i < 4; i++ {
		dn[2*i] = uint32(d[i])
		dn[2*i+1] = uint32(d[i] >> 32)
	}
	// n = index of highest non-zero limb.
	var n int
	for i := 7; i >= 0; i-- {
		if dn[i] != 0 {
			n = i
			break
		}
	}
	// Shift limbs left by s, carrying the spilled high bits upward.
	var prev, t uint32
	for i := 0; i <= n; i++ {
		t = dn[i]
		dn[i] = prev | (dn[i] << s)
		prev = t >> (32 - s)
	}
	if isdividend {
		n = n + 1
		dn[n] = prev
	}
	return dn[:n+1]
}
// divKnuth divides the normalized 32-bit-limb dividend x by the normalized
// divisor y, returning the quotient limbs (Knuth TAOCP Vol 2, 4.3.1,
// Algorithm D). Inputs must already be normalized via shl; the remainder
// is left in x.
func divKnuth(x, y []uint32) []uint32 {
	m, n := len(x)-1, len(y)
	q := make([]uint32, m-n+1)
	// Number base (2**32)
	var b uint64 = 0x100000000
	if n <= 2 {
		panic("Should have been handled by udivremBy1()")
	}
	// Main Loop
	var qhat, rhat uint64
	for j := m - n; j >= 0; j-- {
		// Estimate the quotient digit from the top two dividend limbs.
		qhat = (uint64(x[j+n])*b + uint64(x[j+n-1])) / uint64(y[n-1])
		rhat = uint64(x[j+n])*b + uint64(x[j+n-1]) - qhat*uint64(y[n-1])
	AGAIN:
		// Correct an estimate that is one or two too large.
		if qhat >= b || (qhat*uint64(y[n-2]) > b*rhat+uint64(x[j+n-2])) {
			qhat = qhat - 1
			rhat = rhat + uint64(y[n-1])
			if rhat < b {
				goto AGAIN
			}
		}
		// Multiply and subtract.
		var p uint64
		var t, k int64
		for i := 0; i < n; i++ {
			p = qhat * uint64(y[i])
			t = int64(x[i+j]) - k - int64(p&0xffffffff)
			x[i+j] = uint32(t)
			k = int64(p>>32) - (t >> 32)
		}
		t = int64(x[j+n]) - k
		x[j+n] = uint32(t)
		q[j] = uint32(qhat)
		if t < 0 {
			// If we subtracted too much, add back.
			q[j] = q[j] - 1
			var k, t uint64
			for i := 0; i < n; i++ {
				t = uint64(x[i+j]) + uint64(y[i]) + k
				x[i+j] = uint32(t)
				k = t >> 32
			}
			x[j+n] = x[j+n] + uint32(k)
		}
	}
	return q
}
// addTo adds y into x in place over len(y) words and returns the carry out.
func addTo(x, y []uint64) uint64 {
	var c uint64
	for i := range y {
		x[i], c = bits.Add64(x[i], y[i], c)
	}
	return c
}
// subMulTo computes x -= y * multiplier in place over len(y) words and
// returns the final borrow.
func subMulTo(x, y []uint64, multiplier uint64) uint64 {
	var borrow uint64
	for i := range y {
		s, b1 := bits.Sub64(x[i], borrow, 0)
		hi, lo := bits.Mul64(y[i], multiplier)
		d, b2 := bits.Sub64(s, lo, 0)
		x[i] = d
		borrow = hi + b1 + b2
	}
	return borrow
}
// udivremBy1 divides u by single normalized word d and produces both quotient and remainder.
// d must be normalized (top word of u < d) per bits.Div64's contract.
func udivremBy1(u []uint64, d uint64) (quot []uint64, rem uint64) {
	n := len(u) - 1
	quot = make([]uint64, n)
	rem = u[n] // top word seeds the running remainder
	for j := n - 1; j >= 0; j-- {
		quot[j], rem = bits.Div64(rem, u[j], d)
	}
	return
}
// udivremKnuth divides the normalized multi-word dividend u by the
// normalized divisor d (len(d) >= 2, top bit of d's high word set),
// returning the quotient. The remainder is left in u.
func udivremKnuth(u, d []uint64) (quot []uint64) {
	quot = make([]uint64, len(u)-len(d))
	dh := d[len(d)-1]
	dl := d[len(d)-2]
	for j := len(u) - len(d) - 1; j >= 0; j-- {
		u2 := u[j+len(d)]
		u1 := u[j+len(d)-1]
		u0 := u[j+len(d)-2]
		var qhat, rhat uint64
		if u2 >= dh { // Division overflows.
			qhat = ^uint64(0)
			// TODO: Add "qhat one to big" adjustment (not needed for correctness, but helps avoiding "add back" case).
		} else {
			qhat, rhat = bits.Div64(u2, u1, dh)
			ph, pl := bits.Mul64(qhat, dl)
			if ph > rhat || (ph == rhat && pl > u0) {
				qhat--
				// TODO: Add "qhat one to big" adjustment (not needed for correctness, but helps avoiding "add back" case).
			}
		}
		// Multiply and subtract.
		borrow := subMulTo(u[j:], d, qhat)
		u[j+len(d)] = u2 - borrow
		if u2 < borrow { // Too much subtracted, add back.
			qhat--
			u[j+len(d)] += addTo(u[j:], d)
		}
		quot[j] = qhat // Store quotient digit.
	}
	return quot
}
// udivrem divides u by d, returning the quotient words and remainder.
// Callers must ensure d != 0 and len(u) >= significant length of d.
// Note: when shift == 0 the `>> (64 - shift)` terms shift by 64, which Go
// defines as 0, so the unshifted case is still handled correctly.
func udivrem(u []uint64, d *Int) (quot []uint64, rem *Int, err error) {
	// dLen = number of significant words in the divisor.
	var dLen int
	for i := len(d) - 1; i >= 0; i-- {
		if d[i] != 0 {
			dLen = i + 1
			break
		}
	}
	// Normalize divisor so its top word has the high bit set.
	shift := bits.LeadingZeros64(d[dLen-1])
	var dnStorage Int
	dn := dnStorage[:dLen]
	for i := dLen - 1; i > 0; i-- {
		dn[i] = (d[i] << shift) | (d[i-1] >> (64 - shift))
	}
	dn[0] = d[0] << shift
	// uLen = number of significant words in the dividend.
	var uLen int
	for i := len(u) - 1; i >= 0; i-- {
		if u[i] != 0 {
			uLen = i + 1
			break
		}
	}
	// Normalized dividend gets one extra word for the shifted-out top bits.
	var unStorage [9]uint64
	un := unStorage[:uLen+1]
	un[uLen] = u[uLen-1] >> (64 - shift)
	for i := uLen - 1; i > 0; i-- {
		un[i] = (u[i] << shift) | (u[i-1] >> (64 - shift))
	}
	un[0] = u[0] << shift
	// TODO: Skip the highest word of numerator if not significant.
	if dLen == 1 {
		quot, r := udivremBy1(un, dn[0])
		return quot, new(Int).SetUint64(r >> shift), nil
	}
	quot = udivremKnuth(un, dn)
	// De-normalize the remainder left in un.
	rem = new(Int)
	for i := 0; i < dLen-1; i++ {
		rem[i] = (un[i] >> shift) | (un[i+1] << (64 - shift))
	}
	rem[dLen-1] = un[dLen-1] >> shift
	return quot, rem, nil
}
// Div sets z to the quotient x/y and returns z.
// If y == 0, z is set to 0
func (z *Int) Div(x, y *Int) *Int {
	if y.IsZero() || y.Gt(x) {
		return z.Clear()
	}
	if x.Eq(y) {
		return z.SetOne()
	}
	// Shortcut some cases
	if x.IsUint64() {
		return z.SetUint64(x.Uint64() / y.Uint64())
	}
	// At this point, we know
	// x/y ; x > y > 0
	if quot, _, err := udivrem(x[:], y); err == nil {
		z.Clear()
		copy(z[:len(quot)], quot)
		return z
	}
	// Fallback path (32-bit-limb Knuth division).
	// See Knuth, Volume 2, section 4.3.1, Algorithm D.
	// Normalize by shifting divisor left just enough so that its high-order
	// bit is on and u left the same amount.
	// function nlz does the calculating of the amount and shl does the left operation.
	s := nlz(y)
	xn := shl(x, s, true)
	yn := shl(y, s, false)
	// divKnuth does the division of normalized dividend and divisor with Knuth Algorithm D.
	q := divKnuth(xn, yn)
	// Repack the 32-bit quotient limbs into z's 64-bit words.
	z.Clear()
	for i := 0; i < len(q); i++ {
		z[i/2] = z[i/2] | uint64(q[i])<<(32*(uint64(i)%2))
	}
	return z
}
// Mod sets z to the modulus x%y for y != 0 and returns z.
// If y == 0, z is set to 0 (OBS: differs from the big.Int)
func (z *Int) Mod(x, y *Int) *Int {
	if x.IsZero() || y.IsZero() {
		return z.Clear()
	}
	switch x.Cmp(y) {
	case -1:
		// x < y
		copy(z[:], x[:])
		return z
	case 0:
		// x == y
		return z.Clear() // They are equal
	}
	// At this point:
	// x != 0
	// y != 0
	// x > y
	// Shortcut trivial case
	if x.IsUint64() {
		return z.SetUint64(x.Uint64() % y.Uint64())
	}
	if _, rem, err := udivrem(x[:], y); err == nil {
		return z.Copy(rem)
	}
	// Fallback: z = x - (x/y)*y.
	q := NewInt()
	q.Div(x, y)
	q.Mul(q, y)
	z.Sub(x, q)
	return z
}
// Smod interprets x and y as signed integers sets z to
// (sign x) * { abs(x) modulus abs(y) }
// If y == 0, z is set to 0 (OBS: differs from the big.Int)
// OBS! Modifies x and y
func (z *Int) Smod(x, y *Int) *Int {
	ys := y.Sign()
	xs := x.Sign()
	// abs x
	if xs == -1 {
		x.Neg()
	}
	// abs y
	if ys == -1 {
		y.Neg()
	}
	z.Mod(x, y)
	// Result takes the sign of x.
	if xs == -1 {
		z.Neg()
	}
	return z
}
// MulMod calculates the modulo-m multiplication of x and y and
// returns z = (x * y) % m, using the full 512-bit product.
func (z *Int) MulMod(x, y, m *Int) *Int {
	p := umul(x, y)
	var (
		pl Int // low 256 bits of the product
		ph Int // high 256 bits of the product
	)
	copy(pl[:], p[:4])
	copy(ph[:], p[4:])
	// If the multiplication is within 256 bits use Mod().
	if ph.IsZero() {
		if z == m { //z is an alias for m; TODO: This should not be needed.
			m = m.Clone()
		}
		z.Mod(&pl, m)
		return z
	}
	if _, rem, err := udivrem(p[:], m); err == nil {
		return z.Copy(rem)
	}
	// Fallback: serialize the 512-bit product big-endian and use big.Int.
	var pbytes [len(p) * 8]byte
	for i := 0; i < len(pbytes); i++ {
		pbytes[len(pbytes)-1-i] = byte(p[i/8] >> uint64(8*(i%8)))
	}
	// At this point, we _could_ do x=x mod m, y = y mod m, and test again
	// if they fit within 256 bytes. But for now just wrap big.Int instead
	bp := new(big.Int)
	bp.SetBytes(pbytes[:])
	z.SetFromBig(bp.Mod(bp, m.ToBig()))
	return z
}
// Abs interprets z as a signed number and sets z to its absolute value.
// Note: the minimum signed value (bit 255 set, rest zero) negates to
// itself, as in ordinary two's complement.
func (z *Int) Abs() *Int {
	if z.Lt(SignedMin) {
		// z < 2**255: already non-negative.
		return z
	}
	z.Sub(zero, z)
	return z
}
// Neg sets z to -z (two's complement negation mod 2**256) and returns z.
func (z *Int) Neg() *Int {
	z.Sub(zero, z)
	return z
}
// Sdiv interprets n and d as signed integers, does a
// signed division on the two operands and sets z to the result
// If d == 0, z is set to 0
// OBS! This method (potentially) modifies both n and d
func (z *Int) Sdiv(n, d *Int) *Int {
	if n.Sign() > 0 {
		if d.Sign() > 0 {
			// pos / pos
			z.Div(n, d)
			return z
		} else {
			// pos / neg
			z.Div(n, d.Neg())
			return z.Neg()
		}
	}
	if d.Sign() < 0 {
		// neg / neg
		z.Div(n.Neg(), d.Neg())
		return z
	}
	// neg / pos
	z.Div(n.Neg(), d)
	return z.Neg()
}
// Sign returns:
//
//	-1 if z < 0
//	 0 if z == 0
//	+1 if z > 0
//
// Where z is interpreted as a signed (two's complement) number.
func (z *Int) Sign() int {
	if z.IsZero() {
		return 0
	}
	// Non-zero and below 2**255 means the sign bit is clear.
	if z.Lt(SignedMin) {
		return 1
	}
	return -1
}
// BitLen returns the number of bits required to represent z (0 for z == 0).
func (z *Int) BitLen() int {
	for i := 3; i > 0; i-- {
		if z[i] != 0 {
			return 64*i + bits.Len64(z[i])
		}
	}
	return bits.Len64(z[0])
}
// ByteLen returns the number of bytes required to represent z.
func (z *Int) ByteLen() int {
	return (z.BitLen() + 7) >> 3
}
// lsh64/lsh128/lsh192 set z = x << 64/128/192 by whole-word moves.
func (z *Int) lsh64(x *Int) *Int {
	z[3], z[2], z[1], z[0] = x[2], x[1], x[0], 0
	return z
}
func (z *Int) lsh128(x *Int) *Int {
	z[3], z[2], z[1], z[0] = x[1], x[0], 0, 0
	return z
}
func (z *Int) lsh192(x *Int) *Int {
	z[3], z[2], z[1], z[0] = x[0], 0, 0, 0
	return z
}

// rsh64/rsh128/rsh192 set z = x >> 64/128/192 (logical, zero-filled).
func (z *Int) rsh64(x *Int) *Int {
	z[3], z[2], z[1], z[0] = 0, x[3], x[2], x[1]
	return z
}
func (z *Int) rsh128(x *Int) *Int {
	z[3], z[2], z[1], z[0] = 0, 0, x[3], x[2]
	return z
}
func (z *Int) rsh192(x *Int) *Int {
	z[3], z[2], z[1], z[0] = 0, 0, 0, x[3]
	return z
}

// srsh64/srsh128/srsh192 are the arithmetic (sign-extending) variants for
// negative values: vacated words are filled with all-ones.
func (z *Int) srsh64(x *Int) *Int {
	z[3], z[2], z[1], z[0] = math.MaxUint64, x[3], x[2], x[1]
	return z
}
func (z *Int) srsh128(x *Int) *Int {
	z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, x[3], x[2]
	return z
}
func (z *Int) srsh192(x *Int) *Int {
	z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, math.MaxUint64, x[3]
	return z
}
// Not sets z = ^z (bitwise complement, in place) and returns z.
// (The original comment said ^x, but there is no x parameter.)
func (z *Int) Not() *Int {
	z[3], z[2], z[1], z[0] = ^z[3], ^z[2], ^z[1], ^z[0]
	return z
}
// Gt returns true if z > x
func (z *Int) Gt(x *Int) bool {
	// Compare from the most significant word down; the first differing
	// word decides.
	for i := 3; i > 0; i-- {
		if z[i] != x[i] {
			return z[i] > x[i]
		}
	}
	return z[0] > x[0]
}
// Slt interprets z and x as signed integers, and returns
// true if z < x
func (z *Int) Slt(x *Int) bool {
	zSign := z.Sign()
	xSign := x.Sign()
	switch {
	case zSign >= 0 && xSign < 0:
		return false
	case zSign < 0 && xSign >= 0:
		return true
	default:
		// Same sign: unsigned ordering matches signed ordering.
		return z.Lt(x)
	}
}

// Sgt interprets z and x as signed integers, and returns
// true if z > x
func (z *Int) Sgt(x *Int) bool {
	zSign := z.Sign()
	xSign := x.Sign()
	switch {
	case zSign >= 0 && xSign < 0:
		return true
	case zSign < 0 && xSign >= 0:
		return false
	default:
		// Same sign: unsigned ordering matches signed ordering.
		return z.Gt(x)
	}
}

// SetIfGt sets z to 1 if z > x, else to 0.
func (z *Int) SetIfGt(x *Int) {
	if z.Gt(x) {
		z.SetOne()
	} else {
		z.Clear()
	}
}
// Lt returns true if z < x
func (z *Int) Lt(x *Int) bool {
	// Compare from the most significant word down; the first differing
	// word decides.
	for i := 3; i > 0; i-- {
		if z[i] != x[i] {
			return z[i] < x[i]
		}
	}
	return z[0] < x[0]
}
// SetIfLt sets z to 1 if z < x, else to 0.
func (z *Int) SetIfLt(x *Int) {
	if z.Lt(x) {
		z.SetOne()
	} else {
		z.Clear()
	}
}

// SetUint64 sets z to the value x
func (z *Int) SetUint64(x uint64) *Int {
	z[3], z[2], z[1], z[0] = 0, 0, 0, x
	return z
}
// Eq returns true if z == x
func (z *Int) Eq(x *Int) bool {
	// Go compares arrays element-wise.
	return *z == *x
}
// SetIfEq sets z to
//
//	1 if z == x
//	0 if z != x
func (z *Int) SetIfEq(x *Int) {
	if z.Eq(x) {
		z.SetOne()
	} else {
		z.Clear()
	}
}

// Cmp compares z and x and returns:
//
//	-1 if z < x
//	 0 if z == x
//	+1 if z > x
//
// (unsigned comparison)
func (z *Int) Cmp(x *Int) (r int) {
	if z.Gt(x) {
		return 1
	}
	if z.Lt(x) {
		return -1
	}
	return 0
}
// LtUint64 returns true if z is smaller than n
func (z *Int) LtUint64(n uint64) bool {
	return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && z[0] < n
}

// GtUint64 returns true if z is larger than n
func (z *Int) GtUint64(n uint64) bool {
	return (z[3] != 0) || (z[2] != 0) || (z[1] != 0) || z[0] > n
}

// IsUint64 reports whether z can be represented as a uint64.
func (z *Int) IsUint64() bool {
	return (z[3] == 0) && (z[2] == 0) && (z[1] == 0)
}

// IsUint128 reports whether z can be represented in 128 bits.
func (z *Int) IsUint128() bool {
	return (z[3] == 0) && (z[2] == 0)
}

// IsZero returns true if z == 0
func (z *Int) IsZero() bool {
	return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && (z[0] == 0)
}

// IsOne returns true if z == 1
func (z *Int) IsOne() bool {
	return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && (z[0] == 1)
}

// Clear sets z to 0
func (z *Int) Clear() *Int {
	z[3], z[2], z[1], z[0] = 0, 0, 0, 0
	return z
}

// SetAllOne sets all the bits of z to 1
func (z *Int) SetAllOne() *Int {
	z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64
	return z
}

// SetOne sets z to 1
func (z *Int) SetOne() *Int {
	z[3], z[2], z[1], z[0] = 0, 0, 0, 1
	return z
}
// lshOne shifts z left by 1 bit, in place (the top bit is discarded).
func (z *Int) lshOne() {
	var (
		a, b uint64 // carry bits between adjacent words
	)
	a = z[0] >> 63
	b = z[1] >> 63
	z[0] = z[0] << 1
	z[1] = z[1]<<1 | a
	a = z[2] >> 63
	z[2] = z[2]<<1 | b
	z[3] = z[3]<<1 | a
}
// Lsh sets z = x << n and returns z.
func (z *Int) Lsh(x *Int, n uint) *Int {
	// n % 64 == 0: pure word moves, no bit shifting needed.
	if n&0x3f == 0 {
		switch n {
		case 0:
			return z.Copy(x)
		case 64:
			return z.lsh64(x)
		case 128:
			return z.lsh128(x)
		case 192:
			return z.lsh192(x)
		default:
			return z.Clear()
		}
	}
	var (
		a, b uint64 // bits carried between adjacent words
	)
	// Big swaps first: move whole words, then fall through (via goto) to
	// the remaining sub-64-bit shift with n reduced accordingly.
	switch {
	case n > 192:
		if n > 256 {
			return z.Clear()
		}
		z.lsh192(x)
		n -= 192
		goto sh192
	case n > 128:
		z.lsh128(x)
		n -= 128
		goto sh128
	case n > 64:
		z.lsh64(x)
		n -= 64
		goto sh64
	default:
		z.Copy(x)
	}
	// remaining shifts
	a = z[0] >> (64 - n)
	z[0] = z[0] << n
sh64:
	b = z[1] >> (64 - n)
	z[1] = (z[1] << n) | a
sh128:
	a = z[2] >> (64 - n)
	z[2] = (z[2] << n) | b
sh192:
	z[3] = (z[3] << n) | a
	return z
}
// Rsh sets z = x >> n (logical, zero-filling shift) and returns z.
func (z *Int) Rsh(x *Int, n uint) *Int {
	// n % 64 == 0: pure word moves, no bit shifting needed.
	if n&0x3f == 0 {
		switch n {
		case 0:
			return z.Copy(x)
		case 64:
			return z.rsh64(x)
		case 128:
			return z.rsh128(x)
		case 192:
			return z.rsh192(x)
		default:
			return z.Clear()
		}
	}
	var (
		a, b uint64 // bits carried between adjacent words
	)
	// Big swaps first: move whole words, then fall through (via goto) to
	// the remaining sub-64-bit shift with n reduced accordingly.
	switch {
	case n > 192:
		if n > 256 {
			return z.Clear()
		}
		z.rsh192(x)
		n -= 192
		goto sh192
	case n > 128:
		z.rsh128(x)
		n -= 128
		goto sh128
	case n > 64:
		z.rsh64(x)
		n -= 64
		goto sh64
	default:
		z.Copy(x)
	}
	// remaining shifts
	a = z[3] << (64 - n)
	z[3] = z[3] >> n
sh64:
	b = z[2] << (64 - n)
	z[2] = (z[2] >> n) | a
sh128:
	a = z[1] << (64 - n)
	z[1] = (z[1] >> n) | b
sh192:
	z[0] = (z[0] >> n) | a
	return z
}
// Srsh (Signed/Arithmetic right shift)
// considers x to be a signed integer, during right-shift
// and sets z = x >> n and returns z.
func (z *Int) Srsh(x *Int, n uint) *Int {
	// If the MSB of the *source* x is 0, Srsh is the same as Rsh.
	// Bug fix: this previously tested z.isBitSet(255), which is wrong
	// whenever z is not an alias of x — the sign to extend is x's.
	if !x.isBitSet(255) {
		return z.Rsh(x, n)
	}
	// n % 64 == 0: pure word moves with all-ones sign fill.
	if n&0x3f == 0 {
		switch n {
		case 0:
			return z.Copy(x)
		case 64:
			return z.srsh64(x)
		case 128:
			return z.srsh128(x)
		case 192:
			return z.srsh192(x)
		default:
			return z.SetAllOne()
		}
	}
	var (
		// Sign-fill mask for the top (64 - n%64) bits of the high word.
		a uint64 = math.MaxUint64 << (64 - n%64)
	)
	// Big swaps first
	switch {
	case n > 192:
		if n > 256 {
			return z.SetAllOne()
		}
		z.srsh192(x)
		n -= 192
		goto sh192
	case n > 128:
		z.srsh128(x)
		n -= 128
		goto sh128
	case n > 64:
		z.srsh64(x)
		n -= 64
		goto sh64
	default:
		z.Copy(x)
	}
	// remaining shifts
	z[3], a = (z[3]>>n)|a, z[3]<<(64-n)
sh64:
	z[2], a = (z[2]>>n)|a, z[2]<<(64-n)
sh128:
	z[1], a = (z[1]>>n)|a, z[1]<<(64-n)
sh192:
	z[0] = (z[0] >> n) | a
	return z
}
// Copy copies the value x into z, and returns z.
func (z *Int) Copy(x *Int) *Int {
	z[0], z[1], z[2], z[3] = x[0], x[1], x[2], x[3]
	return z
}
// Or sets z = x | y and returns z.
func (z *Int) Or(x, y *Int) *Int {
	for i := 0; i < 4; i++ {
		z[i] = x[i] | y[i]
	}
	return z
}
// And sets z = x & y and returns z.
func (z *Int) And(x, y *Int) *Int {
	for i := 0; i < 4; i++ {
		z[i] = x[i] & y[i]
	}
	return z
}
// Xor sets z = x ^ y and returns z.
func (z *Int) Xor(x, y *Int) *Int {
	for i := 0; i < 4; i++ {
		z[i] = x[i] ^ y[i]
	}
	return z
}
// Byte sets z to the value of the byte at position n,
// with 'z' considered as a big-endian 32-byte integer.
// If 'n' >= 32, z is set to 0.
// Example: z = '5', n = 31 => 5 (byte 31 is the least significant).
func (z *Int) Byte(n *Int) *Int {
	// in z, z[0] is the least significant word
	//
	if number, overflow := n.Uint64WithOverflow(); !overflow {
		if number < 32 {
			// Pick the 64-bit word that holds byte n (words are
			// little-endian, bytes within the value are big-endian),
			// then extract the single byte from it.
			number := z[4-1-number/8]
			offset := (n[0] & 0x7) << 3 // 8*(n.d % 8)
			z[0] = (number & (0xff00000000000000 >> offset)) >> (56 - offset)
			z[3], z[2], z[1] = 0, 0, 0
			return z
		}
	}
	return z.Clear()
}
// Hex returns a hex representation of z: four zero-padded 16-digit
// groups, most-significant word first, separated by dots.
func (z *Int) Hex() string {
	out := fmt.Sprintf("%016x", z[3])
	for i := 2; i >= 0; i-- {
		out += fmt.Sprintf(".%016x", z[i])
	}
	return out
}
// Exp sets z = base**exponent mod 2**256, and returns z.
func (z *Int) Exp(base, exponent *Int) *Int {
res := Int{1, 0, 0, 0}
// b^0 == 1
if exponent.IsZero() || base.IsOne() {
return z.Copy(&res)
}
// b^1 == b
if exponent.IsOne() {
return z.Copy(base)
}
var (
word uint64
bits int
multiplier = *base
)
expBitlen := exponent.BitLen()
word = exponent[0]
bits = 0
for ; bits < expBitlen && bits < 64; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[1]
for ; bits < expBitlen && bits < 128; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[2]
for ; bits < expBitlen && bits < 192; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[3]
for ; bits < expBitlen && bits < 256; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
return z.Copy(&res)
}
// SignExtend extends the length of a two’s complement signed integer.
// Sets z to
//   - num, if back > 31
//   - num interpreted as a signed number with sign-bit at (back*8+7),
//     extended to the full 256 bits
//
// NOTE(review): on the back <= 31 path the result is stored into num
// (via num.Or / num.And) and z itself is never assigned — this matches
// the doc only when callers pass z as num. back and mask are also
// modified in place — confirm callers expect that.
func (z *Int) SignExtend(back, num *Int) {
	if back.GtUint64(31) {
		z.Copy(num)
		return
	}
	bit := uint(back.Uint64()*8 + 7) // position of the sign bit
	// mask = (1 << bit) - 1: selects all bits below the sign bit.
	mask := back.Lsh(back.SetOne(), bit)
	mask.Sub64(mask, 1)
	if num.isBitSet(bit) {
		// Negative: fill everything from the sign bit up with ones.
		num.Or(num, mask.Not())
	} else {
		// Non-negative: clear everything above the mask.
		num.And(num, mask)
	}
}
var _ fmt.Formatter = zero
func (z *Int) Format(s fmt.State, ch rune) {
z.ToBig().Format(s, ch)
}
fixup! Implement Knuth based long division
// Copyright 2019 Martin Holst Swende. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the COPYING file.
//
// Package math provides integer math utilities.
package uint256
import (
"fmt"
"math"
"math/big"
"math/bits"
)
var (
SignedMax = &Int{
0xffffffffffffffff,
0xffffffffffffffff,
0xffffffffffffffff,
0x7fffffffffffffff,
}
SignedMin = &Int{
0x0000000000000000,
0x0000000000000000,
0x0000000000000000,
0x8000000000000000,
}
zero = &Int{}
)
// Int is represented as an array of 4 uint64, in little-endian order,
// so that Int[3] is the most significant, and Int[0] is the least significant
type Int [4]uint64
func NewInt() *Int {
return &Int{}
}
// SetBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z *Int) SetBytes(buf []byte) *Int {
var d uint64
k := 0
s := uint64(0)
i := len(buf)
z[0], z[1], z[2], z[3] = 0, 0, 0, 0
for ; i > 0; i-- {
d |= uint64(buf[i-1]) << s
if s += 8; s == 64 {
z[k] = d
k++
s, d = 0, 0
if k >= len(z) {
break
}
}
}
if k < len(z) {
z[k] = d
}
return z
}
// Bytes32 returns a the a 32 byte big-endian array.
func (z *Int) Bytes32() [32]byte {
var b [32]byte
for i := 0; i < 32; i++ {
b[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
return b
}
// Bytes20 returns the lowest 20 bytes of z as a big-endian array
// (address-sized; higher bytes of z are simply not emitted).
func (z *Int) Bytes20() [20]byte {
	var b [20]byte
	for i := 0; i < 20; i++ {
		// byte i of the little-endian word array, placed big-endian
		b[19-i] = byte(z[i/8] >> uint64(8*(i%8)))
	}
	return b
}
// Bytes returns the value of z as a big-endian byte slice.
func (z *Int) Bytes() []byte {
length := z.ByteLen()
buf := make([]byte, length)
for i := 0; i < length; i++ {
buf[length-1-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
return buf
}
// WriteToSlice writes the content of z into the given byteslice.
// If dest is larger than 32 bytes, z will fill the first parts, and leave
// the end untouched.
// OBS! If dest is smaller than 32 bytes, only the end parts of z will be used
// for filling the array, making it useful for filling an Address object
func (z *Int) WriteToSlice(dest []byte) {
// ensure 32 bytes
// A too large buffer. Fill last 32 bytes
end := len(dest) - 1
if end > 31 {
end = 31
}
for i := 0; i <= end; i++ {
dest[end-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
}
// WriteToArray32 writes all 32 bytes of z to the destination array, including zero-bytes
func (z *Int) WriteToArray32(dest *[32]byte) {
for i := 0; i < 32; i++ {
dest[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
}
// WriteToArray20 writes the last 20 bytes of z to the destination array, including zero-bytes
func (z *Int) WriteToArray20(dest *[20]byte) {
for i := 0; i < 20; i++ {
dest[19-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
}
//func (z *Int) WriteToArr32(dest [32]bytes){
// for i := 0; i < 32; i++ {
// dest[31-i] = byte(z[i/8] >> uint64(8*(i%8)))
// }
//}
// Uint64 returns the lower 64-bits of z
func (z *Int) Uint64() uint64 {
return z[0]
}
// Uint64 returns the lower 64-bits of z and bool whether overflow occurred
func (z *Int) Uint64WithOverflow() (uint64, bool) {
return z[0], z[1] != 0 || z[2] != 0 || z[3] != 0
}
// Uint64 returns the lower 63-bits of z as int64
func (z *Int) Int64() int64 {
return int64(z[0] & 0x7fffffffffffffff)
}
// Clone create a new Int identical to z
func (z *Int) Clone() *Int {
return &Int{z[0], z[1], z[2], z[3]}
}
// Add sets z to the sum x+y
func (z *Int) Add(x, y *Int) {
z.AddOverflow(x, y) // Inlined.
}
// AddOverflow sets z to the sum x+y, and returns whether overflow occurred
func (z *Int) AddOverflow(x, y *Int) bool {
var carry uint64
z[0], carry = bits.Add64(x[0], y[0], 0)
z[1], carry = bits.Add64(x[1], y[1], carry)
z[2], carry = bits.Add64(x[2], y[2], carry)
z[3], carry = bits.Add64(x[3], y[3], carry)
return carry != 0
}
// Add sets z to the sum ( x+y ) mod m
func (z *Int) AddMod(x, y, m *Int) {
if z == m { //z is an alias for m
m = m.Clone()
}
if overflow := z.AddOverflow(x, y); overflow {
// It overflowed. the actual value is
// 0x10 00..0 + 0x???..??
//
// We can split it into
// 0xffff...f + 0x1 + 0x???..??
// And mod each item individually
a := NewInt().SetAllOne()
a.Mod(a, m)
z.Mod(z, m)
z.Add(z, a)
// reuse a
a.SetOne()
z.Add(z, a)
}
z.Mod(z, m)
}
// addMiddle128 adds two uint64 integers to the upper part of z
func addTo128(z []uint64, x0, x1 uint64) {
var carry uint64
z[0], carry = bits.Add64(z[0], x0, carry) // TODO: The order of adding x, y is confusing.
z[1], _ = bits.Add64(z[1], x1, carry)
}
// PaddedBytes encodes a Int as a 0-padded byte slice. The length
// of the slice is at least n bytes.
// Example, z =1, n = 20 => [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
func (z *Int) PaddedBytes(n int) []byte {
b := make([]byte, n)
for i := 0; i < 32 && i < n; i++ {
b[n-1-i] = byte(z[i/8] >> uint64(8*(i%8)))
}
return b
}
// Sub64 sets z to the difference x - y, where y is a 64 bit uint.
//
// Bug fix: the original returned early once the borrow chain ended,
// leaving z[1..3] untouched — stale garbage whenever z does not alias x.
// This version always writes all four words (identical results in the
// aliased case, correct results otherwise).
func (z *Int) Sub64(x *Int, y uint64) {
	var carry uint64
	z[0], carry = bits.Sub64(x[0], y, 0)
	z[1], carry = bits.Sub64(x[1], 0, carry)
	z[2], carry = bits.Sub64(x[2], 0, carry)
	z[3], _ = bits.Sub64(x[3], 0, carry)
}
// Sub sets z to the difference x-y and returns true if the operation underflowed
func (z *Int) SubOverflow(x, y *Int) bool {
var carry uint64
z[0], carry = bits.Sub64(x[0], y[0], carry)
z[1], carry = bits.Sub64(x[1], y[1], carry)
z[2], carry = bits.Sub64(x[2], y[2], carry)
z[3], carry = bits.Sub64(x[3], y[3], carry)
return carry != 0
}
// Sub sets z to the difference x-y
func (z *Int) Sub(x, y *Int) {
z.SubOverflow(x, y) // Inlined.
}
// umulStep computes (carry, z) = z + (x * y) + carry.
func umulStep(z, x, y, carry uint64) (uint64, uint64) {
ph, p := bits.Mul64(x, y)
p, carry = bits.Add64(p, carry, 0)
carry, _ = bits.Add64(ph, 0, carry)
p, carry1 := bits.Add64(p, z, 0)
carry, _ = bits.Add64(carry, 0, carry1)
return p, carry
}
// umul computes full 256 x 256 -> 512 multiplication.
func umul(x, y *Int) [8]uint64 {
var res [8]uint64
for j := 0; j < len(y); j++ {
var carry uint64
res[j+0], carry = umulStep(res[j+0], x[0], y[j], carry)
res[j+1], carry = umulStep(res[j+1], x[1], y[j], carry)
res[j+2], carry = umulStep(res[j+2], x[2], y[j], carry)
res[j+3], carry = umulStep(res[j+3], x[3], y[j], carry)
res[j+4] = carry
}
return res
}
// Mul sets z to the sum x*y
func (z *Int) Mul(x, y *Int) {
var (
alfa = &Int{} // Aggregate results
beta = &Int{} // Calculate intermediate
)
// The numbers are internally represented as [ a, b, c, d ]
// We do the following operations
//
// d1 * d2
// d1 * c2 (upshift 64)
// d1 * b2 (upshift 128)
// d1 * a2 (upshift 192)
//
// c1 * d2 (upshift 64)
// c1 * c2 (upshift 128)
// c1 * b2 (upshift 192)
//
// b1 * d2 (upshift 128)
// b1 * c2 (upshift 192)
//
// a1 * d2 (upshift 192)
//
// And we aggregate results into 'alfa'
// One optimization, however, is reordering.
// For these ones, we don't care about if they overflow, thus we can use native multiplication
// and set the result immediately into `a` of the result.
// b1 * c2 (upshift 192)
// a1 * d2 (upshift 192)
// d1 * a2 (upshift 192)
// c1 * b2 11(upshift 192)
// Remaining ops:
//
// d1 * d2
// d1 * c2 (upshift 64)
// d1 * b2 (upshift 128)
//
// c1 * d2 (upshift 64)
// c1 * c2 (upshift 128)
//
// b1 * d2 (upshift 128)
alfa[1], alfa[0] = bits.Mul64(x[0], y[0])
alfa[3], alfa[2] = bits.Mul64(x[0], y[2])
alfa[3] += x[0]*y[3] + x[1]*y[2] + x[2]*y[1] + x[3]*y[0] // Top ones, ignore overflow
beta[2], beta[1] = bits.Mul64(x[0], y[1])
alfa.Add(alfa, beta)
beta[2], beta[1] = bits.Mul64(x[1], y[0])
alfa.Add(alfa, beta)
beta[3], beta[2] = bits.Mul64(x[1], y[1])
addTo128(alfa[2:], beta[2], beta[3])
beta[3], beta[2] = bits.Mul64(x[2], y[0])
addTo128(alfa[2:], beta[2], beta[3])
z.Copy(alfa)
}
func (z *Int) Squared() {
var (
alfa = &Int{} // Aggregate results
beta = &Int{} // Calculate intermediate
)
// This algo is based on Mul, but since it's squaring, we know that
// e.g. z.b*y.c + z.c*y.c == 2 * z.b * z.c, and can save some calculations
// 2 * d * b
alfa[3], alfa[2] = bits.Mul64(z[0], z[2])
alfa.lshOne()
alfa[1], alfa[0] = bits.Mul64(z[0], z[0])
// 2 * a * d + 2 * b * c
alfa[3] += (z[0]*z[3] + z[1]*z[2]) << 1
// 2 * d * c
beta[2], beta[1] = bits.Mul64(z[0], z[1])
beta.lshOne()
alfa.Add(alfa, beta)
// c * c
beta[3], beta[2] = bits.Mul64(z[1], z[1])
addTo128(alfa[2:], beta[2], beta[3])
z.Copy(alfa)
}
func (z *Int) setBit(n uint) *Int {
// n == 0 -> LSB
// n == 255 -> MSB
if n < 256 {
z[n>>6] |= 1 << (n & 0x3f)
}
return z
}
// isBitSet returns true if bit n is set, where n = 0 eq LSB
func (z *Int) isBitSet(n uint) bool {
if n > 255 {
return false
}
// z [ n / 64] & 1 << (n % 64)
return (z[n>>6] & (1 << (n & 0x3f))) != 0
}
// nlz returns the number of leading zeros of the highest non-zero
// 32-bit half-word of d — the normalization shift for the uint32-based
// division path (see shl/divKnuth).
// The '% 32' folds the 64-bit count onto the containing half-word: if
// the upper half of d[i] is zero, LeadingZeros64 is in [32, 64) and the
// modulo yields the leading-zero count within the lower half.
// Returns 0 when d is zero.
func nlz(d *Int) uint {
	for i := 3; i >= 0; i-- {
		if d[i] != 0 {
			return uint(bits.LeadingZeros64(d[i]) % 32)
		}
	}
	return 0
}
// Normalized form of d.
func shl(d *Int, s uint, isdividend bool) []uint32 {
dn := make([]uint32, 9)
for i := 0; i < 4; i++ {
dn[2*i] = uint32(d[i])
dn[2*i+1] = uint32(d[i] >> 32)
}
var n int
for i := 7; i >= 0; i-- {
if dn[i] != 0 {
n = i
break
}
}
var prev, t uint32
for i := 0; i <= n; i++ {
t = dn[i]
dn[i] = prev | (dn[i] << s)
prev = t >> (32 - s)
}
if isdividend {
n = n + 1
dn[n] = prev
}
return dn[:n+1]
}
func divKnuth(x, y []uint32) []uint32 {
m, n := len(x)-1, len(y)
q := make([]uint32, m-n+1)
// Number base (2**32)
var b uint64 = 0x100000000
if n <= 2 {
panic("Should have been handled by udivremBy1()")
}
// Main Loop
var qhat, rhat uint64
for j := m - n; j >= 0; j-- {
qhat = (uint64(x[j+n])*b + uint64(x[j+n-1])) / uint64(y[n-1])
rhat = uint64(x[j+n])*b + uint64(x[j+n-1]) - qhat*uint64(y[n-1])
AGAIN:
if qhat >= b || (qhat*uint64(y[n-2]) > b*rhat+uint64(x[j+n-2])) {
qhat = qhat - 1
rhat = rhat + uint64(y[n-1])
if rhat < b {
goto AGAIN
}
}
// Multiply and subtract.
var p uint64
var t, k int64
for i := 0; i < n; i++ {
p = qhat * uint64(y[i])
t = int64(x[i+j]) - k - int64(p&0xffffffff)
x[i+j] = uint32(t)
k = int64(p>>32) - (t >> 32)
}
t = int64(x[j+n]) - k
x[j+n] = uint32(t)
q[j] = uint32(qhat)
if t < 0 {
// If we subtracted too much, add back.
q[j] = q[j] - 1
var k, t uint64
for i := 0; i < n; i++ {
t = uint64(x[i+j]) + uint64(y[i]) + k
x[i+j] = uint32(t)
k = t >> 32
}
x[j+n] = x[j+n] + uint32(k)
}
}
return q
}
// addTo computes x += y.
// Requires len(x) >= len(y).
func addTo(x, y []uint64) uint64 {
var carry uint64
for i := 0; i < len(y); i++ {
x[i], carry = bits.Add64(x[i], y[i], carry)
}
return carry
}
// subMulTo computes x -= y * multiplier.
// Requires len(x) >= len(y).
func subMulTo(x, y []uint64, multiplier uint64) uint64 {
var borrow uint64
for i := 0; i < len(y); i++ {
s, carry1 := bits.Sub64(x[i], borrow, 0)
ph, pl := bits.Mul64(y[i], multiplier)
t, carry2 := bits.Sub64(s, pl, 0)
x[i] = t
borrow = ph + carry1 + carry2
}
return borrow
}
// udivremBy1 divides u by single normalized word d and produces both quotient and remainder.
func udivremBy1(u []uint64, d uint64) (quot []uint64, rem uint64) {
quot = make([]uint64, len(u)-1)
rem = u[len(u)-1] // Set the top word as remainder.
for j := len(u) - 2; j >= 0; j-- {
quot[j], rem = bits.Div64(rem, u[j], d)
}
return quot, rem
}
// udivremKnuth implements the division of u by normalized multiple word d from the Knuth's division algorithm.
// Returns quotient and updates u to contain the remainder.
func udivremKnuth(u, d []uint64) (quot []uint64) {
quot = make([]uint64, len(u)-len(d))
dh := d[len(d)-1]
dl := d[len(d)-2]
for j := len(u) - len(d) - 1; j >= 0; j-- {
u2 := u[j+len(d)]
u1 := u[j+len(d)-1]
u0 := u[j+len(d)-2]
var qhat, rhat uint64
if u2 >= dh { // Division overflows.
qhat = ^uint64(0)
// TODO: Add "qhat one to big" adjustment (not needed for correctness, but helps avoiding "add back" case).
} else {
qhat, rhat = bits.Div64(u2, u1, dh)
ph, pl := bits.Mul64(qhat, dl)
if ph > rhat || (ph == rhat && pl > u0) {
qhat--
// TODO: Add "qhat one to big" adjustment (not needed for correctness, but helps avoiding "add back" case).
}
}
// Multiply and subtract.
borrow := subMulTo(u[j:], d, qhat)
u[j+len(d)] = u2 - borrow
if u2 < borrow { // Too much subtracted, add back.
qhat--
u[j+len(d)] += addTo(u[j:], d)
}
quot[j] = qhat // Store quotient digit.
}
return quot
}
// udivrem divides u by d and produces both quotient and remainder.
// It loosely follows the Knuth's division algorithm (sometimes referenced as "schoolbook" division) using 64-bit words.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
func udivrem(u []uint64, d *Int) (quot []uint64, rem *Int, err error) {
var dLen int
for i := len(d) - 1; i >= 0; i-- {
if d[i] != 0 {
dLen = i + 1
break
}
}
shift := bits.LeadingZeros64(d[dLen-1])
var dnStorage Int
dn := dnStorage[:dLen]
for i := dLen - 1; i > 0; i-- {
dn[i] = (d[i] << shift) | (d[i-1] >> (64 - shift))
}
dn[0] = d[0] << shift
var uLen int
for i := len(u) - 1; i >= 0; i-- {
if u[i] != 0 {
uLen = i + 1
break
}
}
var unStorage [9]uint64
un := unStorage[:uLen+1]
un[uLen] = u[uLen-1] >> (64 - shift)
for i := uLen - 1; i > 0; i-- {
un[i] = (u[i] << shift) | (u[i-1] >> (64 - shift))
}
un[0] = u[0] << shift
// TODO: Skip the highest word of numerator if not significant.
if dLen == 1 {
quot, r := udivremBy1(un, dn[0])
return quot, new(Int).SetUint64(r >> shift), nil
}
quot = udivremKnuth(un, dn)
rem = new(Int)
for i := 0; i < dLen-1; i++ {
rem[i] = (un[i] >> shift) | (un[i+1] << (64 - shift))
}
rem[dLen-1] = un[dLen-1] >> shift
return quot, rem, nil
}
// Div sets z to the quotient x/y for returns z.
// If d == 0, z is set to 0
func (z *Int) Div(x, y *Int) *Int {
if y.IsZero() || y.Gt(x) {
return z.Clear()
}
if x.Eq(y) {
return z.SetOne()
}
// Shortcut some cases
if x.IsUint64() {
return z.SetUint64(x.Uint64() / y.Uint64())
}
// At this point, we know
// x/y ; x > y > 0
if quot, _, err := udivrem(x[:], y); err == nil {
z.Clear()
copy(z[:len(quot)], quot)
return z
}
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
// Normalize by shifting divisor left just enough so that its high-order
// bit is on and u left the same amount.
// function nlz do the caculating of the amount and shl do the left operation.
s := nlz(y)
xn := shl(x, s, true)
yn := shl(y, s, false)
// divKnuth do the division of normalized dividend and divisor with Knuth Algorithm D.
q := divKnuth(xn, yn)
z.Clear()
for i := 0; i < len(q); i++ {
z[i/2] = z[i/2] | uint64(q[i])<<(32*(uint64(i)%2))
}
return z
}
// Mod sets z to the modulus x%y for y != 0 and returns z.
// If y == 0, z is set to 0 (OBS: differs from the big.Int)
func (z *Int) Mod(x, y *Int) *Int {
if x.IsZero() || y.IsZero() {
return z.Clear()
}
switch x.Cmp(y) {
case -1:
// x < y
copy(z[:], x[:])
return z
case 0:
// x == y
return z.Clear() // They are equal
}
// At this point:
// x != 0
// y != 0
// x > y
// Shortcut trivial case
if x.IsUint64() {
return z.SetUint64(x.Uint64() % y.Uint64())
}
if _, rem, err := udivrem(x[:], y); err == nil {
return z.Copy(rem)
}
q := NewInt()
q.Div(x, y)
q.Mul(q, y)
z.Sub(x, q)
return z
}
// Smod interprets x and y as signed integers sets z to
// (sign x) * { abs(x) modulus abs(y) }
// If y == 0, z is set to 0 (OBS: differs from the big.Int)
// OBS! Modifies x and y
func (z *Int) Smod(x, y *Int) *Int {
ys := y.Sign()
xs := x.Sign()
// abs x
if xs == -1 {
x.Neg()
}
// abs y
if ys == -1 {
y.Neg()
}
z.Mod(x, y)
if xs == -1 {
z.Neg()
}
return z
}
// MulMod calculates the modulo-n multiplication of x and y and
// returns z
func (z *Int) MulMod(x, y, m *Int) *Int {
p := umul(x, y)
var (
pl Int
ph Int
)
copy(pl[:], p[:4])
copy(ph[:], p[4:])
// If the multiplication is within 256 bits use Mod().
if ph.IsZero() {
if z == m { //z is an alias for m; TODO: This should not be needed.
m = m.Clone()
}
z.Mod(&pl, m)
return z
}
if _, rem, err := udivrem(p[:], m); err == nil {
return z.Copy(rem)
}
var pbytes [len(p) * 8]byte
for i := 0; i < len(pbytes); i++ {
pbytes[len(pbytes)-1-i] = byte(p[i/8] >> uint64(8*(i%8)))
}
// At this point, we _could_ do x=x mod m, y = y mod m, and test again
// if they fit within 256 bytes. But for now just wrap big.Int instead
bp := new(big.Int)
bp.SetBytes(pbytes[:])
z.SetFromBig(bp.Mod(bp, m.ToBig()))
return z
}
// Abs interprets x as a a signed number, and sets z to the Abs value
// S256(0) = 0
// S256(1) = 1
// S256(2**255) = -2**255
// S256(2**256-1) = -1
func (z *Int) Abs() *Int {
if z.Lt(SignedMin) {
return z
}
z.Sub(zero, z)
return z
}
func (z *Int) Neg() *Int {
z.Sub(zero, z)
return z
}
// Sdiv interprets n and d as signed integers, does a
// signed division on the two operands and sets z to the result
// If d == 0, z is set to 0
// OBS! This method (potentially) modifies both n and d
func (z *Int) Sdiv(n, d *Int) *Int {
if n.Sign() > 0 {
if d.Sign() > 0 {
// pos / pos
z.Div(n, d)
return z
} else {
// pos / neg
z.Div(n, d.Neg())
return z.Neg()
}
}
if d.Sign() < 0 {
// neg / neg
z.Div(n.Neg(), d.Neg())
return z
}
// neg / pos
z.Div(n.Neg(), d)
return z.Neg()
}
// Sign returns:
//
// -1 if z < 0
// 0 if z == 0
// +1 if z > 0
// Where z is interpreted as a signed number
func (z *Int) Sign() int {
if z.IsZero() {
return 0
}
if z.Lt(SignedMin) {
return 1
}
return -1
}
// BitLen returns the number of bits required to represent x
func (z *Int) BitLen() int {
switch {
case z[3] != 0:
return 192 + bits.Len64(z[3])
case z[2] != 0:
return 128 + bits.Len64(z[2])
case z[1] != 0:
return 64 + bits.Len64(z[1])
default:
return bits.Len64(z[0])
}
}
func (z *Int) ByteLen() int {
return (z.BitLen() + 7) / 8
}
func (z *Int) lsh64(x *Int) *Int {
z[3], z[2], z[1], z[0] = x[2], x[1], x[0], 0
return z
}
func (z *Int) lsh128(x *Int) *Int {
z[3], z[2], z[1], z[0] = x[1], x[0], 0, 0
return z
}
func (z *Int) lsh192(x *Int) *Int {
z[3], z[2], z[1], z[0] = x[0], 0, 0, 0
return z
}
func (z *Int) rsh64(x *Int) *Int {
z[3], z[2], z[1], z[0] = 0, x[3], x[2], x[1]
return z
}
func (z *Int) rsh128(x *Int) *Int {
z[3], z[2], z[1], z[0] = 0, 0, x[3], x[2]
return z
}
func (z *Int) rsh192(x *Int) *Int {
z[3], z[2], z[1], z[0] = 0, 0, 0, x[3]
return z
}
func (z *Int) srsh64(x *Int) *Int {
z[3], z[2], z[1], z[0] = math.MaxUint64, x[3], x[2], x[1]
return z
}
func (z *Int) srsh128(x *Int) *Int {
z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, x[3], x[2]
return z
}
func (z *Int) srsh192(x *Int) *Int {
z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, math.MaxUint64, x[3]
return z
}
// Not sets z = ^x and returns z.
func (z *Int) Not() *Int {
z[3], z[2], z[1], z[0] = ^z[3], ^z[2], ^z[1], ^z[0]
return z
}
// Gt returns true if z > x
func (z *Int) Gt(x *Int) bool {
if z[3] > x[3] {
return true
}
if z[3] < x[3] {
return false
}
if z[2] > x[2] {
return true
}
if z[2] < x[2] {
return false
}
if z[1] > x[1] {
return true
}
if z[1] < x[1] {
return false
}
return z[0] > x[0]
}
// Slt interprets z and x as signed integers, and returns
// true if z < x
func (z *Int) Slt(x *Int) bool {
zSign := z.Sign()
xSign := x.Sign()
switch {
case zSign >= 0 && xSign < 0:
return false
case zSign < 0 && xSign >= 0:
return true
default:
return z.Lt(x)
}
}
// Sgt interprets z and x as signed integers, and returns
// true if z > x
func (z *Int) Sgt(x *Int) bool {
zSign := z.Sign()
xSign := x.Sign()
switch {
case zSign >= 0 && xSign < 0:
return true
case zSign < 0 && xSign >= 0:
return false
default:
return z.Gt(x)
}
}
// SetIfGt sets z to 1 if z > x
func (z *Int) SetIfGt(x *Int) {
if z.Gt(x) {
z.SetOne()
} else {
z.Clear()
}
}
// Lt returns true if z < x
func (z *Int) Lt(x *Int) bool {
if z[3] < x[3] {
return true
}
if z[3] > x[3] {
return false
}
if z[2] < x[2] {
return true
}
if z[2] > x[2] {
return false
}
if z[1] < x[1] {
return true
}
if z[1] > x[1] {
return false
}
return z[0] < x[0]
}
// SetIfLt sets z to 1 if z < x
func (z *Int) SetIfLt(x *Int) {
if z.Lt(x) {
z.SetOne()
} else {
z.Clear()
}
}
// SetUint64 sets z to the value x
func (z *Int) SetUint64(x uint64) *Int {
z[3], z[2], z[1], z[0] = 0, 0, 0, x
return z
}
// Eq returns true if z == x
func (z *Int) Eq(x *Int) bool {
return (z[0] == x[0]) && (z[1] == x[1]) && (z[2] == x[2]) && (z[3] == x[3])
}
// SetIfEq sets x to
// 1 if z == x
// 0 if Z != x
func (z *Int) SetIfEq(x *Int) {
if z.Eq(x) {
z.SetOne()
} else {
z.Clear()
}
}
// Cmp compares z and x and returns:
//
// -1 if z < x
// 0 if z == x
// +1 if z > x
//
func (z *Int) Cmp(x *Int) (r int) {
if z.Gt(x) {
return 1
}
if z.Lt(x) {
return -1
}
return 0
}
// LtUint64 returns true if z is smaller than n
func (z *Int) LtUint64(n uint64) bool {
	return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && z[0] < n
}
// GtUint64 returns true if z is larger than n
func (z *Int) GtUint64(n uint64) bool {
	return (z[3] != 0) || (z[2] != 0) || (z[1] != 0) || z[0] > n
}
// IsUint64 reports whether z can be represented as a uint64.
func (z *Int) IsUint64() bool {
return (z[3] == 0) && (z[2] == 0) && (z[1] == 0)
}
// IsUint128 reports whether z can be represented in 128 bits.
func (z *Int) IsUint128() bool {
return (z[3] == 0) && (z[2] == 0)
}
// IsZero returns true if z == 0
func (z *Int) IsZero() bool {
return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && (z[0] == 0)
}
// IsOne returns true if z == 1
func (z *Int) IsOne() bool {
return (z[3] == 0) && (z[2] == 0) && (z[1] == 0) && (z[0] == 1)
}
// Clear sets z to 0
func (z *Int) Clear() *Int {
z[3], z[2], z[1], z[0] = 0, 0, 0, 0
return z
}
// SetAllOne sets all the bits of z to 1
func (z *Int) SetAllOne() *Int {
z[3], z[2], z[1], z[0] = math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64
return z
}
// SetOne sets z to 1
func (z *Int) SetOne() *Int {
z[3], z[2], z[1], z[0] = 0, 0, 0, 1
return z
}
// Lsh shifts z by 1 bit.
func (z *Int) lshOne() {
var (
a, b uint64
)
a = z[0] >> 63
b = z[1] >> 63
z[0] = z[0] << 1
z[1] = z[1]<<1 | a
a = z[2] >> 63
z[2] = z[2]<<1 | b
z[3] = z[3]<<1 | a
}
// Lsh sets z = x << n and returns z.
func (z *Int) Lsh(x *Int, n uint) *Int {
// n % 64 == 0
if n&0x3f == 0 {
switch n {
case 0:
return z.Copy(x)
case 64:
return z.lsh64(x)
case 128:
return z.lsh128(x)
case 192:
return z.lsh192(x)
default:
return z.Clear()
}
}
var (
a, b uint64
)
// Big swaps first
switch {
case n > 192:
if n > 256 {
return z.Clear()
}
z.lsh192(x)
n -= 192
goto sh192
case n > 128:
z.lsh128(x)
n -= 128
goto sh128
case n > 64:
z.lsh64(x)
n -= 64
goto sh64
default:
z.Copy(x)
}
// remaining shifts
a = z[0] >> (64 - n)
z[0] = z[0] << n
sh64:
b = z[1] >> (64 - n)
z[1] = (z[1] << n) | a
sh128:
a = z[2] >> (64 - n)
z[2] = (z[2] << n) | b
sh192:
z[3] = (z[3] << n) | a
return z
}
// Rsh sets z = x >> n and returns z.
func (z *Int) Rsh(x *Int, n uint) *Int {
// n % 64 == 0
if n&0x3f == 0 {
switch n {
case 0:
return z.Copy(x)
case 64:
return z.rsh64(x)
case 128:
return z.rsh128(x)
case 192:
return z.rsh192(x)
default:
return z.Clear()
}
}
var (
a, b uint64
)
// Big swaps first
switch {
case n > 192:
if n > 256 {
return z.Clear()
}
z.rsh192(x)
n -= 192
goto sh192
case n > 128:
z.rsh128(x)
n -= 128
goto sh128
case n > 64:
z.rsh64(x)
n -= 64
goto sh64
default:
z.Copy(x)
}
// remaining shifts
a = z[3] << (64 - n)
z[3] = z[3] >> n
sh64:
b = z[2] << (64 - n)
z[2] = (z[2] >> n) | a
sh128:
a = z[1] << (64 - n)
z[1] = (z[1] >> n) | b
sh192:
z[0] = (z[0] >> n) | a
return z
}
// Srsh (Signed/Arithmetic right shift)
// considers z to be a signed integer, during right-shift
// and sets z = x >> n and returns z.
// Srsh (signed/arithmetic right shift) considers x to be a signed
// integer, sets z = x >> n (sign-extending) and returns z.
//
// Bug fix: the sign test must inspect x (the value being shifted), not
// z (the destination, which may contain stale data from a prior use).
func (z *Int) Srsh(x *Int, n uint) *Int {
	// If the MSB of x is 0, x is non-negative and Srsh equals Rsh.
	if !x.isBitSet(255) {
		return z.Rsh(x, n)
	}
	// n % 64 == 0
	if n&0x3f == 0 {
		switch n {
		case 0:
			return z.Copy(x)
		case 64:
			return z.srsh64(x)
		case 128:
			return z.srsh128(x)
		case 192:
			return z.srsh192(x)
		default:
			// A negative value shifted by >= 256 is all ones (-1).
			return z.SetAllOne()
		}
	}
	var (
		// Sign-fill mask for the top word; n%64 is unchanged by the
		// whole-word adjustments below (they subtract multiples of 64).
		a uint64 = math.MaxUint64 << (64 - n%64)
	)
	// Big swaps first: whole-word, sign-filling moves.
	switch {
	case n > 192:
		if n > 256 {
			return z.SetAllOne()
		}
		z.srsh192(x)
		n -= 192
		goto sh192
	case n > 128:
		z.srsh128(x)
		n -= 128
		goto sh128
	case n > 64:
		z.srsh64(x)
		n -= 64
		goto sh64
	default:
		z.Copy(x)
	}
	// remaining shifts: each step shifts one word and hands its low bits
	// down to the next word via a.
	z[3], a = (z[3]>>n)|a, z[3]<<(64-n)
sh64:
	z[2], a = (z[2]>>n)|a, z[2]<<(64-n)
sh128:
	z[1], a = (z[1]>>n)|a, z[1]<<(64-n)
sh192:
	z[0] = (z[0] >> n) | a
	return z
}
// Copy copies the value x into z, and returns z
func (z *Int) Copy(x *Int) *Int {
*z = *x
return z
}
// Or sets z = x | y and returns z.
func (z *Int) Or(x, y *Int) *Int {
z[0] = x[0] | y[0]
z[1] = x[1] | y[1]
z[2] = x[2] | y[2]
z[3] = x[3] | y[3]
return z
}
// And sets z = x & y and returns z.
func (z *Int) And(x, y *Int) *Int {
z[0] = x[0] & y[0]
z[1] = x[1] & y[1]
z[2] = x[2] & y[2]
z[3] = x[3] & y[3]
return z
}
// Xor sets z = x ^ y and returns z.
func (z *Int) Xor(x, y *Int) *Int {
z[0] = x[0] ^ y[0]
z[1] = x[1] ^ y[1]
z[2] = x[2] ^ y[2]
z[3] = x[3] ^ y[3]
return z
}
// Byte sets z to the value of the byte at position n,
// with 'z' considered as a big-endian 32-byte integer
// if 'n' > 32, f is set to 0
// Example: f = '5', n=31 => 5
func (z *Int) Byte(n *Int) *Int {
// in z, z[0] is the least significant
//
if number, overflow := n.Uint64WithOverflow(); !overflow {
if number < 32 {
number := z[4-1-number/8]
offset := (n[0] & 0x7) << 3 // 8*(n.d % 8)
z[0] = (number & (0xff00000000000000 >> offset)) >> (56 - offset)
z[3], z[2], z[1] = 0, 0, 0
return z
}
}
return z.Clear()
}
// Hex returns a hex representation of z
func (z *Int) Hex() string {
return fmt.Sprintf("%016x.%016x.%016x.%016x", z[3], z[2], z[1], z[0])
}
// Exp sets z = base**exponent mod 2**256, and returns z.
func (z *Int) Exp(base, exponent *Int) *Int {
res := Int{1, 0, 0, 0}
// b^0 == 1
if exponent.IsZero() || base.IsOne() {
return z.Copy(&res)
}
// b^1 == b
if exponent.IsOne() {
return z.Copy(base)
}
var (
word uint64
bits int
multiplier = *base
)
expBitlen := exponent.BitLen()
word = exponent[0]
bits = 0
for ; bits < expBitlen && bits < 64; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[1]
for ; bits < expBitlen && bits < 128; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[2]
for ; bits < expBitlen && bits < 192; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
word = exponent[3]
for ; bits < expBitlen && bits < 256; bits++ {
if word&1 == 1 {
res.Mul(&res, &multiplier)
}
multiplier.Squared()
word >>= 1
}
return z.Copy(&res)
}
//Extend length of two’s complement signed integer
// sets z to
// - num if back > 31
// - num interpreted as a signed number with sign-bit at (back*8+7), extended to the full 256 bits
// SignExtend extends the length of a two’s complement signed integer,
// setting z to
//   - num, if back > 31
//   - num interpreted as a signed number with sign-bit at (back*8+7),
//     extended to the full 256 bits
//
// OBS! Modifies back and num in place.
//
// Bug fix: the original only wrote the result into num on the
// back <= 31 path and never assigned z; z is now always set, as the
// contract states. The extra Copy is a no-op when z aliases num.
func (z *Int) SignExtend(back, num *Int) {
	if back.GtUint64(31) {
		z.Copy(num)
		return
	}
	bit := uint(back.Uint64()*8 + 7) // position of the sign bit
	// mask = (1 << bit) - 1: selects all bits below the sign bit.
	mask := back.Lsh(back.SetOne(), bit)
	mask.Sub64(mask, 1)
	if num.isBitSet(bit) {
		// Negative: fill everything from the sign bit up with ones.
		num.Or(num, mask.Not())
	} else {
		// Non-negative: clear everything above the mask.
		num.And(num, mask)
	}
	z.Copy(num)
}
var _ fmt.Formatter = zero
func (z *Int) Format(s fmt.State, ch rune) {
z.ToBig().Format(s, ch)
}
|
package cluster
import (
	"fmt"
	"log"
	"sync"
	// dockerapi "github.com/fsouza/go-dockerclient"
)
type Node struct {
ID string
// status *Status
config *Container // rename it to container
sync.Mutex
}
// was Data changed to Graph
type Graph struct {
Nodes []*Node
sync.Mutex
Out map[*Node][]*Node
In map[*Node][]*Node
nodesIndex map[string]int
}
func NewGraph() *Graph {
return &Graph{
Nodes: []*Node{},
Out: make(map[*Node][]*Node),
In: make(map[*Node][]*Node),
nodesIndex: make(map[string]int),
}
}
func NewNode(id string) *Node {
return &Node{
ID: id,
// status: &Status {
// running: 0,
// exist: []string{ },
// start: 0,
// links: []string{},
// scale: 0,
// ports: make(map[dockerapi.Port][]dockerapi.PortBinding),
//},
}
}
// AddNode registers node in the graph if its ID is not already present.
// Returns (true, nil) on success, or (false, error) when a node with
// the same ID already exists.
//
// Bug fix: the original returned a plain string where the declared
// `error` return type is required, which does not compile; wrap it with
// fmt.Errorf. The slice append is also moved under the lock so the
// index map and node slice cannot be observed out of sync.
func (g *Graph) AddNode(node *Node) (bool, error) {
	if _, ok := g.nodesIndex[node.ID]; ok {
		return false, fmt.Errorf("`%s` already exists", node.ID)
	}
	g.Mutex.Lock()
	g.nodesIndex[node.ID] = len(g.Nodes)
	g.Nodes = append(g.Nodes, node)
	g.Mutex.Unlock()
	return true, nil
}
func (g *Graph) FindNodeByID(id string) *Node {
if index, ok := g.nodesIndex[id]; ok && index >= 0 {
return g.Nodes[index]
}
return nil
}
// DeleteNode removes node from the graph, if present.
//
// Bug fix: removing an element shifts every later node one slot to the
// left, so their nodesIndex entries must be decremented — otherwise
// FindNodeByID returns the wrong node after any deletion. All mutations
// now also happen under one lock acquisition.
func (g *Graph) DeleteNode(node *Node) {
	g.Mutex.Lock()
	defer g.Mutex.Unlock()
	if idx, ok := g.nodesIndex[node.ID]; ok && idx >= 0 {
		copy(g.Nodes[idx:], g.Nodes[idx+1:])
		g.Nodes[len(g.Nodes)-1] = nil // release reference for GC
		g.Nodes = g.Nodes[:len(g.Nodes)-1 : len(g.Nodes)-1]
		// Re-point indices of all nodes that shifted left.
		for id, i := range g.nodesIndex {
			if i > idx {
				g.nodesIndex[id] = i - 1
			}
		}
	}
	delete(g.nodesIndex, node.ID)
}
func (g *Graph) ConnectNodes(src, dst *Node, connection map[*Node][]*Node) {
if _, ok := connection[src]; ok {
isDuplicate := false
for _, node := range connection[src] {
if node == dst {
isDuplicate = true
break
}
}
if !isDuplicate {
connection[src] = append(connection[src], dst)
}
} else {
connection[src] = []*Node{dst}
}
}
func (g *Graph) FindConnection(node *Node, connection map[*Node][]*Node) []*Node {
if _, ok := connection[node]; ok {
return connection[node]
} else {
return nil
}
}
func (g *Graph) FindOutConnections(root *Node) []string {
connections := []string{}
nodes := g.FindConnection(root, g.Out)
if nodes != nil {
for _, node := range nodes {
connections = append(connections, node.ID)
}
}
return connections
}
// Connect ensures both src and dst are registered in the graph (reusing
// the stored node when the ID already exists), then records the edge in
// both adjacency maps under the graph lock.
// NOTE(review): dst is appended to g.In[src] and src to g.Out[dst] —
// confirm this direction convention against Topsort, which decrements
// in-degrees by walking g.Out.
func (g *Graph) Connect(src, dst *Node) {
	isAdded, _ := g.AddNode(src) // error ignored: "already exists" is expected here
	if !isAdded {
		src = g.FindNodeByID(src.ID) // use the canonical stored node
	}
	isAdded, _ = g.AddNode(dst)
	if !isAdded {
		dst = g.FindNodeByID(dst.ID)
	}
	g.Mutex.Lock()
	g.ConnectNodes(src, dst, g.In)
	g.ConnectNodes(dst, src, g.Out)
	g.Mutex.Unlock()
}
func (g *Graph) Topsort() []*Node {
sort := []*Node{}
noIncome := []*Node{}
income := make(map[*Node]int)
for _, node := range g.Nodes {
if _, ok := g.In[node]; ok {
income[node] = len(g.In[node])
} else {
noIncome = append(noIncome, node)
}
}
for len(noIncome) > 0 {
last := len(noIncome) - 1
n := noIncome[last]
noIncome = noIncome[:last]
sort = append(sort, n)
for _, m := range g.Out[n] {
if income[m] > 0 {
// log.Println(n.ID, " loaded from ", m.ID)
income[m]--
if income[m] == 0 {
noIncome = append(noIncome, m)
}
}
}
}
for c, in := range income {
if in > 0 {
log.Println("Cyclic ", c.ID, " = ", in);
// TODO
}
}
return sort
}
fix addNode return
package cluster
import (
"log"
"sync"
"fmt"
// dockerapi "github.com/fsouza/go-dockerclient"
)
type Node struct {
ID string
// status *Status
config *Container // rename it to container
sync.Mutex
}
// was Data changed to Graph
type Graph struct {
Nodes []*Node
sync.Mutex
Out map[*Node][]*Node
In map[*Node][]*Node
nodesIndex map[string]int
}
func NewGraph() *Graph {
return &Graph{
Nodes: []*Node{},
Out: make(map[*Node][]*Node),
In: make(map[*Node][]*Node),
nodesIndex: make(map[string]int),
}
}
func NewNode(id string) *Node {
return &Node{
ID: id,
// status: &Status {
// running: 0,
// exist: []string{ },
// start: 0,
// links: []string{},
// scale: 0,
// ports: make(map[dockerapi.Port][]dockerapi.PortBinding),
//},
}
}
func (g *Graph) AddNode(node *Node) (bool, error) {
if _, ok := g.nodesIndex[node.ID]; ok {
return false, fmt.Errorf("`%s` already exists", node.ID)
}
g.Mutex.Lock()
g.nodesIndex[node.ID] = len(g.Nodes)
g.Mutex.Unlock()
g.Nodes = append(g.Nodes, node)
return true, nil
}
func (g *Graph) FindNodeByID(id string) *Node {
if index, ok := g.nodesIndex[id]; ok && index >= 0 {
return g.Nodes[index]
}
return nil
}
func (g *Graph) DeleteNode(node *Node) {
if idx, ok := g.nodesIndex[node.ID]; ok && idx >= 0 {
copy(g.Nodes[idx:], g.Nodes[idx+1:])
g.Nodes[len(g.Nodes)-1] = nil
g.Nodes = g.Nodes[:len(g.Nodes)-1 : len(g.Nodes)-1]
}
g.Mutex.Lock()
delete(g.nodesIndex, node.ID)
g.Mutex.Unlock()
}
func (g *Graph) ConnectNodes(src, dst *Node, connection map[*Node][]*Node) {
if _, ok := connection[src]; ok {
isDuplicate := false
for _, node := range connection[src] {
if node == dst {
isDuplicate = true
break
}
}
if !isDuplicate {
connection[src] = append(connection[src], dst)
}
} else {
connection[src] = []*Node{dst}
}
}
func (g *Graph) FindConnection(node *Node, connection map[*Node][]*Node) []*Node {
if _, ok := connection[node]; ok {
return connection[node]
} else {
return nil
}
}
func (g *Graph) FindOutConnections(root *Node) []string {
connections := []string{}
nodes := g.FindConnection(root, g.Out)
if nodes != nil {
for _, node := range nodes {
connections = append(connections, node.ID)
}
}
return connections
}
func (g *Graph) Connect(src, dst *Node) {
isAdded, _ := g.AddNode(src)
if !isAdded {
src = g.FindNodeByID(src.ID)
}
isAdded, _ = g.AddNode(dst)
if !isAdded {
dst = g.FindNodeByID(dst.ID)
}
g.Mutex.Lock()
g.ConnectNodes(src, dst, g.In)
g.ConnectNodes(dst, src, g.Out)
g.Mutex.Unlock()
}
func (g *Graph) Topsort() []*Node {
sort := []*Node{}
noIncome := []*Node{}
income := make(map[*Node]int)
for _, node := range g.Nodes {
if _, ok := g.In[node]; ok {
income[node] = len(g.In[node])
} else {
noIncome = append(noIncome, node)
}
}
for len(noIncome) > 0 {
last := len(noIncome) - 1
n := noIncome[last]
noIncome = noIncome[:last]
sort = append(sort, n)
for _, m := range g.Out[n] {
if income[m] > 0 {
// log.Println(n.ID, " loaded from ", m.ID)
income[m]--
if income[m] == 0 {
noIncome = append(noIncome, m)
}
}
}
}
for c, in := range income {
if in > 0 {
log.Println("Cyclic ", c.ID, " = ", in);
// TODO
}
}
return sort
} |
/*
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package topology
import (
"sync"
"github.com/socketplane/libovsdb"
"github.com/redhat-cip/skydive/ovs"
)
type OvsTopoUpdater struct {
sync.Mutex
Topology *Topology
uuidToPort map[string]*Port
uuidToIntf map[string]*Interface
intfPortQueue map[string]*Port
portBridgeQueue map[string]*OvsBridge
}
func (o *OvsTopoUpdater) OnOvsBridgeUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsBridgeAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsBridgeAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
name := row.New.Fields["name"].(string)
bridge := o.Topology.GetOvsBridge(name)
if bridge == nil {
bridge = o.Topology.NewOvsBridge(name)
}
switch row.New.Fields["ports"].(type) {
case libovsdb.OvsSet:
set := row.New.Fields["ports"].(libovsdb.OvsSet)
for _, i := range set.GoSet {
u := i.(libovsdb.UUID).GoUuid
if port, ok := o.uuidToPort[u]; ok {
bridge.AddPort(port)
} else {
/* will be filled later when the port update for this port will be triggered */
o.portBridgeQueue[u] = bridge
}
}
case libovsdb.UUID:
u := row.New.Fields["ports"].(libovsdb.UUID).GoUuid
if port, ok := o.uuidToPort[u]; ok {
bridge.AddPort(port)
} else {
/* will be filled later when the port update for this port will be triggered */
o.portBridgeQueue[u] = bridge
}
}
}
func (o *OvsTopoUpdater) OnOvsBridgeDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Topology.DelOvsBridge(row.Old.Fields["name"].(string))
}
func (o *OvsTopoUpdater) OnOvsInterfaceAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
var mac string
switch row.New.Fields["mac_in_use"].(type) {
case string:
mac = row.New.Fields["mac_in_use"].(string)
default:
}
intf, ok := o.uuidToIntf[uuid]
if !ok {
name := row.New.Fields["name"].(string)
intf = o.Topology.LookupInterface(LookupByMac(name, mac), NetNSScope|OvsScope)
if intf == nil {
intf = o.Topology.NewInterface(name, 0)
intf.SetType("openvswitch")
intf.SetMac(mac)
}
// peer resolution in case of a patch interface
if row.New.Fields["type"].(string) == "patch" {
intf.SetType("patch")
m := row.New.Fields["options"].(libovsdb.OvsMap)
if p, ok := m.GoMap["peer"]; ok {
peer := o.Topology.LookupInterface(LookupByID(p.(string)), OvsScope)
if peer != nil {
intf.SetPeer(peer)
} else {
// lookup in the intf queue
for _, peer = range o.uuidToIntf {
if peer.ID == p.(string) {
intf.SetPeer(peer)
}
}
}
}
}
o.uuidToIntf[uuid] = intf
}
/* set pending interface for a port */
if port, ok := o.intfPortQueue[uuid]; ok {
port.AddInterface(intf)
delete(o.intfPortQueue, uuid)
}
}
func (o *OvsTopoUpdater) OnOvsInterfaceUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsInterfaceAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsInterfaceDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
intf, ok := o.uuidToIntf[uuid]
if !ok {
return
}
intf.Del()
delete(o.uuidToIntf, uuid)
}
func (o *OvsTopoUpdater) OnOvsPortAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
port, ok := o.uuidToPort[uuid]
if !ok {
port = o.Topology.NewPort(row.New.Fields["name"].(string))
o.uuidToPort[uuid] = port
}
// vlan tag
if tag, ok := row.New.Fields["tag"]; ok {
switch tag.(type) {
case libovsdb.OvsSet:
set := tag.(libovsdb.OvsSet)
if len(set.GoSet) > 0 {
port.SetMetadata("Vlans", set.GoSet)
}
case float64:
port.SetMetadata("Vlans", int(tag.(float64)))
}
}
switch row.New.Fields["interfaces"].(type) {
case libovsdb.OvsSet:
set := row.New.Fields["interfaces"].(libovsdb.OvsSet)
for _, i := range set.GoSet {
u := i.(libovsdb.UUID).GoUuid
intf, ok := o.uuidToIntf[u]
if ok {
port.AddInterface(intf)
} else {
/* will be filled later when the interface update for this interface will be triggered */
o.intfPortQueue[u] = port
}
}
case libovsdb.UUID:
u := row.New.Fields["interfaces"].(libovsdb.UUID).GoUuid
intf, ok := o.uuidToIntf[u]
if ok {
port.AddInterface(intf)
} else {
/* will be filled later when the interface update for this interface will be triggered */
o.intfPortQueue[u] = port
}
}
/* set pending port of a container */
if bridge, ok := o.portBridgeQueue[uuid]; ok {
bridge.AddPort(port)
delete(o.portBridgeQueue, uuid)
}
}
func (o *OvsTopoUpdater) OnOvsPortUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsPortAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsPortDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
port, ok := o.uuidToPort[uuid]
if !ok {
return
}
port.Del()
delete(o.uuidToPort, uuid)
}
func (o *OvsTopoUpdater) Start() {
}
func NewOvsTopoUpdater(topo *Topology, ovsmon *ovsdb.OvsMonitor) *OvsTopoUpdater {
u := &OvsTopoUpdater{
Topology: topo,
uuidToPort: make(map[string]*Port),
uuidToIntf: make(map[string]*Interface),
intfPortQueue: make(map[string]*Port),
portBridgeQueue: make(map[string]*OvsBridge),
}
ovsmon.AddMonitorHandler(u)
return u
}
Add type of interface for ovs, now handles interface update
/*
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package topology
import (
"sync"
"github.com/socketplane/libovsdb"
"github.com/redhat-cip/skydive/ovs"
)
type OvsTopoUpdater struct {
sync.Mutex
Topology *Topology
uuidToPort map[string]*Port
uuidToIntf map[string]*Interface
intfPortQueue map[string]*Port
portBridgeQueue map[string]*OvsBridge
}
func (o *OvsTopoUpdater) OnOvsBridgeUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsBridgeAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsBridgeAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
name := row.New.Fields["name"].(string)
bridge := o.Topology.GetOvsBridge(name)
if bridge == nil {
bridge = o.Topology.NewOvsBridge(name)
}
switch row.New.Fields["ports"].(type) {
case libovsdb.OvsSet:
set := row.New.Fields["ports"].(libovsdb.OvsSet)
for _, i := range set.GoSet {
u := i.(libovsdb.UUID).GoUuid
if port, ok := o.uuidToPort[u]; ok {
bridge.AddPort(port)
} else {
/* will be filled later when the port update for this port will be triggered */
o.portBridgeQueue[u] = bridge
}
}
case libovsdb.UUID:
u := row.New.Fields["ports"].(libovsdb.UUID).GoUuid
if port, ok := o.uuidToPort[u]; ok {
bridge.AddPort(port)
} else {
/* will be filled later when the port update for this port will be triggered */
o.portBridgeQueue[u] = bridge
}
}
}
func (o *OvsTopoUpdater) OnOvsBridgeDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Topology.DelOvsBridge(row.Old.Fields["name"].(string))
}
func (o *OvsTopoUpdater) OnOvsInterfaceAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
var mac string
switch row.New.Fields["mac_in_use"].(type) {
case string:
mac = row.New.Fields["mac_in_use"].(string)
}
name := row.New.Fields["name"].(string)
intf := o.Topology.LookupInterface(LookupByMac(name, mac), NetNSScope|OvsScope)
if intf == nil {
intf = o.Topology.NewInterface(name, 0)
intf.SetType("openvswitch")
intf.SetMac(mac)
o.uuidToIntf[uuid] = intf
}
// type
if t, ok := row.New.Fields["type"]; ok {
intf.SetMetadata("Type", t.(string))
}
// peer resolution in case of a patch interface
if row.New.Fields["type"].(string) == "patch" {
intf.SetType("patch")
m := row.New.Fields["options"].(libovsdb.OvsMap)
if p, ok := m.GoMap["peer"]; ok {
peer := o.Topology.LookupInterface(LookupByID(p.(string)), OvsScope)
if peer != nil {
intf.SetPeer(peer)
} else {
// lookup in the intf queue
for _, peer = range o.uuidToIntf {
if peer.ID == p.(string) {
intf.SetPeer(peer)
}
}
}
}
}
/* set pending interface for a port */
if port, ok := o.intfPortQueue[uuid]; ok {
port.AddInterface(intf)
delete(o.intfPortQueue, uuid)
}
}
func (o *OvsTopoUpdater) OnOvsInterfaceUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsInterfaceAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsInterfaceDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
intf, ok := o.uuidToIntf[uuid]
if !ok {
return
}
intf.Del()
delete(o.uuidToIntf, uuid)
}
func (o *OvsTopoUpdater) OnOvsPortAdd(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
port, ok := o.uuidToPort[uuid]
if !ok {
port = o.Topology.NewPort(row.New.Fields["name"].(string))
o.uuidToPort[uuid] = port
}
// vlan tag
if tag, ok := row.New.Fields["tag"]; ok {
switch tag.(type) {
case libovsdb.OvsSet:
set := tag.(libovsdb.OvsSet)
if len(set.GoSet) > 0 {
port.SetMetadata("Vlans", set.GoSet)
}
case float64:
port.SetMetadata("Vlans", int(tag.(float64)))
}
}
switch row.New.Fields["interfaces"].(type) {
case libovsdb.OvsSet:
set := row.New.Fields["interfaces"].(libovsdb.OvsSet)
for _, i := range set.GoSet {
u := i.(libovsdb.UUID).GoUuid
intf, ok := o.uuidToIntf[u]
if ok {
port.AddInterface(intf)
} else {
/* will be filled later when the interface update for this interface will be triggered */
o.intfPortQueue[u] = port
}
}
case libovsdb.UUID:
u := row.New.Fields["interfaces"].(libovsdb.UUID).GoUuid
intf, ok := o.uuidToIntf[u]
if ok {
port.AddInterface(intf)
} else {
/* will be filled later when the interface update for this interface will be triggered */
o.intfPortQueue[u] = port
}
}
/* set pending port of a container */
if bridge, ok := o.portBridgeQueue[uuid]; ok {
bridge.AddPort(port)
delete(o.portBridgeQueue, uuid)
}
}
func (o *OvsTopoUpdater) OnOvsPortUpdate(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.OnOvsPortAdd(monitor, uuid, row)
}
func (o *OvsTopoUpdater) OnOvsPortDel(monitor *ovsdb.OvsMonitor, uuid string, row *libovsdb.RowUpdate) {
o.Lock()
defer o.Unlock()
port, ok := o.uuidToPort[uuid]
if !ok {
return
}
port.Del()
delete(o.uuidToPort, uuid)
}
func (o *OvsTopoUpdater) Start() {
}
func NewOvsTopoUpdater(topo *Topology, ovsmon *ovsdb.OvsMonitor) *OvsTopoUpdater {
u := &OvsTopoUpdater{
Topology: topo,
uuidToPort: make(map[string]*Port),
uuidToIntf: make(map[string]*Interface),
intfPortQueue: make(map[string]*Port),
portBridgeQueue: make(map[string]*OvsBridge),
}
ovsmon.AddMonitorHandler(u)
return u
}
|
package cmd
import (
"bytes"
"github.com/timeredbull/tsuru/fs/testing"
. "launchpad.net/gocheck"
"net/http"
)
func (s *S) TestLogin(c *C) {
fsystem = &testing.RecordingFs{FileContent: "old-token"}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("chico\n"))
defer s.unpatchStdin()
expected := "Password: \nSuccessfully logged!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: `{"token": "sometoken"}`, status: http.StatusOK}})
command := Login{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
token, err := ReadToken()
c.Assert(err, IsNil)
c.Assert(token, Equals, "sometoken")
}
func (s *S) TestLoginShouldNotDependOnTsuruTokenFile(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `Successfully logged!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: `{"token":"anothertoken"}`, status: http.StatusOK}})
command := Login{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestLoginShouldReturnErrorIfThePasswordIsNotGiven(c *C) {
s.patchStdin(c, []byte("\n"))
defer s.unpatchStdin()
expected := "Password: \nYou must provide the password!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
command := Login{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestLogout(c *C) {
rfs := &testing.RecordingFs{}
fsystem = rfs
defer func() {
fsystem = nil
}()
expected := "Successfully logout!\n"
context := Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}
command := Logout{}
err := command.Run(&context, nil)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
tokenPath, err := joinWithUserDir(".tsuru_token")
c.Assert(err, IsNil)
c.Assert(rfs.HasAction("remove "+tokenPath), Equals, true)
}
func (s *S) TestLogoutWhenNotLoggedIn(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
context := Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}
command := Logout{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "You're not logged in!")
}
func (s *S) TestAddUserIsSubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["add-user"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamAddUser{})
}
func (s *S) TestRemoveUserIsASubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["remove-user"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamRemoveUser{})
}
func (s *S) TestCreateUsASubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["create"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamCreate{})
}
func (s *S) TestTeamAddUser(c *C) {
expected := `User "andorito" was added to the "cobrateam" team` + "\n"
context := Context{[]string{}, []string{"cobrateam", "andorito"}, manager.Stdout, manager.Stderr}
command := TeamAddUser{}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusOK}})
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamRemoveUser(c *C) {
expected := `User "andorito" was removed from the "cobrateam" team` + "\n"
context := Context{[]string{}, []string{"cobrateam", "andorito"}, manager.Stdout, manager.Stderr}
command := TeamRemoveUser{}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusOK}})
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamCreate(c *C) {
expected := `Team "core" successfully created!` + "\n"
context := Context{[]string{}, []string{"core"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := TeamCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamListRun(c *C) {
var called bool
trans := &conditionalTransport{
transport{
msg: `[{"name":"timeredbull"},{"name":"cobrateam"}]`,
status: http.StatusOK,
},
func(req *http.Request) bool {
called = true
return req.Method == "GET" && req.URL.Path == "/teams"
},
}
expected := `Teams:
- timeredbull
- cobrateam
`
client := NewClient(&http.Client{Transport: trans})
err := (&TeamList{}).Run(&Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}, client)
c.Assert(err, IsNil)
c.Assert(called, Equals, true)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamListRunWithNoContent(c *C) {
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusNoContent}})
err := (&TeamList{}).Run(&Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, "")
}
func (s *S) TeatTeamListInfo(c *C) {
expected := &Info{
Name: "list",
Usage: "team list",
Desc: "List all teams that you are member.",
MinArgs: 0,
}
c.Assert((&TeamList{}).Info(), DeepEquals, expected)
}
func (s *S) TestTeamListIsACommand(c *C) {
var command Command
c.Assert(&TeamList{}, Implements, &command)
}
func (s *S) TeamTeamListIsAnInfoer(c *C) {
var infoer Infoer
c.Assert(&TeamList{}, Implements, &infoer)
}
func (s *S) TestTeamListIsASubCommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["list"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamList{})
}
func (s *S) TestUser(c *C) {
expect := map[string]interface{}{
"create": &UserCreate{},
}
command := User{}
c.Assert(command.Subcommands(), DeepEquals, expect)
}
func (s *S) TestUserCreateShouldNotDependOnTsuruTokenFile(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `User "foo@foo.com" successfully created!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := UserCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestUserCreate(c *C) {
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `User "foo@foo.com" successfully created!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := UserCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestUserCreateShouldReturnErrorIfThePasswordIsNotGiven(c *C) {
s.patchStdin(c, []byte("\n"))
defer s.unpatchStdin()
expected := "Password: \nYou must provide the password!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
command := UserCreate{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
cmd: typo
package cmd
import (
"bytes"
"github.com/timeredbull/tsuru/fs/testing"
. "launchpad.net/gocheck"
"net/http"
)
func (s *S) TestLogin(c *C) {
fsystem = &testing.RecordingFs{FileContent: "old-token"}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("chico\n"))
defer s.unpatchStdin()
expected := "Password: \nSuccessfully logged!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: `{"token": "sometoken"}`, status: http.StatusOK}})
command := Login{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
token, err := ReadToken()
c.Assert(err, IsNil)
c.Assert(token, Equals, "sometoken")
}
func (s *S) TestLoginShouldNotDependOnTsuruTokenFile(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `Successfully logged!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: `{"token":"anothertoken"}`, status: http.StatusOK}})
command := Login{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestLoginShouldReturnErrorIfThePasswordIsNotGiven(c *C) {
s.patchStdin(c, []byte("\n"))
defer s.unpatchStdin()
expected := "Password: \nYou must provide the password!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
command := Login{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestLogout(c *C) {
rfs := &testing.RecordingFs{}
fsystem = rfs
defer func() {
fsystem = nil
}()
expected := "Successfully logout!\n"
context := Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}
command := Logout{}
err := command.Run(&context, nil)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
tokenPath, err := joinWithUserDir(".tsuru_token")
c.Assert(err, IsNil)
c.Assert(rfs.HasAction("remove "+tokenPath), Equals, true)
}
func (s *S) TestLogoutWhenNotLoggedIn(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
context := Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}
command := Logout{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "You're not logged in!")
}
func (s *S) TestAddUserIsSubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["add-user"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamAddUser{})
}
func (s *S) TestRemoveUserIsASubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["remove-user"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamRemoveUser{})
}
func (s *S) TestCreateIsASubcommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["create"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamCreate{})
}
func (s *S) TestTeamAddUser(c *C) {
expected := `User "andorito" was added to the "cobrateam" team` + "\n"
context := Context{[]string{}, []string{"cobrateam", "andorito"}, manager.Stdout, manager.Stderr}
command := TeamAddUser{}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusOK}})
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamRemoveUser(c *C) {
expected := `User "andorito" was removed from the "cobrateam" team` + "\n"
context := Context{[]string{}, []string{"cobrateam", "andorito"}, manager.Stdout, manager.Stderr}
command := TeamRemoveUser{}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusOK}})
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamCreate(c *C) {
expected := `Team "core" successfully created!` + "\n"
context := Context{[]string{}, []string{"core"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := TeamCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamListRun(c *C) {
var called bool
trans := &conditionalTransport{
transport{
msg: `[{"name":"timeredbull"},{"name":"cobrateam"}]`,
status: http.StatusOK,
},
func(req *http.Request) bool {
called = true
return req.Method == "GET" && req.URL.Path == "/teams"
},
}
expected := `Teams:
- timeredbull
- cobrateam
`
client := NewClient(&http.Client{Transport: trans})
err := (&TeamList{}).Run(&Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}, client)
c.Assert(err, IsNil)
c.Assert(called, Equals, true)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestTeamListRunWithNoContent(c *C) {
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusNoContent}})
err := (&TeamList{}).Run(&Context{[]string{}, []string{}, manager.Stdout, manager.Stderr}, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, "")
}
func (s *S) TeatTeamListInfo(c *C) {
expected := &Info{
Name: "list",
Usage: "team list",
Desc: "List all teams that you are member.",
MinArgs: 0,
}
c.Assert((&TeamList{}).Info(), DeepEquals, expected)
}
func (s *S) TestTeamListIsACommand(c *C) {
var command Command
c.Assert(&TeamList{}, Implements, &command)
}
func (s *S) TeamTeamListIsAnInfoer(c *C) {
var infoer Infoer
c.Assert(&TeamList{}, Implements, &infoer)
}
func (s *S) TestTeamListIsASubCommandOfTeam(c *C) {
t := Team{}
subc, ok := t.Subcommands()["list"]
c.Assert(ok, Equals, true)
c.Assert(subc, FitsTypeOf, &TeamList{})
}
func (s *S) TestUser(c *C) {
expect := map[string]interface{}{
"create": &UserCreate{},
}
command := User{}
c.Assert(command.Subcommands(), DeepEquals, expect)
}
func (s *S) TestUserCreateShouldNotDependOnTsuruTokenFile(c *C) {
fsystem = &testing.FailureFs{}
defer func() {
fsystem = nil
}()
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `User "foo@foo.com" successfully created!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := UserCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestUserCreate(c *C) {
s.patchStdin(c, []byte("bar123\n"))
defer s.unpatchStdin()
expected := "Password: \n" + `User "foo@foo.com" successfully created!` + "\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
client := NewClient(&http.Client{Transport: &transport{msg: "", status: http.StatusCreated}})
command := UserCreate{}
err := command.Run(&context, client)
c.Assert(err, IsNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
func (s *S) TestUserCreateShouldReturnErrorIfThePasswordIsNotGiven(c *C) {
s.patchStdin(c, []byte("\n"))
defer s.unpatchStdin()
expected := "Password: \nYou must provide the password!\n"
context := Context{[]string{}, []string{"foo@foo.com"}, manager.Stdout, manager.Stderr}
command := UserCreate{}
err := command.Run(&context, nil)
c.Assert(err, NotNil)
c.Assert(manager.Stdout.(*bytes.Buffer).String(), Equals, expected)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.