const fs = require('fs');
const Contract = require('../contract');
const TestContract = require('./__mocks__/contracts/Test');
const TestContractWithComments = require('./__mocks__/contracts/TestWithComments');
const removeLineBreaks = require('../../../utils/remove-line-breaks');
const Event = require('../event');
const Method = require('../method');
const Fallback = require('../fallback');
const Constructor = require('../constructor');
const { stringify, parse } = JSON;
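// Strip line breaks from a fixture object via a JSON round-trip so it matches the contract's parsed content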
const removeLineBreaksFromObject = obj => parse(removeLineBreaks(stringify(obj)));
jest.mock('fs');
describe('Contract', () => {
let contract;
beforeEach(() => {
contract = new Contract('TestContractWithComments.json');
});
it('should create an instance', () => {
expect(contract).toBeInstanceOf(Contract);
});
it('should throw on creating an instance with no file provided', () => {
expect(() => new Contract()).toThrow();
});
it('should throw on creating an instance with invalid contract file', () => {
expect(() => new Contract('NoContract.json')).toThrow();
});
describe('set file', () => {
it('should read and parse new file', () => {
contract.file = 'TestContract.json';
const { content, file } = contract;
expect(content).toBeDefined();
expect(file).toBe('TestContract.json');
expect(content).toEqual(removeLineBreaksFromObject(TestContract));
});
it('should throw on invalid file', () => {
expect(() => { contract.file = 'NoContract.json'; }).toThrow();
});
});
describe('get file', () => {
it('should return parsed filename', () => {
expect(contract.file).toBe('TestContractWithComments.json');
});
it('should not change file on error', () => {
expect(() => { contract.file = 'NoContract.json'; }).toThrow();
expect(contract.file).toBe('TestContractWithComments.json');
});
});
describe('get content', () => {
it('should return file content', () => {
const { content } = contract;
expect(content).toBeDefined();
expect(content).toEqual(removeLineBreaksFromObject(TestContractWithComments));
});
});
describe('get contractName', () => {
it('should return contract name', () => {
const { contractName } = contract;
expect(contractName).toBe('TestWithComments');
});
});
describe('get title', () => {
it('should return contract title', () => {
const { title } = contract;
expect(title).toBe('Test');
});
it('should return empty contract title', () => {
contract.file = 'TestContract.json';
const { title } = contract;
expect(title).toBeUndefined();
});
});
describe('get notice', () => {
it('should return contract notice', () => {
const { notice } = contract;
expect(notice).toBe('Test contract notice');
});
it('should return empty contract notice', () => {
contract.file = 'TestContract.json';
const { notice } = contract;
expect(notice).toBeUndefined();
});
});
describe('get details', () => {
it('should return contract details', () => {
const { details } = contract;
expect(details).toBe('Test contract dev');
});
it('should return empty contract details', () => {
contract.file = 'TestContract.json';
const { details } = contract;
expect(details).toBeUndefined();
});
});
describe('get link', () => {
it('should return anchor to top', () => {
expect(contract.link).toBe(contract.contractName.toLowerCase());
contract.file = 'TestContract.json';
expect(contract.link).toBe(contract.contractName.toLowerCase());
});
});
describe('get events', () => {
it('should return empty events array', () => {
const { events } = contract;
expect(events).toEqual([]);
});
it('should return filled events array', () => {
contract.file = 'TestContractWithEvents.json';
const { events } = contract;
expect(events.length).toBe(2);
events.forEach(event => expect(event).toBeInstanceOf(Event));
});
});
describe('get methods', () => {
it('should return filled methods array', () => {
const { methods } = contract;
expect(methods.length).toBe(2);
methods.forEach(method => expect(method).toBeInstanceOf(Method));
});
});
describe('get fallback', () => {
it('should return no fallback', () => {
const { fallback } = contract;
expect(fallback).toBeUndefined();
});
it('should return a fallback', () => {
contract.file = 'TestContractWithFallback.json';
const { fallback } = contract;
const {
inputs,
attributes,
name,
type,
signature,
} = fallback;
expect(fallback).toBeInstanceOf(Fallback);
expect(inputs).toEqual([]);
expect(attributes).toEqual(['payable']);
expect(name).toBe('Fallback');
expect(type).toBe('fallback');
expect(signature).toBeUndefined();
});
});
describe('get contractConstructor', () => {
it('should return no constructor', () => {
contract.file = 'TestContract.json';
const { contractConstructor: constructor } = contract;
expect(constructor).toBeUndefined();
});
it('should return a constructor', () => {
const { contractConstructor: constructor } = contract;
const {
name,
type,
signature,
} = constructor;
expect(constructor).toBeInstanceOf(Constructor);
expect(name).toBe('Constructor');
expect(type).toBe('constructor');
expect(signature).toBeUndefined();
});
});
describe('get markdown', () => {
it('should return a markdown content', () => {
const { markdown } = contract;
expect(typeof markdown).toBe('string');
});
});
describe('save()', () => {
it('should call a writeFileSync method', () => {
const { contractName, markdown } = contract;
contract.save('/');
expect(fs.writeFileSync).toHaveBeenCalled();
expect(fs.writeFileSync).toHaveBeenCalledWith(
`/${contractName}.md`,
markdown,
{ encoding: 'utf8' },
);
});
it('should call an existsSync method and mkdirSync method', () => {
contract.save('/test');
expect(fs.existsSync).toHaveBeenCalled();
expect(fs.existsSync).toHaveBeenCalledWith('/test');
expect(fs.mkdirSync).toHaveBeenCalled();
expect(fs.mkdirSync).toHaveBeenCalledWith('/test', { recursive: true });
});
it('should not call a mkdirSync method', () => {
contract.save('/test-no-call');
expect(fs.existsSync).toHaveBeenCalled();
expect(fs.existsSync).toHaveBeenCalledWith('/test-no-call');
expect(fs.mkdirSync).not.toHaveBeenCalled();
});
});
});
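// Usage sketch for the Contract class exercised above (file name and output path are illustrative):
//   const contract = new Contract('MyContract.json'); // contract JSON artifact, like the mocks above
//   const md = contract.markdown;                     // generated markdown string
//   contract.save('./docs');                          // writes ./docs/MyContract.md, creating the folder if missing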
|
exports.config = {
}
|
import {
Paper, Tab, AppBar,
} from "@mui/material";
import TabPanel from '@mui/lab/TabPanel';
import { useState } from "react";
import {TabContext, TabList} from "@mui/lab";
import Elevation from "./Elevation";
import Speed from "./Speed";
const centerStyle = {
left: "50%",
transform: 'translate(-50%, 0%)'
}
export default function GpxInfo(props) {
const [value, setValue] = useState("1");
const handleChange = (event, newValue) => {
setValue(newValue);
};
return (
<div className="leaflet-bottom" style={centerStyle}>
<div className="leaflet-control leaflet-bar padding-container" >
<Paper >
<TabContext value={value}>
<TabPanel value="1">{props.renderedGpx && <Elevation renderedGpx={props.renderedGpx}/>}</TabPanel>
<TabPanel value="2">{props.renderedGpx && <Speed renderedGpx={props.renderedGpx}/>}</TabPanel>
<AppBar position="static" color="default">
<TabList onChange={handleChange}>
<Tab value={"1"} label="Elevation"/>
<Tab value={"2"} label="Speed"/>
</TabList>
</AppBar>
</TabContext>
</Paper>
</div>
</div>);
}
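// Usage sketch (renderedGpx is whatever parsed GPX object the Elevation and Speed children expect):
//   <GpxInfo renderedGpx={gpx} />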
|
"""
:package: Hestia
:file: task.py
:brief: Task base class.
:author: PiloeGAO (Leo DEPOIX)
:version: 0.0.4
"""
class Task():
"""Task class.
Args:
taskType (str, optional): Task's type. Defaults to "Assets".
id (str, optional): Task's ID. Defaults to "".
name (str, optional): Task's name. Defaults to "".
rawDatas (optional): Raw data kept as-is. Defaults to "".
"""
def __init__(self, taskType="Assets", id="", name="", **kwargs):
# Common data.
self.__type = taskType
self.__id = id
self.__name = name
self.__rawDatas = kwargs.get("rawDatas", "")
@property
def id(self):
"""Get the id of the entity.
Returns:
str: Entity's ID.
"""
return self.__id
@property
def name(self):
"""Get the name of the entity.
Returns:
str : The name of the entity.
"""
return self.__name
@property
def type(self):
"""Get the type of the class.
Returns:
str: Task type.
"""
return self.__type
@property
def rawDatas(self):
"""Get the raw datas of the class.
Returns:
dict: Raw datas
"""
return self.__rawDatas
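# Usage sketch (all values are illustrative):
#   task = Task(taskType="Shots", id="0001", name="compositing", rawDatas={"status": "WIP"})
#   print(task.type, task.name)  # -> Shots compositing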
|
"""
Copyright (c) 2006 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
class JSONRPCError(Exception):
pass
class DecodingJSONFailed(JSONRPCError):
pass
class InvalidJSONMessage(JSONRPCError):
pass
class MethodNotFound(JSONRPCError):
pass
class InvalidMethodParameters(JSONRPCError):
pass
class MethodNameNotAllowed(JSONRPCError):
pass
def getTracebackStr():
import traceback
import io
s = io.StringIO()
traceback.print_exc(file=s)
return s.getvalue()
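# Usage sketch: capture the current exception's traceback as a string
#   try:
#       raise MethodNotFound("unknown method")
#   except MethodNotFound:
#       details = getTracebackStr()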
|
/**********************************************************************************************
*
* raylib v4.1-dev - A simple and easy-to-use library to enjoy videogames programming (www.raylib.com)
*
* FEATURES:
* - NO external dependencies, all required libraries included with raylib
* - Multiplatform: Windows, Linux, FreeBSD, OpenBSD, NetBSD, DragonFly,
* MacOS, Haiku, Android, Raspberry Pi, DRM native, HTML5.
* - Written in plain C code (C99) in PascalCase/camelCase notation
* - Hardware accelerated with OpenGL (1.1, 2.1, 3.3, 4.3 or ES2 - choose at compile)
* - Unique OpenGL abstraction layer (usable as standalone module): [rlgl]
* - Multiple Fonts formats supported (TTF, XNA fonts, AngelCode fonts)
* - Outstanding texture formats support, including compressed formats (DXT, ETC, ASTC)
* - Full 3d support for 3d Shapes, Models, Billboards, Heightmaps and more!
* - Flexible Materials system, supporting classic maps and PBR maps
* - Animated 3D models supported (skeletal bones animation) (IQM)
* - Shaders support, including Model shaders and Postprocessing shaders
* - Powerful math module for Vector, Matrix and Quaternion operations: [raymath]
* - Audio loading and playing with streaming support (WAV, OGG, MP3, FLAC, XM, MOD)
* - VR stereo rendering with configurable HMD device parameters
* - Bindings to multiple programming languages available!
*
* NOTES:
* - One default Font is loaded on InitWindow()->LoadFontDefault() [core, text]
* - One default Texture2D is loaded on rlglInit(), 1x1 white pixel R8G8B8A8 [rlgl] (OpenGL 3.3 or ES2)
* - One default Shader is loaded on rlglInit()->rlLoadShaderDefault() [rlgl] (OpenGL 3.3 or ES2)
* - One default RenderBatch is loaded on rlglInit()->rlLoadRenderBatch() [rlgl] (OpenGL 3.3 or ES2)
*
* DEPENDENCIES (included):
* [rcore] rglfw (Camilla Löwy - github.com/glfw/glfw) for window/context management and input (PLATFORM_DESKTOP)
* [rlgl] glad (David Herberth - github.com/Dav1dde/glad) for OpenGL 3.3 extensions loading (PLATFORM_DESKTOP)
* [raudio] miniaudio (David Reid - github.com/mackron/miniaudio) for audio device/context management
*
* OPTIONAL DEPENDENCIES (included):
* [rcore] msf_gif (Miles Fogle) for GIF recording
* [rcore] sinfl (Micha Mettke) for DEFLATE decompression algorithm
* [rcore] sdefl (Micha Mettke) for DEFLATE compression algorithm
* [rtextures] stb_image (Sean Barrett) for images loading (BMP, TGA, PNG, JPEG, HDR...)
* [rtextures] stb_image_write (Sean Barrett) for image writing (BMP, TGA, PNG, JPG)
* [rtextures] stb_image_resize (Sean Barrett) for image resizing algorithms
* [rtext] stb_truetype (Sean Barrett) for ttf fonts loading
* [rtext] stb_rect_pack (Sean Barrett) for rectangles packing
* [rmodels] par_shapes (Philip Rideout) for parametric 3d shapes generation
* [rmodels] tinyobj_loader_c (Syoyo Fujita) for models loading (OBJ, MTL)
* [rmodels] cgltf (Johannes Kuhlmann) for models loading (glTF)
* [raudio] dr_wav (David Reid) for WAV audio file loading
* [raudio] dr_flac (David Reid) for FLAC audio file loading
* [raudio] dr_mp3 (David Reid) for MP3 audio file loading
* [raudio] stb_vorbis (Sean Barrett) for OGG audio loading
* [raudio] jar_xm (Joshua Reisenauer) for XM audio module loading
* [raudio] jar_mod (Joshua Reisenauer) for MOD audio module loading
*
*
* LICENSE: zlib/libpng
*
* raylib is licensed under an unmodified zlib/libpng license, which is an OSI-certified,
* BSD-like license that allows static linking with closed source software:
*
* Copyright (c) 2013-2022 Ramon Santamaria (@raysan5)
*
* This software is provided "as-is", without any express or implied warranty. In no event
* will the authors be held liable for any damages arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose, including commercial
* applications, and to alter it and redistribute it freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not claim that you
* wrote the original software. If you use this software in a product, an acknowledgment
* in the product documentation would be appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be misrepresented
* as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
**********************************************************************************************/
#ifndef RAYLIB_H
#define RAYLIB_H
#include <stdarg.h> // Required for: va_list - Only used by TraceLogCallback
#define RAYLIB_VERSION "4.1-dev"
// Function specifiers in case library is built/used as a shared library (Windows)
// NOTE: Microsoft specifiers to tell compiler that symbols are imported/exported from a .dll
#if defined(_WIN32)
#if defined(BUILD_LIBTYPE_SHARED)
#define RLAPI __declspec(dllexport) // We are building the library as a Win32 shared library (.dll)
#elif defined(USE_LIBTYPE_SHARED)
#define RLAPI __declspec(dllimport) // We are using the library as a Win32 shared library (.dll)
#endif
#endif
#ifndef RLAPI
#define RLAPI // Functions defined as 'extern' by default (implicit specifiers)
#endif
//----------------------------------------------------------------------------------
// Some basic Defines
//----------------------------------------------------------------------------------
#ifndef PI
#define PI 3.14159265358979323846f
#endif
#ifndef DEG2RAD
#define DEG2RAD (PI/180.0f)
#endif
#ifndef RAD2DEG
#define RAD2DEG (180.0f/PI)
#endif
// Allow custom memory allocators
#ifndef RL_MALLOC
#define RL_MALLOC(sz) malloc(sz)
#endif
#ifndef RL_CALLOC
#define RL_CALLOC(n,sz) calloc(n,sz)
#endif
#ifndef RL_REALLOC
#define RL_REALLOC(ptr,sz) realloc(ptr,sz)
#endif
#ifndef RL_FREE
#define RL_FREE(ptr) free(ptr)
#endif
// NOTE: MSVC C++ compiler does not support compound literals (C99 feature)
// Plain structures in C++ (without constructors) can be initialized with { }
#if defined(__cplusplus)
#define CLITERAL(type) type
#else
#define CLITERAL(type) (type)
#endif
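// Example: CLITERAL keeps the same compound initializer valid in both C and C++, e.g. (sketch):
//   Color translucentRed = CLITERAL(Color){ 230, 41, 55, 128 };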
// NOTE: We set some defines with some data types declared by raylib
// Other modules (raymath, rlgl) also require some of those types, so,
// to be able to use those other modules as standalone (not depending on raylib)
// these defines are very useful for internal checks and to avoid type (re)definitions
#define RL_COLOR_TYPE
#define RL_RECTANGLE_TYPE
#define RL_VECTOR2_TYPE
#define RL_VECTOR3_TYPE
#define RL_VECTOR4_TYPE
#define RL_QUATERNION_TYPE
#define RL_MATRIX_TYPE
// Some Basic Colors
// NOTE: Custom raylib color palette for amazing visuals on WHITE background
#define LIGHTGRAY CLITERAL(Color){ 200, 200, 200, 255 } // Light Gray
#define GRAY CLITERAL(Color){ 130, 130, 130, 255 } // Gray
#define DARKGRAY CLITERAL(Color){ 80, 80, 80, 255 } // Dark Gray
#define YELLOW CLITERAL(Color){ 253, 249, 0, 255 } // Yellow
#define GOLD CLITERAL(Color){ 255, 203, 0, 255 } // Gold
#define ORANGE CLITERAL(Color){ 255, 161, 0, 255 } // Orange
#define PINK CLITERAL(Color){ 255, 109, 194, 255 } // Pink
#define RED CLITERAL(Color){ 230, 41, 55, 255 } // Red
#define MAROON CLITERAL(Color){ 190, 33, 55, 255 } // Maroon
#define GREEN CLITERAL(Color){ 0, 228, 48, 255 } // Green
#define LIME CLITERAL(Color){ 0, 158, 47, 255 } // Lime
#define DARKGREEN CLITERAL(Color){ 0, 117, 44, 255 } // Dark Green
#define SKYBLUE CLITERAL(Color){ 102, 191, 255, 255 } // Sky Blue
#define BLUE CLITERAL(Color){ 0, 121, 241, 255 } // Blue
#define DARKBLUE CLITERAL(Color){ 0, 82, 172, 255 } // Dark Blue
#define PURPLE CLITERAL(Color){ 200, 122, 255, 255 } // Purple
#define VIOLET CLITERAL(Color){ 135, 60, 190, 255 } // Violet
#define DARKPURPLE CLITERAL(Color){ 112, 31, 126, 255 } // Dark Purple
#define BEIGE CLITERAL(Color){ 211, 176, 131, 255 } // Beige
#define BROWN CLITERAL(Color){ 127, 106, 79, 255 } // Brown
#define DARKBROWN CLITERAL(Color){ 76, 63, 47, 255 } // Dark Brown
#define WHITE CLITERAL(Color){ 255, 255, 255, 255 } // White
#define BLACK CLITERAL(Color){ 0, 0, 0, 255 } // Black
#define BLANK CLITERAL(Color){ 0, 0, 0, 0 } // Blank (Transparent)
#define MAGENTA CLITERAL(Color){ 255, 0, 255, 255 } // Magenta
#define RAYWHITE CLITERAL(Color){ 245, 245, 245, 255 } // My own White (raylib logo)
//----------------------------------------------------------------------------------
// Structures Definition
//----------------------------------------------------------------------------------
// Boolean type
#if defined(__STDC__) && __STDC_VERSION__ >= 199901L
#include <stdbool.h>
#elif !defined(__cplusplus) && !defined(bool)
typedef enum bool { false, true } bool;
#define RL_BOOL_TYPE
#endif
// Vector2, 2 components
typedef struct Vector2 {
float x; // Vector x component
float y; // Vector y component
} Vector2;
// Vector3, 3 components
typedef struct Vector3 {
float x; // Vector x component
float y; // Vector y component
float z; // Vector z component
} Vector3;
// Vector4, 4 components
typedef struct Vector4 {
float x; // Vector x component
float y; // Vector y component
float z; // Vector z component
float w; // Vector w component
} Vector4;
// Quaternion, 4 components (Vector4 alias)
typedef Vector4 Quaternion;
// Matrix, 4x4 components, column major, OpenGL style, right handed
typedef struct Matrix {
float m0, m4, m8, m12; // Matrix first row (4 components)
float m1, m5, m9, m13; // Matrix second row (4 components)
float m2, m6, m10, m14; // Matrix third row (4 components)
float m3, m7, m11, m15; // Matrix fourth row (4 components)
} Matrix;
// Color, 4 components, R8G8B8A8 (32bit)
typedef struct Color {
unsigned char r; // Color red value
unsigned char g; // Color green value
unsigned char b; // Color blue value
unsigned char a; // Color alpha value
} Color;
// Rectangle, 4 components
typedef struct Rectangle {
float x; // Rectangle top-left corner position x
float y; // Rectangle top-left corner position y
float width; // Rectangle width
float height; // Rectangle height
} Rectangle;
// Image, pixel data stored in CPU memory (RAM)
typedef struct Image {
void *data; // Image raw data
int width; // Image base width
int height; // Image base height
int mipmaps; // Mipmap levels, 1 by default
int format; // Data format (PixelFormat type)
} Image;
// Texture, tex data stored in GPU memory (VRAM)
typedef struct Texture {
unsigned int id; // OpenGL texture id
int width; // Texture base width
int height; // Texture base height
int mipmaps; // Mipmap levels, 1 by default
int format; // Data format (PixelFormat type)
} Texture;
// Texture2D, same as Texture
typedef Texture Texture2D;
// TextureCubemap, same as Texture
typedef Texture TextureCubemap;
// RenderTexture, fbo for texture rendering
typedef struct RenderTexture {
unsigned int id; // OpenGL framebuffer object id
Texture texture; // Color buffer attachment texture
Texture depth; // Depth buffer attachment texture
} RenderTexture;
// RenderTexture2D, same as RenderTexture
typedef RenderTexture RenderTexture2D;
// NPatchInfo, n-patch layout info
typedef struct NPatchInfo {
Rectangle source; // Texture source rectangle
int left; // Left border offset
int top; // Top border offset
int right; // Right border offset
int bottom; // Bottom border offset
int layout; // Layout of the n-patch: 3x3, 1x3 or 3x1
} NPatchInfo;
// GlyphInfo, font characters glyphs info
typedef struct GlyphInfo {
int value; // Character value (Unicode)
int offsetX; // Character offset X when drawing
int offsetY; // Character offset Y when drawing
int advanceX; // Character advance position X
Image image; // Character image data
} GlyphInfo;
// Font, font texture and GlyphInfo array data
typedef struct Font {
int baseSize; // Base size (default chars height)
int glyphCount; // Number of glyph characters
int glyphPadding; // Padding around the glyph characters
Texture2D texture; // Texture atlas containing the glyphs
Rectangle *recs; // Rectangles in texture for the glyphs
GlyphInfo *glyphs; // Glyphs info data
} Font;
// Camera, defines position/orientation in 3d space
typedef struct Camera3D {
Vector3 position; // Camera position
Vector3 target; // Camera target it looks-at
Vector3 up; // Camera up vector (rotation over its axis)
float fovy; // Camera field-of-view aperture in Y (degrees) in perspective, used as near plane width in orthographic
int projection; // Camera projection: CAMERA_PERSPECTIVE or CAMERA_ORTHOGRAPHIC
} Camera3D;
typedef Camera3D Camera; // Camera type fallback, defaults to Camera3D
// Camera2D, defines position/orientation in 2d space
typedef struct Camera2D {
Vector2 offset; // Camera offset (displacement from target)
Vector2 target; // Camera target (rotation and zoom origin)
float rotation; // Camera rotation in degrees
float zoom; // Camera zoom (scaling), should be 1.0f by default
} Camera2D;
// Mesh, vertex data and vao/vbo
typedef struct Mesh {
int vertexCount; // Number of vertices stored in arrays
int triangleCount; // Number of triangles stored (indexed or not)
// Vertex attributes data
float *vertices; // Vertex position (XYZ - 3 components per vertex) (shader-location = 0)
float *texcoords; // Vertex texture coordinates (UV - 2 components per vertex) (shader-location = 1)
float *texcoords2; // Vertex second texture coordinates (useful for lightmaps) (shader-location = 5)
float *normals; // Vertex normals (XYZ - 3 components per vertex) (shader-location = 2)
float *tangents; // Vertex tangents (XYZW - 4 components per vertex) (shader-location = 4)
unsigned char *colors; // Vertex colors (RGBA - 4 components per vertex) (shader-location = 3)
unsigned short *indices; // Vertex indices (in case vertex data comes indexed)
// Animation vertex data
float *animVertices; // Animated vertex positions (after bones transformations)
float *animNormals; // Animated normals (after bones transformations)
unsigned char *boneIds; // Vertex bone ids, max 255 bone ids, up to 4 bones influence per vertex (skinning)
float *boneWeights; // Vertex bone weight, up to 4 bones influence per vertex (skinning)
// OpenGL identifiers
unsigned int vaoId; // OpenGL Vertex Array Object id
unsigned int *vboId; // OpenGL Vertex Buffer Objects id (default vertex data)
} Mesh;
// Shader
typedef struct Shader {
unsigned int id; // Shader program id
int *locs; // Shader locations array (RL_MAX_SHADER_LOCATIONS)
} Shader;
// MaterialMap
typedef struct MaterialMap {
Texture2D texture; // Material map texture
Color color; // Material map color
float value; // Material map value
} MaterialMap;
// Material, includes shader and maps
typedef struct Material {
Shader shader; // Material shader
MaterialMap *maps; // Material maps array (MAX_MATERIAL_MAPS)
float params[4]; // Material generic parameters (if required)
} Material;
// Transform, vertex transformation data
typedef struct Transform {
Vector3 translation; // Translation
Quaternion rotation; // Rotation
Vector3 scale; // Scale
} Transform;
// Bone, skeletal animation bone
typedef struct BoneInfo {
char name[32]; // Bone name
int parent; // Bone parent
} BoneInfo;
// Model, meshes, materials and animation data
typedef struct Model {
Matrix transform; // Local transform matrix
int meshCount; // Number of meshes
int materialCount; // Number of materials
Mesh *meshes; // Meshes array
Material *materials; // Materials array
int *meshMaterial; // Mesh material number
// Animation data
int boneCount; // Number of bones
BoneInfo *bones; // Bones information (skeleton)
Transform *bindPose; // Bones base transformation (pose)
} Model;
// ModelAnimation
typedef struct ModelAnimation {
int boneCount; // Number of bones
int frameCount; // Number of animation frames
BoneInfo *bones; // Bones information (skeleton)
Transform **framePoses; // Poses array by frame
} ModelAnimation;
// Ray, ray for raycasting
typedef struct Ray {
Vector3 position; // Ray position (origin)
Vector3 direction; // Ray direction
} Ray;
// RayCollision, ray hit information
typedef struct RayCollision {
bool hit; // Did the ray hit something?
float distance; // Distance to nearest hit
Vector3 point; // Point of nearest hit
Vector3 normal; // Surface normal of hit
} RayCollision;
// BoundingBox
typedef struct BoundingBox {
Vector3 min; // Minimum vertex box-corner
Vector3 max; // Maximum vertex box-corner
} BoundingBox;
// Wave, audio wave data
typedef struct Wave {
unsigned int frameCount; // Total number of frames (considering channels)
unsigned int sampleRate; // Frequency (samples per second)
unsigned int sampleSize; // Bit depth (bits per sample): 8, 16, 32 (24 not supported)
unsigned int channels; // Number of channels (1-mono, 2-stereo, ...)
void *data; // Buffer data pointer
} Wave;
typedef struct rAudioBuffer rAudioBuffer;
// AudioStream, custom audio stream
typedef struct AudioStream {
rAudioBuffer *buffer; // Pointer to internal data used by the audio system
unsigned int sampleRate; // Frequency (samples per second)
unsigned int sampleSize; // Bit depth (bits per sample): 8, 16, 32 (24 not supported)
unsigned int channels; // Number of channels (1-mono, 2-stereo, ...)
} AudioStream;
// Sound
typedef struct Sound {
AudioStream stream; // Audio stream
unsigned int frameCount; // Total number of frames (considering channels)
} Sound;
// Music, audio stream, anything longer than ~10 seconds should be streamed
typedef struct Music {
AudioStream stream; // Audio stream
unsigned int frameCount; // Total number of frames (considering channels)
bool looping; // Music looping enable
int ctxType; // Type of music context (audio filetype)
void *ctxData; // Audio context data, depends on type
} Music;
// VrDeviceInfo, Head-Mounted-Display device parameters
typedef struct VrDeviceInfo {
int hResolution; // Horizontal resolution in pixels
int vResolution; // Vertical resolution in pixels
float hScreenSize; // Horizontal size in meters
float vScreenSize; // Vertical size in meters
float vScreenCenter; // Screen center in meters
float eyeToScreenDistance; // Distance between eye and display in meters
float lensSeparationDistance; // Lens separation distance in meters
float interpupillaryDistance; // IPD (distance between pupils) in meters
float lensDistortionValues[4]; // Lens distortion constant parameters
float chromaAbCorrection[4]; // Chromatic aberration correction parameters
} VrDeviceInfo;
// VrStereoConfig, VR stereo rendering configuration for simulator
typedef struct VrStereoConfig {
Matrix projection[2]; // VR projection matrices (per eye)
Matrix viewOffset[2]; // VR view offset matrices (per eye)
float leftLensCenter[2]; // VR left lens center
float rightLensCenter[2]; // VR right lens center
float leftScreenCenter[2]; // VR left screen center
float rightScreenCenter[2]; // VR right screen center
float scale[2]; // VR distortion scale
float scaleIn[2]; // VR distortion scale in
} VrStereoConfig;
//----------------------------------------------------------------------------------
// Enumerators Definition
//----------------------------------------------------------------------------------
// System/Window config flags
// NOTE: Every bit registers one state (use it with bit masks)
// By default all flags are set to 0
typedef enum {
FLAG_VSYNC_HINT = 0x00000040, // Set to try enabling V-Sync on GPU
FLAG_FULLSCREEN_MODE = 0x00000002, // Set to run program in fullscreen
FLAG_WINDOW_RESIZABLE = 0x00000004, // Set to allow resizable window
FLAG_WINDOW_UNDECORATED = 0x00000008, // Set to disable window decoration (frame and buttons)
FLAG_WINDOW_HIDDEN = 0x00000080, // Set to hide window
FLAG_WINDOW_MINIMIZED = 0x00000200, // Set to minimize window (iconify)
FLAG_WINDOW_MAXIMIZED = 0x00000400, // Set to maximize window (expanded to monitor)
FLAG_WINDOW_UNFOCUSED = 0x00000800, // Set window to non-focused
FLAG_WINDOW_TOPMOST = 0x00001000, // Set window to always on top
FLAG_WINDOW_ALWAYS_RUN = 0x00000100, // Set to allow windows running while minimized
FLAG_WINDOW_TRANSPARENT = 0x00000010, // Set to allow transparent framebuffer
FLAG_WINDOW_HIGHDPI = 0x00002000, // Set to support HighDPI
FLAG_MSAA_4X_HINT = 0x00000020, // Set to try enabling MSAA 4X
FLAG_INTERLACED_HINT = 0x00010000 // Set to try enabling interlaced video format (for V3D)
} ConfigFlags;
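// Example (sketch): flags are bit masks, so they can be OR-ed together and passed to
// SetConfigFlags() before InitWindow(), e.g.:
//   SetConfigFlags(FLAG_MSAA_4X_HINT | FLAG_WINDOW_RESIZABLE);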
// Trace log level
// NOTE: Organized by priority level
typedef enum {
LOG_ALL = 0, // Display all logs
LOG_TRACE, // Trace logging, intended for internal use only
LOG_DEBUG, // Debug logging, used for internal debugging, it should be disabled on release builds
LOG_INFO, // Info logging, used for program execution info
LOG_WARNING, // Warning logging, used on recoverable failures
LOG_ERROR, // Error logging, used on unrecoverable failures
LOG_FATAL, // Fatal logging, used to abort program: exit(EXIT_FAILURE)
LOG_NONE // Disable logging
} TraceLogLevel;
// Keyboard keys (US keyboard layout)
// NOTE: Use GetKeyPressed() to allow redefining required keys for alternative layouts (usage sketch after this enum)
typedef enum {
KEY_NULL = 0, // Key: NULL, used for no key pressed
// Alphanumeric keys
KEY_APOSTROPHE = 39, // Key: '
KEY_COMMA = 44, // Key: ,
KEY_MINUS = 45, // Key: -
KEY_PERIOD = 46, // Key: .
KEY_SLASH = 47, // Key: /
KEY_ZERO = 48, // Key: 0
KEY_ONE = 49, // Key: 1
KEY_TWO = 50, // Key: 2
KEY_THREE = 51, // Key: 3
KEY_FOUR = 52, // Key: 4
KEY_FIVE = 53, // Key: 5
KEY_SIX = 54, // Key: 6
KEY_SEVEN = 55, // Key: 7
KEY_EIGHT = 56, // Key: 8
KEY_NINE = 57, // Key: 9
KEY_SEMICOLON = 59, // Key: ;
KEY_EQUAL = 61, // Key: =
KEY_A = 65, // Key: A | a
KEY_B = 66, // Key: B | b
KEY_C = 67, // Key: C | c
KEY_D = 68, // Key: D | d
KEY_E = 69, // Key: E | e
KEY_F = 70, // Key: F | f
KEY_G = 71, // Key: G | g
KEY_H = 72, // Key: H | h
KEY_I = 73, // Key: I | i
KEY_J = 74, // Key: J | j
KEY_K = 75, // Key: K | k
KEY_L = 76, // Key: L | l
KEY_M = 77, // Key: M | m
KEY_N = 78, // Key: N | n
KEY_O = 79, // Key: O | o
KEY_P = 80, // Key: P | p
KEY_Q = 81, // Key: Q | q
KEY_R = 82, // Key: R | r
KEY_S = 83, // Key: S | s
KEY_T = 84, // Key: T | t
KEY_U = 85, // Key: U | u
KEY_V = 86, // Key: V | v
KEY_W = 87, // Key: W | w
KEY_X = 88, // Key: X | x
KEY_Y = 89, // Key: Y | y
KEY_Z = 90, // Key: Z | z
KEY_LEFT_BRACKET = 91, // Key: [
KEY_BACKSLASH = 92, // Key: '\'
KEY_RIGHT_BRACKET = 93, // Key: ]
KEY_GRAVE = 96, // Key: `
// Function keys
KEY_SPACE = 32, // Key: Space
KEY_ESCAPE = 256, // Key: Esc
KEY_ENTER = 257, // Key: Enter
KEY_TAB = 258, // Key: Tab
KEY_BACKSPACE = 259, // Key: Backspace
KEY_INSERT = 260, // Key: Ins
KEY_DELETE = 261, // Key: Del
KEY_RIGHT = 262, // Key: Cursor right
KEY_LEFT = 263, // Key: Cursor left
KEY_DOWN = 264, // Key: Cursor down
KEY_UP = 265, // Key: Cursor up
KEY_PAGE_UP = 266, // Key: Page up
KEY_PAGE_DOWN = 267, // Key: Page down
KEY_HOME = 268, // Key: Home
KEY_END = 269, // Key: End
KEY_CAPS_LOCK = 280, // Key: Caps lock
KEY_SCROLL_LOCK = 281, // Key: Scroll lock
KEY_NUM_LOCK = 282, // Key: Num lock
KEY_PRINT_SCREEN = 283, // Key: Print screen
KEY_PAUSE = 284, // Key: Pause
KEY_F1 = 290, // Key: F1
KEY_F2 = 291, // Key: F2
KEY_F3 = 292, // Key: F3
KEY_F4 = 293, // Key: F4
KEY_F5 = 294, // Key: F5
KEY_F6 = 295, // Key: F6
KEY_F7 = 296, // Key: F7
KEY_F8 = 297, // Key: F8
KEY_F9 = 298, // Key: F9
KEY_F10 = 299, // Key: F10
KEY_F11 = 300, // Key: F11
KEY_F12 = 301, // Key: F12
KEY_LEFT_SHIFT = 340, // Key: Shift left
KEY_LEFT_CONTROL = 341, // Key: Control left
KEY_LEFT_ALT = 342, // Key: Alt left
KEY_LEFT_SUPER = 343, // Key: Super left
KEY_RIGHT_SHIFT = 344, // Key: Shift right
KEY_RIGHT_CONTROL = 345, // Key: Control right
KEY_RIGHT_ALT = 346, // Key: Alt right
KEY_RIGHT_SUPER = 347, // Key: Super right
KEY_KB_MENU = 348, // Key: KB menu
// Keypad keys
KEY_KP_0 = 320, // Key: Keypad 0
KEY_KP_1 = 321, // Key: Keypad 1
KEY_KP_2 = 322, // Key: Keypad 2
KEY_KP_3 = 323, // Key: Keypad 3
KEY_KP_4 = 324, // Key: Keypad 4
KEY_KP_5 = 325, // Key: Keypad 5
KEY_KP_6 = 326, // Key: Keypad 6
KEY_KP_7 = 327, // Key: Keypad 7
KEY_KP_8 = 328, // Key: Keypad 8
KEY_KP_9 = 329, // Key: Keypad 9
KEY_KP_DECIMAL = 330, // Key: Keypad .
KEY_KP_DIVIDE = 331, // Key: Keypad /
KEY_KP_MULTIPLY = 332, // Key: Keypad *
KEY_KP_SUBTRACT = 333, // Key: Keypad -
KEY_KP_ADD = 334, // Key: Keypad +
KEY_KP_ENTER = 335, // Key: Keypad Enter
KEY_KP_EQUAL = 336, // Key: Keypad =
// Android key buttons
KEY_BACK = 4, // Key: Android back button
KEY_MENU = 82, // Key: Android menu button
KEY_VOLUME_UP = 24, // Key: Android volume up button
KEY_VOLUME_DOWN = 25 // Key: Android volume down button
} KeyboardKey;
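// Example (sketch): poll queued key presses each frame, e.g. to let users remap keys:
//   int key = GetKeyPressed();              // returns 0 (KEY_NULL) while the queue is empty
//   if (key == KEY_F11) ToggleFullscreen();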
// Add backwards compatibility support for deprecated names
#define MOUSE_LEFT_BUTTON MOUSE_BUTTON_LEFT
#define MOUSE_RIGHT_BUTTON MOUSE_BUTTON_RIGHT
#define MOUSE_MIDDLE_BUTTON MOUSE_BUTTON_MIDDLE
// Mouse buttons
typedef enum {
MOUSE_BUTTON_LEFT = 0, // Mouse button left
MOUSE_BUTTON_RIGHT = 1, // Mouse button right
MOUSE_BUTTON_MIDDLE = 2, // Mouse button middle (pressed wheel)
MOUSE_BUTTON_SIDE = 3, // Mouse button side (advanced mouse device)
MOUSE_BUTTON_EXTRA = 4, // Mouse button extra (advanced mouse device)
MOUSE_BUTTON_FORWARD = 5, // Mouse button forward (advanced mouse device)
MOUSE_BUTTON_BACK = 6, // Mouse button back (advanced mouse device)
} MouseButton;
// Mouse cursor
typedef enum {
MOUSE_CURSOR_DEFAULT = 0, // Default pointer shape
MOUSE_CURSOR_ARROW = 1, // Arrow shape
MOUSE_CURSOR_IBEAM = 2, // Text writing cursor shape
MOUSE_CURSOR_CROSSHAIR = 3, // Cross shape
MOUSE_CURSOR_POINTING_HAND = 4, // Pointing hand cursor
MOUSE_CURSOR_RESIZE_EW = 5, // Horizontal resize/move arrow shape
MOUSE_CURSOR_RESIZE_NS = 6, // Vertical resize/move arrow shape
MOUSE_CURSOR_RESIZE_NWSE = 7, // Top-left to bottom-right diagonal resize/move arrow shape
MOUSE_CURSOR_RESIZE_NESW = 8, // The top-right to bottom-left diagonal resize/move arrow shape
MOUSE_CURSOR_RESIZE_ALL = 9, // The omni-directional resize/move cursor shape
MOUSE_CURSOR_NOT_ALLOWED = 10 // The operation-not-allowed shape
} MouseCursor;
// Gamepad buttons
typedef enum {
GAMEPAD_BUTTON_UNKNOWN = 0, // Unknown button, just for error checking
GAMEPAD_BUTTON_LEFT_FACE_UP, // Gamepad left DPAD up button
GAMEPAD_BUTTON_LEFT_FACE_RIGHT, // Gamepad left DPAD right button
GAMEPAD_BUTTON_LEFT_FACE_DOWN, // Gamepad left DPAD down button
GAMEPAD_BUTTON_LEFT_FACE_LEFT, // Gamepad left DPAD left button
GAMEPAD_BUTTON_RIGHT_FACE_UP, // Gamepad right button up (i.e. PS3: Triangle, Xbox: Y)
GAMEPAD_BUTTON_RIGHT_FACE_RIGHT, // Gamepad right button right (i.e. PS3: Square, Xbox: X)
GAMEPAD_BUTTON_RIGHT_FACE_DOWN, // Gamepad right button down (i.e. PS3: Cross, Xbox: A)
GAMEPAD_BUTTON_RIGHT_FACE_LEFT, // Gamepad right button left (i.e. PS3: Circle, Xbox: B)
GAMEPAD_BUTTON_LEFT_TRIGGER_1, // Gamepad top/back trigger left (first), it could be a trailing button
GAMEPAD_BUTTON_LEFT_TRIGGER_2, // Gamepad top/back trigger left (second), it could be a trailing button
GAMEPAD_BUTTON_RIGHT_TRIGGER_1, // Gamepad top/back trigger right (one), it could be a trailing button
GAMEPAD_BUTTON_RIGHT_TRIGGER_2, // Gamepad top/back trigger right (second), it could be a trailing button
GAMEPAD_BUTTON_MIDDLE_LEFT, // Gamepad center buttons, left one (i.e. PS3: Select)
GAMEPAD_BUTTON_MIDDLE, // Gamepad center buttons, middle one (i.e. PS3: PS, Xbox: XBOX)
GAMEPAD_BUTTON_MIDDLE_RIGHT, // Gamepad center buttons, right one (i.e. PS3: Start)
GAMEPAD_BUTTON_LEFT_THUMB, // Gamepad joystick pressed button left
GAMEPAD_BUTTON_RIGHT_THUMB // Gamepad joystick pressed button right
} GamepadButton;
// Gamepad axis
typedef enum {
GAMEPAD_AXIS_LEFT_X = 0, // Gamepad left stick X axis
GAMEPAD_AXIS_LEFT_Y = 1, // Gamepad left stick Y axis
GAMEPAD_AXIS_RIGHT_X = 2, // Gamepad right stick X axis
GAMEPAD_AXIS_RIGHT_Y = 3, // Gamepad right stick Y axis
GAMEPAD_AXIS_LEFT_TRIGGER = 4, // Gamepad back trigger left, pressure level: [1..-1]
GAMEPAD_AXIS_RIGHT_TRIGGER = 5 // Gamepad back trigger right, pressure level: [1..-1]
} GamepadAxis;
// Material map index
typedef enum {
MATERIAL_MAP_ALBEDO = 0, // Albedo material (same as: MATERIAL_MAP_DIFFUSE)
MATERIAL_MAP_METALNESS, // Metalness material (same as: MATERIAL_MAP_SPECULAR)
MATERIAL_MAP_NORMAL, // Normal material
MATERIAL_MAP_ROUGHNESS, // Roughness material
MATERIAL_MAP_OCCLUSION, // Ambient occlusion material
MATERIAL_MAP_EMISSION, // Emission material
MATERIAL_MAP_HEIGHT, // Heightmap material
MATERIAL_MAP_CUBEMAP, // Cubemap material (NOTE: Uses GL_TEXTURE_CUBE_MAP)
MATERIAL_MAP_IRRADIANCE, // Irradiance material (NOTE: Uses GL_TEXTURE_CUBE_MAP)
MATERIAL_MAP_PREFILTER, // Prefilter material (NOTE: Uses GL_TEXTURE_CUBE_MAP)
MATERIAL_MAP_BRDF // Brdf material
} MaterialMapIndex;
#define MATERIAL_MAP_DIFFUSE MATERIAL_MAP_ALBEDO
#define MATERIAL_MAP_SPECULAR MATERIAL_MAP_METALNESS
// Shader location index
typedef enum {
SHADER_LOC_VERTEX_POSITION = 0, // Shader location: vertex attribute: position
SHADER_LOC_VERTEX_TEXCOORD01, // Shader location: vertex attribute: texcoord01
SHADER_LOC_VERTEX_TEXCOORD02, // Shader location: vertex attribute: texcoord02
SHADER_LOC_VERTEX_NORMAL, // Shader location: vertex attribute: normal
SHADER_LOC_VERTEX_TANGENT, // Shader location: vertex attribute: tangent
SHADER_LOC_VERTEX_COLOR, // Shader location: vertex attribute: color
SHADER_LOC_MATRIX_MVP, // Shader location: matrix uniform: model-view-projection
SHADER_LOC_MATRIX_VIEW, // Shader location: matrix uniform: view (camera transform)
SHADER_LOC_MATRIX_PROJECTION, // Shader location: matrix uniform: projection
SHADER_LOC_MATRIX_MODEL, // Shader location: matrix uniform: model (transform)
SHADER_LOC_MATRIX_NORMAL, // Shader location: matrix uniform: normal
SHADER_LOC_VECTOR_VIEW, // Shader location: vector uniform: view
SHADER_LOC_COLOR_DIFFUSE, // Shader location: vector uniform: diffuse color
SHADER_LOC_COLOR_SPECULAR, // Shader location: vector uniform: specular color
SHADER_LOC_COLOR_AMBIENT, // Shader location: vector uniform: ambient color
SHADER_LOC_MAP_ALBEDO, // Shader location: sampler2d texture: albedo (same as: SHADER_LOC_MAP_DIFFUSE)
SHADER_LOC_MAP_METALNESS, // Shader location: sampler2d texture: metalness (same as: SHADER_LOC_MAP_SPECULAR)
SHADER_LOC_MAP_NORMAL, // Shader location: sampler2d texture: normal
SHADER_LOC_MAP_ROUGHNESS, // Shader location: sampler2d texture: roughness
SHADER_LOC_MAP_OCCLUSION, // Shader location: sampler2d texture: occlusion
SHADER_LOC_MAP_EMISSION, // Shader location: sampler2d texture: emission
SHADER_LOC_MAP_HEIGHT, // Shader location: sampler2d texture: height
SHADER_LOC_MAP_CUBEMAP, // Shader location: samplerCube texture: cubemap
SHADER_LOC_MAP_IRRADIANCE, // Shader location: samplerCube texture: irradiance
SHADER_LOC_MAP_PREFILTER, // Shader location: samplerCube texture: prefilter
SHADER_LOC_MAP_BRDF // Shader location: sampler2d texture: brdf
} ShaderLocationIndex;
#define SHADER_LOC_MAP_DIFFUSE SHADER_LOC_MAP_ALBEDO
#define SHADER_LOC_MAP_SPECULAR SHADER_LOC_MAP_METALNESS
// Shader uniform data type
typedef enum {
SHADER_UNIFORM_FLOAT = 0, // Shader uniform type: float
SHADER_UNIFORM_VEC2, // Shader uniform type: vec2 (2 float)
SHADER_UNIFORM_VEC3, // Shader uniform type: vec3 (3 float)
SHADER_UNIFORM_VEC4, // Shader uniform type: vec4 (4 float)
SHADER_UNIFORM_INT, // Shader uniform type: int
SHADER_UNIFORM_IVEC2, // Shader uniform type: ivec2 (2 int)
SHADER_UNIFORM_IVEC3, // Shader uniform type: ivec3 (3 int)
SHADER_UNIFORM_IVEC4, // Shader uniform type: ivec4 (4 int)
SHADER_UNIFORM_SAMPLER2D // Shader uniform type: sampler2d
} ShaderUniformDataType;
// Shader attribute data types
typedef enum {
SHADER_ATTRIB_FLOAT = 0, // Shader attribute type: float
SHADER_ATTRIB_VEC2, // Shader attribute type: vec2 (2 float)
SHADER_ATTRIB_VEC3, // Shader attribute type: vec3 (3 float)
SHADER_ATTRIB_VEC4 // Shader attribute type: vec4 (4 float)
} ShaderAttributeDataType;
// Pixel formats
// NOTE: Support depends on OpenGL version and platform
typedef enum {
PIXELFORMAT_UNCOMPRESSED_GRAYSCALE = 1, // 8 bit per pixel (no alpha)
PIXELFORMAT_UNCOMPRESSED_GRAY_ALPHA, // 8*2 bpp (2 channels)
PIXELFORMAT_UNCOMPRESSED_R5G6B5, // 16 bpp
PIXELFORMAT_UNCOMPRESSED_R8G8B8, // 24 bpp
PIXELFORMAT_UNCOMPRESSED_R5G5B5A1, // 16 bpp (1 bit alpha)
PIXELFORMAT_UNCOMPRESSED_R4G4B4A4, // 16 bpp (4 bit alpha)
PIXELFORMAT_UNCOMPRESSED_R8G8B8A8, // 32 bpp
PIXELFORMAT_UNCOMPRESSED_R32, // 32 bpp (1 channel - float)
PIXELFORMAT_UNCOMPRESSED_R32G32B32, // 32*3 bpp (3 channels - float)
PIXELFORMAT_UNCOMPRESSED_R32G32B32A32, // 32*4 bpp (4 channels - float)
PIXELFORMAT_COMPRESSED_DXT1_RGB, // 4 bpp (no alpha)
PIXELFORMAT_COMPRESSED_DXT1_RGBA, // 4 bpp (1 bit alpha)
PIXELFORMAT_COMPRESSED_DXT3_RGBA, // 8 bpp
PIXELFORMAT_COMPRESSED_DXT5_RGBA, // 8 bpp
PIXELFORMAT_COMPRESSED_ETC1_RGB, // 4 bpp
PIXELFORMAT_COMPRESSED_ETC2_RGB, // 4 bpp
PIXELFORMAT_COMPRESSED_ETC2_EAC_RGBA, // 8 bpp
PIXELFORMAT_COMPRESSED_PVRT_RGB, // 4 bpp
PIXELFORMAT_COMPRESSED_PVRT_RGBA, // 4 bpp
PIXELFORMAT_COMPRESSED_ASTC_4x4_RGBA, // 8 bpp
PIXELFORMAT_COMPRESSED_ASTC_8x8_RGBA // 2 bpp
} PixelFormat;
// Texture parameters: filter mode
// NOTE 1: Filtering considers mipmaps if available in the texture
// NOTE 2: Filter is accordingly set for minification and magnification
typedef enum {
TEXTURE_FILTER_POINT = 0, // No filter, just pixel approximation
TEXTURE_FILTER_BILINEAR, // Linear filtering
TEXTURE_FILTER_TRILINEAR, // Trilinear filtering (linear with mipmaps)
TEXTURE_FILTER_ANISOTROPIC_4X, // Anisotropic filtering 4x
TEXTURE_FILTER_ANISOTROPIC_8X, // Anisotropic filtering 8x
TEXTURE_FILTER_ANISOTROPIC_16X, // Anisotropic filtering 16x
} TextureFilter;
// Texture parameters: wrap mode
typedef enum {
TEXTURE_WRAP_REPEAT = 0, // Repeats texture in tiled mode
TEXTURE_WRAP_CLAMP, // Clamps texture to edge pixel in tiled mode
TEXTURE_WRAP_MIRROR_REPEAT, // Mirrors and repeats the texture in tiled mode
TEXTURE_WRAP_MIRROR_CLAMP // Mirrors and clamps to border the texture in tiled mode
} TextureWrap;
// Cubemap layouts
typedef enum {
CUBEMAP_LAYOUT_AUTO_DETECT = 0, // Automatically detect layout type
CUBEMAP_LAYOUT_LINE_VERTICAL, // Layout is defined by a vertical line with faces
CUBEMAP_LAYOUT_LINE_HORIZONTAL, // Layout is defined by a horizontal line with faces
CUBEMAP_LAYOUT_CROSS_THREE_BY_FOUR, // Layout is defined by a 3x4 cross with cubemap faces
CUBEMAP_LAYOUT_CROSS_FOUR_BY_THREE, // Layout is defined by a 4x3 cross with cubemap faces
CUBEMAP_LAYOUT_PANORAMA // Layout is defined by a panorama image (equirectangular map)
} CubemapLayout;
// Font type, defines generation method
typedef enum {
FONT_DEFAULT = 0, // Default font generation, anti-aliased
FONT_BITMAP, // Bitmap font generation, no anti-aliasing
FONT_SDF // SDF font generation, requires external shader
} FontType;
// Color blending modes (pre-defined)
typedef enum {
BLEND_ALPHA = 0, // Blend textures considering alpha (default)
BLEND_ADDITIVE, // Blend textures adding colors
BLEND_MULTIPLIED, // Blend textures multiplying colors
BLEND_ADD_COLORS, // Blend textures adding colors (alternative)
BLEND_SUBTRACT_COLORS, // Blend textures subtracting colors (alternative)
BLEND_CUSTOM // Blend textures using custom src/dst factors (use rlSetBlendMode())
} BlendMode;
// Gesture
// NOTE: These values can be used as flags to enable only some gestures (usage sketch after the enum)
typedef enum {
GESTURE_NONE = 0, // No gesture
GESTURE_TAP = 1, // Tap gesture
GESTURE_DOUBLETAP = 2, // Double tap gesture
GESTURE_HOLD = 4, // Hold gesture
GESTURE_DRAG = 8, // Drag gesture
GESTURE_SWIPE_RIGHT = 16, // Swipe right gesture
GESTURE_SWIPE_LEFT = 32, // Swipe left gesture
GESTURE_SWIPE_UP = 64, // Swipe up gesture
GESTURE_SWIPE_DOWN = 128, // Swipe down gesture
GESTURE_PINCH_IN = 256, // Pinch in gesture
GESTURE_PINCH_OUT = 512 // Pinch out gesture
} Gesture;
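// Example (sketch): gesture values are flags, so several can be enabled at once with
// SetGesturesEnabled(), declared in the gestures section of this header:
//   SetGesturesEnabled(GESTURE_TAP | GESTURE_DRAG | GESTURE_SWIPE_RIGHT);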
// Camera system modes
typedef enum {
CAMERA_CUSTOM = 0, // Custom camera
CAMERA_FREE, // Free camera
CAMERA_ORBITAL, // Orbital camera
CAMERA_FIRST_PERSON, // First person camera
CAMERA_THIRD_PERSON // Third person camera
} CameraMode;
// Camera projection
typedef enum {
CAMERA_PERSPECTIVE = 0, // Perspective projection
CAMERA_ORTHOGRAPHIC // Orthographic projection
} CameraProjection;
// N-patch layout
typedef enum {
NPATCH_NINE_PATCH = 0, // Npatch layout: 3x3 tiles
NPATCH_THREE_PATCH_VERTICAL, // Npatch layout: 1x3 tiles
NPATCH_THREE_PATCH_HORIZONTAL // Npatch layout: 3x1 tiles
} NPatchLayout;
// Callbacks to hook some internal functions
// WARNING: These callbacks are intended for advanced users
typedef void (*TraceLogCallback)(int logLevel, const char *text, va_list args); // Logging: Redirect trace log messages
typedef unsigned char *(*LoadFileDataCallback)(const char *fileName, unsigned int *bytesRead); // FileIO: Load binary data
typedef bool (*SaveFileDataCallback)(const char *fileName, void *data, unsigned int bytesToWrite); // FileIO: Save binary data
typedef char *(*LoadFileTextCallback)(const char *fileName); // FileIO: Load text data
typedef bool (*SaveFileTextCallback)(const char *fileName, char *text); // FileIO: Save text data
//------------------------------------------------------------------------------------
// Global Variables Definition
//------------------------------------------------------------------------------------
// It's lonely here...
//------------------------------------------------------------------------------------
// Window and Graphics Device Functions (Module: core)
//------------------------------------------------------------------------------------
#if defined(__cplusplus)
extern "C" { // Prevents name mangling of functions
#endif
// Window-related functions
RLAPI void InitWindow(int width, int height, const char *title); // Initialize window and OpenGL context
RLAPI bool WindowShouldClose(void); // Check if KEY_ESCAPE pressed or Close icon pressed
RLAPI void CloseWindow(void); // Close window and unload OpenGL context
RLAPI bool IsWindowReady(void); // Check if window has been initialized successfully
RLAPI bool IsWindowFullscreen(void); // Check if window is currently fullscreen
RLAPI bool IsWindowHidden(void); // Check if window is currently hidden (only PLATFORM_DESKTOP)
RLAPI bool IsWindowMinimized(void); // Check if window is currently minimized (only PLATFORM_DESKTOP)
RLAPI bool IsWindowMaximized(void); // Check if window is currently maximized (only PLATFORM_DESKTOP)
RLAPI bool IsWindowFocused(void); // Check if window is currently focused (only PLATFORM_DESKTOP)
RLAPI bool IsWindowResized(void); // Check if window has been resized last frame
RLAPI bool IsWindowState(unsigned int flag); // Check if one specific window flag is enabled
RLAPI void SetWindowState(unsigned int flags); // Set window configuration state using flags (only PLATFORM_DESKTOP)
RLAPI void ClearWindowState(unsigned int flags); // Clear window configuration state flags
RLAPI void ToggleFullscreen(void); // Toggle window state: fullscreen/windowed (only PLATFORM_DESKTOP)
RLAPI void MaximizeWindow(void); // Set window state: maximized, if resizable (only PLATFORM_DESKTOP)
RLAPI void MinimizeWindow(void); // Set window state: minimized, if resizable (only PLATFORM_DESKTOP)
RLAPI void RestoreWindow(void); // Set window state: not minimized/maximized (only PLATFORM_DESKTOP)
RLAPI void SetWindowIcon(Image image); // Set icon for window (only PLATFORM_DESKTOP)
RLAPI void SetWindowTitle(const char *title); // Set title for window (only PLATFORM_DESKTOP)
RLAPI void SetWindowPosition(int x, int y); // Set window position on screen (only PLATFORM_DESKTOP)
RLAPI void SetWindowMonitor(int monitor); // Set monitor for the current window (fullscreen mode)
RLAPI void SetWindowMinSize(int width, int height); // Set window minimum dimensions (for FLAG_WINDOW_RESIZABLE)
RLAPI void SetWindowSize(int width, int height); // Set window dimensions
RLAPI void SetWindowOpacity(float opacity); // Set window opacity [0.0f..1.0f] (only PLATFORM_DESKTOP)
RLAPI void *GetWindowHandle(void); // Get native window handle
RLAPI int GetScreenWidth(void); // Get current screen width
RLAPI int GetScreenHeight(void); // Get current screen height
RLAPI int GetRenderWidth(void); // Get current render width (it considers HiDPI)
RLAPI int GetRenderHeight(void); // Get current render height (it considers HiDPI)
RLAPI int GetMonitorCount(void); // Get number of connected monitors
RLAPI int GetCurrentMonitor(void); // Get current connected monitor
RLAPI Vector2 GetMonitorPosition(int monitor); // Get specified monitor position
RLAPI int GetMonitorWidth(int monitor); // Get specified monitor width (max available by monitor)
RLAPI int GetMonitorHeight(int monitor); // Get specified monitor height (max available by monitor)
RLAPI int GetMonitorPhysicalWidth(int monitor); // Get specified monitor physical width in millimetres
RLAPI int GetMonitorPhysicalHeight(int monitor); // Get specified monitor physical height in millimetres
RLAPI int GetMonitorRefreshRate(int monitor); // Get specified monitor refresh rate
RLAPI Vector2 GetWindowPosition(void); // Get window position XY on monitor
RLAPI Vector2 GetWindowScaleDPI(void); // Get window scale DPI factor
RLAPI const char *GetMonitorName(int monitor); // Get the human-readable, UTF-8 encoded name of the specified monitor
RLAPI void SetClipboardText(const char *text); // Set clipboard text content
RLAPI const char *GetClipboardText(void); // Get clipboard text content
// Custom frame control functions
// NOTE: Those functions are intended for advanced users that want full control over the frame processing
// By default EndDrawing() does this job: draws everything + SwapScreenBuffer() + manages frame timing + PollInputEvents()
// To avoid that behaviour and control frame processes manually, enable in config.h: SUPPORT_CUSTOM_FRAME_CONTROL
RLAPI void SwapScreenBuffer(void); // Swap back buffer with front buffer (screen drawing)
RLAPI void PollInputEvents(void); // Register all input events
RLAPI void WaitTime(float ms); // Wait for some milliseconds (halt program execution)
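// Example (sketch, only meaningful when SUPPORT_CUSTOM_FRAME_CONTROL is enabled in config.h):
//   while (!WindowShouldClose())
//   {
//       PollInputEvents();       // gather input for this frame
//       // ... update and draw with the usual Begin/End drawing calls ...
//       SwapScreenBuffer();      // present the back buffer
//       WaitTime(16.666f);       // wait ~16.6 ms to cap the frame near 60 FPS
//   }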
// Cursor-related functions
RLAPI void ShowCursor(void); // Shows cursor
RLAPI void HideCursor(void); // Hides cursor
RLAPI bool IsCursorHidden(void); // Check if cursor is not visible
RLAPI void EnableCursor(void); // Enables cursor (unlock cursor)
RLAPI void DisableCursor(void); // Disables cursor (lock cursor)
RLAPI bool IsCursorOnScreen(void); // Check if cursor is on the screen
// Drawing-related functions
RLAPI void ClearBackground(Color color); // Set background color (framebuffer clear color)
RLAPI void BeginDrawing(void); // Setup canvas (framebuffer) to start drawing
RLAPI void EndDrawing(void); // End canvas drawing and swap buffers (double buffering)
RLAPI void BeginMode2D(Camera2D camera); // Begin 2D mode with custom camera (2D)
RLAPI void EndMode2D(void); // Ends 2D mode with custom camera
RLAPI void BeginMode3D(Camera3D camera); // Begin 3D mode with custom camera (3D)
RLAPI void EndMode3D(void); // Ends 3D mode and returns to default 2D orthographic mode
RLAPI void BeginTextureMode(RenderTexture2D target); // Begin drawing to render texture
RLAPI void EndTextureMode(void); // Ends drawing to render texture
RLAPI void BeginShaderMode(Shader shader); // Begin custom shader drawing
RLAPI void EndShaderMode(void); // End custom shader drawing (use default shader)
RLAPI void BeginBlendMode(int mode); // Begin blending mode (alpha, additive, multiplied, subtract, custom)
RLAPI void EndBlendMode(void); // End blending mode (reset to default: alpha blending)
RLAPI void BeginScissorMode(int x, int y, int width, int height); // Begin scissor mode (define screen area for following drawing)
RLAPI void EndScissorMode(void); // End scissor mode
RLAPI void BeginVrStereoMode(VrStereoConfig config); // Begin stereo rendering (requires VR simulator)
RLAPI void EndVrStereoMode(void); // End stereo rendering (requires VR simulator)
// VR stereo config functions for VR simulator
RLAPI VrStereoConfig LoadVrStereoConfig(VrDeviceInfo device); // Load VR stereo config for VR simulator device parameters
RLAPI void UnloadVrStereoConfig(VrStereoConfig config); // Unload VR stereo config
// Shader management functions
// NOTE: Shader functionality is not available on OpenGL 1.1
RLAPI Shader LoadShader(const char *vsFileName, const char *fsFileName); // Load shader from files and bind default locations
RLAPI Shader LoadShaderFromMemory(const char *vsCode, const char *fsCode); // Load shader from code strings and bind default locations
RLAPI int GetShaderLocation(Shader shader, const char *uniformName); // Get shader uniform location
RLAPI int GetShaderLocationAttrib(Shader shader, const char *attribName); // Get shader attribute location
RLAPI void SetShaderValue(Shader shader, int locIndex, const void *value, int uniformType); // Set shader uniform value
RLAPI void SetShaderValueV(Shader shader, int locIndex, const void *value, int uniformType, int count); // Set shader uniform value vector
RLAPI void SetShaderValueMatrix(Shader shader, int locIndex, Matrix mat); // Set shader uniform value (matrix 4x4)
RLAPI void SetShaderValueTexture(Shader shader, int locIndex, Texture2D texture); // Set shader uniform value for texture (sampler2d)
RLAPI void UnloadShader(Shader shader); // Unload shader from GPU memory (VRAM)
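// Example (sketch; file name and uniform name are illustrative):
//   Shader shader = LoadShader(0, "bloom.fs");                 // 0 keeps the default vertex shader
//   int intensityLoc = GetShaderLocation(shader, "intensity");
//   float intensity = 0.5f;
//   SetShaderValue(shader, intensityLoc, &intensity, SHADER_UNIFORM_FLOAT);
//   BeginShaderMode(shader); /* ... draw ... */ EndShaderMode();
//   UnloadShader(shader);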
// Screen-space-related functions
RLAPI Ray GetMouseRay(Vector2 mousePosition, Camera camera); // Get a ray trace from mouse position
RLAPI Matrix GetCameraMatrix(Camera camera); // Get camera transform matrix (view matrix)
RLAPI Matrix GetCameraMatrix2D(Camera2D camera); // Get camera 2d transform matrix
RLAPI Vector2 GetWorldToScreen(Vector3 position, Camera camera); // Get the screen space position for a 3d world space position
RLAPI Vector2 GetWorldToScreenEx(Vector3 position, Camera camera, int width, int height); // Get the screen space position for a 3d world space position, for a given screen size
RLAPI Vector2 GetWorldToScreen2D(Vector2 position, Camera2D camera); // Get the screen space position for a 2d camera world space position
RLAPI Vector2 GetScreenToWorld2D(Vector2 position, Camera2D camera); // Get the world space position for a 2d camera screen space position
// Timing-related functions
RLAPI void SetTargetFPS(int fps); // Set target FPS (maximum)
RLAPI int GetFPS(void); // Get current FPS
RLAPI float GetFrameTime(void); // Get time in seconds for last frame drawn (delta time)
RLAPI double GetTime(void); // Get elapsed time in seconds since InitWindow()
// Misc. functions
RLAPI int GetRandomValue(int min, int max); // Get a random value between min and max (both included)
RLAPI void SetRandomSeed(unsigned int seed); // Set the seed for the random number generator
RLAPI void TakeScreenshot(const char *fileName); // Takes a screenshot of current screen (filename extension defines format)
RLAPI void SetConfigFlags(unsigned int flags); // Setup init configuration flags (view FLAGS)
RLAPI void SetResizeCallback(void (*resizeCallback)()); // Set a function to call during resizing
RLAPI void TraceLog(int logLevel, const char *text, ...); // Show trace log messages (LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR...)
RLAPI void SetTraceLogLevel(int logLevel); // Set the current threshold (minimum) log level
RLAPI void *MemAlloc(int size); // Internal memory allocator
RLAPI void *MemRealloc(void *ptr, int size); // Internal memory reallocator
RLAPI void MemFree(void *ptr); // Internal memory free
// Set custom callbacks
// WARNING: Callbacks setup is intended for advance users
RLAPI void SetTraceLogCallback(TraceLogCallback callback); // Set custom trace log
RLAPI void SetLoadFileDataCallback(LoadFileDataCallback callback); // Set custom file binary data loader
RLAPI void SetSaveFileDataCallback(SaveFileDataCallback callback); // Set custom file binary data saver
RLAPI void SetLoadFileTextCallback(LoadFileTextCallback callback); // Set custom file text data loader
RLAPI void SetSaveFileTextCallback(SaveFileTextCallback callback); // Set custom file text data saver
// Files management functions
RLAPI unsigned char *LoadFileData(const char *fileName, unsigned int *bytesRead); // Load file data as byte array (read)
RLAPI void UnloadFileData(unsigned char *data); // Unload file data allocated by LoadFileData()
RLAPI bool SaveFileData(const char *fileName, void *data, unsigned int bytesToWrite); // Save data to file from byte array (write), returns true on success
RLAPI char *LoadFileText(const char *fileName); // Load text data from file (read), returns a '\0' terminated string
RLAPI void UnloadFileText(char *text); // Unload file text data allocated by LoadFileText()
RLAPI bool SaveFileText(const char *fileName, char *text); // Save text data to file (write), string must be '\0' terminated, returns true on success
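// Example (illustrative sketch, not part of the raylib header): text loaded with
// LoadFileText() must be released with UnloadFileText(); "config.txt" is a placeholder path:
//     char *text = LoadFileText("config.txt");
//     if (text != NULL)
//     {
//         TraceLog(LOG_INFO, "File contents: %s", text);
//         UnloadFileText(text);
//     }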
RLAPI bool FileExists(const char *fileName); // Check if file exists
RLAPI bool DirectoryExists(const char *dirPath); // Check if a directory path exists
RLAPI bool IsFileExtension(const char *fileName, const char *ext); // Check file extension (including point: .png, .wav)
RLAPI const char *GetFileExtension(const char *fileName); // Get pointer to extension for a filename string (includes dot: '.png')
RLAPI const char *GetFileName(const char *filePath); // Get pointer to filename for a path string
RLAPI const char *GetFileNameWithoutExt(const char *filePath); // Get filename string without extension (uses static string)
RLAPI const char *GetDirectoryPath(const char *filePath); // Get full path for a given fileName with path (uses static string)
RLAPI const char *GetPrevDirectoryPath(const char *dirPath); // Get previous directory path for a given path (uses static string)
RLAPI const char *GetWorkingDirectory(void); // Get current working directory (uses static string)
RLAPI const char *GetApplicationDirectory(void);      // Get the directory of the running application (uses static string)
RLAPI char **GetDirectoryFiles(const char *dirPath, int *count); // Get filenames in a directory path (memory should be freed)
RLAPI void ClearDirectoryFiles(void); // Clear directory files paths buffers (free memory)
RLAPI bool ChangeDirectory(const char *dir); // Change working directory, return true on success
RLAPI bool IsFileDropped(void); // Check if a file has been dropped into window
RLAPI char **GetDroppedFiles(int *count); // Get dropped files names (memory should be freed)
RLAPI void ClearDroppedFiles(void); // Clear dropped files paths buffer (free memory)
RLAPI long GetFileModTime(const char *fileName); // Get file modification time (last write time)
// Compression/Encoding functionality
RLAPI unsigned char *CompressData(unsigned char *data, int dataLength, int *compDataLength); // Compress data (DEFLATE algorithm)
RLAPI unsigned char *DecompressData(unsigned char *compData, int compDataLength, int *dataLength); // Decompress data (DEFLATE algorithm)
RLAPI char *EncodeDataBase64(const unsigned char *data, int dataLength, int *outputLength); // Encode data to Base64 string
RLAPI unsigned char *DecodeDataBase64(unsigned char *data, int *outputLength); // Decode Base64 string data
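// Example (illustrative sketch, not part of the raylib header): a DEFLATE round trip;
// freeing the returned buffers with MemFree() is an assumption about the allocator used:
//     int compSize = 0, rawSize = 0;
//     unsigned char *comp = CompressData(data, dataSize, &compSize);
//     unsigned char *raw = DecompressData(comp, compSize, &rawSize);
//     MemFree(comp);
//     MemFree(raw);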
// Persistent storage management
RLAPI bool SaveStorageValue(unsigned int position, int value); // Save integer value to storage file (to defined position), returns true on success
RLAPI int LoadStorageValue(unsigned int position); // Load integer value from storage file (from defined position)
RLAPI void OpenURL(const char *url); // Open URL with default system browser (if available)
//------------------------------------------------------------------------------------
// Input Handling Functions (Module: core)
//------------------------------------------------------------------------------------
// Input-related functions: keyboard
RLAPI bool IsKeyPressed(int key); // Check if a key has been pressed once
RLAPI bool IsKeyDown(int key); // Check if a key is being pressed
RLAPI bool IsKeyReleased(int key); // Check if a key has been released once
RLAPI bool IsKeyUp(int key); // Check if a key is NOT being pressed
RLAPI void SetExitKey(int key); // Set a custom key to exit program (default is ESC)
RLAPI int GetKeyPressed(void); // Get key pressed (keycode), call it multiple times for keys queued, returns 0 when the queue is empty
RLAPI int GetCharPressed(void); // Get char pressed (unicode), call it multiple times for chars queued, returns 0 when the queue is empty
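// Example (illustrative sketch, not part of the raylib header): draining the keyboard
// queues once per frame; both functions return 0 when their queue is empty:
//     int key = 0;
//     while ((key = GetKeyPressed()) != 0) { /* handle keycode */ }
//     int chr = 0;
//     while ((chr = GetCharPressed()) != 0) { /* handle unicode codepoint */ }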
// Input-related functions: gamepads
RLAPI bool IsGamepadAvailable(int gamepad); // Check if a gamepad is available
RLAPI const char *GetGamepadName(int gamepad); // Get gamepad internal name id
RLAPI bool IsGamepadButtonPressed(int gamepad, int button); // Check if a gamepad button has been pressed once
RLAPI bool IsGamepadButtonDown(int gamepad, int button); // Check if a gamepad button is being pressed
RLAPI bool IsGamepadButtonReleased(int gamepad, int button); // Check if a gamepad button has been released once
RLAPI bool IsGamepadButtonUp(int gamepad, int button); // Check if a gamepad button is NOT being pressed
RLAPI int GetGamepadButtonPressed(void); // Get the last gamepad button pressed
RLAPI int GetGamepadAxisCount(int gamepad); // Get gamepad axis count for a gamepad
RLAPI float GetGamepadAxisMovement(int gamepad, int axis); // Get axis movement value for a gamepad axis
RLAPI int SetGamepadMappings(const char *mappings); // Set internal gamepad mappings (SDL_GameControllerDB)
// Input-related functions: mouse
RLAPI bool IsMouseButtonPressed(int button); // Check if a mouse button has been pressed once
RLAPI bool IsMouseButtonDown(int button); // Check if a mouse button is being pressed
RLAPI bool IsMouseButtonReleased(int button); // Check if a mouse button has been released once
RLAPI bool IsMouseButtonUp(int button); // Check if a mouse button is NOT being pressed
RLAPI int GetMouseX(void); // Get mouse position X
RLAPI int GetMouseY(void); // Get mouse position Y
RLAPI Vector2 GetMousePosition(void); // Get mouse position XY
RLAPI Vector2 GetMouseDelta(void); // Get mouse delta between frames
RLAPI void SetMousePosition(int x, int y); // Set mouse position XY
RLAPI void SetMouseOffset(int offsetX, int offsetY); // Set mouse offset
RLAPI void SetMouseScale(float scaleX, float scaleY); // Set mouse scaling
RLAPI float GetMouseWheelMove(void); // Get mouse wheel movement Y
RLAPI void SetMouseCursor(int cursor); // Set mouse cursor
// Input-related functions: touch
RLAPI int GetTouchX(void); // Get touch position X for touch point 0 (relative to screen size)
RLAPI int GetTouchY(void); // Get touch position Y for touch point 0 (relative to screen size)
RLAPI Vector2 GetTouchPosition(int index); // Get touch position XY for a touch point index (relative to screen size)
RLAPI int GetTouchPointId(int index); // Get touch point identifier for given index
RLAPI int GetTouchPointCount(void); // Get number of touch points
//------------------------------------------------------------------------------------
// Gestures and Touch Handling Functions (Module: rgestures)
//------------------------------------------------------------------------------------
RLAPI void SetGesturesEnabled(unsigned int flags); // Enable a set of gestures using flags
RLAPI bool IsGestureDetected(int gesture);              // Check if a gesture has been detected
RLAPI int GetGestureDetected(void); // Get latest detected gesture
RLAPI float GetGestureHoldDuration(void); // Get gesture hold time in milliseconds
RLAPI Vector2 GetGestureDragVector(void); // Get gesture drag vector
RLAPI float GetGestureDragAngle(void); // Get gesture drag angle
RLAPI Vector2 GetGesturePinchVector(void); // Get gesture pinch delta
RLAPI float GetGesturePinchAngle(void); // Get gesture pinch angle
//------------------------------------------------------------------------------------
// Camera System Functions (Module: rcamera)
//------------------------------------------------------------------------------------
RLAPI void SetCameraMode(Camera camera, int mode); // Set camera mode (multiple camera modes available)
RLAPI void UpdateCamera(Camera *camera); // Update camera position for selected mode
RLAPI void SetCameraPanControl(int keyPan); // Set camera pan key to combine with mouse movement (free camera)
RLAPI void SetCameraAltControl(int keyAlt); // Set camera alt key to combine with mouse movement (free camera)
RLAPI void SetCameraSmoothZoomControl(int keySmoothZoom); // Set camera smooth zoom key to combine with mouse (free camera)
RLAPI void SetCameraMoveControls(int keyFront, int keyBack, int keyRight, int keyLeft, int keyUp, int keyDown); // Set camera move controls (1st person and 3rd person cameras)
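// Example (illustrative sketch, not part of the raylib header): CAMERA_FREE is one of
// the CameraMode values; the camera struct is assumed to be initialized beforehand:
//     SetCameraMode(camera, CAMERA_FREE);   // once, after setting up the camera
//     UpdateCamera(&camera);                // once per frame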
//------------------------------------------------------------------------------------
// Basic Shapes Drawing Functions (Module: shapes)
//------------------------------------------------------------------------------------
// Set texture and rectangle to be used on shapes drawing
// NOTE: It can be useful when using basic shapes and one single font,
// defining a font char white rectangle would allow drawing everything in a single draw call
RLAPI void SetShapesTexture(Texture2D texture, Rectangle source); // Set texture and rectangle to be used on shapes drawing
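// Example (illustrative sketch, not part of the raylib header): the rectangle below is
// a placeholder and should map to a solid white region of the font atlas:
//     Font font = GetFontDefault();
//     Rectangle white = { 0, 0, 1, 1 };
//     SetShapesTexture(font.texture, white);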
// Basic shapes drawing functions
RLAPI void DrawPixel(int posX, int posY, Color color); // Draw a pixel
RLAPI void DrawPixelV(Vector2 position, Color color); // Draw a pixel (Vector version)
RLAPI void DrawLine(int startPosX, int startPosY, int endPosX, int endPosY, Color color); // Draw a line
RLAPI void DrawLineV(Vector2 startPos, Vector2 endPos, Color color); // Draw a line (Vector version)
RLAPI void DrawLineEx(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw a line defining thickness
RLAPI void DrawLineBezier(Vector2 startPos, Vector2 endPos, float thick, Color color); // Draw a line using cubic-bezier curves in-out
RLAPI void DrawLineBezierQuad(Vector2 startPos, Vector2 endPos, Vector2 controlPos, float thick, Color color); // Draw line using quadratic bezier curves with a control point
RLAPI void DrawLineBezierCubic(Vector2 startPos, Vector2 endPos, Vector2 startControlPos, Vector2 endControlPos, float thick, Color color); // Draw line using cubic bezier curves with 2 control points
RLAPI void DrawLineStrip(Vector2 *points, int pointCount, Color color); // Draw lines sequence
RLAPI void DrawCircle(int centerX, int centerY, float radius, Color color); // Draw a color-filled circle
RLAPI void DrawCircleSector(Vector2 center, float radius, float startAngle, float endAngle, int segments, Color color); // Draw a piece of a circle
RLAPI void DrawCircleSectorLines(Vector2 center, float radius, float startAngle, float endAngle, int segments, Color color); // Draw circle sector outline
RLAPI void DrawCircleGradient(int centerX, int centerY, float radius, Color color1, Color color2); // Draw a gradient-filled circle
RLAPI void DrawCircleV(Vector2 center, float radius, Color color); // Draw a color-filled circle (Vector version)
RLAPI void DrawCircleLines(int centerX, int centerY, float radius, Color color); // Draw circle outline
RLAPI void DrawEllipse(int centerX, int centerY, float radiusH, float radiusV, Color color); // Draw ellipse
RLAPI void DrawEllipseLines(int centerX, int centerY, float radiusH, float radiusV, Color color); // Draw ellipse outline
RLAPI void DrawRing(Vector2 center, float innerRadius, float outerRadius, float startAngle, float endAngle, int segments, Color color); // Draw ring
RLAPI void DrawRingLines(Vector2 center, float innerRadius, float outerRadius, float startAngle, float endAngle, int segments, Color color); // Draw ring outline
RLAPI void DrawRectangle(int posX, int posY, int width, int height, Color color); // Draw a color-filled rectangle
RLAPI void DrawRectangleV(Vector2 position, Vector2 size, Color color); // Draw a color-filled rectangle (Vector version)
RLAPI void DrawRectangleRec(Rectangle rec, Color color); // Draw a color-filled rectangle
RLAPI void DrawRectanglePro(Rectangle rec, Vector2 origin, float rotation, Color color); // Draw a color-filled rectangle with pro parameters
RLAPI void DrawRectangleGradientV(int posX, int posY, int width, int height, Color color1, Color color2);// Draw a vertical-gradient-filled rectangle
RLAPI void DrawRectangleGradientH(int posX, int posY, int width, int height, Color color1, Color color2);// Draw a horizontal-gradient-filled rectangle
RLAPI void DrawRectangleGradientEx(Rectangle rec, Color col1, Color col2, Color col3, Color col4); // Draw a gradient-filled rectangle with custom vertex colors
RLAPI void DrawRectangleLines(int posX, int posY, int width, int height, Color color); // Draw rectangle outline
RLAPI void DrawRectangleLinesEx(Rectangle rec, float lineThick, Color color); // Draw rectangle outline with extended parameters
RLAPI void DrawRectangleRounded(Rectangle rec, float roundness, int segments, Color color); // Draw rectangle with rounded edges
RLAPI void DrawRectangleRoundedLines(Rectangle rec, float roundness, int segments, float lineThick, Color color); // Draw rectangle with rounded edges outline
RLAPI void DrawTriangle(Vector2 v1, Vector2 v2, Vector2 v3, Color color); // Draw a color-filled triangle (vertex in counter-clockwise order!)
RLAPI void DrawTriangleLines(Vector2 v1, Vector2 v2, Vector2 v3, Color color); // Draw triangle outline (vertex in counter-clockwise order!)
RLAPI void DrawTriangleFan(Vector2 *points, int pointCount, Color color); // Draw a triangle fan defined by points (first vertex is the center)
RLAPI void DrawTriangleStrip(Vector2 *points, int pointCount, Color color); // Draw a triangle strip defined by points
RLAPI void DrawPoly(Vector2 center, int sides, float radius, float rotation, Color color); // Draw a regular polygon (Vector version)
RLAPI void DrawPolyLines(Vector2 center, int sides, float radius, float rotation, Color color); // Draw a polygon outline of n sides
RLAPI void DrawPolyLinesEx(Vector2 center, int sides, float radius, float rotation, float lineThick, Color color); // Draw a polygon outline of n sides with extended parameters
// Basic shapes collision detection functions
RLAPI bool CheckCollisionRecs(Rectangle rec1, Rectangle rec2); // Check collision between two rectangles
RLAPI bool CheckCollisionCircles(Vector2 center1, float radius1, Vector2 center2, float radius2); // Check collision between two circles
RLAPI bool CheckCollisionCircleRec(Vector2 center, float radius, Rectangle rec); // Check collision between circle and rectangle
RLAPI bool CheckCollisionPointRec(Vector2 point, Rectangle rec); // Check if point is inside rectangle
RLAPI bool CheckCollisionPointCircle(Vector2 point, Vector2 center, float radius); // Check if point is inside circle
RLAPI bool CheckCollisionPointTriangle(Vector2 point, Vector2 p1, Vector2 p2, Vector2 p3); // Check if point is inside a triangle
RLAPI bool CheckCollisionLines(Vector2 startPos1, Vector2 endPos1, Vector2 startPos2, Vector2 endPos2, Vector2 *collisionPoint); // Check the collision between two lines defined by two points each, returns collision point by reference
RLAPI bool CheckCollisionPointLine(Vector2 point, Vector2 p1, Vector2 p2, int threshold); // Check if point belongs to line created between two points [p1] and [p2] with defined margin in pixels [threshold]
RLAPI Rectangle GetCollisionRec(Rectangle rec1, Rectangle rec2); // Get collision rectangle for two rectangles collision
//------------------------------------------------------------------------------------
// Texture Loading and Drawing Functions (Module: textures)
//------------------------------------------------------------------------------------
// Image loading functions
// NOTE: These functions do not require GPU access
RLAPI Image LoadImage(const char *fileName); // Load image from file into CPU memory (RAM)
RLAPI Image LoadImageRaw(const char *fileName, int width, int height, int format, int headerSize); // Load image from RAW file data
RLAPI Image LoadImageAnim(const char *fileName, int *frames); // Load image sequence from file (frames appended to image.data)
RLAPI Image LoadImageFromMemory(const char *fileType, const unsigned char *fileData, int dataSize); // Load image from memory buffer, fileType refers to extension: i.e. '.png'
RLAPI Image LoadImageFromTexture(Texture2D texture); // Load image from GPU texture data
RLAPI Image LoadImageFromScreen(void);                                                                   // Load image from screen buffer (screenshot)
RLAPI void UnloadImage(Image image); // Unload image from CPU memory (RAM)
RLAPI bool ExportImage(Image image, const char *fileName); // Export image data to file, returns true on success
RLAPI bool ExportImageAsCode(Image image, const char *fileName); // Export image as code file defining an array of bytes, returns true on success
// Image generation functions
RLAPI Image GenImageColor(int width, int height, Color color); // Generate image: plain color
RLAPI Image GenImageGradientV(int width, int height, Color top, Color bottom); // Generate image: vertical gradient
RLAPI Image GenImageGradientH(int width, int height, Color left, Color right); // Generate image: horizontal gradient
RLAPI Image GenImageGradientRadial(int width, int height, float density, Color inner, Color outer); // Generate image: radial gradient
RLAPI Image GenImageChecked(int width, int height, int checksX, int checksY, Color col1, Color col2); // Generate image: checked
RLAPI Image GenImageWhiteNoise(int width, int height, float factor); // Generate image: white noise
RLAPI Image GenImageCellular(int width, int height, int tileSize); // Generate image: cellular algorithm, bigger tileSize means bigger cells
// Image manipulation functions
RLAPI Image ImageCopy(Image image); // Create an image duplicate (useful for transformations)
RLAPI Image ImageFromImage(Image image, Rectangle rec); // Create an image from another image piece
RLAPI Image ImageText(const char *text, int fontSize, Color color); // Create an image from text (default font)
RLAPI Image ImageTextEx(Font font, const char *text, float fontSize, float spacing, Color tint); // Create an image from text (custom sprite font)
RLAPI void ImageFormat(Image *image, int newFormat); // Convert image data to desired format
RLAPI void ImageToPOT(Image *image, Color fill); // Convert image to POT (power-of-two)
RLAPI void ImageCrop(Image *image, Rectangle crop); // Crop an image to a defined rectangle
RLAPI void ImageAlphaCrop(Image *image, float threshold); // Crop image depending on alpha value
RLAPI void ImageAlphaClear(Image *image, Color color, float threshold); // Clear alpha channel to desired color
RLAPI void ImageAlphaMask(Image *image, Image alphaMask); // Apply alpha mask to image
RLAPI void ImageAlphaPremultiply(Image *image); // Premultiply alpha channel
RLAPI void ImageResize(Image *image, int newWidth, int newHeight); // Resize image (Bicubic scaling algorithm)
RLAPI void ImageResizeNN(Image *image, int newWidth,int newHeight); // Resize image (Nearest-Neighbor scaling algorithm)
RLAPI void ImageResizeCanvas(Image *image, int newWidth, int newHeight, int offsetX, int offsetY, Color fill); // Resize canvas and fill with color
RLAPI void ImageMipmaps(Image *image); // Compute all mipmap levels for a provided image
RLAPI void ImageDither(Image *image, int rBpp, int gBpp, int bBpp, int aBpp); // Dither image data to 16bpp or lower (Floyd-Steinberg dithering)
RLAPI void ImageFlipVertical(Image *image); // Flip image vertically
RLAPI void ImageFlipHorizontal(Image *image); // Flip image horizontally
RLAPI void ImageRotateCW(Image *image); // Rotate image clockwise 90deg
RLAPI void ImageRotateCCW(Image *image); // Rotate image counter-clockwise 90deg
RLAPI void ImageColorTint(Image *image, Color color); // Modify image color: tint
RLAPI void ImageColorInvert(Image *image); // Modify image color: invert
RLAPI void ImageColorGrayscale(Image *image); // Modify image color: grayscale
RLAPI void ImageColorContrast(Image *image, float contrast); // Modify image color: contrast (-100 to 100)
RLAPI void ImageColorBrightness(Image *image, int brightness); // Modify image color: brightness (-255 to 255)
RLAPI void ImageColorReplace(Image *image, Color color, Color replace); // Modify image color: replace color
RLAPI Color *LoadImageColors(Image image); // Load color data from image as a Color array (RGBA - 32bit)
RLAPI Color *LoadImagePalette(Image image, int maxPaletteSize, int *colorCount); // Load colors palette from image as a Color array (RGBA - 32bit)
RLAPI void UnloadImageColors(Color *colors); // Unload color data loaded with LoadImageColors()
RLAPI void UnloadImagePalette(Color *colors); // Unload colors palette loaded with LoadImagePalette()
RLAPI Rectangle GetImageAlphaBorder(Image image, float threshold); // Get image alpha border rectangle
RLAPI Color GetImageColor(Image image, int x, int y); // Get image pixel color at (x, y) position
// Image drawing functions
// NOTE: Image software-rendering functions (CPU)
RLAPI void ImageClearBackground(Image *dst, Color color); // Clear image background with given color
RLAPI void ImageDrawPixel(Image *dst, int posX, int posY, Color color); // Draw pixel within an image
RLAPI void ImageDrawPixelV(Image *dst, Vector2 position, Color color); // Draw pixel within an image (Vector version)
RLAPI void ImageDrawLine(Image *dst, int startPosX, int startPosY, int endPosX, int endPosY, Color color); // Draw line within an image
RLAPI void ImageDrawLineV(Image *dst, Vector2 start, Vector2 end, Color color); // Draw line within an image (Vector version)
RLAPI void ImageDrawCircle(Image *dst, int centerX, int centerY, int radius, Color color); // Draw circle within an image
RLAPI void ImageDrawCircleV(Image *dst, Vector2 center, int radius, Color color); // Draw circle within an image (Vector version)
RLAPI void ImageDrawRectangle(Image *dst, int posX, int posY, int width, int height, Color color); // Draw rectangle within an image
RLAPI void ImageDrawRectangleV(Image *dst, Vector2 position, Vector2 size, Color color); // Draw rectangle within an image (Vector version)
RLAPI void ImageDrawRectangleRec(Image *dst, Rectangle rec, Color color); // Draw rectangle within an image
RLAPI void ImageDrawRectangleLines(Image *dst, Rectangle rec, int thick, Color color); // Draw rectangle lines within an image
RLAPI void ImageDraw(Image *dst, Image src, Rectangle srcRec, Rectangle dstRec, Color tint); // Draw a source image within a destination image (tint applied to source)
RLAPI void ImageDrawText(Image *dst, const char *text, int posX, int posY, int fontSize, Color color); // Draw text (using default font) within an image (destination)
RLAPI void ImageDrawTextEx(Image *dst, Font font, const char *text, Vector2 position, float fontSize, float spacing, Color tint); // Draw text (custom sprite font) within an image (destination)
// Texture loading functions
// NOTE: These functions require GPU access
RLAPI Texture2D LoadTexture(const char *fileName); // Load texture from file into GPU memory (VRAM)
RLAPI Texture2D LoadTextureFromImage(Image image); // Load texture from image data
RLAPI TextureCubemap LoadTextureCubemap(Image image, int layout); // Load cubemap from image, multiple image cubemap layouts supported
RLAPI RenderTexture2D LoadRenderTexture(int width, int height); // Load texture for rendering (framebuffer)
RLAPI void UnloadTexture(Texture2D texture); // Unload texture from GPU memory (VRAM)
RLAPI void UnloadRenderTexture(RenderTexture2D target); // Unload render texture from GPU memory (VRAM)
RLAPI void UpdateTexture(Texture2D texture, const void *pixels); // Update GPU texture with new data
RLAPI void UpdateTextureRec(Texture2D texture, Rectangle rec, const void *pixels); // Update GPU texture rectangle with new data
// Texture configuration functions
RLAPI void GenTextureMipmaps(Texture2D *texture); // Generate GPU mipmaps for a texture
RLAPI void SetTextureFilter(Texture2D texture, int filter); // Set texture scaling filter mode
RLAPI void SetTextureWrap(Texture2D texture, int wrap); // Set texture wrapping mode
// Texture drawing functions
RLAPI void DrawTexture(Texture2D texture, int posX, int posY, Color tint); // Draw a Texture2D
RLAPI void DrawTextureV(Texture2D texture, Vector2 position, Color tint); // Draw a Texture2D with position defined as Vector2
RLAPI void DrawTextureEx(Texture2D texture, Vector2 position, float rotation, float scale, Color tint); // Draw a Texture2D with extended parameters
RLAPI void DrawTextureRec(Texture2D texture, Rectangle source, Vector2 position, Color tint); // Draw a part of a texture defined by a rectangle
RLAPI void DrawTextureQuad(Texture2D texture, Vector2 tiling, Vector2 offset, Rectangle quad, Color tint); // Draw texture quad with tiling and offset parameters
RLAPI void DrawTextureTiled(Texture2D texture, Rectangle source, Rectangle dest, Vector2 origin, float rotation, float scale, Color tint); // Draw part of a texture (defined by a rectangle) with rotation and scale tiled into dest.
RLAPI void DrawTexturePro(Texture2D texture, Rectangle source, Rectangle dest, Vector2 origin, float rotation, Color tint); // Draw a part of a texture defined by a rectangle with 'pro' parameters
RLAPI void DrawTextureNPatch(Texture2D texture, NPatchInfo nPatchInfo, Rectangle dest, Vector2 origin, float rotation, Color tint); // Draws a texture (or part of it) that stretches or shrinks nicely
RLAPI void DrawTexturePoly(Texture2D texture, Vector2 center, Vector2 *points, Vector2 *texcoords, int pointCount, Color tint); // Draw a textured polygon
// Color/pixel related functions
RLAPI Color Fade(Color color, float alpha); // Get color with alpha applied, alpha goes from 0.0f to 1.0f
RLAPI int ColorToInt(Color color); // Get hexadecimal value for a Color
RLAPI Vector4 ColorNormalize(Color color); // Get Color normalized as float [0..1]
RLAPI Color ColorFromNormalized(Vector4 normalized); // Get Color from normalized values [0..1]
RLAPI Vector3 ColorToHSV(Color color); // Get HSV values for a Color, hue [0..360], saturation/value [0..1]
RLAPI Color ColorFromHSV(float hue, float saturation, float value); // Get a Color from HSV values, hue [0..360], saturation/value [0..1]
RLAPI Color ColorAlpha(Color color, float alpha); // Get color with alpha applied, alpha goes from 0.0f to 1.0f
RLAPI Color ColorAlphaBlend(Color dst, Color src, Color tint); // Get src alpha-blended into dst color with tint
RLAPI Color GetColor(unsigned int hexValue); // Get Color structure from hexadecimal value
RLAPI Color GetPixelColor(void *srcPtr, int format); // Get Color from a source pixel pointer of certain format
RLAPI void SetPixelColor(void *dstPtr, Color color, int format); // Set color formatted into destination pixel pointer
RLAPI int GetPixelDataSize(int width, int height, int format); // Get pixel data size in bytes for certain format
//------------------------------------------------------------------------------------
// Font Loading and Text Drawing Functions (Module: text)
//------------------------------------------------------------------------------------
// Font loading/unloading functions
RLAPI Font GetFontDefault(void); // Get the default Font
RLAPI Font LoadFont(const char *fileName); // Load font from file into GPU memory (VRAM)
RLAPI Font LoadFontEx(const char *fileName, int fontSize, int *fontChars, int glyphCount); // Load font from file with extended parameters, use NULL for fontChars and 0 for glyphCount to load the default character set
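// Example (illustrative sketch, not part of the raylib header): loading the default
// character set at 32 px; "myfont.ttf" is a placeholder path:
//     Font font = LoadFontEx("myfont.ttf", 32, NULL, 0);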
RLAPI Font LoadFontFromImage(Image image, Color key, int firstChar); // Load font from Image (XNA style)
RLAPI Font LoadFontFromMemory(const char *fileType, const unsigned char *fileData, int dataSize, int fontSize, int *fontChars, int glyphCount); // Load font from memory buffer, fileType refers to extension: i.e. '.ttf'
RLAPI GlyphInfo *LoadFontData(const unsigned char *fileData, int dataSize, int fontSize, int *fontChars, int glyphCount, int type); // Load font data for further use
RLAPI Image GenImageFontAtlas(const GlyphInfo *chars, Rectangle **recs, int glyphCount, int fontSize, int padding, int packMethod); // Generate image font atlas using chars info
RLAPI void UnloadFontData(GlyphInfo *chars, int glyphCount); // Unload font chars info data (RAM)
RLAPI void UnloadFont(Font font); // Unload font from GPU memory (VRAM)
RLAPI bool ExportFontAsCode(Font font, const char *fileName); // Export font as code file, returns true on success
// Text drawing functions
RLAPI void DrawFPS(int posX, int posY); // Draw current FPS
RLAPI void DrawText(const char *text, int posX, int posY, int fontSize, Color color); // Draw text (using default font)
RLAPI void DrawTextEx(Font font, const char *text, Vector2 position, float fontSize, float spacing, Color tint); // Draw text using font and additional parameters
RLAPI void DrawTextPro(Font font, const char *text, Vector2 position, Vector2 origin, float rotation, float fontSize, float spacing, Color tint); // Draw text using Font and pro parameters (rotation)
RLAPI void DrawTextCodepoint(Font font, int codepoint, Vector2 position, float fontSize, Color tint); // Draw one character (codepoint)
// Text font info functions
RLAPI int MeasureText(const char *text, int fontSize); // Measure string width for default font
RLAPI Vector2 MeasureTextEx(Font font, const char *text, float fontSize, float spacing); // Measure string size for Font
RLAPI int GetGlyphIndex(Font font, int codepoint); // Get glyph index position in font for a codepoint (unicode character), fallback to '?' if not found
RLAPI GlyphInfo GetGlyphInfo(Font font, int codepoint); // Get glyph font info data for a codepoint (unicode character), fallback to '?' if not found
RLAPI Rectangle GetGlyphAtlasRec(Font font, int codepoint); // Get glyph rectangle in font atlas for a codepoint (unicode character), fallback to '?' if not found
// Text codepoints management functions (unicode characters)
RLAPI int *LoadCodepoints(const char *text, int *count); // Load all codepoints from a UTF-8 text string, codepoints count returned by parameter
RLAPI void UnloadCodepoints(int *codepoints); // Unload codepoints data from memory
RLAPI int GetCodepointCount(const char *text); // Get total number of codepoints in a UTF-8 encoded string
RLAPI int GetCodepoint(const char *text, int *bytesProcessed); // Get next codepoint in a UTF-8 encoded string, 0x3f('?') is returned on failure
RLAPI const char *CodepointToUTF8(int codepoint, int *byteSize); // Encode one codepoint into UTF-8 byte array (array length returned as parameter)
RLAPI char *TextCodepointsToUTF8(int *codepoints, int length); // Encode text as codepoints array into UTF-8 text string (WARNING: memory must be freed!)
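// Example (illustrative sketch, not part of the raylib header): iterating the codepoints
// of a UTF-8 string, advancing by the number of bytes each codepoint consumed:
//     for (int i = 0; text[i] != '\0'; )
//     {
//         int bytes = 0;
//         int codepoint = GetCodepoint(&text[i], &bytes);
//         // ... use codepoint (0x3f '?' signals a decoding failure) ...
//         i += bytes;
//     }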
// Text strings management functions (no UTF-8 strings, only byte chars)
// NOTE: Some of these functions allocate memory internally for the returned strings, just be careful!
RLAPI int TextCopy(char *dst, const char *src); // Copy one string to another, returns bytes copied
RLAPI bool TextIsEqual(const char *text1, const char *text2);                               // Check if two text strings are equal
RLAPI unsigned int TextLength(const char *text); // Get text length, checks for '\0' ending
RLAPI const char *TextFormat(const char *text, ...); // Text formatting with variables (sprintf() style)
RLAPI const char *TextSubtext(const char *text, int position, int length); // Get a piece of a text string
RLAPI char *TextReplace(char *text, const char *replace, const char *by); // Replace text string (WARNING: memory must be freed!)
RLAPI char *TextInsert(const char *text, const char *insert, int position); // Insert text in a position (WARNING: memory must be freed!)
RLAPI const char *TextJoin(const char **textList, int count, const char *delimiter); // Join text strings with delimiter
RLAPI const char **TextSplit(const char *text, char delimiter, int *count); // Split text into multiple strings
RLAPI void TextAppend(char *text, const char *append, int *position); // Append text at specific position and move cursor!
RLAPI int TextFindIndex(const char *text, const char *find); // Find first text occurrence within a string
RLAPI const char *TextToUpper(const char *text); // Get upper case version of provided string
RLAPI const char *TextToLower(const char *text); // Get lower case version of provided string
RLAPI const char *TextToPascal(const char *text); // Get Pascal case notation version of provided string
RLAPI int TextToInteger(const char *text); // Get integer value from text (negative values not supported)
//------------------------------------------------------------------------------------
// Basic 3d Shapes Drawing Functions (Module: models)
//------------------------------------------------------------------------------------
// Basic geometric 3D shapes drawing functions
RLAPI void DrawLine3D(Vector3 startPos, Vector3 endPos, Color color); // Draw a line in 3D world space
RLAPI void DrawPoint3D(Vector3 position, Color color); // Draw a point in 3D space, actually a small line
RLAPI void DrawCircle3D(Vector3 center, float radius, Vector3 rotationAxis, float rotationAngle, Color color); // Draw a circle in 3D world space
RLAPI void DrawTriangle3D(Vector3 v1, Vector3 v2, Vector3 v3, Color color); // Draw a color-filled triangle (vertex in counter-clockwise order!)
RLAPI void DrawTriangleStrip3D(Vector3 *points, int pointCount, Color color); // Draw a triangle strip defined by points
RLAPI void DrawCube(Vector3 position, float width, float height, float length, Color color); // Draw cube
RLAPI void DrawCubeV(Vector3 position, Vector3 size, Color color); // Draw cube (Vector version)
RLAPI void DrawCubeWires(Vector3 position, float width, float height, float length, Color color); // Draw cube wires
RLAPI void DrawCubeWiresV(Vector3 position, Vector3 size, Color color); // Draw cube wires (Vector version)
RLAPI void DrawCubeTexture(Texture2D texture, Vector3 position, float width, float height, float length, Color color); // Draw cube textured
RLAPI void DrawCubeTextureRec(Texture2D texture, Rectangle source, Vector3 position, float width, float height, float length, Color color); // Draw cube with a region of a texture
RLAPI void DrawSphere(Vector3 centerPos, float radius, Color color); // Draw sphere
RLAPI void DrawSphereEx(Vector3 centerPos, float radius, int rings, int slices, Color color); // Draw sphere with extended parameters
RLAPI void DrawSphereWires(Vector3 centerPos, float radius, int rings, int slices, Color color); // Draw sphere wires
RLAPI void DrawCylinder(Vector3 position, float radiusTop, float radiusBottom, float height, int slices, Color color); // Draw a cylinder/cone
RLAPI void DrawCylinderEx(Vector3 startPos, Vector3 endPos, float startRadius, float endRadius, int sides, Color color); // Draw a cylinder with base at startPos and top at endPos
RLAPI void DrawCylinderWires(Vector3 position, float radiusTop, float radiusBottom, float height, int slices, Color color); // Draw a cylinder/cone wires
RLAPI void DrawCylinderWiresEx(Vector3 startPos, Vector3 endPos, float startRadius, float endRadius, int sides, Color color); // Draw a cylinder wires with base at startPos and top at endPos
RLAPI void DrawPlane(Vector3 centerPos, Vector2 size, Color color); // Draw a plane XZ
RLAPI void DrawRay(Ray ray, Color color); // Draw a ray line
RLAPI void DrawGrid(int slices, float spacing); // Draw a grid (centered at (0, 0, 0))
//------------------------------------------------------------------------------------
// Model 3d Loading and Drawing Functions (Module: models)
//------------------------------------------------------------------------------------
// Model management functions
RLAPI Model LoadModel(const char *fileName); // Load model from files (meshes and materials)
RLAPI Model LoadModelFromMesh(Mesh mesh); // Load model from generated mesh (default material)
RLAPI void UnloadModel(Model model); // Unload model (including meshes) from memory (RAM and/or VRAM)
RLAPI void UnloadModelKeepMeshes(Model model); // Unload model (but not meshes) from memory (RAM and/or VRAM)
RLAPI BoundingBox GetModelBoundingBox(Model model); // Compute model bounding box limits (considers all meshes)
// Model drawing functions
RLAPI void DrawModel(Model model, Vector3 position, float scale, Color tint); // Draw a model (with texture if set)
RLAPI void DrawModelEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint); // Draw a model with extended parameters
RLAPI void DrawModelWires(Model model, Vector3 position, float scale, Color tint); // Draw a model wires (with texture if set)
RLAPI void DrawModelWiresEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint); // Draw a model wires (with texture if set) with extended parameters
RLAPI void DrawBoundingBox(BoundingBox box, Color color); // Draw bounding box (wires)
RLAPI void DrawBillboard(Camera camera, Texture2D texture, Vector3 position, float size, Color tint); // Draw a billboard texture
RLAPI void DrawBillboardRec(Camera camera, Texture2D texture, Rectangle source, Vector3 position, Vector2 size, Color tint); // Draw a billboard texture defined by source
RLAPI void DrawBillboardPro(Camera camera, Texture2D texture, Rectangle source, Vector3 position, Vector3 up, Vector2 size, Vector2 origin, float rotation, Color tint); // Draw a billboard texture defined by source and rotation
// Mesh management functions
RLAPI void UploadMesh(Mesh *mesh, bool dynamic); // Upload mesh vertex data in GPU and provide VAO/VBO ids
RLAPI void UpdateMeshBuffer(Mesh mesh, int index, void *data, int dataSize, int offset); // Update mesh vertex data in GPU for a specific buffer index
RLAPI void UnloadMesh(Mesh mesh); // Unload mesh data from CPU and GPU
RLAPI void DrawMesh(Mesh mesh, Material material, Matrix transform); // Draw a 3d mesh with material and transform
RLAPI void DrawMeshInstanced(Mesh mesh, Material material, Matrix *transforms, int instances); // Draw multiple mesh instances with material and different transforms
RLAPI bool ExportMesh(Mesh mesh, const char *fileName); // Export mesh data to file, returns true on success
RLAPI BoundingBox GetMeshBoundingBox(Mesh mesh); // Compute mesh bounding box limits
RLAPI void GenMeshTangents(Mesh *mesh); // Compute mesh tangents
RLAPI void GenMeshBinormals(Mesh *mesh); // Compute mesh binormals
// Mesh generation functions
RLAPI Mesh GenMeshPoly(int sides, float radius); // Generate polygonal mesh
RLAPI Mesh GenMeshPlane(float width, float length, int resX, int resZ); // Generate plane mesh (with subdivisions)
RLAPI Mesh GenMeshCube(float width, float height, float length); // Generate cuboid mesh
RLAPI Mesh GenMeshSphere(float radius, int rings, int slices); // Generate sphere mesh (standard sphere)
RLAPI Mesh GenMeshHemiSphere(float radius, int rings, int slices); // Generate half-sphere mesh (no bottom cap)
RLAPI Mesh GenMeshCylinder(float radius, float height, int slices); // Generate cylinder mesh
RLAPI Mesh GenMeshCone(float radius, float height, int slices); // Generate cone/pyramid mesh
RLAPI Mesh GenMeshTorus(float radius, float size, int radSeg, int sides); // Generate torus mesh
RLAPI Mesh GenMeshKnot(float radius, float size, int radSeg, int sides); // Generate trefoil knot mesh
RLAPI Mesh GenMeshHeightmap(Image heightmap, Vector3 size); // Generate heightmap mesh from image data
RLAPI Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize); // Generate cubes-based map mesh from image data
// Material loading/unloading functions
RLAPI Material *LoadMaterials(const char *fileName, int *materialCount); // Load materials from model file
RLAPI Material LoadMaterialDefault(void); // Load default material (Supports: DIFFUSE, SPECULAR, NORMAL maps)
RLAPI void UnloadMaterial(Material material); // Unload material from GPU memory (VRAM)
RLAPI void SetMaterialTexture(Material *material, int mapType, Texture2D texture); // Set texture for a material map type (MATERIAL_MAP_DIFFUSE, MATERIAL_MAP_SPECULAR...)
RLAPI void SetModelMeshMaterial(Model *model, int meshId, int materialId); // Set material for a mesh
// Model animations loading/unloading functions
RLAPI ModelAnimation *LoadModelAnimations(const char *fileName, unsigned int *animCount); // Load model animations from file
RLAPI void UpdateModelAnimation(Model model, ModelAnimation anim, int frame); // Update model animation pose
RLAPI void UnloadModelAnimation(ModelAnimation anim); // Unload animation data
RLAPI void UnloadModelAnimations(ModelAnimation* animations, unsigned int count); // Unload animation array data
RLAPI bool IsModelAnimationValid(Model model, ModelAnimation anim); // Check model animation skeleton match
// Collision detection functions
RLAPI bool CheckCollisionSpheres(Vector3 center1, float radius1, Vector3 center2, float radius2); // Check collision between two spheres
RLAPI bool CheckCollisionBoxes(BoundingBox box1, BoundingBox box2); // Check collision between two bounding boxes
RLAPI bool CheckCollisionBoxSphere(BoundingBox box, Vector3 center, float radius); // Check collision between box and sphere
RLAPI RayCollision GetRayCollisionSphere(Ray ray, Vector3 center, float radius); // Get collision info between ray and sphere
RLAPI RayCollision GetRayCollisionBox(Ray ray, BoundingBox box); // Get collision info between ray and box
RLAPI RayCollision GetRayCollisionModel(Ray ray, Model model); // Get collision info between ray and model
RLAPI RayCollision GetRayCollisionMesh(Ray ray, Mesh mesh, Matrix transform); // Get collision info between ray and mesh
RLAPI RayCollision GetRayCollisionTriangle(Ray ray, Vector3 p1, Vector3 p2, Vector3 p3); // Get collision info between ray and triangle
RLAPI RayCollision GetRayCollisionQuad(Ray ray, Vector3 p1, Vector3 p2, Vector3 p3, Vector3 p4); // Get collision info between ray and quad
//------------------------------------------------------------------------------------
// Audio Loading and Playing Functions (Module: audio)
//------------------------------------------------------------------------------------
// Audio device management functions
RLAPI void InitAudioDevice(void); // Initialize audio device and context
RLAPI void CloseAudioDevice(void); // Close the audio device and context
RLAPI bool IsAudioDeviceReady(void); // Check if audio device has been initialized successfully
RLAPI void SetMasterVolume(float volume); // Set master volume (listener)
// Wave/Sound loading/unloading functions
RLAPI Wave LoadWave(const char *fileName); // Load wave data from file
RLAPI Wave LoadWaveFromMemory(const char *fileType, const unsigned char *fileData, int dataSize); // Load wave from memory buffer, fileType refers to extension: i.e. '.wav'
RLAPI Sound LoadSound(const char *fileName); // Load sound from file
RLAPI Sound LoadSoundFromWave(Wave wave); // Load sound from wave data
RLAPI void UpdateSound(Sound sound, const void *data, int sampleCount); // Update sound buffer with new data
RLAPI void UnloadWave(Wave wave); // Unload wave data
RLAPI void UnloadSound(Sound sound); // Unload sound
RLAPI bool ExportWave(Wave wave, const char *fileName); // Export wave data to file, returns true on success
RLAPI bool ExportWaveAsCode(Wave wave, const char *fileName); // Export wave sample data to code (.h), returns true on success
// Wave/Sound management functions
RLAPI void PlaySound(Sound sound); // Play a sound
RLAPI void StopSound(Sound sound); // Stop playing a sound
RLAPI void PauseSound(Sound sound); // Pause a sound
RLAPI void ResumeSound(Sound sound); // Resume a paused sound
RLAPI void PlaySoundMulti(Sound sound); // Play a sound (using multichannel buffer pool)
RLAPI void StopSoundMulti(void); // Stop any sound playing (using multichannel buffer pool)
RLAPI int GetSoundsPlaying(void); // Get number of sounds playing in the multichannel
RLAPI bool IsSoundPlaying(Sound sound); // Check if a sound is currently playing
RLAPI void SetSoundVolume(Sound sound, float volume); // Set volume for a sound (1.0 is max level)
RLAPI void SetSoundPitch(Sound sound, float pitch); // Set pitch for a sound (1.0 is base level)
RLAPI void SetSoundPan(Sound sound, float pan); // Set pan for a sound (0.5 is center)
RLAPI Wave WaveCopy(Wave wave); // Copy a wave to a new wave
RLAPI void WaveCrop(Wave *wave, int initSample, int finalSample); // Crop a wave to defined samples range
RLAPI void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels); // Convert wave data to desired format
RLAPI float *LoadWaveSamples(Wave wave); // Load samples data from wave as a 32bit float data array
RLAPI void UnloadWaveSamples(float *samples); // Unload samples data loaded with LoadWaveSamples()
// Music management functions
RLAPI Music LoadMusicStream(const char *fileName); // Load music stream from file
RLAPI Music LoadMusicStreamFromMemory(const char *fileType, unsigned char *data, int dataSize); // Load music stream from data
RLAPI void UnloadMusicStream(Music music); // Unload music stream
RLAPI void PlayMusicStream(Music music); // Start music playing
RLAPI bool IsMusicStreamPlaying(Music music); // Check if music is playing
RLAPI void UpdateMusicStream(Music music); // Updates buffers for music streaming
RLAPI void StopMusicStream(Music music); // Stop music playing
RLAPI void PauseMusicStream(Music music); // Pause music playing
RLAPI void ResumeMusicStream(Music music); // Resume playing paused music
RLAPI void SeekMusicStream(Music music, float position); // Seek music to a position (in seconds)
RLAPI void SetMusicVolume(Music music, float volume); // Set volume for music (1.0 is max level)
RLAPI void SetMusicPitch(Music music, float pitch); // Set pitch for a music (1.0 is base level)
RLAPI void SetMusicPan(Music music, float pan); // Set pan for a music (0.5 is center)
RLAPI float GetMusicTimeLength(Music music); // Get music time length (in seconds)
RLAPI float GetMusicTimePlayed(Music music); // Get current music time played (in seconds)
// AudioStream management functions
RLAPI AudioStream LoadAudioStream(unsigned int sampleRate, unsigned int sampleSize, unsigned int channels); // Load audio stream (to stream raw audio pcm data)
RLAPI void UnloadAudioStream(AudioStream stream); // Unload audio stream and free memory
RLAPI void UpdateAudioStream(AudioStream stream, const void *data, int frameCount); // Update audio stream buffers with data
RLAPI bool IsAudioStreamProcessed(AudioStream stream); // Check if any audio stream buffers requires refill
RLAPI void PlayAudioStream(AudioStream stream); // Play audio stream
RLAPI void PauseAudioStream(AudioStream stream); // Pause audio stream
RLAPI void ResumeAudioStream(AudioStream stream); // Resume audio stream
RLAPI bool IsAudioStreamPlaying(AudioStream stream); // Check if audio stream is playing
RLAPI void StopAudioStream(AudioStream stream); // Stop audio stream
RLAPI void SetAudioStreamVolume(AudioStream stream, float volume); // Set volume for audio stream (1.0 is max level)
RLAPI void SetAudioStreamPitch(AudioStream stream, float pitch); // Set pitch for audio stream (1.0 is base level)
RLAPI void SetAudioStreamPan(AudioStream stream, float pan); // Set pan for audio stream (0.5 is centered)
RLAPI void SetAudioStreamBufferSizeDefault(int size); // Default size for new audio streams
#if defined(__cplusplus)
}
#endif
#endif // RAYLIB_H
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE_render file in the root directory of this subproject. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#include "Camera.h"
using namespace surround360;
Camera makeCamera(
const Camera& camera,
const Camera::Vector3& position,
const Camera::Vector3& rotation,
const Camera::Vector2& principal,
const Camera::Real& focal,
const Camera::Vector2& distortion) {
Camera result = camera;
result.position = position;
result.setRotation(rotation);
result.principal = principal;
result.setScalarFocal(focal);
result.distortion = distortion;
return result;
}
struct ReprojectionFunctor {
static ceres::CostFunction* addResidual(
ceres::Problem& problem,
Camera::Vector3& position,
Camera::Vector3& rotation,
Camera::Vector2& principal,
Camera::Real& focal,
Camera::Vector2& distortion,
Camera::Vector3& world,
const Camera& camera,
const Camera::Vector2& pixel,
bool robust = false) {
auto* cost = new CostFunction(new ReprojectionFunctor(camera, pixel));
auto* loss = robust ? new ceres::HuberLoss(1.0) : nullptr;
problem.AddResidualBlock(
cost,
loss,
position.data(),
rotation.data(),
principal.data(),
&focal,
distortion.data(),
world.data());
return cost;
}
bool operator()(
double const* const position,
double const* const rotation,
double const* const principal,
double const* const focal,
double const* const distortion,
double const* const world,
double* residuals) const {
// create a camera using parameters
// TODO: maybe compute modified cameras once per iteration using
// vector<IterationCallback> Solver::Options::callbacks?
Camera modified = makeCamera(
camera,
Eigen::Map<const Camera::Vector3>(position),
Eigen::Map<const Camera::Vector3>(rotation),
Eigen::Map<const Camera::Vector2>(principal),
*focal,
Eigen::Map<const Camera::Vector2>(distortion));
// transform world with that camera and compare to pixel
Eigen::Map<const Camera::Vector3> w(world);
Eigen::Map<Camera::Vector2> r(residuals);
r = modified.pixel(w) - pixel;
return true;
}
private:
using CostFunction = ceres::NumericDiffCostFunction<
ReprojectionFunctor,
ceres::CENTRAL,
2, // residuals
3, // position
3, // rotation
2, // principal
1, // focal
2, // distortion
3>; // world
ReprojectionFunctor(const Camera& camera, const Camera::Vector2& pixel) :
camera(camera),
pixel(pixel) {
}
const Camera& camera;
const Camera::Vector2 pixel;
};
struct TriangulationFunctor {
static ceres::CostFunction* addResidual(
ceres::Problem& problem,
Camera::Vector3& world,
const Camera& camera,
const Camera::Vector2& pixel,
const bool robust = false) {
auto* cost = new CostFunction(new TriangulationFunctor(camera, pixel));
auto* loss = robust ? new ceres::HuberLoss(1.0) : nullptr;
problem.AddResidualBlock(
cost,
loss,
world.data());
return cost;
}
bool operator()(
double const* const world,
double* residuals) const {
Eigen::Map<const Camera::Vector3> w(world);
Eigen::Map<Camera::Vector2> r(residuals);
// transform world with camera and compare to pixel
r = camera.pixel(w) - pixel;
return true;
}
private:
using CostFunction = ceres::NumericDiffCostFunction<
TriangulationFunctor,
ceres::CENTRAL,
2, // residuals
3>; // world
TriangulationFunctor(const Camera& camera, const Camera::Vector2& pixel) :
camera(camera),
pixel(pixel) {
}
const Camera& camera;
const Camera::Vector2 pixel;
};
using Observations = std::vector<std::pair<const Camera&, Camera::Vector2>>;
Camera::Vector3 averageAtDistance(
const Observations& observations,
const Camera::Real distance) {
Camera::Vector3 sum = Camera::Vector3::Zero();
for (const auto& obs : observations) {
sum += obs.first.rig(obs.second).pointAt(distance);
}
return sum / observations.size();
}
Camera::Vector3 triangulateNonlinear(
const Observations& observations,
const bool forceInFront) {
ceres::Solver::Options options;
// initial value is average of distant points
const Camera::Real kInitialDistance = 1000; // not hugely important
Camera::Vector3 world = averageAtDistance(observations, kInitialDistance);
ceres::Problem problem;
for (const auto& obs : observations) {
TriangulationFunctor::addResidual(problem, world, obs.first, obs.second);
}
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
if (forceInFront) {
for (const auto& obs : observations) {
if (obs.first.isBehind(world)) {
return averageAtDistance(observations, Camera::kNearInfinity);
}
}
}
return world;
}
double calcPercentile(std::vector<double> values, double percentile = 0.5) {
if (values.empty()) {
return NAN;
}
CHECK_LT(percentile, 1);
size_t index(percentile * values.size());
std::nth_element(values.begin(), values.begin() + index, values.end());
return values[index];
}
Camera::Vector2 reprojectionError(
const ceres::Problem& problem,
ceres::ResidualBlockId id) {
auto cost = problem.GetCostFunctionForResidualBlock(id);
std::vector<double*> parameterBlocks;
problem.GetParameterBlocksForResidualBlock(id, &parameterBlocks);
Camera::Vector2 residual;
cost->Evaluate(parameterBlocks.data(), residual.data(), nullptr);
return residual;
}
std::vector<double> getReprojectionErrorNorms(const ceres::Problem& problem) {
std::vector<double> result;
std::vector<ceres::ResidualBlockId> ids;
problem.GetResidualBlocks(&ids);
for (auto& id : ids) {
result.push_back(reprojectionError(problem, id).norm());
}
return result;
}
// remove if residual error is more than threshold
void removeOutliers(ceres::Problem& problem, double threshold) {
std::vector<ceres::ResidualBlockId> ids;
problem.GetResidualBlocks(&ids);
for (auto& id : ids) {
if (reprojectionError(problem, id).norm() > threshold) {
problem.RemoveResidualBlock(id);
}
}
}
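// Illustrative sketch (not part of the original surround360 source): one possible way to
// combine the helpers above into a solve / prune / re-solve pass. The function name and
// the 3x-median threshold are assumptions for illustration only.
void solveWithOutlierRejection(ceres::Problem& problem) {
ceres::Solver::Options options;
ceres::Solver::Summary summary;
// initial fit over all residual blocks
ceres::Solve(options, &problem, &summary);
// drop residuals whose reprojection error exceeds 3x the median error, then refit
const double median = calcPercentile(getReprojectionErrorNorms(problem));
removeOutliers(problem, 3 * median);
ceres::Solve(options, &problem, &summary);
}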
|
"use strict"; //eslint-disable-line
require("babel-core/register");
const gulp = require("gulp");
const eslint = require("gulp-eslint");
const del = require("del");
// const mocha = require("gulp-mocha");
const babel = require("gulp-babel");
// const path = require("path");
const sourcemaps = require("gulp-sourcemaps");
gulp.task("clean", () => {
return del(["build/**/*"]);
});
gulp.task("compile", ["lint"], () => {
return gulp.src(["src/**/*"])
.pipe(sourcemaps.init({identityMap: true}))
.pipe(babel({}))
.pipe(sourcemaps.write(".", {includeContent: true}))
.pipe(gulp.dest("build/"));
});
gulp.task("lint", ["clean"], () => {
return gulp.src(["src/**/*.js"])
.pipe(eslint({
fix: true,
}))
.pipe(eslint.format())
.pipe(eslint.failAfterError());
});
gulp.task("watch", () => {
gulp.watch("src/**/*.*", ["compile"]);
});
gulp.task("default", ["compile"]);
|
import os
from fnmatch import fnmatch
from multiprocessing import Pool
import random
import cv2
import tqdm
source_path = "/home/rik/nsfw_classifier/nsfw_data_scraper/source"
save_path = "/home/rik/apps/youtube/nsfw-classifier-demo/data/train/positive"
size = 306
def process_image(image_file):
filename = image_file.split("/")[-1]
global size
try:
oriimage = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)
# only keep images that are at least `size` pixels in both dimensions
if oriimage.shape[0] >= size and oriimage.shape[1] >= size:
img = cv2.resize(oriimage, (size, size))
cv2.imwrite(os.path.join(save_path, filename), img)
except Exception:
# skip unreadable or corrupt images
pass
if __name__ == "__main__":
pattern = "*.jpg"
image_files = []
for path, subdirs, files in os.walk(source_path):
for name in files:
if fnmatch(name, pattern):
image_files.append(os.path.join(path, name))
sample = random.sample(image_files, 49000)
print("1st file : {}".format(sample[0]))
pool = Pool(processes=4)
list(tqdm.tqdm(pool.imap(process_image, sample), total=len(sample)))
pool.close()
pool.join()
|
import click
import glob
import numpy as np
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
from copro import utils, evaluation
import os, sys
@click.command()
@click.option('-t0', '--start-year', type=int)
@click.option('-t1', '--end-year', type=int)
@click.option('-c', '--column', help='column name', default='chance_of_conflict', type=str)
@click.option('--geojson/--no-geojson', help='save output to geojson or not', default=False)
@click.option('--png/--no-png', help='save output to png or not', default=True)
@click.option('--verbose/--no-verbose', help='verbose on/off', default=False)
@click.argument('input_dir', type=click.Path())
@click.argument('output_dir',type=click.Path())
@click.argument('selected_polygons', type=click.Path())
def main(input_dir=None, output_dir=None, selected_polygons=None, start_year=None, end_year=None, geojson=None, png=None, column=None, verbose=None):
"""Post-processing script to calculate average model output over a user-specifeid period or all output geoJSON-files stored in input-dir.
Computed average values can be outputted as geoJSON-file or png-file or both.
Args:
input_dir: path to input directory with geoJSON-files located per projection year.
output_dir (str): path to directory where output will be stored.
selected_polygons (str): path to a shp-file with all polygons used in a CoPro run.
Output:
geoJSON-file with average column value per polygon (if geojson is set).
png-file with plot of average column value per polygon (if png is set)
"""
# check if start/end time settings are consistent
if ((start_year != None) and (end_year == None)) or ((end_year != None) and (start_year == None)):
raise ValueError('ERROR: if start or end year is specified, the other must be specified too!')
# read a shp-file with geometries of all selected polygons
click.echo('\nreading shp-file with all polygons from {}'.format(os.path.abspath(selected_polygons)))
selected_polygons_gdf = gpd.read_file(os.path.abspath(selected_polygons))
# create dataframe
global_df = utils.global_ID_geom_info(selected_polygons_gdf)
# find all geojson-files in input-dir
input_dir = os.path.abspath(input_dir)
click.echo('getting geojson-files from {}'.format(input_dir))
all_files = sorted(glob.glob(os.path.join(input_dir, '*.geojson')))
# if specific start/end time is specified, find only those geojson-files for specified period
    if (start_year is not None) and (end_year is not None):
        # define period between start and end time
period = np.arange(start_year, end_year+1, 1)
click.echo('using all geojson-files for years {} to {}'.format(period[0], period[-1]))
# creating suffix for file saving later
suffix = '{}_to_{}'.format(period[0], period[-1])
# initiate empty list for selected geojson-files
selected_files = []
# select
for fo in all_files:
# if the year-suffix of geojson-file matches year in period, add to list
year = int(str(str(os.path.basename(fo)).rsplit('.')[0]).rsplit('_')[-1])
if year in period:
                if verbose: click.echo('adding {} to selection'.format(fo))
selected_files.append(fo)
    # if no start/end time is specified, use all geojson-files in input-dir
else:
click.echo('using all geojson-files in input-dir')
        # also here, create suffix for file saving later
suffix = 'all_years'
selected_files = all_files
    # initiate empty dataframe for collecting all annual output
y_df = pd.DataFrame()
# go through all geojson-files left over after selection step
    # note: use a dedicated loop variable so the --geojson flag is not shadowed
    for geojson_file in selected_files:
        # read file and convert to dataframe
        if verbose: click.echo('reading file {}'.format(geojson_file))
        gdf = gpd.read_file(geojson_file, driver='GeoJSON')
df = pd.DataFrame(gdf)
        # append to the collecting dataframe (pd.concat replaces the deprecated DataFrame.append)
        y_df = pd.concat([y_df, df], ignore_index=True)
# initiate dataframe for time-averaged output
click.echo('creating one output dataframe from all geojson-files')
y_out = pd.DataFrame()
# get all unique IDs of polygons
y_out['ID'] = y_df.ID.unique()
click.echo('reading from column {}'.format(column))
if column == 'chance_of_conflict':
        # add number of predictions made over all selected years
y_out = pd.merge(y_out, y_df.nr_predictions.groupby(y_df.ID).sum().to_frame(), on='ID')
# add number of predicted conflicts over all selected years
y_out = pd.merge(y_out, y_df.nr_predicted_conflicts.groupby(y_df.ID).sum().to_frame(), on='ID')
# determine chance of conflict over all selected years
y_out[column] = y_out.nr_predicted_conflicts / y_out.nr_predictions
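        # worked example (illustrative numbers, not taken from any data): a polygon with
        # 3 predicted conflicts out of 12 predictions gets chance_of_conflict = 3 / 12 = 0.25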
elif column == 'avg_prob_1':
y_out = pd.merge(y_out, pd.to_numeric(y_df[column]).groupby(y_df.ID).mean().to_frame(), on='ID')
else:
raise ValueError('ERROR: column {} is not yet supported'.format(column))
    # add geometry information for each polygon
y_out = pd.merge(y_out, global_df, on='ID', how='left')
if not os.path.isdir(os.path.abspath(output_dir)):
click.echo('creating output folder {}'.format(os.path.abspath(output_dir)))
os.makedirs(os.path.abspath(output_dir))
# convert to geo-dataframe
gdf_out = gpd.GeoDataFrame(y_out, geometry=y_out.geometry)
# if specified, save as geojson-file to output-dir
if geojson:
click.echo('saving to {}'.format(os.path.abspath(os.path.join(output_dir, '{}_merged_{}.geojson'.format(column, suffix)))))
gdf_out.to_file(os.path.abspath(os.path.join(output_dir, '{}_merged_{}.geojson'.format(column, suffix))), driver='GeoJSON')
# if specified, save as png-file to output-dir
if png:
fig, ax = plt.subplots(1, 1)
gdf_out.plot(column=column, ax=ax,
cmap='Reds',
vmin=0, vmax=1,
legend=True,
legend_kwds={'label': column, 'orientation': "vertical"})
click.echo('saving to {}'.format(os.path.abspath(os.path.join(output_dir, '{}_merged_{}.png'.format(column, suffix)))))
plt.savefig(os.path.abspath(os.path.join(output_dir, '{}_merged_{}.png'.format(column, suffix))), dpi=300, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
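# Example invocation (a sketch; the script name and all paths below are hypothetical,
# only the options and the positional argument order come from the click decorators above):
#   python plot_average_output.py -t0 2015 -t1 2020 --geojson --png \
#       ./output/per_year ./output/averaged ./input/selected_polygons.shp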
|
var searchData=
[
['name',['name',['../classVinetalk_1_1VineObject_1_1cRep.html#a9008e6230b0294af18708ed27df0e6e4',1,'Vinetalk.VineObject.cRep.name()'],['../structvine__object__s.html#ac0d3e0bed9ca4efb24208d6e89a93db4',1,'vine_object_s::name()'],['../structallocation.html#ace567607042eb16aa3b0629142e5ea25',1,'allocation::name()']]],
['nano_5farm',['NANO_ARM',['../enumVinetalk_1_1VineAccelerator_1_1Type.html#aaf046799bea573cb9a79857227deada8',1,'Vinetalk.VineAccelerator.Type.NANO_ARM()'],['../vine__talk__types_8h.html#ae1360554e303883862a00382e62a6ecda7e7d12b64ac228ea5e49fdaa2676fc06',1,'NANO_ARM(): vine_talk_types.h']]],
['nano_5fcore',['NANO_CORE',['../enumVinetalk_1_1VineAccelerator_1_1Type.html#a3d43fd9961465abf958850da6749d4ce',1,'Vinetalk.VineAccelerator.Type.NANO_CORE()'],['../vine__talk__types_8h.html#ae1360554e303883862a00382e62a6ecdae0c9e1cc9180aaec2e98578ea88e1bfb',1,'NANO_CORE(): vine_talk_types.h']]],
['next',['next',['../structmalloc__segment.html#a6d7b4b2f0c010593cf90799d057d4655',1,'malloc_segment::next()'],['../classVinetalk_1_1VineObject_1_1cRep.html#a88f5837e2fec8244144a476773e1a1ed',1,'Vinetalk.VineObject.cRep.next()'],['../structutils__list__node.html#a37611aaa06306650bc1378f58ffc0ff5',1,'utils_list_node::next()']]],
['next_5fchunk',['next_chunk',['../malloc_8c.html#a891ae6952dcc03f81e75b91666400f51',1,'malloc.c']]],
['next_5fpinuse',['next_pinuse',['../malloc_8c.html#a62c85451d286c94329b31175ff6aa991',1,'malloc.c']]],
['no_5fmallinfo',['NO_MALLINFO',['../malloc_8c.html#abdaef093c0ac7f4ffca5df2d2b2f0560',1,'NO_MALLINFO(): malloc.c'],['../malloc_8h.html#abdaef093c0ac7f4ffca5df2d2b2f0560',1,'NO_MALLINFO(): malloc.h']]],
['no_5fmalloc_5fstats',['NO_MALLOC_STATS',['../malloc_8c.html#aa5a986d0460a969befdb7efd79ef049c',1,'malloc.c']]],
['no_5fsegment_5ftraversal',['NO_SEGMENT_TRAVERSAL',['../malloc_8c.html#a6413d234d61f597fccc2aac39e33941c',1,'malloc.c']]],
['noinline',['NOINLINE',['../malloc_8c.html#a1b173d22e57d9395897acbd8de62d505',1,'malloc.c']]],
['normalize',['normalize',['../WebUI_8cpp.html#af6c5cbc87f82e33a593a39a1b8cc647d',1,'normalize(): WebUI.cpp'],['../namespacebreaker.html#a17021dabea06ca683e9fdad61d058f25',1,'breaker.NORMALIZE()']]],
['nothing',['nothing',['../structutils__compat__empty__s.html#a819b91e732f1e8ee19dd607340513237',1,'utils_compat_empty_s']]],
['nsmallbins',['NSMALLBINS',['../malloc_8c.html#afe898942dfe9b3557981d0dc935fddde',1,'malloc.c']]],
['ntreebins',['NTREEBINS',['../malloc_8c.html#a187fe34f71c63b486f6d79ac51117061',1,'malloc.c']]],
['number',['number',['../namespacebreaker.html#af888a8e2259e864c71fa6c6bdbf0cfa2',1,'breaker']]]
];
|
import unittest
from plugins.sakuralive import SakuraLive
class TestPluginSakuraLive(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'http://www.sakuralive.com/preview.php?CHANNELNAME',
]
for url in should_match:
self.assertTrue(SakuraLive.can_handle_url(url))
should_not_match = [
'https://example.com/index.html',
]
for url in should_not_match:
self.assertFalse(SakuraLive.can_handle_url(url))
|
import { setRoute } from "egov-ui-kit/utils/commons";
import React from "react";
import {
getEpochForDate, sortByEpoch
} from "../../utils";
import { download } from "egov-common/ui-utils/commons";
export const searchResults = {
uiFramework: "custom-molecules",
componentPath: "Table",
visible: false,
props: {
columns: [
{
labelName: "Receipt No.",
labelKey: "CR_COMMON_TABLE_COL_RECEIPT_NO",
options: {
filter: false,
customBodyRender: (value, tableMeta, updateValue) => (
          <div onClick={() => {
const receiptQueryString = [
{ key: "receiptNumbers", value: tableMeta.rowData[0] },
{ key: "tenantId", value: tableMeta.rowData[8] },
{ key: "businessService", value: tableMeta.rowData[9] }
]
            download(receiptQueryString, "download", tableMeta.rowData[7]);
}} style={{color:'#2196F3'}}>
{value}
</div>
)
}
},
{
labelName: "Date",
labelKey: "CR_COMMON_TABLE_COL_DATE"
},
{
labelName: "Consumer code",
labelKey: "CR_COMMON_TABLE_CONSUMERCODE"
},
{
labelName: "Payee Name",
labelKey: "CR_COMMON_TABLE_COL_PAYEE_NAME"
},
{
labelName: "Service Type",
labelKey: "CR_SERVICE_TYPE_LABEL"
},
{
labelName: "Status",
labelKey: "CR_COMMON_TABLE_COL_STATUS"
},
{
labelName: "Action",
labelKey: "CR_COMMON_TABLE_ACTION",
options: {
filter: false,
customBodyRender: (value, tableMeta, updateValue) => (
          <div onClick={() => {
            if (tableMeta.rowData[6] === 'CANCEL') {
              setRoute(`/receipts/viewReceipt?receiptNumbers=${tableMeta.rowData[0]}&tenantId=${tableMeta.rowData[8]}&businessService=${tableMeta.rowData[9]}`);
            }
          }} style={{ color: tableMeta.rowData[6] === 'CANCEL' ? 'rgb(254, 122, 81)' : "inherit", cursor: tableMeta.rowData[6] === 'CANCEL' ? 'pointer' : "initial" }}>
{value}
</div>
)
}
},
{
labelName: "Receipt Key",
labelKey: "RECEIPT_KEY",
options: {
display: false
}
},
{
labelName: "Tenant Id",
labelKey: "TENANT_ID",
options: {
display: false
}
},
{
labelName: "SERVICE TYPE",
labelKey: "SERVICE_TYPE",
options: {
display: false
}
},
],
title: {
labelKey: "COMMON_TABLE_SEARCH_RESULT_RECIEPT",
labelName: "COMMON_TABLE_SEARCH_RESULT_RECIEPT",
},
rows: "",
options: {
filter: false,
download: false,
responsive: "stacked",
selectableRows: false,
hover: true,
rowsPerPageOptions: [10, 15, 20],
},
customSortColumn: {
column: "Date",
sortingFn: (data, i, sortDateOrder) => {
const epochDates = data.reduce((acc, curr) => {
acc.push([...curr, getEpochForDate(curr[4], "dayend")]);
return acc;
}, []);
const order = sortDateOrder === "asc" ? true : false;
const finalData = sortByEpoch(epochDates, !order).map(item => {
item.pop();
return item;
});
return { data: finalData, currentOrder: !order ? "asc" : "desc" };
}
}
}
};
|
(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([[54],{"571d":function(t,e,o){"use strict";o.r(e);var s=function(){var t=this,e=t.$createElement,o=t._self._c||e;return o("div",[o("q-input",{staticStyle:{display:"none"},model:{value:t.scaneddata.request_time,callback:function(e){t.$set(t.scaneddata,"request_time",e)},expression:"scaneddata.request_time"}}),o("q-card",{directives:[{name:"show",rawName:"v-show",value:!t.fab,expression:"!fab"}],style:{width:t.width,height:t.height},attrs:{flat:""}},[o("q-card-section",{staticStyle:{height:"75px"}},[o("div",{staticClass:"text-h6"},[t._v(t._s(t.goods_code_label))]),o("div",{staticClass:"text-subtitle2"},[t._v(t._s(t.goods_scan.goods_code))])]),o("q-separator"),o("q-scroll-area",{style:{height:t.scroll_height,width:t.width},attrs:{"thumb-style":t.thumbStyle,"bar-style":t.barStyle}},[o("q-list",[o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_desc_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_desc))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_supplier_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_supplier))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_weight_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_weight))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_w_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_w))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_d_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_d))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_h_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_h))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.unit_volume_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.unit_volume))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_unit_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_unit))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_class_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_class))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_brand_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_brand))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_color_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_color))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_shape_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_shape))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_specs_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_specs))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_origin_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_origin))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_cost_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_cost))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.goods_price_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.goods_price))])],1)]
,1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.creater_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.creater))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.create_time_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.create_time))])],1)],1),o("q-item",[o("q-item-section",[o("q-item-label",[t._v(t._s(t.updatetime_label))]),o("q-item-label",{attrs:{caption:""}},[t._v(t._s(t.goods_scan.update_time))])],1)],1)],1)],1),o("q-separator",{attrs:{dark:""}})],1)],1)},a=[],i=o("3004"),l=o("18d6"),_=o("09f9"),d={name:"Pagezebra_goodslist",data(){return{openid:"",login_name:"",authin:"0",pathname:"goods/",height:"",width:"",scroll_height:"",goods_scan:{},goods_code_label:this.$t("goods.view_goodslist.goods_code"),goods_desc_label:this.$t("goods.view_goodslist.goods_desc"),goods_supplier_label:this.$t("goods.view_goodslist.goods_supplier"),goods_weight_label:this.$t("goods.view_goodslist.goods_weight"),goods_w_label:this.$t("goods.view_goodslist.goods_w"),goods_d_label:this.$t("goods.view_goodslist.goods_d"),goods_h_label:this.$t("goods.view_goodslist.goods_h"),unit_volume_label:this.$t("goods.view_goodslist.unit_volume"),goods_unit_label:this.$t("goods.view_goodslist.goods_unit"),goods_class_label:this.$t("goods.view_goodslist.goods_class"),goods_brand_label:this.$t("goods.view_goodslist.goods_brand"),goods_color_label:this.$t("goods.view_goodslist.goods_color"),goods_shape_label:this.$t("goods.view_goodslist.goods_shape"),goods_specs_label:this.$t("goods.view_goodslist.goods_specs"),goods_origin_label:this.$t("goods.view_goodslist.goods_origin"),goods_cost_label:this.$t("goods.view_goodslist.goods_cost"),goods_price_label:this.$t("goods.view_goodslist.goods_price"),creater_label:this.$t("creater"),create_time_label:this.$t("createtime"),updatetime_label:this.$t("updatetime"),thumbStyle:{right:"4px",borderRadius:"5px",backgroundColor:"#E0E0E0",width:"5px",opacity:.75},barStyle:{right:"2px",borderRadius:"9px",backgroundColor:"#EEEEEE",width:"9px",opacity:.2}}},methods:{getGoodsList(t){var e=this;Object(i["e"])(e.pathname+"?goods_code="+t,{}).then((t=>{0===t.results.length?(navigator.vibrate(100),e.$q.notify({message:"No Goods Data",position:"top",icon:"close",color:"negative"})):1===t.results.length?e.goods_scan=t.results[0]:(navigator.vibrate(100),e.$q.notify({message:"Repeating Data",position:"top",icon:"close",color:"negative"}))})).catch((t=>{navigator.vibrate(100),e.$q.notify({message:t.detail,position:"top",icon:"close",color:"negative"})}))}},computed:{fab:{get(){return console.log("7",this.$store.state.fabchange.fab),this.$store.state.fabchange.fab}},scaneddata:{get(){return this.$store.state.scanedsolve.scaneddata}}},created(){var t=this;l["a"].has("openid")?t.openid=l["a"].getItem("openid"):(t.openid="",l["a"].set("openid","")),l["a"].has("login_name")?t.login_name=l["a"].getItem("login_name"):(t.login_name="",l["a"].set("login_name","")),l["a"].has("auth")?t.authin="1":t.authin="0"},mounted(){var t=this;t.width=1*_["a"].width+"px",t.height=_["a"].height-50+"px",t.scroll_height=_["a"].height-175+"px",t.barscan="",t.goods_scan=""},updated(){var t=this;""!==t.scaneddata&&("GOODS"===t.scaneddata.mode?t.getGoodsList(t.scaneddata.code):t.$q.notify({message:"No Goods 
Data",position:"top",icon:"close",color:"negative"}))},beforeDestroy(){}},n=d,g=o("42e1"),c=o("df19"),r=o("27f9"),m=o("f09f"),b=o("a370"),h=o("eb85"),p=o("4983"),q=o("1c1c"),v=o("66e5"),u=o("4074"),w=o("0170"),f=o("eebe"),$=o.n(f),y=Object(g["a"])(n,s,a,!1,null,null,null);"function"===typeof c["default"]&&Object(c["default"])(y);e["default"]=y.exports;$()(y,"components",{QInput:r["a"],QCard:m["a"],QCardSection:b["a"],QSeparator:h["a"],QScrollArea:p["a"],QList:q["a"],QItem:v["a"],QItemSection:u["a"],QItemLabel:w["a"]})},de8e:function(t,e){},df19:function(t,e,o){"use strict";var s=o("de8e"),a=o.n(s);e["default"]=a.a}}]);
|
import numpy as np
# Solution Library for problem 045e512c
"""
Solutions by Muhammad Ali Khan (Student ID 20235525)
"""
def get_sub_matrix(row, column, x):
"""Return the 3x3 matrix starting from the given (row, column) square. If the matrix goes beyond dimensions of the main matrix (x)
it fills in the squares in resultant matrix with 0
"""
result = []
for i in range(3):
result_row = []
for j in range(3):
            if (column+j) < len(x[row]) and (row+i) < len(x):
result_row.append(x[row+i][column+j])
if len(result_row) < 3:
for a in range(3-len(result_row)):
result_row.append(0)
result.append(result_row)
return (row, column, np.array(result))
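# Example (a sketch, assuming a 4x4 grid `x`): the window starting at (3, 3) spills past
# the edge, so the missing squares are padded with 0:
#   get_sub_matrix(3, 3, x)[2]  ->  [[x[3][3], 0, 0],
#                                    [0,       0, 0],
#                                    [0,       0, 0]]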
def get_all_patterns(x):
"""Using the sliding window technique, this finds out all the 3x3 matrix in the main matrix (x).
It then returns only those matrix which has non-zero elements. (meaning have at least one colored square)
"""
matrices = []
for row in range(len(x)):
for column in range(len(x[row])):
matrices.append(get_sub_matrix(row,column,x) )
result = dict()
for index, (row, column, sub_matrix) in enumerate(matrices):
if not np.all(sub_matrix==0):
result[index] = (row, column, sub_matrix)
return result
def get_pattern_to_repeat(patterns):
"""This returns the matrix which has the most non-zero (colored squares) elements. It also returns the location of that matrix in the main.
"""
pattern_to_repeat = None
highest = 0
for key, (row, column, value) in patterns.items():
if np.count_nonzero(value) > highest:
highest = np.count_nonzero(value)
pattern_to_repeat = (row, column, value)
return pattern_to_repeat
def get_color(matrix):
"""Returns the color of the matrix (excluding black)
"""
for a in matrix:
for color in a:
if color != 0:
return color
def replace_color(matrix, color):
"""It creates a new copy of the matrix and then replaces the black squares in that matrix with the given color
"""
result = np.copy(matrix)
for i in range(len(result)):
        for j in range(len(result[i])):
if result[i][j] != 0:
result[i][j] = color
return result
def get_colored_matrix(matrix, pattern_matrix):
"""Return the matrix filled with the color for black squares. This returns the location of the matrix too.
"""
(row, column, value) = matrix
color = get_color(value)
new_matrix = replace_color(pattern_matrix, color)
return (row, column, new_matrix )
"""
The following methods implement the logic to fill in the matrix with the colored matrix along a given axis.
This is a quick and dirty implementation and can be improved with some better logic.
There are 8 directions namely : left, right, top, bottom, top_left, bottom_left, top_right and bottom_right.
Two of them are not implemented in the interest of time as they were not needed.
"""
def repeat_bottom_right_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(row, len(x) + 10 , 4):
for r in range(a, a+3):
x_column = column + (i*4)
for c in range(x_column, x_column+3):
sub_matrix_row = (r-row) - (i*4)
sub_matrix_col = (c - column) - (i*4)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_top_right_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(row, -10 , -4):
for r in range(a, a+3):
x_column = column + (i*4)
for c in range(x_column, x_column+3):
sub_matrix_row = (r-row) + (i*4)
sub_matrix_col = (c - column) - (i*4)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_left_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(column, -10, -4):
for r in range(row, row+3):
for c in range(a, a+3):
sub_matrix_row = r-row
sub_matrix_col = (c - column) + (i*4)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_right_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(column, len(x[0]) + 10, 4):
for r in range(row, row+3):
for c in range(a, a+3):
sub_matrix_row = r-row
sub_matrix_col = (c - column) - (i*4)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_up_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(row, -10 , -4):
for r in range(a, a+3):
for c in range(column, column+3):
sub_matrix_row = (r-row) + (i*4)
sub_matrix_col = (c - column)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_down_matrices(matrix, x):
row, column, pattern = matrix
i = 0
for a in range(row, len(x) + 10 , 4):
for r in range(a, a+3):
for c in range(column, column+3):
sub_matrix_row = (r-row) - (i*4)
sub_matrix_col = (c - column)
if (r>=0 and r < len(x)) and (c >= 0 and c < len(x[r])):
x[r][c] = pattern[sub_matrix_row][sub_matrix_col]
i = i + 1
return matrix
def repeat_bottom_left_matrices(matrix, x):
'''
    Not implemented intentionally, as it was not needed for the current assignment; the logic is the same as for the other directions.
'''
return matrix
def repeat_top_left_matrices(matrix, x):
'''
    Not implemented intentionally, as it was not needed for the current assignment; the logic is the same as for the other directions.
'''
return matrix
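# Minimal usage sketch (hypothetical wiring; the actual solver for task 045e512c is not
# part of this helper library). Shown only to illustrate how the helpers compose:
#   patterns = get_all_patterns(x)                # all 3x3 windows with at least one colored square
#   pattern = get_pattern_to_repeat(patterns)     # densest window, as (row, col, 3x3 array)
#   # each remaining single-colour window is recoloured and then stamped along its axis, e.g.
#   # repeat_right_matrices(get_colored_matrix(marker, pattern[2]), x)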
|
const router = require('express').Router();
const { User, Post, Vote, Comment } = require("../../models");
// GET /api/users
router.get('/', (req, res) => {
// Access our User model and run .findAll() method)
User.findAll({
attributes: { exclude: ['password'] }
})
.then(dbUserData => res.json(dbUserData))
.catch(err => {
console.log(err);
res.status(500).json(err);
});
});
// GET /api/users/1
router.get('/:id', (req, res) => {
User.findOne({
attributes: { exclude: ['password'] },
where: {
id: req.params.id
},
include: [
{
model: Post,
attributes: ['id', 'title', 'post_url', 'created_at']
},
// include the Comment model here:
{
model: Comment,
attributes: ['id', 'comment_text', 'created_at'],
include: {
model: Post,
attributes: ['title']
}
},
{
model: Post,
attributes: ['title'],
through: Vote,
as: 'voted_posts'
}
]
})
.then(dbUserData => {
if (!dbUserData) {
res.status(404).json({ message: 'No user found with this id' });
return;
}
res.json(dbUserData);
})
.catch(err => {
console.log(err);
res.status(500).json(err);
});
});
// POST /api/users
router.post('/', (req, res) => {
// expects {username: 'Lernantino', email: 'lernantino@gmail.com', password: 'password1234'}
User.create({
username: req.body.username,
email: req.body.email,
password: req.body.password
})
.then(dbUserData => {
req.session.save(() => {
req.session.user_id = dbUserData.id;
req.session.username = dbUserData.username;
req.session.loggedIn = true;
res.json(dbUserData);
});
})
.catch(err => {
console.log(err);
res.status(500).json(err);
});
});
router.post('/login', (req, res) => {
User.findOne({
where: {
email: req.body.email
}
}).then(dbUserData => {
if (!dbUserData) {
res.status(400).json({ message: 'No user with that email address!' });
return;
}
const validPassword = dbUserData.checkPassword(req.body.password);
if (!validPassword) {
res.status(400).json({ message: 'Incorrect password!' });
return;
}
req.session.save(() => {
// declare session variables
req.session.user_id = dbUserData.id;
req.session.username = dbUserData.username;
req.session.loggedIn = true;
res.json({ user: dbUserData, message: 'You are now logged in!' });
});
});
});
//close user session
router.post('/logout', (req, res) => {
if (req.session.loggedIn) {
req.session.destroy(() => {
res.status(204).end();
});
}
else {
res.status(404).end();
}
});
// PUT /api/users/1
router.put('/:id', (req, res) => {
// expects {username: 'Lernantino', email: 'lernantino@gmail.com', password: 'password1234'}
// pass in req.body instead to only update what's passed through
User.update(req.body, {
individualHooks: true,
where: {
id: req.params.id
}
})
.then(dbUserData => {
if (!dbUserData[0]) {
res.status(404).json({ message: 'No user found with this id' });
return;
}
res.json(dbUserData);
})
.catch(err => {
console.log(err);
res.status(500).json(err);
});
});
// DELETE /api/users/1
router.delete('/:id', (req, res) => {
User.destroy({
where: {
id: req.params.id
}
})
.then(dbUserData => {
if (!dbUserData) {
res.status(404).json({ message: 'No user found with this id' });
return;
}
res.json(dbUserData);
})
.catch(err => {
console.log(err);
res.status(500).json(err);
});
});
module.exports = router;
|
const path = require("path")
const Mocha = require("mocha")
const glob = require("glob")
function run() {
// Create the mocha test
const mocha = new Mocha({
ui: "tdd",
color: true,
})
const testsRoot = path.resolve(__dirname, "..")
return new Promise((c, e) => {
glob("**/**.test.js", { cwd: testsRoot }, (err, files) => {
if (err) {
return e(err)
}
// Add files to the test suite
files.forEach((f) => mocha.addFile(path.resolve(testsRoot, f)))
try {
// Run the mocha test
mocha.run((failures) => {
if (failures > 0) {
e(new Error(`${failures} tests failed.`))
} else {
c()
}
})
} catch (err) {
console.error(err)
e(err)
}
})
})
}
module.exports = {
run,
}
|
from dataclasses import dataclass
from test.pycardano.util import check_two_way_cbor
from typing import Union
import pytest
from pycardano.exception import DeserializeException, SerializeException
from pycardano.plutus import (
COST_MODELS,
ExecutionUnits,
PlutusData,
Redeemer,
RedeemerTag,
plutus_script_hash,
)
from pycardano.serialization import IndefiniteList
@dataclass
class MyTest(PlutusData):
CONSTR_ID = 130
a: int
b: bytes
c: IndefiniteList
d: dict
@dataclass
class BigTest(PlutusData):
CONSTR_ID = 8
test: MyTest
@dataclass
class LargestTest(PlutusData):
CONSTR_ID = 9
@dataclass
class VestingParam(PlutusData):
CONSTR_ID = 1
beneficiary: bytes
deadline: int
testa: Union[BigTest, LargestTest]
testb: Union[BigTest, LargestTest]
@dataclass
class MyRedeemer(Redeemer):
data: MyTest
def test_plutus_data():
"""Ground truth of this test is generated by test/resources/haskell/PlutusData. See its README for more details."""
key_hash = bytes.fromhex("c2ff616e11299d9094ce0a7eb5b7284b705147a822f4ffbd471f971a")
deadline = 1643235300000
testa = BigTest(MyTest(123, b"1234", IndefiniteList([4, 5, 6]), {1: b"1", 2: b"2"}))
testb = LargestTest()
my_vesting = VestingParam(
beneficiary=key_hash, deadline=deadline, testa=testa, testb=testb
)
assert (
"d87a9f581cc2ff616e11299d9094ce0a7eb5b7284b705147a822f4ffbd471f971a1b0000017e9"
"874d2a0d905019fd8668218829f187b44313233349f040506ffa2014131024132ffffd9050280ff"
== my_vesting.to_cbor()
)
check_two_way_cbor(my_vesting)
def test_plutus_data_json():
key_hash = bytes.fromhex("c2ff616e11299d9094ce0a7eb5b7284b705147a822f4ffbd471f971a")
deadline = 1643235300000
testa = BigTest(MyTest(123, b"1234", IndefiniteList([4, 5, 6]), {1: b"1", 2: b"2"}))
testb = LargestTest()
my_vesting = VestingParam(
beneficiary=key_hash, deadline=deadline, testa=testa, testb=testb
)
encoded_json = my_vesting.to_json(separators=(",", ":"))
assert (
'{"constructor":1,"fields":[{"bytes":"c2ff616e11299d9094ce0a7eb5b7284b705147a822f4ffbd471f971a"},'
'{"int":1643235300000},{"constructor":8,"fields":[{"constructor":130,"fields":[{"int":123},'
'{"bytes":"31323334"},{"list":[{"int":4},{"int":5},{"int":6}]},{"map":[{"v":{"bytes":"31"},'
'"k":{"int":1}},{"v":{"bytes":"32"},"k":{"int":2}}]}]}]},{"constructor":9,"fields":[]}]}'
== encoded_json
)
assert my_vesting == VestingParam.from_json(encoded_json)
def test_plutus_data_to_json_wrong_type():
test = MyTest(123, b"1234", IndefiniteList([4, 5, 6]), {1: b"1", 2: b"2"})
test.a = "123"
with pytest.raises(TypeError):
test.to_json()
def test_plutus_data_from_json_wrong_constructor():
test = (
'{"constructor": 129, "fields": [{"int": 123}, {"bytes": "31323334"}, '
'{"list": [{"int": 4}, {"int": 5}, {"int": 6}]}, {"map": [{"v": {"bytes": "31"}, '
'"k": {"int": 1}}, {"v": {"bytes": "32"}, "k": {"int": 2}}]}]}'
)
with pytest.raises(DeserializeException):
MyTest.from_json(test)
test2 = (
'{"constructor":1,"fields":[{"bytes":"c2ff616e11299d9094ce0a7eb5b7284b705147a822f4ffbd471f971a"},'
'{"int":1643235300000},{"constructor":22,"fields":[{"constructor":130,"fields":[{"int":123},'
'{"bytes":"31323334"},{"list":[{"int":4},{"int":5},{"int":6}]},{"map":[{"v":{"bytes":"31"},'
'"k":{"int":1}},{"v":{"bytes":"32"},"k":{"int":2}}]}]}]},{"constructor":23,"fields":[]}]}'
)
with pytest.raises(DeserializeException):
VestingParam.from_json(test2)
def test_plutus_data_from_json_wrong_data_structure():
test = (
'{"constructor": 130, "fields": [{"int": 123}, {"bytes": "31323334"}, '
'{"wrong_list": [{"int": 4}, {"int": 5}, {"int": 6}]}, {"map": [{"v": {"bytes": "31"}, '
'"k": {"int": 1}}, {"v": {"bytes": "32"}, "k": {"int": 2}}]}]}'
)
with pytest.raises(DeserializeException):
MyTest.from_json(test)
def test_plutus_data_from_json_wrong_data_structure_type():
test = (
'[{"constructor": 130, "fields": [{"int": 123}, {"bytes": "31323334"}, '
'{"list": [{"int": 4}, {"int": 5}, {"int": 6}]}, {"map": [{"v": {"bytes": "31"}, '
'"k": {"int": 1}}, {"v": {"bytes": "32"}, "k": {"int": 2}}]}]}]'
)
with pytest.raises(TypeError):
MyTest.from_json(test)
def test_plutus_data_hash():
assert (
bytes.fromhex(
"923918e403bf43c34b4ef6b48eb2ee04babed17320d8d1b9ff9ad086e86f44ec"
)
== PlutusData().hash().payload
)
def test_redeemer():
data = MyTest(123, b"234", IndefiniteList([4, 5, 6]), {1: b"1", 2: b"2"})
redeemer = MyRedeemer(RedeemerTag.SPEND, data, ExecutionUnits(1000000, 1000000))
assert (
"840000d8668218829f187b433233349f040506ffa2014131024132ff821a000f42401a000f4240"
== redeemer.to_cbor()
)
check_two_way_cbor(redeemer)
def test_redeemer_empty_datum():
data = MyTest(123, b"234", IndefiniteList([]), {1: b"1", 2: b"2"})
redeemer = MyRedeemer(RedeemerTag.SPEND, data, ExecutionUnits(1000000, 1000000))
assert (
"840000d8668218829f187b433233349fffa2014131024132ff821a000f42401a000f4240"
== redeemer.to_cbor()
)
check_two_way_cbor(redeemer)
def test_cost_model():
assert (
"a141005901d59f1a000302590001011a00060bc719026d00011a000249f01903e800011"
"a000249f018201a0025cea81971f70419744d186419744d186419744d186419744d1864"
"19744d186419744d18641864186419744d18641a000249f018201a000249f018201a000"
"249f018201a000249f01903e800011a000249f018201a000249f01903e800081a000242"
"201a00067e2318760001011a000249f01903e800081a000249f01a0001b79818f7011a0"
"00249f0192710011a0002155e19052e011903e81a000249f01903e8011a000249f01820"
"1a000249f018201a000249f0182001011a000249f0011a000249f0041a000194af18f80"
"11a000194af18f8011a0002377c190556011a0002bdea1901f1011a000249f018201a00"
"0249f018201a000249f018201a000249f018201a000249f018201a000249f018201a000"
"242201a00067e23187600010119f04c192bd200011a000249f018201a000242201a0006"
"7e2318760001011a000242201a00067e2318760001011a0025cea81971f704001a00014"
"1bb041a000249f019138800011a000249f018201a000302590001011a000249f018201a"
"000249f018201a000249f018201a000249f018201a000249f018201a000249f018201a0"
"00249f018201a00330da70101ff" == COST_MODELS.to_cbor()
)
def test_plutus_script_hash():
plutus_script = b"test_script"
assert (
"36c198e1a9d05461945c1f1db2ffb927c2dfc26dd01b59ea93b678b2"
== plutus_script_hash(plutus_script).payload.hex()
)
|
#!/usr/bin/env python3
import pytest
from typing import TYPE_CHECKING
from pydantic import ValidationError
from binance.client.response import ResponseException
if TYPE_CHECKING:
from binance.client import Client
def test_get_position_mode(client: 'Client'):
if client._api_key is None:
pytest.skip("Requires API key!")
response = client.trade.get_position_mode(recvWindow=30000)
assert response.status == 200, response
def test_set_position_mode(client: 'Client'):
if client._api_key is None and client._api_secret is None:
pytest.skip("Requires API key and secret!")
func = client.trade.set_position_mode
with pytest.raises(ValidationError):
func()
response = client.trade.get_position_mode(recvWindow=30000)
assert response.status == 200, response
dualSidePosition = response.data['dualSidePosition']
with pytest.raises(ResponseException) as e:
response = func(dualSidePosition=dualSidePosition, recvWindow=30000)
assert e.value.status == 400, response
assert e.value.data['code'] == -4059, response
response = func(dualSidePosition='false' if dualSidePosition else 'true',
recvWindow=30000)
assert response.status == 200, response
response = func(dualSidePosition=dualSidePosition, recvWindow=30000)
assert response.status == 200, response
|
var storage = require('..')
var fs = require('fs')
var expect = require('chai').expect
var rimraf = require('rimraf')
var path = require('path')
describe('Storage', function () {
var userDir = path.join(__dirname, '..', '.tmp')
var route = [userDir, 'foo', 'bar']
var storageDir = path.join.apply(path, route)
var fileNameA = path.join(storageDir, 'a.json')
var fileNameB = path.join(storageDir, 'b.json')
function assertStorageNonExistence () {
try {
if (fs.accessSync) {
fs.accessSync(storageDir)
} else {
fs.statSync(storageDir)
}
} catch (e) {
expect(e.code).to.be.equals('ENOENT')
return
}
throw new Error('storage folder seems to exist?!')
}
function create () {
return storage.apply(null, route)
}
before(function () {
    // For the test to run in a restricted environment that
    // also doesn't interfere, we run in a clean, separate user
    // directory
rimraf.sync(userDir)
storage.userDir = userDir
})
describe('in clean state', function () {
beforeEach(function () {
rimraf.sync(userDir)
})
it('should not automatically create a folder', function () {
      // If the storage creates a folder automatically we might
      // theoretically end up with a lot of empty folders; it
      // is better that the storage folders are only created once
      // data is actually stored
create()
assertStorageNonExistence()
})
it('should not create a folder when getting data', function () {
// It might be tempting to make a system to create a file
// if a property is missing. This would be unwanted behavior
// (for the same reason as one above)
create().get('a')
assertStorageNonExistence()
})
it('should allow to get data', function () {
// By allowing this we can simply assume & test for null
expect(create().get('a')).to.be.equals(null)
})
    it('should store files at predictable locations', function () {
      // In order to be update compatible the path should be the same
      // in this and future versions of the storage api
var storage = create()
storage.save('a', 1)
storage.save('b', 2)
expect(JSON.parse(fs.readFileSync(fileNameA))).to.be.equals(1)
expect(JSON.parse(fs.readFileSync(fileNameB))).to.be.equals(2)
})
it('should store readable files', function () {
// For debugging we want the files to be stored
// in a fashion that is readable to humans
create().save('a', { x: 1 })
expect(fs.readFileSync(fileNameA, 'utf8')).to.be.equals('{\n "x": 1\n}')
})
it('should allow to store data', function () {
var storage = create()
storage.save('a', 1)
expect(storage.get('a')).to.be.equals(1)
})
it('should allow to reset the store', function () {
// Reset as operation could happen at any given
// time, this can break if reset assumes that
// the folder exists
var storage = create()
storage.reset()
expect(storage.get('a')).to.be.equals(null)
})
})
describe('with stored data', function () {
var storage = create()
beforeEach(function () {
rimraf.sync(userDir)
storage.save('a', 1)
})
it('should return the stored data', function () {
expect(storage.get('a')).to.be.equals(1)
})
it('should allow to overwrite the data', function () {
storage.save('a', 2)
expect(storage.get('a')).to.be.equals(2)
})
it('should allow to reset the data', function () {
storage.reset()
expect(storage.get('a')).to.be.equals(null)
})
})
describe('with corrupt storage', function () {
var storage = create()
beforeEach(function () {
rimraf.sync(userDir)
fs.mkdirSync(storageDir, { recursive: true })
fs.writeFileSync(fileNameA, '{')
})
it('should still not break get', function () {
// By breaking the API with a broken file we gain no
// value for the user of workshoppers. A broken file
// should not inflict pain on the user of
// workshopper-adventure
//
// TODO: Debugging broken files could be improved by storing the broken
// file in a backup location
expect(storage.get('a')).to.be.equal(null)
})
it('should allow to overwrite the broken data', function () {
storage.save('a', 1)
expect(storage.get('a')).to.be.equal(1)
})
it('should allow to reset the state', function () {
storage.reset()
expect(storage.get('a')).to.be.equal(null)
})
})
describe('inaccessible storage', function () {
var storage = create()
before(function () {
rimraf.sync(userDir)
fs.mkdirSync(storageDir, { recursive: true })
fs.writeFileSync(fileNameA, '{"x": 1}')
fs.chmodSync(fileNameA, 0)
fs.chmodSync(storageDir, 0)
})
it('should still not break get', function () {
expect(storage.get('a')).to.be.equal(null)
})
it('should just not store if a file isn\'t writable', function () {
storage.save('a', 1)
expect(storage.get('a')).to.be.equal(null)
})
after(function () {
fs.chmodSync(storageDir, 448)
fs.chmodSync(fileNameA, 448)
rimraf.sync(userDir)
})
})
})
describe('Storage [async]', function () {
var userDir = path.join(__dirname, '..', '.tmp')
var route = [userDir, 'foo', 'bar']
var storageDir = path.join.apply(path, route)
var fileNameA = path.join(storageDir, 'a.json')
var fileNameB = path.join(storageDir, 'b.json')
async function assertStorageNonExistence () {
try {
      await fs.promises.access(storageDir)
} catch (e) {
expect(e.code).to.be.equals('ENOENT')
return
}
throw new Error('storage folder seems to exist?!')
}
function create () {
return storage.apply(null, route).promises
}
before(function () {
    // For the test to run in a restricted environment that
    // also doesn't interfere, we run in a clean, separate user
    // directory
rimraf.sync(userDir)
storage.userDir = userDir
})
describe('in clean state', function () {
beforeEach(function () {
rimraf.sync(userDir)
})
it('should not automatically create a folder', function () {
      // If the storage creates a folder automatically we might
      // theoretically end up with a lot of empty folders; it
      // is better that the storage folders are only created once
      // data is actually stored
create()
      return assertStorageNonExistence()
})
it('should not create a folder when getting data', async function () {
// It might be tempting to make a system to create a file
// if a property is missing. This would be unwanted behavior
// (for the same reason as one above)
await create().get('a')
      await assertStorageNonExistence()
})
it('should allow to get data', async function () {
// By allowing this we can simply assume & test for null
expect(await create().get('a')).to.be.equals(null)
})
    it('should store files at predictable locations', async function () {
      // In order to be update compatible the path should be the same
      // in this and future versions of the storage api
var storage = create()
await storage.save('a', 1)
await storage.save('b', 2)
expect(JSON.parse(fs.readFileSync(fileNameA))).to.be.equals(1)
expect(JSON.parse(fs.readFileSync(fileNameB))).to.be.equals(2)
})
it('should store readable files', async function () {
// For debugging we want the files to be stored
// in a fashion that is readable to humans
await create().save('a', { x: 1 })
expect(fs.readFileSync(fileNameA, 'utf8')).to.be.equals('{\n "x": 1\n}')
})
it('should allow to store data', async function () {
var storage = create()
await storage.save('a', 1)
expect(await storage.get('a')).to.be.equals(1)
})
it('should allow to reset the store', async function () {
// Reset as operation could happen at any given
// time, this can break if reset assumes that
// the folder exists
var storage = create()
await storage.reset()
expect(await storage.get('a')).to.be.equals(null)
})
})
describe('with stored data', function () {
var storage = create()
beforeEach(async function () {
rimraf.sync(userDir)
await storage.save('a', 1)
})
it('should return the stored data', async function () {
expect(await storage.get('a')).to.be.equals(1)
})
it('should allow to overwrite the data', async function () {
await storage.save('a', 2)
expect(await storage.get('a')).to.be.equals(2)
})
it('should allow to reset the data', async function () {
await storage.reset()
expect(await storage.get('a')).to.be.equals(null)
})
})
describe('with corrupt storage', function () {
var storage = create()
beforeEach(function () {
rimraf.sync(userDir)
fs.mkdirSync(storageDir, { recursive: true })
fs.writeFileSync(fileNameA, '{')
})
it('should still not break get', async function () {
// By breaking the API with a broken file we gain no
// value for the user of workshoppers. A broken file
// should not inflict pain on the user of
// workshopper-adventure
//
// TODO: Debugging broken files could be improved by storing the broken
// file in a backup location
expect(await storage.get('a')).to.be.equal(null)
})
it('should allow to overwrite the broken data', async function () {
await storage.save('a', 1)
expect(await storage.get('a')).to.be.equal(1)
})
it('should allow to reset the state', async function () {
await storage.reset()
expect(await storage.get('a')).to.be.equal(null)
})
})
describe('inaccessible storage', function () {
var storage = create()
before(function () {
rimraf.sync(userDir)
fs.mkdirSync(storageDir, { recursive: true })
fs.writeFileSync(fileNameA, '{"x": 1}')
fs.chmodSync(fileNameA, 0)
fs.chmodSync(storageDir, 0)
})
it('should still not break get', async function () {
expect(await storage.get('a')).to.be.equal(null)
})
it('should just not store if a file isn\'t writable', async function () {
await storage.save('a', 1)
expect(await storage.get('a')).to.be.equal(null)
})
after(function () {
fs.chmodSync(storageDir, 448)
fs.chmodSync(fileNameA, 448)
rimraf.sync(userDir)
})
})
})
|
describe('Core.Palette', () => {
before(() => {
Test.assertSL();
});
describe('Constructor', () => {
let palette;
before(() => {
palette = new Test.Lib.Core.Palette(Test.SL, $.extend({}, Test.Lib.Core.StampLines.DEFAULT.config.Palettes));
});
after(() => {
palette.destroy();
palette = undefined;
});
it('should initialize', () => {
expect(palette).to.exist;
});
it('should be constructed by Palette', () => {
expect(palette.constructor.name).to.equal('Palette');
});
});
describe('#generateDOM', () => {
let palette;
before(() => {
palette = new Test.Lib.Core.Palette(Test.SL, {});
});
after(() => {
palette.destroy();
palette = undefined;
});
afterEach(() => {
palette.destroyDOM();
});
it('should return a DOM element', () => {
let paletteDOM = palette.generateDOM();
expect(paletteDOM).to.exist;
});
it('should return a DOM element of class .sl-palette', () => {
let paletteDOM = palette.generateDOM();
expect(paletteDOM.is('.sl-palette')).to.be.true;
});
it('should track the palette element in palette.DOM', () => {
let paletteDOM = palette.generateDOM();
expect(palette.DOM.palette).to.equal(paletteDOM);
});
});
describe('#destroyDOM', () => {
let palette;
before(() => {
palette = new Test.Lib.Core.Palette(Test.SL, {});
});
after(() => {
palette.destroy();
palette = undefined;
});
beforeEach(() => {
palette.generateDOM();
});
it('should remove the DOM element from its parent', () => {
let paletteDOM = palette.DOM.palette;
let createdDOM = Test.SL.UI.Dock.assertDOM();
      // give it a parent to check unbinding from
Test.SL.DOM.dock.append(paletteDOM);
palette.destroyDOM();
expect(paletteDOM.parent()).to.have.length(0);
paletteDOM = undefined;
if (createdDOM) {
// remove the Dock DOM if this test created it
Test.SL.UI.Dock.destroyDOM();
}
});
it('should remove the palette element from palette.DOM', () => {
palette.destroyDOM();
expect(palette.DOM.palette).to.not.exist;
});
});
});
|
'''
URL: https://leetcode.com/problems/rearrange-string-k-distance-apart
Time complexity: O(n) (would be O(nlogn) but heap has max size of 26 letters here :) )
Space complexity: O(1) (would be O(n) but we have max 26 unique letters :) )
'''
from heapq import heappush, heappop
from collections import defaultdict
class Solution:
def rearrangeString(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
if k == 0:
return s
heap = []
unique_letters = set(s)
counts = defaultdict(int)
for letter in s:
counts[letter] += 1
for letter, count in counts.items():
heappush(heap, (-count, letter))
res = ""
chars_left = len(s)
while chars_left > 0 and len(heap) > 0:
if k >= chars_left:
if chars_left == len(unique_letters):
while len(heap) > 0:
_, letter = heappop(heap)
res += letter
return res
return ""
selected_items = []
for i in range(k):
if len(heap) == 0:
return ""
item = heappop(heap)
selected_items.append(item)
letter = item[1]
res += letter
chars_left -= 1
for item in selected_items:
if item[0]+1 < 0:
heappush(heap, (item[0]+1, item[1]))
else:
unique_letters.remove(item[1])
return res
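# Example (a quick sketch): Solution().rearrangeString("aabbcc", 3) returns a valid
# rearrangement such as "abcabc" (identical letters are at least 3 apart), while
# Solution().rearrangeString("aaabc", 3) returns "" because no such arrangement exists.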
|
"use strict";
/**
* @class elFinder command "paste"
 * Paste files from clipboard into a directory.
 * If files are pasted into their parent directory, duplicates of the files will be created
*
* @author Dmitry (dio) Levashov
**/
elFinder.prototype.commands.paste = function() {
this.disableOnSearch = true;
this.updateOnSelect = false;
this.handlers = {
changeclipboard : function() { this.update(); }
}
this.shortcuts = [{
pattern : 'ctrl+v shift+insert'
}];
this.getstate = function(dst) {
if (this._disabled) {
return -1;
}
if (dst) {
if ($.isArray(dst)) {
if (dst.length != 1) {
return -1;
}
dst = this.fm.file(dst[0]);
}
} else {
dst = this.fm.cwd();
}
return this.fm.clipboard().length && dst.mime == 'directory' && dst.write ? 0 : -1;
}
this.exec = function(dst) {
var self = this,
fm = self.fm,
dst = dst ? this.files(dst)[0] : fm.cwd(),
files = fm.clipboard(),
cnt = files.length,
cut = cnt ? files[0].cut : false,
error = cut ? 'errMove' : 'errCopy',
fpaste = [],
fcopy = [],
dfrd = $.Deferred()
.fail(function(error) {
error && fm.error(error);
}),
copy = function(files) {
return files.length && fm._commands.duplicate
? fm.exec('duplicate', files)
: $.Deferred().resolve();
},
paste = function(files) {
var dfrd = $.Deferred(),
existed = [],
intersect = function(files, names) {
var ret = [],
i = files.length;
while (i--) {
$.inArray(files[i].name, names) !== -1 && ret.unshift(i);
}
return ret;
},
confirm = function(ndx) {
var i = existed[ndx],
file = files[i],
last = ndx == existed.length-1;
if (!file) {
return;
}
fm.confirm({
title : fm.i18n(cut ? 'moveFiles' : 'copyFiles'),
text : fm.i18n(['errExists', file.name, 'confirmRepl']),
all : !last,
accept : {
label : 'btnYes',
callback : function(all) {
!last && !all
? confirm(++ndx)
: paste(files);
}
},
reject : {
label : 'btnNo',
callback : function(all) {
var i;
if (all) {
i = existed.length;
while (ndx < i--) {
files[existed[i]].remove = true
}
} else {
files[existed[ndx]].remove = true;
}
!last && !all
? confirm(++ndx)
: paste(files);
}
},
cancel : {
label : 'btnCancel',
callback : function() {
dfrd.resolve();
}
}
})
},
valid = function(names) {
existed = intersect(files, names);
existed.length ? confirm(0) : paste(files);
},
paste = function(files) {
var files = $.map(files, function(file) { return !file.remove ? file : null } ),
cnt = files.length,
groups = {},
args = [],
src;
if (!cnt) {
return dfrd.resolve();
}
src = files[0].phash;
files = $.map(files, function(f) { return f.hash});
fm.request({
data : {cmd : 'paste', dst : dst.hash, targets : files, cut : cut ? 1 : 0, src : src},
notify : {type : cut ? 'move' : 'copy', cnt : cnt}
})
.always(function() {
fm.unlockfiles({files : files});
});
}
;
if (self._disabled || !files.length) {
return dfrd.resolve();
}
if (fm.oldAPI) {
paste(files);
} else {
if (!fm.option('copyOverwrite')) {
paste(files);
} else {
dst.hash == fm.cwd().hash
? valid($.map(fm.files(), function(file) { return file.phash == dst.hash ? file.name : null }))
: fm.request({
data : {cmd : 'ls', target : dst.hash},
notify : {type : 'prepare', cnt : 1, hideCnt : true},
preventFail : true
})
.always(function(data) {
valid(data.list || [])
});
}
}
return dfrd;
},
parents, fparents;
if (!cnt || !dst || dst.mime != 'directory') {
return dfrd.reject();
}
if (!dst.write) {
return dfrd.reject([error, files[0].name, 'errPerm']);
}
parents = fm.parents(dst.hash);
$.each(files, function(i, file) {
if (!file.read) {
return !dfrd.reject([error, files[0].name, 'errPerm']);
}
if (cut && file.locked) {
return !dfrd.reject(['errLocked', file.name]);
}
if ($.inArray(file.hash, parents) !== -1) {
return !dfrd.reject(['errCopyInItself', file.name]);
}
fparents = fm.parents(file.hash);
if ($.inArray(dst.hash, fparents) !== -1) {
if ($.map(fparents, function(h) { var d = fm.file(h); return d.phash == dst.hash && d.name == file.name ? d : null }).length) {
return !dfrd.reject(['errReplByChild', file.name]);
}
}
if (file.phash == dst.hash) {
fcopy.push(file.hash);
} else {
fpaste.push({
hash : file.hash,
phash : file.phash,
name : file.name
});
}
});
if (dfrd.isRejected()) {
return dfrd;
}
return $.when(
copy(fcopy),
paste(fpaste)
).always(function() {
cut && fm.clipboard([]);
});
}
}
|
from getpass import getpass
from json import load
class EmailSettings(object):
""" Create an object with all connection settings """
def __init__(self):
with open("config.json") as config:
config_file = load(config)
# Get username safely
try:
self.username = config_file['username']
if self.username == "":
raise ValueError("Empty username")
except (KeyError, ValueError):
self.username = input("Username: ")
# Get password safely
try:
self.password = config_file['password']
if self.password == "":
raise ValueError("Empty password")
except (KeyError, ValueError):
self.password = getpass("Password: ")
# Get SMTP address safely
try:
self.smtp_address = config_file['smtp_address']
if self.smtp_address == "":
raise ValueError("Defaulting SMTP Address")
except (KeyError, ValueError):
self.smtp_address = self.username
# Get server safely
try:
self.server = config_file['server']
if self.server == "":
raise ValueError("Defaulting server to webmail.sherweb2010.com")
except (KeyError, ValueError):
self.server = 'webmail.sherweb2010.com'
# Get Primary to email address safely
try:
self.to_address = config_file['primary_to_address']
except (KeyError, ValueError):
self.to_address = None
        # Get CC recipients safely
try:
self.cc_recipients = config_file['cc_recipients']
except (KeyError, ValueError):
self.cc_recipients = []
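# Minimal usage sketch (assumes a config.json sits next to this module; any missing
# username/password is prompted for interactively):
#   settings = EmailSettings()
#   print(settings.server, settings.smtp_address, settings.to_address)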
|
/*---------------------------------------------------------------------------
FT1000 driver for Flarion Flash OFDM NIC Device
Copyright (C) 2002 Flarion Technologies, All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option) any
later version. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details. You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.
---------------------------------------------------------------------------
Description: Common structures and defines
---------------------------------------------------------------------------*/
#ifndef _FT1000H_
#define _FT1000H_
#include "../ft1000.h"
#define FT1000_DRV_VER 0x01010300
#define FT1000_DPRAM_BASE 0x0000 /* Dual Port RAM starting offset */
/* Maximum number of occurrence of pseudo header errors before resetting PC Card. */
#define MAX_PH_ERR 300
#define SUCCESS 0x00
#define FAILURE 0x01
struct ft1000_pcmcia {
int PktIntfErr;
u16 packetseqnum;
void *link;
};
struct pcmcia_device;
struct net_device;
extern struct net_device *init_ft1000_card(struct pcmcia_device *link,
void *ft1000_reset);
extern void stop_ft1000_card(struct net_device *dev);
extern int card_download(struct net_device *dev, const u8 *pFileStart,
size_t FileLength);
extern void ft1000InitProc(struct net_device *dev);
extern void ft1000CleanupProc(struct net_device *dev);
extern u16 ft1000_read_dpram(struct net_device *dev, int offset);
extern void card_bootload(struct net_device *dev);
extern u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
extern u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value);
/* Read the value of a given ASIC register. */
static inline u16 ft1000_read_reg(struct net_device *dev, u16 offset)
{
return inw(dev->base_addr + offset);
}
/* Set the value of a given ASIC register. */
static inline void ft1000_write_reg(struct net_device *dev, u16 offset, u16 value)
{
outw(value, dev->base_addr + offset);
}
#endif
|
def new_client():
file_client = open('file_client.txt', 'a')
file_client.write(input('DNI:\n'))
file_client.write(';')
file_client.write(input('Name:\n'))
file_client.write(';')
file_client.write(input('Surname:\n'))
file_client.write(';')
file_client.write(input('Address:\n'))
file_client.write(';')
file_client.write(input('Postal Code:\n'))
file_client.write(';')
file_client.write(input('City/Town:\n'))
file_client.write(';')
file_client.write(input('Telephone Number:\n'))
file_client.write(';')
rate = input('Rate (Write "Bronze", "Silver" or "Gold:)\n')
while rate != 'Bronze' and rate != 'Silver' and rate != 'Gold':
print('Please write a valid rate.\n')
rate = input('Rate (Write "Bronze", "Silver" or "Gold):\n')
file_client.write(rate)
file_client.write(';')
file_client.write(input('Bank account number\n'))
file_client.write('\n')
print('New client added')
file_client.close()
def delete_client():
file_client = open('file_client.txt', 'r')
file_client_slave = open('slave.txt', 'a+')
search = input('Enter DNI to search\n')
for line in file_client.readlines():
if search not in line:
file_client_slave.write(line)
file_client.close()
file_client_slave.close()
file_client_slave = open('slave.txt', 'r')
file_client = open('file_client.txt', 'w')
new_file = file_client_slave.read()
file_client.write(new_file)
file_client.close()
file_client_slave.close()
file_client_slave = open('slave.txt', 'w')
file_client_slave.truncate()
file_client_slave.close()
print('Client successfully deleted')
def search_client():
file_client = open('file_client.txt', 'r')
count_client = 0
search = input('Enter DNI\n')
found = False
line = file_client.readline()
while not found and line != '':
count_client += 1
if search in line:
print(f'Found, client number {count_client}')
            print('Client data:\n', line)
found = True
line = file_client.readline()
if not found:
print('Client not found')
file_client.close()
def debt_client():
debt_client = open('debt_client.txt', 'a')
debt_client.write('DNI:')
debt_client.write(input('DNI:\n'))
debt_client.write(';')
debt_client.write('Amount of debt:')
debt_client.write(input('Debt:\n'))
debt_client.write(';')
# new_client()
# search_client()
# delete_client()
# debt_client()
|
/*
* Generated by asn1c-0.9.24 (http://lionet.info/asn1c)
* From ASN.1 module "S1AP-IEs"
* found in "S1AP-IEs.asn"
*/
#ifndef _S1ap_ForbiddenInterRATs_H_
#define _S1ap_ForbiddenInterRATs_H_
#include <asn_application.h>
/* Including external dependencies */
#include <NativeEnumerated.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Dependencies */
typedef enum S1ap_ForbiddenInterRATs {
S1ap_ForbiddenInterRATs_all = 0,
S1ap_ForbiddenInterRATs_geran = 1,
S1ap_ForbiddenInterRATs_utran = 2,
S1ap_ForbiddenInterRATs_cdma2000 = 3,
/*
* Enumeration is extensible
*/
S1ap_ForbiddenInterRATs_geranandutran = 4,
S1ap_ForbiddenInterRATs_cdma2000andutran = 5
} e_S1ap_ForbiddenInterRATs;
/* S1ap-ForbiddenInterRATs */
typedef long S1ap_ForbiddenInterRATs_t;
/* Implementation */
extern asn_TYPE_descriptor_t asn_DEF_S1ap_ForbiddenInterRATs;
asn_struct_free_f S1ap_ForbiddenInterRATs_free;
asn_struct_print_f S1ap_ForbiddenInterRATs_print;
asn_constr_check_f S1ap_ForbiddenInterRATs_constraint;
ber_type_decoder_f S1ap_ForbiddenInterRATs_decode_ber;
der_type_encoder_f S1ap_ForbiddenInterRATs_encode_der;
xer_type_decoder_f S1ap_ForbiddenInterRATs_decode_xer;
xer_type_encoder_f S1ap_ForbiddenInterRATs_encode_xer;
#ifdef __cplusplus
}
#endif
#endif /* _S1ap_ForbiddenInterRATs_H_ */
#include <asn_internal.h>
|
#ifndef BIT32_H
#define BIT32_H
#include <stdint.h>
/*
LENG - length of an integer
NBETA - number of parallel betas
NBETA_MAX - align NBETA into "LENG" boundaries
NBETA_PER_WORD - number of betas combined into a word
NWORD - number of words for all betas
must guarantee NBETA <= LENG,
this restriction can be overcome by distributing temperature replicas across multiple words
6 neighbors
left (xa,y ,z ) J0
right (xb,y ,z ) J1
up (x ,ya,z ) J2
down (x ,yb,z ) J3
front (x ,y ,za) J4
back (x ,y ,zb) J5
*/
// ACMSC-1 is buggy
#define ACMSC_FORMAT 0
typedef int32_t MSC_DATATYPE;
#define LENG 32
#define NBETA_MAX LENG
#define MASK_A 0xffffffff
#define NBETA 24
#define NBETA_PER_WORD 24
#define NWORD 1
#define NBIT_PER_SEG 4
#if ACMSC_FORMAT == 0
#define NSEG_PER_WORD 6
#define NBETA_PER_SEG 4
#define MASK_J 0xfc000000
#define MASK_J0 0x04000000
#define MASK_J1 0x08000000
#define MASK_J2 0x10000000
#define MASK_J3 0x20000000
#define MASK_J4 0x40000000
#define MASK_J5 0x80000000
#define SHIFT_J0 26
#define SHIFT_J1 27
#define SHIFT_J2 28
#define SHIFT_J3 29
#define SHIFT_J4 30
#define SHIFT_J5 31
#define MASK_S 0x00111111
#define MASK_S0 0x00ffffff
#define MASK_E 0xf
#define SHIFT_MAX 23
#endif
/*
MASK_J 1111 11-- 0000 0000 0000 0000 0000 0000
MASK_J0 0000 01-- 0000 0000 0000 0000 0000 0000
MASK_J1 0000 10-- 0000 0000 0000 0000 0000 0000
MASK_J2 0001 00-- 0000 0000 0000 0000 0000 0000
MASK_J3 0010 00-- 0000 0000 0000 0000 0000 0000
MASK_J4 0100 00-- 0000 0000 0000 0000 0000 0000
MASK_J5 1000 00-- 0000 0000 0000 0000 0000 0000
MASK_S ---- ---- 0001 0001 0001 0001 0001 0001
MASK_S0 ---- ---- 1111 1111 1111 1111 1111 1111
iter0 * * * * * *
iter1 * * * * * *
iter2 * * * * * *
iter3 * * * * * *
*/
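/*
 * Illustrative sketch (not part of the original header): assuming the
 * format-0 layout documented above, a coupling bit and the spin segments
 * could be unpacked from a packed word roughly as below. The helper names
 * are hypothetical and exist only to show how the MASK_* / SHIFT_* macros
 * are meant to be combined.
 */
#if ACMSC_FORMAT == 0
static inline int msc_coupling_j0(MSC_DATATYPE w)
{
    /* isolate the J0 (left-neighbour) coupling bit and shift it down to 0/1 */
    return (int)((w & MASK_J0) >> SHIFT_J0);
}
static inline MSC_DATATYPE msc_spin_bits(MSC_DATATYPE w)
{
    /* keep only the spin segments, dropping the coupling bits */
    return w & MASK_S0;
}
#endif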
#if ACMSC_FORMAT == 1
#define NSEG_PER_WORD 8
#define NBETA_PER_SEG 3
#define MASK_J 0x00888888
#define MASK_J0 0x00000008
#define MASK_J1 0x00000080
#define MASK_J2 0x00000800
#define MASK_J3 0x00008000
#define MASK_J4 0x00080000
#define MASK_J5 0x00800000
#define SHIFT_J0 3
#define SHIFT_J1 7
#define SHIFT_J2 11
#define SHIFT_J3 15
#define SHIFT_J4 19
#define SHIFT_J5 23
#define MASK_S 0x11111111
#define MASK_S0 0x77777777
#define MASK_E 0xf
#define SHIFT_MAX 31
#endif
/*
MASK_J 0000 0000 1000 1000 1000 1000 1000 1000
MASK_J0 0000 0000 0000 0000 0000 0000 0000 1000
MASK_J1 0000 0000 0000 0000 0000 0000 1000 0000
MASK_J2 0000 0000 0000 0000 0000 1000 0000 0000
MASK_J3 0000 0000 0000 0000 1000 0000 0000 0000
MASK_J4 0000 0000 0000 1000 0000 0000 0000 0000
MASK_J5 0000 0000 1000 0000 0000 0000 0000 0000
MASK_S 0001 0001 0001 0001 0001 0001 0001 0001
MASK_S0 0111 0111 0111 0111 0111 0111 0111 0111
iter0 * * * * * * * *
iter1 * * * * * * * *
iter2 * * * * * * * *
*/
*/
#endif /* BIT32_H */
|
import React from 'react';
import {View, TouchableOpacity} from 'react-native';
import styles from './styles';
import PropTypes from 'prop-types';
import {Image} from '@components';
import {Images, useTheme} from '@config';
export default function Card(props) {
const {colors} = useTheme();
const {style, children, styleContent, image, onPress} = props;
return (
<TouchableOpacity
style={[styles.card, {borderColor: colors.border}, style]}
onPress={onPress}
activeOpacity={0.9}>
<Image source={image} style={styles.imageBanner} />
<View style={[styles.content, styleContent]}>{children}</View>
</TouchableOpacity>
);
}
Card.propTypes = {
image: PropTypes.node.isRequired,
style: PropTypes.oneOfType([PropTypes.object, PropTypes.array]),
styleContent: PropTypes.object,
children: PropTypes.oneOfType([
PropTypes.element,
PropTypes.arrayOf(PropTypes.element),
]),
onPress: PropTypes.func,
};
Card.defaultProps = {
image: Images.profile2,
style: {},
styleContent: {},
onPress: () => {},
};
|
import axios from 'axios';
const API_KEY = '40377088d000fa327c7b6487e6308e8e';
const ROOT_URL = `http://api.openweathermap.org/data/2.5/forecast?&appid=${API_KEY}`;
export const FETCH_WEATHER = 'FETCH_WEATHER';
export function fetchWeather(city) {
const url = `${ROOT_URL}&q=${city},gb`;
const request = axios.get(url);
return {
type: FETCH_WEATHER,
payload: request // request is a Promise; the redux-promise middleware (https://www.npmjs.com/package/redux-promise) only passes the action on once the promise is resolved.
};
}
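// Hedged sketch (not part of the original file): because redux-promise resolves
// the payload before it reaches the reducers, a hypothetical weather reducer can
// read the axios response directly from action.payload, roughly like this:
//
//   export default function weatherReducer(state = [], action) {
//     switch (action.type) {
//       case FETCH_WEATHER:
//         // action.payload is the resolved axios response; its .data holds the forecast
//         return [action.payload.data, ...state];
//       default:
//         return state;
//     }
//   }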
|
# Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. autosummary::
:toctree: _autosummary
Domain
DataPath
NamedDataPath
Image
DICOMStudy
DICOMSeries
DICOMSOPInstance
SelectedSeries
StudySelectedSeries
"""
from .datapath import DataPath, NamedDataPath
from .dicom_series import DICOMSeries
from .dicom_series_selection import SelectedSeries, StudySelectedSeries
from .dicom_sop_instance import DICOMSOPInstance
from .dicom_study import DICOMStudy
from .domain import Domain
from .image import Image
|
import matplotlib.pyplot as plt
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
fig = plt.figure()
ax = fig.gca()
import numpy as np
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(0, 0), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(1, 1), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(0, 1), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(1, 0), frame=True)
ax.set_xticks([0, 1])
ax.set_yticks([0, 1])
ax.set_xticklabels(["Sunny", "Cloudy"])
ax.set_yticklabels(["Dry", "Rainy"])
ax.set_xlim((-0.5, 1.5))
ax.set_ylim((-0.5, 1.5))
# Set aspect ratio to be equal so that pie is drawn as a circle.
ax.set_aspect('equal')
plt.show()
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from PooledEffect import PooledEffect
from EffectController import EffectController
import os
class VoodooAuraHeal(PooledEffect, EffectController):
cardScale = 64.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleSpark')
if not VoodooAuraHeal.particleDummy:
VoodooAuraHeal.particleDummy = render.attachNewNode(ModelNode('VoodooAuraHealParticleDummy'))
VoodooAuraHeal.particleDummy.setDepthWrite(0)
VoodooAuraHeal.particleDummy.setLightOff()
VoodooAuraHeal.particleDummy.setFogOff()
VoodooAuraHeal.particleDummy.setColorScaleOff()
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('HealSparks')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('DiscEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.01)
self.p0.setLitterSize(4)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.0)
self.p0.factory.setLifespanSpread(0.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1, 1, 1, 1))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.012 * self.cardScale)
self.p0.renderer.setFinalXScale(0.005 * self.cardScale)
self.p0.renderer.setInitialYScale(0.01 * self.cardScale)
self.p0.renderer.setFinalYScale(0.03 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.0)
self.p0.emitter.setAmplitudeSpread(0.1)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 2.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.p0.emitter.setRadius(10.0)
def createTrack(self):
self.startEffect = Sequence(Func(self.p0.clearToInitial), Func(self.p0.setBirthRate, 0.1), Func(self.f.start, self, self.particleDummy))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100.0), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.setColor(color)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
|
/**
* DO NOT EDIT THIS FILE
*
* It is not used to build anything.
*
* It's just a record of the old flow types.
*
* Use it as a guide when converting
* - static/src/javascripts/projects/common/modules/ui/bannerPicker.js
* to .ts, then delete it.
*/
// @flow
import ophan from 'ophan/ng';
export type Banner = {
id: string,
canShow: () => Promise<boolean>,
show: () => Promise<boolean>,
};
const init = (banners: Array<Banner>): Promise<void> => {
const results: Array<'pending' | boolean> = new Array(banners.length).fill(
'pending',
0
);
const getSuccessfulBannerIndex = (): number => {
const firstCheckPassedIndex = results.findIndex(item => item === true);
// if no check has passed firstCheckPassedIndex equals -1
// if first check has passed firstCheckPassedIndex equals 0
if (firstCheckPassedIndex <= 0) {
return firstCheckPassedIndex;
}
// if firstCheckPassedIndex greater than 0 then get higher priority checks from array that are pending
const pendingHigherPriorityCheckIndex = results
.slice(0, firstCheckPassedIndex)
.findIndex(item => item === 'pending');
// if there are no higher priority checks pending return firstCheckPassedIndex
if (pendingHigherPriorityCheckIndex === -1) {
return firstCheckPassedIndex;
}
return -1;
};
return new Promise(resolve => {
const TIME_LIMIT = 2000;
let bannerPicked = false;
banners.forEach((banner, index) => {
const pushToResults = (result: boolean): void => {
results[index] = result;
const successfulBannerIndex = getSuccessfulBannerIndex();
if (!bannerPicked && successfulBannerIndex !== -1) {
const successfulBanner = banners[successfulBannerIndex];
successfulBanner.show();
bannerPicked = true;
const trackingObj = {
component: 'banner-picker',
value: successfulBanner.id,
};
ophan.record(trackingObj);
}
if (!results.includes('pending')) {
resolve();
}
};
let hasTimedOut = false;
// checks that take longer than TIME_LIMIT are forced to fail
const timeout = setTimeout(() => {
hasTimedOut = true;
pushToResults(false);
const trackingObj = {
component: 'banner-picker-timeout',
value: banner.id,
};
ophan.record(trackingObj);
}, TIME_LIMIT);
banner.canShow().then(result => {
if (!hasTimedOut) {
clearTimeout(timeout);
pushToResults(result);
}
});
});
});
};
export { init };
|
# -*- coding: utf-8 -*-
# Copyright 2012 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
def getClass():
return NHAMagicalWorldsUsAdapter
# Class name has to be unique. Our convention is to camel case the
# sitename with Adapter at the end. www is skipped.
class NHAMagicalWorldsUsAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','nha')
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = " %d/%m/%y"
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Include www here if the site uses it.
return 'nha.magical-worlds.us'
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
url = self.url
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
m = re.search(r"'viewstory.php\?sid=\d+((?:&amp;ageconsent=ok)?&amp;warning=\d+)'",data)
if m != None:
if self.is_adult or self.getConfig("is_adult"):
# We tried the default and still got a warning, so
# let's pull the warning number from the 'continue'
# link and reload data.
addurl = m.group(1)
# correct stupid &amp; error in url.
addurl = addurl.replace("&amp;","&")
url = self.url+'&index=1'+addurl
logger.debug("URL 2nd try: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
else:
raise exceptions.AdultCheckRequired(self.url)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = bs.BeautifulSoup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
self.story.setMetadata('author',a.string)
asoup = bs.BeautifulSoup(self._fetchUrl(self.story.getMetadata('authorUrl')))
try:
# in case link points somewhere other than the first chapter
a = soup.findAll('option')[1]['value']
self.story.setMetadata('storyId',a.split('=',)[1])
url = 'http://'+self.host+'/'+a
soup = bs.BeautifulSoup(self._fetchUrl(url))
except:
pass
for info in asoup.findAll('table', {'width' : '100%', 'bordercolor' : re.compile(r'#')}):
a = info.find('a')
if 'viewstory.php?sid='+self.story.getMetadata('storyId') == a['href'] or \
('viewstory.php?sid='+self.story.getMetadata('storyId')+'&') in a['href']:
self.story.setMetadata('title',stripHTML(a))
break
# Find the chapters:
chapters=soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+'&chapter=\d+$'))
if len(chapters) == 0:
self.chapterUrls.append((self.story.getMetadata('title'),url))
else:
for chapter in chapters:
# just in case there are tags, like <i> in chapter titles.
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formatting, so it's a little ugly.
# utility method
def defaultGetattr(d):
try:
return d.name
except:
return ""
cats = info.findAll('a',href=re.compile('categories.php'))
for cat in cats:
self.story.addToList('category',cat.string)
a = info.find('a', href=re.compile(r'viewuser.php'))
val = a.nextSibling
svalue = ""
while not defaultGetattr(val) == 'br':
val = val.nextSibling
val = val.nextSibling
while not defaultGetattr(val) == 'br':
svalue += unicode(val)
val = val.nextSibling
self.setDescription(url,svalue)
#does not provide convenient way to get word count
labels = info.findAll('i')
for labelspan in labels:
value = labelspan.nextSibling
label = stripHTML(labelspan)
if 'Rating' in label:
self.story.setMetadata('rating', value.split(' -')[0])
if 'Genres' in label:
genres = value.string.split(', ')
for genre in genres:
if 'None' not in genre:
self.story.addToList('genre',genre.split(' -')[0])
if 'Characters' in label:
chars = value.string.split(', ')
for char in chars:
if 'None' not in char:
self.story.addToList('characters',char.split(' -')[0])
if 'Warnings' in label:
warnings = value.string.split(', ')
for warning in warnings:
if 'None' not in warning:
self.story.addToList('warnings',warning.split(' -')[0])
if 'Completed' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if 'Published' in label:
self.story.setMetadata('datePublished', makeDate(value.split(' -')[0], self.dateformat))
if 'Updated' in label:
# there's a stray [ at the end.
#value = value[0:-1]
self.story.setMetadata('dateUpdated', makeDate(value.split(' -')[0], self.dateformat))
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
data = self._fetchUrl(url)
soup = bs.BeautifulSoup(data, selfClosingTags=('br','hr','span','center')) # some chapters seem to be hanging up on those tags, so it is safer to close them
story = soup.find('div', {"id" : "story"})
if None == story:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,story)
|
import React, { Component } from 'react';
import { connect } from 'react-redux';
import { Link } from 'react-router';
class Header extends Component {
renderLinks() {
if(this.props.authenticated) {
return (
<li className="nav-item">
<Link className="nav-link" to="/signout">Sign Out</Link>
</li>
)
} else {
return [
<li className="nav-item" key={1}>
<Link className="nav-link" to="/signin">Sign in</Link>
</li>,
<li className="nav-item" key={2}>
<Link className="nav-link" to="/signup">Sign up</Link>
</li>
];
}
}
render() {
return (
<nav className="navbar navbar-light">
<Link to="/" className="navbar-brand">Redux Auth</Link>
<ul className="nav navbar-nav">
{this.renderLinks()}
</ul>
</nav>
);
}
}
function mapStateToProps(state) {
return {
authenticated: state.auth.authenticated
}
}
export default connect(mapStateToProps)(Header);
|
import pandas as pd
import re
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
# Get basic players information for all players
base_url = "https://sofifa.com/players?offset="
columns = ['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Flag', 'Overall', 'Potential', 'Club', 'Club Logo', 'Value', 'Wage', 'Special']
data = pd.DataFrame(columns = columns)
# basic info
for offset in tqdm(range(300)):
url = base_url + str(offset * 61)
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
table_body = soup.find('tbody')
for row in table_body.findAll('tr'):
td = row.findAll('td')
picture = td[0].find('img').get('data-src')
pid = td[0].find('img').get('id')
nationality = td[1].find('a').get('title')
flag_img = td[1].find('img').get('data-src')
name = td[1].findAll('a')[1].text
age = td[2].text.strip()
overall = td[3].text.strip()
potential = td[4].text.strip()
club = td[5].find('a').text
club_logo = td[5].find('img').get('data-src')
value = td[6].text.strip()
wage = td[7].text.strip()
special = td[8].text.strip()
player_data = pd.DataFrame([[pid, name, age, picture, nationality, flag_img, overall, potential, club, club_logo, value, wage, special]])
player_data.columns = columns
data = data.append(player_data, ignore_index=True)
data = data.drop_duplicates()
data.to_csv('basicdata.csv', encoding='utf-8-sig')
# store the basic data
data = pd.read_csv('basicdata.csv')
# Get detailed player information from player page
detailed_columns = ['Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Crossing', 'Finishing', 'HeadingAccuracy', 'ShortPassing', 'Volleys', 'Dribbling', 'Curve', 'FKAccuracy', 'LongPassing', 'BallControl', 'Acceleration', 'SprintSpeed', 'Agility', 'Reactions', 'Balance', 'ShotPower', 'Jumping', 'Stamina', 'Strength', 'LongShots', 'Aggression', 'Interceptions', 'Positioning', 'Vision', 'Penalties', 'Composure', 'Marking', 'StandingTackle', 'SlidingTackle', 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes', 'ID']
detailed_data = pd.DataFrame(index = range(0, data.count()[0]), columns = detailed_columns)
detailed_data.ID = data.ID.values
player_data_url = 'https://sofifa.com/player/'
for i in tqdm(range(data.ID.shape[0])):
id = data.ID[i]
url = player_data_url + str(id)
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
skill_map = {}
columns = soup.find('div', {'class': 'teams'}).find('div', {'class': 'columns'}).findAll('div', {'class': 'column col-6'})
columns.append(soup.find('div', {'class': 'teams'}).find('div', {'class': 'columns'}).findAll('div', {'class': 'bp3-callout'})[0])
for column in columns:
skills = column.findAll('li')
for skill in skills:
if(skill.find('label') != None):
label = skill.find('label').text
value = skill.text.replace(label, '').strip()
skill_map[label] = value
meta_data = soup.find('div', {'class': 'meta'}).text.split(' ')
length = len(meta_data)
weight = meta_data[length - 1]
height = meta_data[length - 2].split('\'')[0] + '\'' + meta_data[length - 2].split('\'')[1].split('\"')[0]
skill_map["Height"] = height
skill_map['Weight'] = weight
# if('Position' in skill_map.keys()):
# if skill_map['Position'] in ('', 'RES', 'SUB'):
# skill_map['Position'] = soup.find('article').find('div', {'class': 'meta'}).find('span').text
# if(skill_map['Position'] != 'GK'):
# card_rows = soup.find('aside').find('div', {'class': 'card mb-2'}).find('div', {'class': 'card-body'}).findAll('div', {'class': 'columns'})
# for c_row in card_rows:
# attributes = c_row.findAll('div', {'class': re.compile('column col-sm-2 text-center')})
# for attribute in attributes:
# if(attribute.find('div')):
# name = ''.join(re.findall('[a-zA-Z]', attribute.text))
# value = attribute.text.replace(name, '').strip()
# skill_map[str(name)] = value
# sections = soup.find('article').findAll('div', {'class': 'mb-2'})[1:3]
# first = sections[0].findAll('div', {'class': 'column col-4'})
# second = sections[1].findAll('div', {'class': 'column col-4'})[:-1]
# sections = first + second
sections = soup.find('article').findAll('div', {'class': 'column col-4'})[:-1]
for section in sections:
items = section.find('ul').findAll('li')
for item in items:
value = int(re.findall(r'\d+', item.text)[0])
name = ''.join(re.findall('[a-zA-Z]*', item.text))
skill_map[str(name)] = value
for key, value in skill_map.items():
detailed_data.loc[detailed_data.ID == id, key] = value
full_data = pd.merge(data, detailed_data, how = 'inner', on = 'ID')
full_data.to_csv('fifa19data.csv', encoding='utf-8-sig')
|
# test
#input = 3 # answer : 638
#real
input = 344 # answer : 996
class Node:
def __init__(self, value, ptr):
self.value = value
self.ptr = ptr
current = Node(0, None)
current.ptr = current
zeroptr = current
for i in range(1, 2018):
# move forward "input" steps
for j in range(input):
current = current.ptr
# insert the next node
newNode = Node(i, current.ptr)
current.ptr = newNode
if current.value == 0: print zeroptr.ptr.value
current = newNode
# what is the current node pointing at
print current.ptr.value
def printLoop(current, length):
output = ''
for i in range(length):
output += str(current.value)
current = current.ptr
print output
#printLoop(current, 10)
'''
1
7
14
15
89
246
2344
2726
5935
10545
41867
45544
187773
474004
799704
'''
|
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#ifndef INCLUDE_git_submodule_h__
#define INCLUDE_git_submodule_h__
#include BOSS_LIBGIT2_U_common_h //original-code:"common.h"
#include "types.h"
#include "oid.h"
#include BOSS_LIBGIT2_U_remote_h //original-code:"remote.h"
#include "checkout.h"
/**
* @file git2/submodule.h
* @brief Git submodule management utilities
*
* Submodule support in libgit2 builds a list of known submodules and keeps
* it in the repository. The list is built from the .gitmodules file, the
* .git/config file, the index, and the HEAD tree. Items in the working
* directory that look like submodules (i.e. a git repo) but are not
* mentioned in those places won't be tracked.
*
* @defgroup git_submodule Git submodule management routines
* @ingroup Git
* @{
*/
GIT_BEGIN_DECL
/**
* Return codes for submodule status.
*
* A combination of these flags will be returned to describe the status of a
* submodule. Depending on the "ignore" property of the submodule, some of
* the flags may never be returned because they indicate changes that are
* supposed to be ignored.
*
* Submodule info is contained in 4 places: the HEAD tree, the index, config
* files (both .git/config and .gitmodules), and the working directory. Any
* or all of those places might be missing information about the submodule
* depending on what state the repo is in. We consider all four places to
* build the combination of status flags.
*
* There are four values that are not really status, but give basic info
* about what sources of submodule data are available. These will be
* returned even if ignore is set to "ALL".
*
* * IN_HEAD - superproject head contains submodule
* * IN_INDEX - superproject index contains submodule
* * IN_CONFIG - superproject gitmodules has submodule
* * IN_WD - superproject workdir has submodule
*
* The following values will be returned so long as ignore is not "ALL".
*
* * INDEX_ADDED - in index, not in head
* * INDEX_DELETED - in head, not in index
* * INDEX_MODIFIED - index and head don't match
* * WD_UNINITIALIZED - workdir contains empty directory
* * WD_ADDED - in workdir, not index
* * WD_DELETED - in index, not workdir
* * WD_MODIFIED - index and workdir head don't match
*
* The following can only be returned if ignore is "NONE" or "UNTRACKED".
*
* * WD_INDEX_MODIFIED - submodule workdir index is dirty
* * WD_WD_MODIFIED - submodule workdir has modified files
*
* Lastly, the following will only be returned for ignore "NONE".
*
* * WD_UNTRACKED - wd contains untracked files
*/
typedef enum {
GIT_SUBMODULE_STATUS_IN_HEAD = (1u << 0),
GIT_SUBMODULE_STATUS_IN_INDEX = (1u << 1),
GIT_SUBMODULE_STATUS_IN_CONFIG = (1u << 2),
GIT_SUBMODULE_STATUS_IN_WD = (1u << 3),
GIT_SUBMODULE_STATUS_INDEX_ADDED = (1u << 4),
GIT_SUBMODULE_STATUS_INDEX_DELETED = (1u << 5),
GIT_SUBMODULE_STATUS_INDEX_MODIFIED = (1u << 6),
GIT_SUBMODULE_STATUS_WD_UNINITIALIZED = (1u << 7),
GIT_SUBMODULE_STATUS_WD_ADDED = (1u << 8),
GIT_SUBMODULE_STATUS_WD_DELETED = (1u << 9),
GIT_SUBMODULE_STATUS_WD_MODIFIED = (1u << 10),
GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED = (1u << 11),
GIT_SUBMODULE_STATUS_WD_WD_MODIFIED = (1u << 12),
GIT_SUBMODULE_STATUS_WD_UNTRACKED = (1u << 13),
} git_submodule_status_t;
#define GIT_SUBMODULE_STATUS__IN_FLAGS 0x000Fu
#define GIT_SUBMODULE_STATUS__INDEX_FLAGS 0x0070u
#define GIT_SUBMODULE_STATUS__WD_FLAGS 0x3F80u
#define GIT_SUBMODULE_STATUS_IS_UNMODIFIED(S) \
(((S) & ~GIT_SUBMODULE_STATUS__IN_FLAGS) == 0)
#define GIT_SUBMODULE_STATUS_IS_INDEX_UNMODIFIED(S) \
(((S) & GIT_SUBMODULE_STATUS__INDEX_FLAGS) == 0)
#define GIT_SUBMODULE_STATUS_IS_WD_UNMODIFIED(S) \
(((S) & (GIT_SUBMODULE_STATUS__WD_FLAGS & \
~GIT_SUBMODULE_STATUS_WD_UNINITIALIZED)) == 0)
#define GIT_SUBMODULE_STATUS_IS_WD_DIRTY(S) \
(((S) & (GIT_SUBMODULE_STATUS_WD_INDEX_MODIFIED | \
GIT_SUBMODULE_STATUS_WD_WD_MODIFIED | \
GIT_SUBMODULE_STATUS_WD_UNTRACKED)) != 0)
/**
* Submodule update options structure
*
* Use the GIT_SUBMODULE_UPDATE_OPTIONS_INIT to get the default settings,
* like this:
*
* git_submodule_update_options opts = GIT_SUBMODULE_UPDATE_OPTIONS_INIT;
*/
typedef struct git_submodule_update_options {
unsigned int version;
/**
* These options are passed to the checkout step. To disable
* checkout, set the `checkout_strategy` to
* `GIT_CHECKOUT_NONE`. Generally you will want to use
* GIT_CHECKOUT_SAFE to update files in the working
* directory. Use the `clone_checkout_strategy` field
* to set the checkout strategy that will be used in
* the case where update needs to clone the repository.
*/
git_checkout_options checkout_opts;
/**
* Callbacks to use for reporting fetch progress, and for acquiring
* credentials in the event they are needed.
*/
git_remote_callbacks remote_callbacks;
/**
* The checkout strategy to use when the sub repository needs to
* be cloned. Use GIT_CHECKOUT_SAFE_CREATE to create all files
* in the working directory for the newly cloned repository.
*/
unsigned int clone_checkout_strategy;
/**
* The identity used when updating the reflog. NULL means to
* use the default signature using the config.
*/
git_signature *signature;
} git_submodule_update_options;
#define GIT_SUBMODULE_UPDATE_OPTIONS_VERSION 1
#define GIT_SUBMODULE_UPDATE_OPTIONS_INIT \
{ GIT_CHECKOUT_OPTIONS_VERSION, \
{ GIT_CHECKOUT_OPTIONS_VERSION, GIT_CHECKOUT_SAFE}, \
GIT_REMOTE_CALLBACKS_INIT, GIT_CHECKOUT_SAFE_CREATE }
/**
* Initializes a `git_submodule_update_options` with default values.
* Equivalent to creating an instance with GIT_SUBMODULE_UPDATE_OPTIONS_INIT.
*
* @param opts The `git_submodule_update_options` instance to initialize.
* @param version Version of struct; pass `GIT_SUBMODULE_UPDATE_OPTIONS_VERSION`
* @return Zero on success; -1 on failure.
*/
GIT_EXTERN(int) git_submodule_update_init_options(
git_submodule_update_options *opts, unsigned int version);
/**
* Update a submodule. This will clone a missing submodule and
* checkout the subrepository to the commit specified in the index of
* containing repository.
*
* @param submodule Submodule object
* @param init If the submodule is not initialized, setting this flag to true
* will initialize the submodule before updating. Otherwise, this will
* return an error if attempting to update an uninitialized repository.
* @param options configuration options for the update. If NULL, the
* function works as though GIT_SUBMODULE_UPDATE_OPTIONS_INIT was passed.
* @return 0 on success, any non-zero return value from a callback
* function, or a negative value to indicate an error (use
* `giterr_last` for a detailed error message).
*/
GIT_EXTERN(int) git_submodule_update(git_submodule *submodule, int init, git_submodule_update_options *options);
/**
* Lookup submodule information by name or path.
*
* Given either the submodule name or path (they are usually the same), this
* returns a structure describing the submodule.
*
* There are two expected error scenarios:
*
* - The submodule is not mentioned in the HEAD, the index, and the config,
* but does "exist" in the working directory (i.e. there is a subdirectory
* that appears to be a Git repository). In this case, this function
* returns GIT_EEXISTS to indicate a sub-repository exists but not in a
* state where a git_submodule can be instantiated.
* - The submodule is not mentioned in the HEAD, index, or config and the
* working directory doesn't contain a valid git repo at that path.
* There may or may not be anything else at that path, but nothing that
* looks like a submodule. In this case, this returns GIT_ENOTFOUND.
*
* You must call `git_submodule_free` when done with the submodule.
*
* @param out Output ptr to submodule; pass NULL to just get return code
* @param repo The parent repository
* @param name The name of or path to the submodule; trailing slashes okay
* @return 0 on success, GIT_ENOTFOUND if submodule does not exist,
* GIT_EEXISTS if a repository is found in working directory only,
* -1 on other errors.
*/
GIT_EXTERN(int) git_submodule_lookup(
git_submodule **out,
git_repository *repo,
const char *name);
/**
* Release a submodule
*
* @param submodule Submodule object
*/
GIT_EXTERN(void) git_submodule_free(git_submodule *submodule);
/**
* Iterate over all tracked submodules of a repository.
*
* See the note on `git_submodule` above. This iterates over the tracked
* submodules as described therein.
*
* If you are concerned about items in the working directory that look like
* submodules but are not tracked, the diff API will generate a diff record
* for workdir items that look like submodules but are not tracked, showing
* them as added in the workdir. Also, the status API will treat the entire
* subdirectory of a contained git repo as a single GIT_STATUS_WT_NEW item.
*
* @param repo The repository
* @param callback Function to be called with the name of each submodule.
* Return a non-zero value to terminate the iteration.
* @param payload Extra data to pass to callback
* @return 0 on success, -1 on error, or non-zero return value of callback
*/
GIT_EXTERN(int) git_submodule_foreach(
git_repository *repo,
int (*callback)(git_submodule *sm, const char *name, void *payload),
void *payload);
/**
* Set up a new git submodule for checkout.
*
* This does "git submodule add" up to the fetch and checkout of the
* submodule contents. It preps a new submodule, creates an entry in
* .gitmodules and creates an empty initialized repository either at the
* given path in the working directory or in .git/modules with a gitlink
* from the working directory to the new repo.
*
* To fully emulate "git submodule add" call this function, then open the
* submodule repo and perform the clone step as needed. Lastly, call
* `git_submodule_add_finalize()` to wrap up adding the new submodule and
* .gitmodules to the index to be ready to commit.
*
* You must call `git_submodule_free` on the submodule object when done.
*
* @param out The newly created submodule ready to open for clone
* @param repo The repository in which you want to create the submodule
* @param url URL for the submodule's remote
* @param path Path at which the submodule should be created
* @param use_gitlink Should workdir contain a gitlink to the repo in
* .git/modules vs. repo directly in workdir.
* @return 0 on success, GIT_EEXISTS if submodule already exists,
* -1 on other errors.
*/
GIT_EXTERN(int) git_submodule_add_setup(
git_submodule **out,
git_repository *repo,
const char *url,
const char *path,
int use_gitlink);
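/*
 * Hedged usage sketch (not part of the upstream header): the three-step
 * "git submodule add" flow described above might look roughly like this.
 * Error handling is omitted and the url/path values are placeholders.
 *
 *     git_submodule *sm = NULL;
 *     git_repository *sm_repo = NULL;
 *     git_submodule_add_setup(&sm, repo, "https://example.com/lib.git", "lib", 1);
 *     git_submodule_open(&sm_repo, sm);   // then fetch/checkout into sm_repo as needed
 *     git_submodule_add_finalize(sm);     // stage .gitmodules and the new entry
 *     git_repository_free(sm_repo);
 *     git_submodule_free(sm);
 */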
/**
* Resolve the setup of a new git submodule.
*
* This should be called on a submodule once you have called add setup
* and done the clone of the submodule. This adds the .gitmodules file
* and the newly cloned submodule to the index to be ready to be committed
* (but doesn't actually do the commit).
*
* @param submodule The submodule to finish adding.
*/
GIT_EXTERN(int) git_submodule_add_finalize(git_submodule *submodule);
/**
* Add current submodule HEAD commit to index of superproject.
*
* @param submodule The submodule to add to the index
* @param write_index Boolean if this should immediately write the index
* file. If you pass this as false, you will have to get the
* git_index and explicitly call `git_index_write()` on it to
* save the change.
* @return 0 on success, <0 on failure
*/
GIT_EXTERN(int) git_submodule_add_to_index(
git_submodule *submodule,
int write_index);
/**
* Write submodule settings to .gitmodules file.
*
* This commits any in-memory changes to the submodule to the gitmodules
* file on disk. You may also be interested in `git_submodule_init()` which
* writes submodule info to ".git/config" (which is better for local changes
* to submodule settings) and/or `git_submodule_sync()` which writes
* settings about remotes to the actual submodule repository.
*
* @param submodule The submodule to write.
* @return 0 on success, <0 on failure.
*/
GIT_EXTERN(int) git_submodule_save(git_submodule *submodule);
/**
* Get the containing repository for a submodule.
*
* This returns a pointer to the repository that contains the submodule.
* This is a just a reference to the repository that was passed to the
* original `git_submodule_lookup()` call, so if that repository has been
* freed, then this may be a dangling reference.
*
* @param submodule Pointer to submodule object
* @return Pointer to `git_repository`
*/
GIT_EXTERN(git_repository *) git_submodule_owner(git_submodule *submodule);
/**
* Get the name of submodule.
*
* @param submodule Pointer to submodule object
* @return Pointer to the submodule name
*/
GIT_EXTERN(const char *) git_submodule_name(git_submodule *submodule);
/**
* Get the path to the submodule.
*
* The path is almost always the same as the submodule name, but the
* two are actually not required to match.
*
* @param submodule Pointer to submodule object
* @return Pointer to the submodule path
*/
GIT_EXTERN(const char *) git_submodule_path(git_submodule *submodule);
/**
* Get the URL for the submodule.
*
* @param submodule Pointer to submodule object
* @return Pointer to the submodule url
*/
GIT_EXTERN(const char *) git_submodule_url(git_submodule *submodule);
/**
* Resolve a submodule url relative to the given repository.
*
* @param out buffer to store the absolute submodule url in
* @param repo Pointer to repository object
* @param url Relative url
* @return 0 or an error code
*/
GIT_EXTERN(int) git_submodule_resolve_url(git_buf *out, git_repository *repo, const char *url);
/**
* Get the branch for the submodule.
*
* @param submodule Pointer to submodule object
* @return Pointer to the submodule branch
*/
GIT_EXTERN(const char *) git_submodule_branch(git_submodule *submodule);
/**
* Set the URL for the submodule.
*
* This sets the URL in memory for the submodule. This will be used for
* any following submodule actions while this submodule data is in memory.
*
* After calling this, you may wish to call `git_submodule_save()` to write
* the changes back to the ".gitmodules" file and `git_submodule_sync()` to
* write the changes to the checked out submodule repository.
*
* @param submodule Pointer to the submodule object
* @param url URL that should be used for the submodule
* @return 0 on success, <0 on failure
*/
GIT_EXTERN(int) git_submodule_set_url(git_submodule *submodule, const char *url);
/**
* Get the OID for the submodule in the index.
*
* @param submodule Pointer to submodule object
* @return Pointer to git_oid or NULL if submodule is not in index.
*/
GIT_EXTERN(const git_oid *) git_submodule_index_id(git_submodule *submodule);
/**
* Get the OID for the submodule in the current HEAD tree.
*
* @param submodule Pointer to submodule object
* @return Pointer to git_oid or NULL if submodule is not in the HEAD.
*/
GIT_EXTERN(const git_oid *) git_submodule_head_id(git_submodule *submodule);
/**
* Get the OID for the submodule in the current working directory.
*
* This returns the OID that corresponds to looking up 'HEAD' in the checked
* out submodule. If there are pending changes in the index or anything
* else, this won't notice that. You should call `git_submodule_status()`
* for a more complete picture about the state of the working directory.
*
* @param submodule Pointer to submodule object
* @return Pointer to git_oid or NULL if submodule is not checked out.
*/
GIT_EXTERN(const git_oid *) git_submodule_wd_id(git_submodule *submodule);
/**
* Get the ignore rule that will be used for the submodule.
*
* These values control the behavior of `git_submodule_status()` for this
* submodule. There are four ignore values:
*
* - **GIT_SUBMODULE_IGNORE_NONE** will consider any change to the contents
* of the submodule from a clean checkout to be dirty, including the
* addition of untracked files. This is the default if unspecified.
* - **GIT_SUBMODULE_IGNORE_UNTRACKED** examines the contents of the
* working tree (i.e. call `git_status_foreach()` on the submodule) but
* UNTRACKED files will not count as making the submodule dirty.
* - **GIT_SUBMODULE_IGNORE_DIRTY** means to only check if the HEAD of the
* submodule has moved for status. This is fast since it does not need to
* scan the working tree of the submodule at all.
* - **GIT_SUBMODULE_IGNORE_ALL** means not to open the submodule repo.
* The working directory will be considered clean so long as there is a
* checked out version present.
*
* plus the special **GIT_SUBMODULE_IGNORE_RESET** which can be used with
* `git_submodule_set_ignore()` to revert to the on-disk setting.
*
* @param submodule The submodule to check
* @return The current git_submodule_ignore_t value that will be used for
* this submodule.
*/
GIT_EXTERN(git_submodule_ignore_t) git_submodule_ignore(
git_submodule *submodule);
/**
* Set the ignore rule for the submodule.
*
* This sets the in-memory ignore rule for the submodule which will
* control the behavior of `git_submodule_status()`.
*
* To make changes persistent, call `git_submodule_save()` to write the
* value to disk (in the ".gitmodules" and ".git/config" files).
*
* Call with `GIT_SUBMODULE_IGNORE_RESET` or call `git_submodule_reload()`
* to revert the in-memory rule to the value that is on disk.
*
* @param submodule The submodule to update
* @param ignore The new value for the ignore rule
* @return old value for ignore
*/
GIT_EXTERN(git_submodule_ignore_t) git_submodule_set_ignore(
git_submodule *submodule,
git_submodule_ignore_t ignore);
/**
* Get the update rule that will be used for the submodule.
*
* This value controls the behavior of the `git submodule update` command.
* There are four useful values documented with `git_submodule_update_t`
* plus the `GIT_SUBMODULE_UPDATE_RESET` which can be used to revert to
* the on-disk setting.
*
* @param submodule The submodule to check
* @return The current git_submodule_update_t value that will be used
* for this submodule.
*/
GIT_EXTERN(git_submodule_update_t) git_submodule_update_strategy(
git_submodule *submodule);
/**
* Set the update rule for the submodule.
*
* The initial value comes from the ".git/config" setting of
* `submodule.$name.update` for this submodule (which is initialized from
* the ".gitmodules" file). Using this function sets the update rule in
* memory for the submodule. Call `git_submodule_save()` to write out the
* new update rule.
*
* Calling this again with GIT_SUBMODULE_UPDATE_RESET or calling
* `git_submodule_reload()` will revert the rule to the on disk value.
*
* @param submodule The submodule to update
* @param update The new value to use
* @return old value for update
*/
GIT_EXTERN(git_submodule_update_t) git_submodule_set_update(
git_submodule *submodule,
git_submodule_update_t update);
/**
* Read the fetchRecurseSubmodules rule for a submodule.
*
* This accesses the submodule.<name>.fetchRecurseSubmodules value for
* the submodule that controls fetching behavior for the submodule.
*
* Note that at this time, libgit2 does not honor this setting and the
* fetch functionality currently ignores submodules.
*
* @return 0 if fetchRecurseSubmodules is false, 1 if true
*/
GIT_EXTERN(git_submodule_recurse_t) git_submodule_fetch_recurse_submodules(
git_submodule *submodule);
/**
* Set the fetchRecurseSubmodules rule for a submodule.
*
* This sets the submodule.<name>.fetchRecurseSubmodules value for
* the submodule. You should call `git_submodule_save()` if you want
* to persist the new value.
*
* @param submodule The submodule to modify
* @param fetch_recurse_submodules Boolean value
* @return old value for fetchRecurseSubmodules
*/
GIT_EXTERN(git_submodule_recurse_t) git_submodule_set_fetch_recurse_submodules(
git_submodule *submodule,
git_submodule_recurse_t fetch_recurse_submodules);
/**
* Copy submodule info into ".git/config" file.
*
* Just like "git submodule init", this copies information about the
* submodule into ".git/config". You can use the accessor functions
* above to alter the in-memory git_submodule object and control what
* is written to the config, overriding what is in .gitmodules.
*
* @param submodule The submodule to write into the superproject config
* @param overwrite By default, existing entries will not be overwritten,
* but setting this to true forces them to be updated.
* @return 0 on success, <0 on failure.
*/
GIT_EXTERN(int) git_submodule_init(git_submodule *submodule, int overwrite);
/**
* Set up the subrepository for a submodule in preparation for clone.
*
* This function can be called to init and set up a submodule
* repository from a submodule in preparation to clone it from
* its remote.
*
* @param out Output pointer to the created git repository.
* @param sm The submodule to create a new subrepository from.
* @param use_gitlink Should the workdir contain a gitlink to
* the repo in .git/modules vs. repo directly in workdir.
* @return 0 on success, <0 on failure.
*/
GIT_EXTERN(int) git_submodule_repo_init(
git_repository **out,
const git_submodule *sm,
int use_gitlink);
/**
* Copy submodule remote info into submodule repo.
*
* This copies the information about the submodule's URL into the checked out
* submodule config, acting like "git submodule sync". This is useful if
* you have altered the URL for the submodule (or it has been altered by a
* fetch of upstream changes) and you need to update your local repo.
*/
GIT_EXTERN(int) git_submodule_sync(git_submodule *submodule);
/**
* Open the repository for a submodule.
*
* This is a newly opened repository object. The caller is responsible for
* calling `git_repository_free()` on it when done. Multiple calls to this
* function will return distinct `git_repository` objects. This will only
* work if the submodule is checked out into the working directory.
*
* @param repo Pointer to the submodule repo which was opened
* @param submodule Submodule to be opened
* @return 0 on success, <0 if submodule repo could not be opened.
*/
GIT_EXTERN(int) git_submodule_open(
git_repository **repo,
git_submodule *submodule);
/**
* Reread submodule info from config, index, and HEAD.
*
* Call this to reread cached submodule information for this submodule if
* you have reason to believe that it has changed.
*
* @param submodule The submodule to reload
* @param force Force reload even if the data doesn't seem out of date
* @return 0 on success, <0 on error
*/
GIT_EXTERN(int) git_submodule_reload(git_submodule *submodule, int force);
/**
* Reread all submodule info.
*
* Call this to reload all cached submodule information for the repo.
*
* @param repo The repository to reload submodule data for
* @param force Force full reload even if the data doesn't seem out of date
* @return 0 on success, <0 on error
*/
GIT_EXTERN(int) git_submodule_reload_all(git_repository *repo, int force);
/**
* Get the status for a submodule.
*
* This looks at a submodule and tries to determine the status. It
* will return a combination of the `GIT_SUBMODULE_STATUS` values above.
* How deeply it examines the working directory to do this will depend
* on the `git_submodule_ignore_t` value for the submodule - which can be
* set either temporarily or permanently with `git_submodule_set_ignore()`.
*
* @param status Combination of `GIT_SUBMODULE_STATUS` flags
* @param submodule Submodule for which to get status
* @return 0 on success, <0 on error
*/
GIT_EXTERN(int) git_submodule_status(
unsigned int *status,
git_submodule *submodule);
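/*
 * Hedged usage sketch (not part of the upstream header): combining
 * git_submodule_status() with the helper macros defined earlier, assuming
 * `sm` was obtained from git_submodule_lookup():
 *
 *     unsigned int status = 0;
 *     if (git_submodule_status(&status, sm) == 0 &&
 *         GIT_SUBMODULE_STATUS_IS_WD_DIRTY(status))
 *         printf("submodule '%s' has local changes\n", git_submodule_name(sm));
 */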
/**
* Get the locations of submodule information.
*
* This is a bit like a very lightweight version of `git_submodule_status`.
* It just returns a combination of the first four submodule status values (i.e.
* the ones like GIT_SUBMODULE_STATUS_IN_HEAD, etc) that tell you where the
* submodule data comes from (i.e. the HEAD commit, gitmodules file, etc.).
* This can be useful if you want to know if the submodule is present in the
* working directory at this point in time, etc.
*
* @param location_status Combination of first four `GIT_SUBMODULE_STATUS` flags
* @param submodule Submodule for which to get status
* @return 0 on success, <0 on error
*/
GIT_EXTERN(int) git_submodule_location(
unsigned int *location_status,
git_submodule *submodule);
/** @} */
GIT_END_DECL
#endif
|
""" Download a file from a url.
"""
from __future__ import print_function
import sys
import argparse
from time import monotonic
from six.moves.urllib.request import urlopen
try:
import pasteboard as clipboard
import console
except:
console = None
def main(args):
from progress.bar import ChargingBar as Bar
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output-file", nargs="?", help="save content as file")
ap.add_argument(
"url", nargs="?", help="the url to read from (default to clipboard)"
)
ns = ap.parse_args(args)
url = ns.url or clipboard.string()
output_file = ns.output_file or url.split("/")[-1]
try:
# print('Opening: %s\n' % url)
u = urlopen(url)
meta = u.info()
try:
file_size = int(meta["Content-Length"])
except (IndexError, ValueError, TypeError):
file_size = 0
# print("Save as: {} ".format(output_file), end="")
# print("({} bytes)".format(file_size if file_size else "???"))
with open(output_file, "wb") as f:
file_size_dl = 0
block_sz = 8192
if file_size != 0 and file_size is not None:
bar = Bar("Downloading", max=100)
else:
bar = None
_n = 0
while True:
buf = u.read(block_sz)
if not buf:
break
file_size_dl += len(buf)
f.write(buf)
if bar is not None:
n = int(file_size_dl * 100.0 / file_size)
if n == _n:
continue
_n = n
now = monotonic()
dt = now - bar._ts
bar.update_avg(n, dt)
bar._ts = now
bar.index = n
bar.update()
if bar is not None:
bar.finish()
except Exception as e:
print(e)
print("Unable to download file: %s" % url)
return 1
return 0
if __name__ == "__main__":
main(sys.argv[1:])
|
import { useMutation } from '@redwoodjs/web'
import { toast } from '@redwoodjs/web/toast'
import { Link, routes, navigate } from '@redwoodjs/router'
const DELETE_POST_MUTATION = gql`
mutation DeletePostMutation($id: Int!) {
deletePost(id: $id) {
id
}
}
`
const jsonDisplay = (obj) => {
return (
<pre>
<code>{JSON.stringify(obj, null, 2)}</code>
</pre>
)
}
const timeTag = (datetime) => {
return (
<time dateTime={datetime} title={datetime}>
{new Date(datetime).toUTCString()}
</time>
)
}
const checkboxInputTag = (checked) => {
return <input type="checkbox" checked={checked} disabled />
}
const Post = ({ post }) => {
const [deletePost] = useMutation(DELETE_POST_MUTATION, {
onCompleted: () => {
toast.success('Post deleted')
navigate(routes.posts())
},
onError: (error) => {
toast.error(error.message)
},
})
const onDeleteClick = (id) => {
if (confirm('Are you sure you want to delete post ' + id + '?')) {
deletePost({ variables: { id } })
}
}
return (
<>
<div className="rw-segment">
<header className="rw-segment-header">
<h2 className="rw-heading rw-heading-secondary">
Post {post.id} Detail
</h2>
</header>
<table className="rw-table">
<tbody>
<tr>
<th>Id</th>
<td>{post.id}</td>
</tr>
<tr>
<th>Title</th>
<td>{post.title}</td>
</tr>
<tr>
<th>Created at</th>
<td>{timeTag(post.createdAt)}</td>
</tr>
</tbody>
</table>
</div>
<nav className="rw-button-group">
<Link
to={routes.editPost({ id: post.id })}
className="rw-button rw-button-blue"
>
Edit
</Link>
<button
type="button"
className="rw-button rw-button-red"
onClick={() => onDeleteClick(post.id)}
>
Delete
</button>
</nav>
</>
)
}
export default Post
|
# coding: utf-8
"""
stash-server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.io_k8s_api_core_v1_key_to_path import IoK8sApiCoreV1KeyToPath # noqa: F401,E501
class IoK8sApiCoreV1SecretVolumeSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'default_mode': 'int',
'items': 'list[IoK8sApiCoreV1KeyToPath]',
'optional': 'bool',
'secret_name': 'str'
}
attribute_map = {
'default_mode': 'defaultMode',
'items': 'items',
'optional': 'optional',
'secret_name': 'secretName'
}
def __init__(self, default_mode=None, items=None, optional=None, secret_name=None): # noqa: E501
"""IoK8sApiCoreV1SecretVolumeSource - a model defined in Swagger""" # noqa: E501
self._default_mode = None
self._items = None
self._optional = None
self._secret_name = None
self.discriminator = None
if default_mode is not None:
self.default_mode = default_mode
if items is not None:
self.items = items
if optional is not None:
self.optional = optional
if secret_name is not None:
self.secret_name = secret_name
@property
def default_mode(self):
"""Gets the default_mode of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:return: The default_mode of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:rtype: int
"""
return self._default_mode
@default_mode.setter
def default_mode(self, default_mode):
"""Sets the default_mode of this IoK8sApiCoreV1SecretVolumeSource.
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:param default_mode: The default_mode of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:type: int
"""
self._default_mode = default_mode
@property
def items(self):
"""Gets the items of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
:return: The items of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:rtype: list[IoK8sApiCoreV1KeyToPath]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this IoK8sApiCoreV1SecretVolumeSource.
If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
:param items: The items of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:type: list[IoK8sApiCoreV1KeyToPath]
"""
self._items = items
@property
def optional(self):
"""Gets the optional of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
Specify whether the Secret or its keys must be defined # noqa: E501
:return: The optional of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:rtype: bool
"""
return self._optional
@optional.setter
def optional(self, optional):
"""Sets the optional of this IoK8sApiCoreV1SecretVolumeSource.
Specify whether the Secret or its keys must be defined # noqa: E501
:param optional: The optional of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:type: bool
"""
self._optional = optional
@property
def secret_name(self):
"""Gets the secret_name of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret # noqa: E501
:return: The secret_name of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""Sets the secret_name of this IoK8sApiCoreV1SecretVolumeSource.
Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret # noqa: E501
:param secret_name: The secret_name of this IoK8sApiCoreV1SecretVolumeSource. # noqa: E501
:type: str
"""
self._secret_name = secret_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoK8sApiCoreV1SecretVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
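# A minimal usage sketch (not part of the generated module): construct the model
# with keyword arguments matching swagger_types above and serialise it with
# to_dict(). The secret name and mode value below are made-up example inputs.
if __name__ == "__main__":
    example = IoK8sApiCoreV1SecretVolumeSource(
        secret_name="app-tls",   # hypothetical Secret in the pod's namespace
        default_mode=0o644,      # 420 decimal, the documented default
        optional=False,
    )
    print(example.to_dict())
    # -> {'default_mode': 420, 'items': None, 'optional': False, 'secret_name': 'app-tls'}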
|
const { GraphQLUpload } = require("graphql-upload")
const { accountResolvers, accountTypeDefs } = require("./Account")
const { categoryResolvers, categoryTypeDefs } = require("./Category")
const { fileResolvers, fileTypeDefs } = require("./File")
const { orderResolvers, orderTypeDefs } = require("./Order")
const { productResolvers, productTypeDefs } = require("./Product")
const language = require("../constants/language")
const currency = require("../constants/currency")
const datetime = require("../constants/datetime")
const commonTypeDefs = `
type Query {
me: SafeAccount
}
type Mutation {
login(email: String!, password: String!): Token
}
type Token {
token: String!
}
type Options {
label: LanguageString!
options: [LanguageString!]!
}
type Option {
label: LanguageString!
option: LanguageString!
}
type LanguageString ${language.getGql()}
type CurrencyFloat ${currency.getGql()}
type DateObject ${datetime.getGql()}
input OptionsInput {
label: LanguageStringInput!
options: [LanguageStringInput!]!
}
input OptionInput {
label: LanguageStringInput!
option: LanguageStringInput!
}
input LanguageStringInput ${language.getGql()}
input CurrencyFloatInput ${currency.getGql()}
`
const typeDefs = [
commonTypeDefs,
accountTypeDefs,
categoryTypeDefs,
fileTypeDefs,
orderTypeDefs,
productTypeDefs,
]
const resolvers = {
Query: {
...accountResolvers.Query,
...categoryResolvers.Query,
...fileResolvers.Query,
...orderResolvers.Query,
...productResolvers.Query,
},
Mutation: {
...accountResolvers.Mutation,
...categoryResolvers.Mutation,
...fileResolvers.Mutation,
...orderResolvers.Mutation,
...productResolvers.Mutation,
},
Product: {
...productResolvers.Product,
},
Upload: GraphQLUpload,
}
module.exports = { typeDefs, resolvers }
|
const { NotImplementedError } = require('../extensions/index.js');
/**
* Extract season from given date and expose the enemy scout!
*
* @param {Date | FakeDate} date real or fake date
* @returns {String} time of the year
*
* @example
*
* getSeason(new Date(2020, 02, 31)) => 'spring'
*
*/
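// Hand-rolled fake Date used to probe the validation below: every accessor
// delegates to a real Date and it even adopts Date.prototype (so `instanceof
// Date` passes and the console.log sanity check prints true), but it still
// carries its own enumerable keys, which is what the Object.keys() guard in
// getSeason() catches.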
const deeperFakeDate = {
toString() {
return Date.prototype.toString.call(new Date());
},
getMonth() {
return Date.prototype.getMonth.call(new Date());
},
getFullYear() {
return Date.prototype.getFullYear.call(new Date(1994, 1, 2, 3, 4, 5));
},
getDate() {
return Date.prototype.getDate.call(new Date(2020, 0, 3, 4, 5, 6));
},
getHours() {
return Date.prototype.getHours.call(new Date(1978, 2, 4, 5, 6, 7));
},
getMinutes() {
return Date.prototype.getMinutes.call(new Date(202, 3, 5, 6, 7, 8));
},
getSeconds() {
return Date.prototype.getSeconds.call(new Date(1, 4, 6, 7, 8, 9));
},
getMilliseconds() {
return Date.prototype.getMilliseconds.call(new Date(2019, 7, 8, 9, 10, 11));
},
getDay() {
return Date.prototype.getDay.call(new Date(1812, 8, 9, 10, 11, 12));
},
[Symbol.toStringTag]: 'Date'
};
Object.setPrototypeOf(deeperFakeDate, Object.getPrototypeOf(new Date()));
console.log(deeperFakeDate.constructor === (new Date().constructor));
function getSeason(date) {
if (arguments.length == 0) return 'Unable to determine the time of year!';
if (Object.keys(date).length > 0) throw Error("Invalid date!");
if (!(date instanceof Date)) throw Error("Invalid date!");
if (date.value) throw Error("Invalid date!");
const month = date.getMonth();
// throw new NotImplementedError('Not implemented');
switch (month) {
case 11:
case 0:
case 1:
return 'winter'
case 2:
case 3:
case 4:
return 'spring'
case 5:
case 6:
case 7:
return 'summer'
case 8:
case 9:
case 10:
return 'fall'
default:
break;
}
}
module.exports = {
getSeason
};
|
var searchData=
[
['netconnector',['NetConnector',['../classcom_1_1android_1_1net_1_1NetConnector.html',1,'com::android::net']]]
];
|
import React from 'react'
const renderCharacterKey = (character, value, onUpdateValue, animated) => {
const onClick = () => {
onUpdateValue(value ? value + character : character)
}
const animations = ['bounceInDown', 'bounceInUp', 'bounceInLeft', 'bounceInRight']
const animation = animated ? `animated ${animations[Math.floor(Math.random() * animations.length)]}` : ''
return (
<div
key={`char-${character}`}
className={`action-button medium font-mono uppercase bold green ${animation}`}
onClick={onClick}
>
{character}
</div>
)
}
const renderRemoveCharacterKey = (value, onUpdateValue, animated) => {
const onClick = () => {
onUpdateValue(value.slice(0, -1))
}
const animations = ['bounceInDown', 'bounceInUp', 'bounceInLeft', 'bounceInRight']
const animation = animated ? `animated ${animations[Math.floor(Math.random() * animations.length)]}` : ''
return (
<div
className={`action-button medium red icon no-text ${animation}`}
onClick={onClick}
>
<i className="fa fa-chevron-circle-left" />
</div>
)
}
export {
renderCharacterKey,
renderRemoveCharacterKey,
}
|
import datetime
import itertools
import os
import re
from importlib import import_module
from urllib.parse import ParseResult, quote, urlparse
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import (
BACKEND_SESSION_KEY, REDIRECT_FIELD_NAME, SESSION_KEY,
)
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.views import (
INTERNAL_RESET_SESSION_TOKEN, LoginView, logout_then_login,
redirect_to_login,
)
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import Client, TestCase, override_settings
from django.test.utils import patch_logger
from django.urls import NoReverseMatch, reverse, reverse_lazy
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import LANGUAGE_SESSION_KEY
from .client import PasswordResetConfirmClient
from .models import CustomUser, UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[('en', 'English')],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
cls.u3 = User.objects.create_user(username='staff', password='password', email='staffmember@example.com')
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(str(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
with self.subTest(name=name):
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def setUp(self):
self.client = PasswordResetConfirmClient()
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_extra_email_context(self):
"""
extra_email_context should be available in the email template context.
"""
response = self.client.post(
'/password_reset_extra_email_context/',
{'email': 'staffmember@example.com'},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# A nonexistent user returns a 200 response, not a 404.
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# A base36 user id that overflows int returns a 200 response.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_invalid_hash(self):
"""A POST with an invalid token is rejected."""
u = User.objects.get(email='staffmember@example.com')
original_password = u.password
url, path = self._test_confirm_start()
path_parts = path.split('-')
path_parts[-1] = ("0") * 20 + '/'
path = '-'.join(path_parts)
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertIs(response.context['validlink'], False)
u.refresh_from_db()
self.assertEqual(original_password, u.password) # password hasn't changed
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# The reset token is deleted from the session.
self.assertNotIn(INTERNAL_RESET_SESSION_TOKEN, self.client.session)
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertRedirects(response, '/password_reset/done/', fetch_redirect_response=False)
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/', {'email': 'staffmember@example.com'})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/', {'email': 'staffmember@example.com'})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
def test_confirm_login_post_reset(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
@override_settings(
AUTHENTICATION_BACKENDS=[
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.AllowAllUsersModelBackend',
]
)
def test_confirm_login_post_reset_custom_backend(self):
# This backend is specified in the url().
backend = 'django.contrib.auth.backends.AllowAllUsersModelBackend'
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login_custom_backend/')
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
self.assertEqual(self.client.session[BACKEND_SESSION_KEY], backend)
def test_confirm_login_post_reset_already_logged_in(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/post_reset_login/')
self.login()
response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
self.assertIn(SESSION_KEY, self.client.session)
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# The password_reset_confirm() view passes the user object to the
# ``SetPasswordForm``, even on GET requests (#16919). For this test,
# ``{{ form.user }}`` is rendered in the template
# registration/password_reset_confirm.html.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
def test_confirm_link_redirects_to_set_password_page(self):
url, path = self._test_confirm_start()
# Don't use PasswordResetConfirmClient (self.client) here which
# automatically fetches the redirect page.
client = Client()
response = client.get(path)
token = response.resolver_match.kwargs['token']
uuidb64 = response.resolver_match.kwargs['uidb64']
self.assertRedirects(response, '/reset/%s/set-password/' % uuidb64)
self.assertEqual(client.session['_password_reset_token'], token)
def test_invalid_link_if_going_directly_to_the_final_reset_password_url(self):
url, path = self._test_confirm_start()
_, uuidb64, _ = path.strip('/').split('/')
response = Client().get('/reset/%s/set-password/' % uuidb64)
self.assertContains(response, 'The password reset link was invalid')
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = 'staffmember@example.com'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
email='staffmember@example.com',
date_of_birth=datetime.date(1976, 11, 8),
)
cls.u1.set_password('password')
cls.u1.save()
def setUp(self):
self.client = PasswordResetConfirmClient()
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth_tests.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
# instead of fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super()._test_confirm_start()
def test_confirm_invalid_uuid(self):
"""A uidb64 that decodes to a non-UUID doesn't crash."""
_, path = self._test_confirm_start()
invalid_uidb64 = urlsafe_base64_encode('INVALID_UUID'.encode()).decode()
first, _uuidb64_, second = path.strip('/').split('/')
response = self.client.get('/' + '/'.join((first, invalid_uidb64, second)) + '/')
self.assertContains(response, 'The password reset link was invalid')
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self):
response = self.client.post('/login/', {
'username': 'testclient',
'password': 'password',
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertRedirects(response, '/login/?next=/password_change/done/', fetch_redirect_response=False)
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
original_session_key = self.client.session.session_key
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
# The session key is rotated.
self.assertNotEqual(original_session_key, self.client.session.session_key)
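# Illustrative sketch (not part of this test module): the view-side mechanism the
# test above exercises. After a successful password change the view must call
# update_session_auth_hash() so the session's auth hash matches the new password
# and the user stays logged in; the call also rotates the session key. The view
# name is an assumption, and a real view would redirect instead of returning the
# form.
from django.contrib.auth import update_session_auth_hash

def example_password_change_view(request):
    form = PasswordChangeForm(user=request.user, data=request.POST)
    if form.is_valid():
        user = form.save()
        update_session_auth_hash(request, user)  # keep the current session valid
    return form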
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self):
login_url = reverse('login')
# These URLs should not pass the security check.
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")',
)
for bad_url in bad_urls:
with self.subTest(bad_url=bad_url):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': quote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': 'password',
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url)
# These URLs should pass the security check.
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/',
)
for good_url in good_urls:
with self.subTest(good_url=good_url):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': quote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': 'password',
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, '%s should be allowed' % good_url)
def test_security_check_https(self):
login_url = reverse('login')
non_https_next_url = 'http://testserver/path'
not_secured_url = '%(url)s?%(next)s=%(next_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'next_url': quote(non_https_next_url),
}
post_data = {
'username': 'testclient',
'password': 'password',
}
response = self.client.post(not_secured_url, post_data, secure=True)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(response.url, non_https_next_url)
self.assertEqual(response.url, settings.LOGIN_REDIRECT_URL)
def test_login_form_contains_request(self):
# The custom authentication form for this login requires a request to
# initialize it.
response = self.client.post('/custom_request_auth_login/', {
'username': 'testclient',
'password': 'password',
})
# The login was successful.
self.assertRedirects(response, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False)
def test_login_csrf_rotate(self):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# The test client isn't used here as it's a test for middleware.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, LoginView.as_view(), (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = LoginView.as_view()(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': 'password', 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, LoginView.as_view(), (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = LoginView.as_view()(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
Session without django.contrib.auth.HASH_SESSION_KEY should login
without an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = quote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertRedirects(response, url, fetch_redirect_response=False)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutThenLoginTests(AuthViewsTestCase):
"""Tests for the logout_then_login view"""
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
@override_settings(LOGIN_URL='/login/')
def test_default_logout_then_login(self):
self.login()
req = HttpRequest()
req.method = 'GET'
req.session = self.client.session
response = logout_then_login(req)
self.confirm_logged_out()
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
def test_logout_then_login_with_custom_login(self):
self.login()
req = HttpRequest()
req.method = 'GET'
req.session = self.client.session
response = logout_then_login(req, login_url='/custom/')
self.confirm_logged_out()
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
class LoginRedirectAuthenticatedUser(AuthViewsTestCase):
dont_redirect_url = '/login/redirect_authenticated_user_default/'
do_redirect_url = '/login/redirect_authenticated_user/'
def test_default(self):
"""Stay on the login page by default."""
self.login()
response = self.client.get(self.dont_redirect_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['next'], '')
def test_guest(self):
"""If not logged in, stay on the same page."""
response = self.client.get(self.do_redirect_url)
self.assertEqual(response.status_code, 200)
def test_redirect(self):
"""If logged in, go to default redirected URL."""
self.login()
response = self.client.get(self.do_redirect_url)
self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False)
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_redirect_url(self):
"""If logged in, go to custom redirected URL."""
self.login()
response = self.client.get(self.do_redirect_url)
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
def test_redirect_param(self):
"""If next is specified as a GET parameter, go there."""
self.login()
url = self.do_redirect_url + '?next=/custom_next/'
response = self.client.get(url)
self.assertRedirects(response, '/custom_next/', fetch_redirect_response=False)
def test_redirect_loop(self):
"""
Detect a redirect loop if LOGIN_REDIRECT_URL is not correctly set,
with and without custom parameters.
"""
self.login()
msg = (
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page"
)
with self.settings(LOGIN_REDIRECT_URL=self.do_redirect_url):
with self.assertRaisesMessage(ValueError, msg):
self.client.get(self.do_redirect_url)
url = self.do_redirect_url + '?bla=2'
with self.assertRaisesMessage(ValueError, msg):
self.client.get(url)
class LoginSuccessURLAllowedHostsTest(AuthViewsTestCase):
def test_success_url_allowed_hosts_same_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://testserver/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, 'https://testserver/home', fetch_redirect_response=False)
def test_success_url_allowed_hosts_safe_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://otherserver/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, 'https://otherserver/home', fetch_redirect_response=False)
def test_success_url_allowed_hosts_unsafe_host(self):
response = self.client.post('/login/allowed_hosts/', {
'username': 'testclient',
'password': 'password',
'next': 'https://evil/home',
})
self.assertIn(SESSION_KEY, self.client.session)
self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_logout_with_post(self):
self.login()
response = self.client.post('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_doesnt_cache(self):
"""
The logout() view should send "no-cache" headers for reasons described
in #25490.
"""
response = self.client.get('/logout/')
self.assertIn('no-store', response['Cache-Control'])
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
response = self.client.get('/logout/next_page/?next=/login/')
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertRedirects(response, '/login/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_success_url_allowed_hosts_same_host(self):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://testserver/')
self.assertRedirects(response, 'https://testserver/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_success_url_allowed_hosts_safe_host(self):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://otherserver/')
self.assertRedirects(response, 'https://otherserver/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_success_url_allowed_hosts_unsafe_host(self):
self.login()
response = self.client.get('/logout/allowed_hosts/?next=https://evil/')
self.assertRedirects(response, '/logout/allowed_hosts/', fetch_redirect_response=False)
self.confirm_logged_out()
def test_security_check(self):
logout_url = reverse('logout')
# These URLs should not pass the security check.
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")',
)
for bad_url in bad_urls:
with self.subTest(bad_url=bad_url):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': quote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url)
self.confirm_logged_out()
# These URLs should pass the security check.
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/',
)
for good_url in good_urls:
with self.subTest(good_url=good_url):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': quote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, '%s should be allowed' % good_url)
self.confirm_logged_out()
def test_security_check_https(self):
logout_url = reverse('logout')
non_https_next_url = 'http://testserver/'
url = '%(url)s?%(next)s=%(next_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'next_url': quote(non_https_next_url),
}
self.login()
response = self.client.get(url, secure=True)
self.assertRedirects(response, logout_url, fetch_redirect_response=False)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
@override_settings(LOGOUT_REDIRECT_URL='/custom/')
def test_logout_redirect_url_setting(self):
self.login()
response = self.client.get('/logout/')
self.assertRedirects(response, '/custom/', fetch_redirect_response=False)
@override_settings(LOGOUT_REDIRECT_URL='logout')
def test_logout_redirect_url_named_setting(self):
self.login()
response = self.client.get('/logout/')
self.assertRedirects(response, '/logout/', fetch_redirect_response=False)
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@override_settings(ROOT_URLCONF='auth_tests.urls_admin')
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
response.content.decode()
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.get_change_message(), 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='staffmember@example.com')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
AUTH_USER_MODEL='auth_tests.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='foo@bar.com', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_tests_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), 'Changed password.')
# The LogEntry.user column isn't altered to a UUID type so it's set to
# an integer manually in CustomUserAdmin to avoid an error. To avoid a
# constraint error, delete the entry before constraints are checked
# after the test.
row.delete()
|
$(function () {
$('.b-team__list').slick({
//arrows: true,
//dots: false,
//infinite: true,
//swipe: false,
//centerMode: true,
//centerPadding: '60px',
slidesToShow: 3
});
});
|
webpackHotUpdate("app",{
/***/ "./src/helpers/alignGrid.ts":
/*!**********************************!*\
!*** ./src/helpers/alignGrid.ts ***!
\**********************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
eval("\nObject.defineProperty(exports, \"__esModule\", { value: true });\nvar TILE_SIZE = 50;\nvar AlignGrid = /** @class */ (function () {\n function AlignGrid(config) {\n this.counter = 0;\n this.clumps = new Map();\n if (!config.scene) {\n console.log('missing scene!');\n return;\n }\n if (!config.rows) {\n console.log('no rows given wee woo');\n }\n if (!config.cols) {\n console.log('no columns given wee woo');\n }\n this.h = config.rows * TILE_SIZE;\n this.w = config.cols * TILE_SIZE;\n this.rows = config.rows;\n this.cols = config.cols;\n this.scene = config.scene;\n this.grid = new Array(this.rows);\n this.selected = 'lavaTile';\n for (var i = 0; i < this.cols; i++) {\n this.grid[i] = new Array(this.rows);\n }\n this.playerTile = null;\n }\n AlignGrid.prototype.show = function (a) {\n if (a === void 0) { a = 0.7; }\n this.graphics = this.scene.add.graphics();\n this.graphics.lineStyle(1, 0xff0000, a);\n for (var i = 0; i < this.w; i += TILE_SIZE) {\n this.graphics.moveTo(i, 0);\n this.graphics.lineTo(i, this.h);\n }\n for (var i = 0; i < this.h; i += TILE_SIZE) {\n this.graphics.moveTo(0, i);\n this.graphics.lineTo(this.w, i);\n }\n this.graphics.strokePath();\n };\n AlignGrid.prototype.placeAt = function (x1, y1, objName, game) {\n //converted centered coordinates in pixels to place in grid square\n var row = Math.floor(x1 / TILE_SIZE);\n var col = Math.floor(y1 / TILE_SIZE);\n var x2 = row * TILE_SIZE + TILE_SIZE / 2;\n var y2 = col * TILE_SIZE + TILE_SIZE / 2;\n if (objName == 'clear') {\n if (this.grid[row][col]) {\n this.grid[row][col].destroy();\n }\n this.grid[row][col] = null;\n return;\n }\n var obj = game.add.image(x2, y2, objName);\n obj.name = objName;\n if (this.grid[row][col]) {\n if (this.playerTile && this.playerTile[0] == row && this.playerTile[1] == col) {\n this.playerTile = null;\n }\n this.grid[row][col].destroy();\n }\n if (objName == 'player') {\n if (this.playerTile) {\n this.grid[this.playerTile[0]][this.playerTile[1]].destroy();\n }\n this.playerTile = [row, col];\n }\n this.grid[row][col] = obj;\n obj.x = x2;\n obj.y = y2;\n console.log(this.playerTile);\n };\n AlignGrid.prototype.getRowOrCol = function (pixel) {\n return Math.floor(pixel / TILE_SIZE);\n };\n AlignGrid.prototype.getPixel = function (rowOrCol) {\n return rowOrCol * TILE_SIZE + TILE_SIZE / 2;\n };\n AlignGrid.prototype.neighbors = function (i, j) {\n return [\n i + ',' + (j - 1),\n i + 1 + ',' + (j - 1),\n i + 1 + ',' + j,\n i + 1 + ',' + (j + 1),\n i + ',' + (j + 1),\n i - 1 + ',' + (j + 1),\n i - 1 + ',' + j,\n i - 1 + ',' + (j - 1),\n ];\n };\n AlignGrid.prototype.unpack = function (coord) {\n var split = coord.indexOf(',');\n var i = parseInt(coord.substring(0, split));\n var j = parseInt(coord.substring(split + 1));\n return [i, j];\n };\n /**\n * start and end of rectangle drawn by mouse to clump selected tiles\n * @param sx start x pixel coordinate\n * @param sy start y pixel coordinate\n * @param ex end x pixel coordinate\n * @param ey end y pixel coordinate\n */\n AlignGrid.prototype.clump = function (sx, sy, ex, ey) {\n var _this = this;\n var curr = new Set();\n var sr = this.getRowOrCol(sx);\n var sc = this.getRowOrCol(sy);\n var er = this.getRowOrCol(ex);\n var ec = this.getRowOrCol(ey);\n for (var i = sr; i <= er; i++) {\n for (var j = sc; j <= ec; j++) {\n curr.add(i + ',' + j);\n if (this.clumps.has(this.grid[i][j].clumpId)) {\n var toAdd = this.clumps.get(this.grid[i][j].clumpId);\n toAdd.forEach(function (e) {\n curr.add(e);\n });\n }\n }\n }\n // figure out which tile 
texture to use based on spritesheet\n curr.forEach(function (e) {\n var i = _this.unpack(e)[0];\n var j = _this.unpack(e)[1];\n var candidates = _this.neighbors(i, j);\n var id = [0, 0, 0, 0, 0, 0, 0, 0];\n for (var x = 0; x < candidates.length; x++) {\n var coord = candidates[x];\n var a = _this.unpack(coord)[0];\n var b = _this.unpack(coord)[1];\n if (curr.has(coord) && _this.grid[a][b].name == _this.grid[i][j].name) {\n }\n }\n });\n };\n return AlignGrid;\n}());\nexports.default = AlignGrid;\n\n\n//# sourceURL=webpack:///./src/helpers/alignGrid.ts?");
/***/ }),
/***/ "./src/scenes/level-editor-scene.ts":
/*!******************************************!*\
!*** ./src/scenes/level-editor-scene.ts ***!
\******************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
eval("\nvar __extends = (this && this.__extends) || (function () {\n var extendStatics = function (d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n };\n return function (d, b) {\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\n };\n})();\nObject.defineProperty(exports, \"__esModule\", { value: true });\nexports.LevelEditor = void 0;\nvar alignGrid_1 = __webpack_require__(/*! ../helpers/alignGrid */ \"./src/helpers/alignGrid.ts\");\nvar levelEditorButton_1 = __webpack_require__(/*! ../helpers/levelEditorButton */ \"./src/helpers/levelEditorButton.ts\");\nvar sceneConfig = {\n active: false,\n visible: false,\n key: 'LevelEditor',\n};\nvar W_WIDTH = 1200;\nvar W_HEIGHT = 600;\nvar cursors;\nvar controls;\nvar grid;\nvar pointer;\nvar aGrid;\nvar graphics;\n// let menuPositions = [];\n// let menuNames = []\n// for (let i = 0; i < 8; i++) {\n// menuPositions.push(200 + i * 36);\n// }\nvar LevelEditor = /** @class */ (function (_super) {\n __extends(LevelEditor, _super);\n function LevelEditor() {\n var _this = _super.call(this, sceneConfig) || this;\n _this.speed = 200;\n _this.selected = 'lava';\n _this.onButton = false;\n return _this;\n }\n LevelEditor.prototype.preload = function () {\n this.load.image('background', 'assets/backgrounds/level-editor.png');\n this.load.image('ground', 'assets/squares/platform.png');\n this.load.image('house', 'assets/squares/house.png');\n this.load.image('dirt', 'assets/squares/dirtTile.png');\n this.load.image('steel', 'assets/squares/steelTile.png');\n this.load.image('lava', 'assets/squares/lavaTile.png');\n this.load.spritesheet('fireball', 'assets/fireball.png', { frameWidth: 38, frameHeight: 19 });\n this.load.spritesheet('player', 'assets/dude.png', { frameWidth: 32, frameHeight: 48 });\n this.load.spritesheet('lizard', 'assets/monsters/lizard.png', { frameWidth: 70, frameHeight: 50 });\n this.load.spritesheet('spider', 'assets/monsters/spider.png', { frameWidth: 77, frameHeight: 61 });\n this.load.spritesheet('spiderArmored', 'assets/monsters/spiderArmored.png', { frameWidth: 77, frameHeight: 61 });\n this.load.spritesheet('crate', 'assets/squares/crate.png', { frameWidth: 79, frameHeight: 80 });\n this.load.spritesheet('squareFire', 'assets/squares/squareFire.png', { frameWidth: 79, frameHeight: 80 });\n this.load.spritesheet('fireDisappear', 'assets/squares/fireDisappear.png', { frameWidth: 84, frameHeight: 133 });\n };\n LevelEditor.prototype.create = function () {\n pointer = this.input.activePointer;\n var background = this.add.image(W_WIDTH / 2, W_HEIGHT / 2, 'background');\n background.setScale(W_WIDTH / background.width);\n cursors = this.input.keyboard.createCursorKeys();\n this.matter.world.setBounds(0, 0, W_WIDTH, W_HEIGHT, 32, true, true, false, true);\n this.cameras.main.setBounds(0, 0, W_WIDTH, W_HEIGHT);\n var controlConfig = {\n camera: this.cameras.main,\n left: cursors.left,\n right: cursors.right,\n up: cursors.up,\n down: cursors.down,\n acceleration: 0.04,\n drag: 0.0005,\n maxSpeed: 0.7,\n };\n var gridConfig = {\n scene: this,\n cols: W_WIDTH / 50,\n rows: W_HEIGHT / 50,\n };\n aGrid = new alignGrid_1.default(gridConfig);\n controls = new Phaser.Cameras.Controls.SmoothedKeyControl(controlConfig);\n new 
levelEditorButton_1.default(550, 150, 'Clump', '#fff', 'clump', this);\n var menuNames = ['Clear', 'Crate', 'Lava', 'Dirt', 'Steel', 'Lizard', 'Spider', 'Player', 'Armored\\n Spider'];\n var menuSelects = ['clear', 'crate', 'lava', 'dirt', 'steel', 'lizard', 'spider', 'player', 'spiderArmored'];\n var menuButtons = [];\n for (var i = 0; i < 8; i++) {\n menuButtons.push(new levelEditorButton_1.default(550, 200 + i * 36, menuNames[i], '#fff', menuSelects[i], this));\n }\n // const crateButton = new LevelEditorButton(550, , 'Crate', '#fff', 'crate', this);\n // const lavaButton = new LevelEditorButton(550, 236, 'Lava', '#fff', 'lava', this);\n // const dirtButton = new LevelEditorButton(550, 272, 'Dirt', '#fff', 'dirt', this);\n // const steelButton = new LevelEditorButton(550, 308, 'Steel', '#fff', 'steel', this);\n // const lizardButton = new LevelEditorButton(550, 308, 'Lizard', '#fff', 'lizard', this);\n // const spiderButton = new LevelEditorButton(550, 308, 'Spider', '#fff', 'spider', this);\n // const armorSpiderButton = new LevelEditorButton(550, 308, 'Armored Spider', '#fff', 'spiderArmored', this);\n // const playerButton = new LevelEditorButton(550, 308, 'Player', '#fff', 'player', this);\n };\n LevelEditor.prototype.update = function (time, delta) {\n controls.update(delta);\n aGrid.show();\n if (pointer.isDown) {\n if (this.selected == 'clump') {\n }\n else if (!this.onButton) {\n aGrid.placeAt(pointer.worldX, pointer.worldY, this.selected, this);\n }\n }\n };\n return LevelEditor;\n}(Phaser.Scene));\nexports.LevelEditor = LevelEditor;\n\n\n//# sourceURL=webpack:///./src/scenes/level-editor-scene.ts?");
/***/ })
})
|
/* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* ops.c: implementation of various operators: op_shift, op_delete, op_tilde,
* op_change, op_yank, do_put, do_join
*/
#include "vim.h"
/*
* Number of registers.
* 0 = unnamed register, for normal yanks and puts
* 1..9 = registers '1' to '9', for deletes
* 10..35 = registers 'a' to 'z'
* 36 = delete register '-'
* 37 = Selection register '*'. Only if FEAT_CLIPBOARD defined
* 38 = Clipboard register '+'. Only if FEAT_CLIPBOARD and FEAT_X11 defined
*/
/*
* Symbolic names for some registers.
*/
#define DELETION_REGISTER 36
#ifdef FEAT_CLIPBOARD
# define STAR_REGISTER 37
# ifdef FEAT_X11
# define PLUS_REGISTER 38
# else
# define PLUS_REGISTER STAR_REGISTER /* there is only one */
# endif
#endif
#ifdef FEAT_DND
# define TILDE_REGISTER (PLUS_REGISTER + 1)
#endif
#ifdef FEAT_CLIPBOARD
# ifdef FEAT_DND
# define NUM_REGISTERS (TILDE_REGISTER + 1)
# else
# define NUM_REGISTERS (PLUS_REGISTER + 1)
# endif
#else
# define NUM_REGISTERS 37
#endif
/*
* Each yank register has an array of pointers to lines.
*/
typedef struct
{
char_u **y_array; /* pointer to array of line pointers */
linenr_T y_size; /* number of lines in y_array */
char_u y_type; /* MLINE, MCHAR or MBLOCK */
colnr_T y_width; /* only set if y_type == MBLOCK */
#ifdef FEAT_VIMINFO
time_t y_time_set;
#endif
} yankreg_T;
static yankreg_T y_regs[NUM_REGISTERS];
static yankreg_T *y_current; /* ptr to current yankreg */
static int y_append; /* TRUE when appending */
static yankreg_T *y_previous = NULL; /* ptr to last written yankreg */
/*
* structure used by block_prep, op_delete and op_yank for blockwise operators
* also op_change, op_shift, op_insert, op_replace - AKelly
*/
struct block_def
{
int startspaces; /* 'extra' cols before first char */
int endspaces; /* 'extra' cols after last char */
int textlen; /* chars in block */
char_u *textstart; /* pointer to 1st char (partially) in block */
colnr_T textcol; /* index of chars (partially) in block */
colnr_T start_vcol; /* start col of 1st char wholly inside block */
colnr_T end_vcol; /* start col of 1st char wholly after block */
#ifdef FEAT_VISUALEXTRA
int is_short; /* TRUE if line is too short to fit in block */
int is_MAX; /* TRUE if curswant==MAXCOL when starting */
int is_oneChar; /* TRUE if block within one character */
int pre_whitesp; /* screen cols of ws before block */
int pre_whitesp_c; /* chars of ws before block */
colnr_T end_char_vcols; /* number of vcols of post-block char */
#endif
colnr_T start_char_vcols; /* number of vcols of pre-block char */
};
#ifdef FEAT_VISUALEXTRA
static void shift_block(oparg_T *oap, int amount);
static void block_insert(oparg_T *oap, char_u *s, int b_insert, struct block_def*bdp);
#endif
static int stuff_yank(int, char_u *);
static void put_reedit_in_typebuf(int silent);
static int put_in_typebuf(char_u *s, int esc, int colon,
int silent);
static void stuffescaped(char_u *arg, int literally);
#ifdef FEAT_MBYTE
static void mb_adjust_opend(oparg_T *oap);
#endif
static void free_yank(long);
static void free_yank_all(void);
static int yank_copy_line(struct block_def *bd, long y_idx);
#ifdef FEAT_CLIPBOARD
static void copy_yank_reg(yankreg_T *reg);
static void may_set_selection(void);
#endif
static void dis_msg(char_u *p, int skip_esc);
static void block_prep(oparg_T *oap, struct block_def *, linenr_T, int);
static int do_addsub(int op_type, pos_T *pos, int length, linenr_T Prenum1);
#if defined(FEAT_CLIPBOARD) || defined(FEAT_EVAL)
static void str_to_reg(yankreg_T *y_ptr, int yank_type, char_u *str, long len, long blocklen, int str_list);
#endif
static int ends_in_white(linenr_T lnum);
#ifdef FEAT_COMMENTS
static int same_leader(linenr_T lnum, int, char_u *, int, char_u *);
static int fmt_check_par(linenr_T, int *, char_u **, int do_comments);
#else
static int fmt_check_par(linenr_T);
#endif
// Flags for third item in "opchars".
#define OPF_LINES 1 // operator always works on lines
#define OPF_CHANGE 2 // operator changes text
/*
* The names of operators.
* IMPORTANT: Index must correspond with defines in vim.h!!!
* The third field holds OPF_ flags.
*/
static char opchars[][3] =
{
{NUL, NUL, 0}, // OP_NOP
{'d', NUL, OPF_CHANGE}, // OP_DELETE
{'y', NUL, 0}, // OP_YANK
{'c', NUL, OPF_CHANGE}, // OP_CHANGE
{'<', NUL, OPF_LINES | OPF_CHANGE}, // OP_LSHIFT
{'>', NUL, OPF_LINES | OPF_CHANGE}, // OP_RSHIFT
{'!', NUL, OPF_LINES | OPF_CHANGE}, // OP_FILTER
{'g', '~', OPF_CHANGE}, // OP_TILDE
{'=', NUL, OPF_LINES | OPF_CHANGE}, // OP_INDENT
{'g', 'q', OPF_LINES | OPF_CHANGE}, // OP_FORMAT
{':', NUL, OPF_LINES}, // OP_COLON
{'g', 'U', OPF_CHANGE}, // OP_UPPER
{'g', 'u', OPF_CHANGE}, // OP_LOWER
{'J', NUL, OPF_LINES | OPF_CHANGE}, // DO_JOIN
{'g', 'J', OPF_LINES | OPF_CHANGE}, // DO_JOIN_NS
{'g', '?', OPF_CHANGE}, // OP_ROT13
{'r', NUL, OPF_CHANGE}, // OP_REPLACE
{'I', NUL, OPF_CHANGE}, // OP_INSERT
{'A', NUL, OPF_CHANGE}, // OP_APPEND
{'z', 'f', OPF_LINES}, // OP_FOLD
{'z', 'o', OPF_LINES}, // OP_FOLDOPEN
{'z', 'O', OPF_LINES}, // OP_FOLDOPENREC
{'z', 'c', OPF_LINES}, // OP_FOLDCLOSE
{'z', 'C', OPF_LINES}, // OP_FOLDCLOSEREC
{'z', 'd', OPF_LINES}, // OP_FOLDDEL
{'z', 'D', OPF_LINES}, // OP_FOLDDELREC
{'g', 'w', OPF_LINES | OPF_CHANGE}, // OP_FORMAT2
{'g', '@', OPF_CHANGE}, // OP_FUNCTION
{Ctrl_A, NUL, OPF_CHANGE}, // OP_NR_ADD
{Ctrl_X, NUL, OPF_CHANGE}, // OP_NR_SUB
};
/*
* Translate a command name into an operator type.
* Must only be called with a valid operator name!
*/
int
get_op_type(int char1, int char2)
{
int i;
if (char1 == 'r') /* ignore second character */
return OP_REPLACE;
if (char1 == '~') /* when tilde is an operator */
return OP_TILDE;
if (char1 == 'g' && char2 == Ctrl_A) /* add */
return OP_NR_ADD;
if (char1 == 'g' && char2 == Ctrl_X) /* subtract */
return OP_NR_SUB;
for (i = 0; ; ++i)
{
if (opchars[i][0] == char1 && opchars[i][1] == char2)
break;
if (i == (int)(sizeof(opchars) / sizeof(char [3]) - 1))
{
internal_error("get_op_type()");
break;
}
}
return i;
}
/*
* Return TRUE if operator "op" always works on whole lines.
*/
int
op_on_lines(int op)
{
return opchars[op][2] & OPF_LINES;
}
/*
* Return TRUE if operator "op" changes text.
*/
int
op_is_change(int op)
{
return opchars[op][2] & OPF_CHANGE;
}
/*
* Get first operator command character.
* Returns 'g' or 'z' if there is another command character.
*/
int
get_op_char(int optype)
{
return opchars[optype][0];
}
/*
* Get second operator command character.
*/
int
get_extra_op_char(int optype)
{
return opchars[optype][1];
}
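/*
 * Illustrative sketch added for clarity; not part of the original ops.c.
 * It shows how the opchars[] table above ties command characters to operator
 * types and OPF_ flags, assuming the usual OP_* defines from vim.h. Kept
 * inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_opchars_lookup(void)
{
    /* "d"  -> OP_DELETE: table entry {'d', NUL, OPF_CHANGE} */
    int del_op = get_op_type('d', NUL);
    /* "g~" -> OP_TILDE:  table entry {'g', '~', OPF_CHANGE} */
    int tilde_op = get_op_type('g', '~');

    /* '<' carries OPF_LINES; 'y' does not carry OPF_CHANGE */
    int lshift_linewise = op_on_lines(OP_LSHIFT);   /* non-zero */
    int yank_changes = op_is_change(OP_YANK);       /* zero */

    /* get_op_char(OP_FORMAT) == 'g', get_extra_op_char(OP_FORMAT) == 'q' */
}
#endif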
/*
* op_shift - handle a shift operation
*/
void
op_shift(oparg_T *oap, int curs_top, int amount)
{
long i;
int first_char;
char_u *s;
int block_col = 0;
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return;
if (oap->block_mode)
block_col = curwin->w_cursor.col;
for (i = oap->line_count; --i >= 0; )
{
first_char = *ml_get_curline();
if (first_char == NUL) /* empty line */
curwin->w_cursor.col = 0;
#ifdef FEAT_VISUALEXTRA
else if (oap->block_mode)
shift_block(oap, amount);
#endif
else
/* Move the line right if it doesn't start with '#', 'smartindent'
* isn't set or 'cindent' isn't set or '#' isn't in 'cino'. */
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
if (first_char != '#' || !preprocs_left())
#endif
{
shift_line(oap->op_type == OP_LSHIFT, p_sr, amount, FALSE);
}
++curwin->w_cursor.lnum;
}
changed_lines(oap->start.lnum, 0, oap->end.lnum + 1, 0L);
if (oap->block_mode)
{
curwin->w_cursor.lnum = oap->start.lnum;
curwin->w_cursor.col = block_col;
}
else if (curs_top) /* put cursor on first line, for ">>" */
{
curwin->w_cursor.lnum = oap->start.lnum;
beginline(BL_SOL | BL_FIX); /* shift_line() may have set cursor.col */
}
else
--curwin->w_cursor.lnum; /* put cursor on last line, for ":>" */
#ifdef FEAT_FOLDING
/* The cursor line is not in a closed fold */
foldOpenCursor();
#endif
if (oap->line_count > p_report)
{
if (oap->op_type == OP_RSHIFT)
s = (char_u *)">";
else
s = (char_u *)"<";
if (oap->line_count == 1)
{
if (amount == 1)
sprintf((char *)IObuff, _("1 line %sed 1 time"), s);
else
sprintf((char *)IObuff, _("1 line %sed %d times"), s, amount);
}
else
{
if (amount == 1)
sprintf((char *)IObuff, _("%ld lines %sed 1 time"),
oap->line_count, s);
else
sprintf((char *)IObuff, _("%ld lines %sed %d times"),
oap->line_count, s, amount);
}
msg(IObuff);
}
/*
* Set "'[" and "']" marks.
*/
curbuf->b_op_start = oap->start;
curbuf->b_op_end.lnum = oap->end.lnum;
curbuf->b_op_end.col = (colnr_T)STRLEN(ml_get(oap->end.lnum));
if (curbuf->b_op_end.col > 0)
--curbuf->b_op_end.col;
}
/*
* Shift the current line one shiftwidth left (if "left" != 0) or right.
* Leaves the cursor on the first blank in the line.
*/
void
shift_line(
int left,
int round,
int amount,
int call_changed_bytes) /* call changed_bytes() */
{
int count;
int i, j;
int p_sw = (int)get_sw_value(curbuf);
count = get_indent(); /* get current indent */
if (round) /* round off indent */
{
i = count / p_sw; /* number of p_sw rounded down */
j = count % p_sw; /* extra spaces */
if (j && left) /* first remove extra spaces */
--amount;
if (left)
{
i -= amount;
if (i < 0)
i = 0;
}
else
i += amount;
count = i * p_sw;
}
else /* original vi indent */
{
if (left)
{
count -= p_sw * amount;
if (count < 0)
count = 0;
}
else
count += p_sw * amount;
}
/* Set new indent */
#ifdef FEAT_VREPLACE
if (State & VREPLACE_FLAG)
change_indent(INDENT_SET, count, FALSE, NUL, call_changed_bytes);
else
#endif
(void)set_indent(count, call_changed_bytes ? SIN_CHANGED : 0);
}
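/*
 * Worked example added for clarity; not part of the original source. It
 * traces the 'shiftround' arithmetic in shift_line() for p_sw == 4 and a
 * current indent of 6 columns. Kept inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_shift_line_rounding(void)
{
    /* round != 0, left shift, amount == 1:
     *   i = 6 / 4 = 1, j = 6 % 4 = 2; j != 0 and left, so --amount (now 0);
     *   i -= 0 -> 1, count = 1 * 4 = 4      -> indent 6 becomes 4
     * round != 0, right shift, amount == 1:
     *   i += 1 -> 2, count = 2 * 4 = 8      -> indent 6 becomes 8
     * round == 0 (original vi): 6 - 4 = 2 for left, 6 + 4 = 10 for right. */
    shift_line(TRUE, TRUE, 1, TRUE);    /* left shift, rounded, one 'sw' */
}
#endif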
#if defined(FEAT_VISUALEXTRA) || defined(PROTO)
/*
* Shift one line of the current block one shiftwidth right or left.
* Leaves cursor on first character in block.
*/
static void
shift_block(oparg_T *oap, int amount)
{
int left = (oap->op_type == OP_LSHIFT);
int oldstate = State;
int total;
char_u *newp, *oldp;
int oldcol = curwin->w_cursor.col;
int p_sw = (int)get_sw_value(curbuf);
int p_ts = (int)curbuf->b_p_ts;
struct block_def bd;
int incr;
colnr_T ws_vcol;
int i = 0, j = 0;
int len;
#ifdef FEAT_RIGHTLEFT
int old_p_ri = p_ri;
p_ri = 0; /* don't want revins in indent */
#endif
State = INSERT; /* don't want REPLACE for State */
block_prep(oap, &bd, curwin->w_cursor.lnum, TRUE);
if (bd.is_short)
return;
/* total is number of screen columns to be inserted/removed */
total = (int)((unsigned)amount * (unsigned)p_sw);
if ((total / p_sw) != amount)
return; /* multiplication overflow */
oldp = ml_get_curline();
if (!left)
{
/*
* 1. Get start vcol
* 2. Total ws vcols
* 3. Divvy into TABs & spp
* 4. Construct new string
*/
total += bd.pre_whitesp; /* all virtual WS up to & incl a split TAB */
ws_vcol = bd.start_vcol - bd.pre_whitesp;
if (bd.startspaces)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
if ((*mb_ptr2len)(bd.textstart) == 1)
++bd.textstart;
else
{
ws_vcol = 0;
bd.startspaces = 0;
}
}
else
#endif
++bd.textstart;
}
for ( ; VIM_ISWHITE(*bd.textstart); )
{
/* TODO: is passing bd.textstart for start of the line OK? */
incr = lbr_chartabsize_adv(bd.textstart, &bd.textstart,
(colnr_T)(bd.start_vcol));
total += incr;
bd.start_vcol += incr;
}
/* OK, now total=all the VWS reqd, and textstart points at the 1st
* non-ws char in the block. */
if (!curbuf->b_p_et)
i = ((ws_vcol % p_ts) + total) / p_ts; /* number of tabs */
if (i)
j = ((ws_vcol % p_ts) + total) % p_ts; /* number of spp */
else
j = total;
/* if we're splitting a TAB, allow for it */
bd.textcol -= bd.pre_whitesp_c - (bd.startspaces != 0);
len = (int)STRLEN(bd.textstart) + 1;
newp = alloc_check((unsigned)(bd.textcol + i + j + len));
if (newp == NULL)
return;
vim_memset(newp, NUL, (size_t)(bd.textcol + i + j + len));
mch_memmove(newp, oldp, (size_t)bd.textcol);
vim_memset(newp + bd.textcol, TAB, (size_t)i);
vim_memset(newp + bd.textcol + i, ' ', (size_t)j);
/* the end */
mch_memmove(newp + bd.textcol + i + j, bd.textstart, (size_t)len);
}
else /* left */
{
colnr_T destination_col; /* column to which text in block will
be shifted */
char_u *verbatim_copy_end; /* end of the part of the line which is
copied verbatim */
colnr_T verbatim_copy_width;/* the (displayed) width of this part
of line */
unsigned fill; /* nr of spaces that replace a TAB */
unsigned new_line_len; /* the length of the line after the
block shift */
size_t block_space_width;
size_t shift_amount;
char_u *non_white = bd.textstart;
colnr_T non_white_col;
/*
* Firstly, let's find the first non-whitespace character that is
* displayed after the block's start column and the character's column
* number. Also, let's calculate the width of all the whitespace
* characters that are displayed in the block and precede the searched
* non-whitespace character.
*/
/* If "bd.startspaces" is set, "bd.textstart" points to the character,
* the part of which is displayed at the block's beginning. Let's start
* searching from the next character. */
if (bd.startspaces)
MB_PTR_ADV(non_white);
/* The character's column is in "bd.start_vcol". */
non_white_col = bd.start_vcol;
while (VIM_ISWHITE(*non_white))
{
incr = lbr_chartabsize_adv(bd.textstart, &non_white, non_white_col);
non_white_col += incr;
}
block_space_width = non_white_col - oap->start_vcol;
/* We will shift by "total" or "block_space_width", whichever is less.
*/
shift_amount = (block_space_width < (size_t)total
? block_space_width : (size_t)total);
/* The column to which we will shift the text. */
destination_col = (colnr_T)(non_white_col - shift_amount);
/* Now let's find out how much of the beginning of the line we can
* reuse without modification. */
verbatim_copy_end = bd.textstart;
verbatim_copy_width = bd.start_vcol;
/* If "bd.startspaces" is set, "bd.textstart" points to the character
* preceding the block. We have to subtract its width to obtain its
* column number. */
if (bd.startspaces)
verbatim_copy_width -= bd.start_char_vcols;
while (verbatim_copy_width < destination_col)
{
char_u *line = verbatim_copy_end;
/* TODO: is passing verbatim_copy_end for start of the line OK? */
incr = lbr_chartabsize(line, verbatim_copy_end,
verbatim_copy_width);
if (verbatim_copy_width + incr > destination_col)
break;
verbatim_copy_width += incr;
MB_PTR_ADV(verbatim_copy_end);
}
/* If "destination_col" is different from the width of the initial
* part of the line that will be copied, it means we encountered a tab
* character, which we will have to partly replace with spaces. */
fill = destination_col - verbatim_copy_width;
/* The replacement line will consist of:
* - the beginning of the original line up to "verbatim_copy_end",
* - "fill" number of spaces,
* - the rest of the line, pointed to by non_white. */
new_line_len = (unsigned)(verbatim_copy_end - oldp)
+ fill
+ (unsigned)STRLEN(non_white) + 1;
newp = alloc_check(new_line_len);
if (newp == NULL)
return;
mch_memmove(newp, oldp, (size_t)(verbatim_copy_end - oldp));
vim_memset(newp + (verbatim_copy_end - oldp), ' ', (size_t)fill);
STRMOVE(newp + (verbatim_copy_end - oldp) + fill, non_white);
}
/* replace the line */
ml_replace(curwin->w_cursor.lnum, newp, FALSE);
changed_bytes(curwin->w_cursor.lnum, (colnr_T)bd.textcol);
State = oldstate;
curwin->w_cursor.col = oldcol;
#ifdef FEAT_RIGHTLEFT
p_ri = old_p_ri;
#endif
}
#endif
#ifdef FEAT_VISUALEXTRA
/*
* Insert string "s" (b_insert ? before : after) block :AKelly
* Caller must prepare for undo.
*/
static void
block_insert(
oparg_T *oap,
char_u *s,
int b_insert,
struct block_def *bdp)
{
int p_ts;
int count = 0; /* extra spaces to replace a cut TAB */
int spaces = 0; /* non-zero if cutting a TAB */
colnr_T offset; /* pointer along new line */
unsigned s_len; /* STRLEN(s) */
char_u *newp, *oldp; /* new, old lines */
linenr_T lnum; /* loop var */
int oldstate = State;
State = INSERT; /* don't want REPLACE for State */
s_len = (unsigned)STRLEN(s);
for (lnum = oap->start.lnum + 1; lnum <= oap->end.lnum; lnum++)
{
block_prep(oap, bdp, lnum, TRUE);
if (bdp->is_short && b_insert)
continue; /* OP_INSERT, line ends before block start */
oldp = ml_get(lnum);
if (b_insert)
{
p_ts = bdp->start_char_vcols;
spaces = bdp->startspaces;
if (spaces != 0)
count = p_ts - 1; /* we're cutting a TAB */
offset = bdp->textcol;
}
else /* append */
{
p_ts = bdp->end_char_vcols;
if (!bdp->is_short) /* spaces = padding after block */
{
spaces = (bdp->endspaces ? p_ts - bdp->endspaces : 0);
if (spaces != 0)
count = p_ts - 1; /* we're cutting a TAB */
offset = bdp->textcol + bdp->textlen - (spaces != 0);
}
else /* spaces = padding to block edge */
{
/* if $ used, just append to EOL (i.e. spaces==0) */
if (!bdp->is_MAX)
spaces = (oap->end_vcol - bdp->end_vcol) + 1;
count = spaces;
offset = bdp->textcol + bdp->textlen;
}
}
#ifdef FEAT_MBYTE
if (has_mbyte && spaces > 0)
{
int off;
/* Avoid starting halfway a multi-byte character. */
if (b_insert)
{
off = (*mb_head_off)(oldp, oldp + offset + spaces);
}
else
{
off = (*mb_off_next)(oldp, oldp + offset);
offset += off;
}
spaces -= off;
count -= off;
}
#endif
newp = alloc_check((unsigned)(STRLEN(oldp)) + s_len + count + 1);
if (newp == NULL)
continue;
/* copy up to shifted part */
mch_memmove(newp, oldp, (size_t)(offset));
oldp += offset;
/* insert pre-padding */
vim_memset(newp + offset, ' ', (size_t)spaces);
/* copy the new text */
mch_memmove(newp + offset + spaces, s, (size_t)s_len);
offset += s_len;
if (spaces && !bdp->is_short)
{
/* insert post-padding */
vim_memset(newp + offset + spaces, ' ', (size_t)(p_ts - spaces));
/* We're splitting a TAB, don't copy it. */
oldp++;
/* We allowed for that TAB, remember this now */
count++;
}
if (spaces > 0)
offset += count;
STRMOVE(newp + offset, oldp);
ml_replace(lnum, newp, FALSE);
if (lnum == oap->end.lnum)
{
/* Set "']" mark to the end of the block instead of the end of
* the insert in the first line. */
curbuf->b_op_end.lnum = oap->end.lnum;
curbuf->b_op_end.col = offset;
}
} /* for all lnum */
changed_lines(oap->start.lnum + 1, 0, oap->end.lnum + 1, 0L);
State = oldstate;
}
#endif
#if defined(FEAT_LISP) || defined(FEAT_CINDENT) || defined(PROTO)
/*
* op_reindent - handle reindenting a block of lines.
*/
void
op_reindent(oparg_T *oap, int (*how)(void))
{
long i;
char_u *l;
int amount;
linenr_T first_changed = 0;
linenr_T last_changed = 0;
linenr_T start_lnum = curwin->w_cursor.lnum;
/* Don't even try when 'modifiable' is off. */
if (!curbuf->b_p_ma)
{
EMSG(_(e_modifiable));
return;
}
for (i = oap->line_count; --i >= 0 && !got_int; )
{
/* it's a slow thing to do, so give feedback so there's no worry that
* the computer's just hung. */
if (i > 1
&& (i % 50 == 0 || i == oap->line_count - 1)
&& oap->line_count > p_report)
smsg((char_u *)_("%ld lines to indent... "), i);
/*
* Be vi-compatible: For lisp indenting the first line is not
* indented, unless there is only one line.
*/
#ifdef FEAT_LISP
if (i != oap->line_count - 1 || oap->line_count == 1
|| how != get_lisp_indent)
#endif
{
l = skipwhite(ml_get_curline());
if (*l == NUL) /* empty or blank line */
amount = 0;
else
amount = how(); /* get the indent for this line */
if (amount >= 0 && set_indent(amount, SIN_UNDO))
{
/* did change the indent, call changed_lines() later */
if (first_changed == 0)
first_changed = curwin->w_cursor.lnum;
last_changed = curwin->w_cursor.lnum;
}
}
++curwin->w_cursor.lnum;
curwin->w_cursor.col = 0; /* make sure it's valid */
}
/* put cursor on first non-blank of indented line */
curwin->w_cursor.lnum = start_lnum;
beginline(BL_SOL | BL_FIX);
/* Mark changed lines so that they will be redrawn. When Visual
* highlighting was present, need to continue until the last line. When
* there is no change still need to remove the Visual highlighting. */
if (last_changed != 0)
changed_lines(first_changed, 0,
oap->is_VIsual ? start_lnum + oap->line_count :
last_changed + 1, 0L);
else if (oap->is_VIsual)
redraw_curbuf_later(INVERTED);
if (oap->line_count > p_report)
{
i = oap->line_count - (i + 1);
if (i == 1)
MSG(_("1 line indented "));
else
smsg((char_u *)_("%ld lines indented "), i);
}
/* set '[ and '] marks */
curbuf->b_op_start = oap->start;
curbuf->b_op_end = oap->end;
}
#endif /* defined(FEAT_LISP) || defined(FEAT_CINDENT) */
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Keep the last expression line here, for repeating.
*/
static char_u *expr_line = NULL;
/*
* Get an expression for the "\"=expr1" or "CTRL-R =expr1"
* Returns '=' when OK, NUL otherwise.
*/
int
get_expr_register(void)
{
char_u *new_line;
new_line = getcmdline('=', 0L, 0);
if (new_line == NULL)
return NUL;
if (*new_line == NUL) /* use previous line */
vim_free(new_line);
else
set_expr_line(new_line);
return '=';
}
/*
* Set the expression for the '=' register.
* Argument must be an allocated string.
*/
void
set_expr_line(char_u *new_line)
{
vim_free(expr_line);
expr_line = new_line;
}
/*
* Get the result of the '=' register expression.
* Returns a pointer to allocated memory, or NULL for failure.
*/
char_u *
get_expr_line(void)
{
char_u *expr_copy;
char_u *rv;
static int nested = 0;
if (expr_line == NULL)
return NULL;
/* Make a copy of the expression, because evaluating it may cause it to be
* changed. */
expr_copy = vim_strsave(expr_line);
if (expr_copy == NULL)
return NULL;
/* When we are invoked recursively limit the evaluation to 10 levels.
* Then return the string as-is. */
if (nested >= 10)
return expr_copy;
++nested;
rv = eval_to_string(expr_copy, NULL, TRUE);
--nested;
vim_free(expr_copy);
return rv;
}
/*
* Get the '=' register expression itself, without evaluating it.
*/
char_u *
get_expr_line_src(void)
{
if (expr_line == NULL)
return NULL;
return vim_strsave(expr_line);
}
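/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows how the '=' register functions above fit together: set_expr_line()
 * stores an allocated expression, get_expr_line() evaluates it. Kept inside
 * "#if 0" so it is never compiled.
 */
#if 0
static void
example_expr_register(void)
{
    char_u  *expr = vim_strsave((char_u *)"2 + 2");
    char_u  *result;

    if (expr == NULL)
	return;
    set_expr_line(expr);	/* takes ownership of the allocated string */
    result = get_expr_line();	/* evaluates the expression, may return NULL */
    if (result != NULL)
	vim_free(result);
}
#endif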
#endif /* FEAT_EVAL */
/*
* Check if 'regname' is a valid name of a yank register.
* Note: There is no check for 0 (default register), caller should do this
*/
int
valid_yank_reg(
int regname,
int writing) /* if TRUE check for writable registers */
{
if ( (regname > 0 && ASCII_ISALNUM(regname))
|| (!writing && vim_strchr((char_u *)
#ifdef FEAT_EVAL
"/.%:="
#else
"/.%:"
#endif
, regname) != NULL)
|| regname == '#'
|| regname == '"'
|| regname == '-'
|| regname == '_'
#ifdef FEAT_CLIPBOARD
|| regname == '*'
|| regname == '+'
#endif
#ifdef FEAT_DND
|| (!writing && regname == '~')
#endif
)
return TRUE;
return FALSE;
}
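/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows what valid_yank_reg() accepts, per the checks above. Kept inside
 * "#if 0" so it is never compiled.
 */
#if 0
static void
example_valid_yank_reg(void)
{
    /* alphanumeric registers can be both read and written */
    int ok1 = valid_yank_reg('a', TRUE);	/* TRUE */
    /* read-only registers such as ':' are only valid when not writing */
    int ok2 = valid_yank_reg(':', TRUE);	/* FALSE */
    int ok3 = valid_yank_reg(':', FALSE);	/* TRUE */
    /* the black hole register '_' is always accepted */
    int ok4 = valid_yank_reg('_', TRUE);	/* TRUE */
}
#endif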
/*
* Set y_current and y_append, according to the value of "regname".
* Cannot handle the '_' register.
* Must only be called with a valid register name!
*
* If regname is 0 and writing, use register 0
* If regname is 0 and reading, use previous register
*
* Return TRUE when the register should be inserted literally (selection or
* clipboard).
*/
int
get_yank_register(int regname, int writing)
{
int i;
int ret = FALSE;
y_append = FALSE;
if ((regname == 0 || regname == '"') && !writing && y_previous != NULL)
{
y_current = y_previous;
return ret;
}
i = regname;
if (VIM_ISDIGIT(i))
i -= '0';
else if (ASCII_ISLOWER(i))
i = CharOrdLow(i) + 10;
else if (ASCII_ISUPPER(i))
{
i = CharOrdUp(i) + 10;
y_append = TRUE;
}
else if (regname == '-')
i = DELETION_REGISTER;
#ifdef FEAT_CLIPBOARD
/* When selection is not available, use register 0 instead of '*' */
else if (clip_star.available && regname == '*')
{
i = STAR_REGISTER;
ret = TRUE;
}
/* When clipboard is not available, use register 0 instead of '+' */
else if (clip_plus.available && regname == '+')
{
i = PLUS_REGISTER;
ret = TRUE;
}
#endif
#ifdef FEAT_DND
else if (!writing && regname == '~')
i = TILDE_REGISTER;
#endif
else /* not 0-9, a-z, A-Z or '-': use register 0 */
i = 0;
y_current = &(y_regs[i]);
if (writing) /* remember the register we write into for do_put() */
y_previous = y_current;
return ret;
}
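/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows how register names map to indexes in y_regs[], following the code
 * above. Kept inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_get_yank_register(void)
{
    get_yank_register('3', TRUE);   /* y_current = &y_regs[3] */
    get_yank_register('a', TRUE);   /* y_current = &y_regs[10], y_append FALSE */
    get_yank_register('A', TRUE);   /* y_current = &y_regs[10], y_append TRUE */
    get_yank_register('-', TRUE);   /* y_current = &y_regs[DELETION_REGISTER] */
}
#endif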
#if defined(FEAT_CLIPBOARD) || defined(PROTO)
/*
* When "regname" is a clipboard register, obtain the selection. If it's not
* available return zero, otherwise return "regname".
*/
int
may_get_selection(int regname)
{
if (regname == '*')
{
if (!clip_star.available)
regname = 0;
else
clip_get_selection(&clip_star);
}
else if (regname == '+')
{
if (!clip_plus.available)
regname = 0;
else
clip_get_selection(&clip_plus);
}
return regname;
}
#endif
/*
* Obtain the contents of a "normal" register. The register is made empty,
* unless "copy" is TRUE. The returned pointer has allocated memory; use
* put_register() later.
*/
void *
get_register(
int name,
int copy) /* make a copy, if FALSE make register empty. */
{
yankreg_T *reg;
int i;
#ifdef FEAT_CLIPBOARD
/* When Visual area changed, may have to update selection. Obtain the
* selection too. */
if (name == '*' && clip_star.available)
{
if (clip_isautosel_star())
clip_update_selection(&clip_star);
may_get_selection(name);
}
if (name == '+' && clip_plus.available)
{
if (clip_isautosel_plus())
clip_update_selection(&clip_plus);
may_get_selection(name);
}
#endif
get_yank_register(name, 0);
reg = (yankreg_T *)alloc((unsigned)sizeof(yankreg_T));
if (reg != NULL)
{
*reg = *y_current;
if (copy)
{
/* If we run out of memory some or all of the lines are empty. */
if (reg->y_size == 0)
reg->y_array = NULL;
else
reg->y_array = (char_u **)alloc((unsigned)(sizeof(char_u *)
* reg->y_size));
if (reg->y_array != NULL)
{
for (i = 0; i < reg->y_size; ++i)
reg->y_array[i] = vim_strsave(y_current->y_array[i]);
}
}
else
y_current->y_array = NULL;
}
return (void *)reg;
}
/*
* Put "reg" into register "name". Free any previous contents and "reg".
*/
void
put_register(int name, void *reg)
{
get_yank_register(name, 0);
free_yank_all();
*y_current = *(yankreg_T *)reg;
vim_free(reg);
#ifdef FEAT_CLIPBOARD
/* Send text written to clipboard register to the clipboard. */
may_set_selection();
#endif
}
void
free_register(void *reg)
{
yankreg_T tmp;
tmp = *y_current;
*y_current = *(yankreg_T *)reg;
free_yank_all();
vim_free(reg);
*y_current = tmp;
}
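/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows the save/restore pattern that get_register()/put_register()
 * support. Kept inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_save_restore_register(void)
{
    /* Take a copy of the unnamed register so it can be restored later. */
    void *saved = get_register('"', TRUE);

    /* ... code that may clobber the unnamed register ... */

    if (saved != NULL)
	put_register('"', saved);   /* restores the contents and frees "saved" */
}
#endif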
#if defined(FEAT_MOUSE) || defined(PROTO)
/*
* return TRUE if the current yank register has type MLINE
*/
int
yank_register_mline(int regname)
{
if (regname != 0 && !valid_yank_reg(regname, FALSE))
return FALSE;
if (regname == '_') /* black hole is always empty */
return FALSE;
get_yank_register(regname, FALSE);
return (y_current->y_type == MLINE);
}
#endif
/*
* Start or stop recording into a yank register.
*
* Return FAIL for failure, OK otherwise.
*/
int
do_record(int c)
{
char_u *p;
static int regname;
yankreg_T *old_y_previous, *old_y_current;
int retval;
if (reg_recording == 0) /* start recording */
{
/* registers 0-9, a-z and " are allowed */
if (c < 0 || (!ASCII_ISALNUM(c) && c != '"'))
retval = FAIL;
else
{
reg_recording = c;
showmode();
regname = c;
retval = OK;
}
}
else /* stop recording */
{
/*
* Get the recorded key hits. K_SPECIAL and CSI will be escaped, this
* needs to be removed again to put it in a register. exec_reg then
* adds the escaping back later.
*/
reg_recording = 0;
MSG("");
p = get_recorded();
if (p == NULL)
retval = FAIL;
else
{
/* Remove escaping for CSI and K_SPECIAL in multi-byte chars. */
vim_unescape_csi(p);
/*
* We don't want to change the default register here, so save and
* restore the current register name.
*/
old_y_previous = y_previous;
old_y_current = y_current;
retval = stuff_yank(regname, p);
y_previous = old_y_previous;
y_current = old_y_current;
}
}
return retval;
}
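/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows the start/stop protocol of do_record(): the first call names the
 * register, the second call stops and stores the recorded keys (its argument
 * is not used then). Kept inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_do_record(void)
{
    do_record('a');	/* reg_recording == 0: start recording into register 'a' */
    /* ... typed keys are collected by the typeahead code while recording ... */
    do_record(0);	/* reg_recording != 0: stop; the recorded keys end up in 'a' */
}
#endif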
/*
* Stuff string "p" into yank register "regname" as a single line (append if
* uppercase). "p" must have been alloced.
*
* return FAIL for failure, OK otherwise
*/
static int
stuff_yank(int regname, char_u *p)
{
char_u *lp;
char_u **pp;
/* check for read-only register */
if (regname != 0 && !valid_yank_reg(regname, TRUE))
{
vim_free(p);
return FAIL;
}
if (regname == '_') /* black hole: don't do anything */
{
vim_free(p);
return OK;
}
get_yank_register(regname, TRUE);
if (y_append && y_current->y_array != NULL)
{
pp = &(y_current->y_array[y_current->y_size - 1]);
lp = lalloc((long_u)(STRLEN(*pp) + STRLEN(p) + 1), TRUE);
if (lp == NULL)
{
vim_free(p);
return FAIL;
}
STRCPY(lp, *pp);
STRCAT(lp, p);
vim_free(p);
vim_free(*pp);
*pp = lp;
}
else
{
free_yank_all();
if ((y_current->y_array =
(char_u **)alloc((unsigned)sizeof(char_u *))) == NULL)
{
vim_free(p);
return FAIL;
}
y_current->y_array[0] = p;
y_current->y_size = 1;
y_current->y_type = MCHAR; /* used to be MLINE, why? */
#ifdef FEAT_VIMINFO
y_current->y_time_set = vim_time();
#endif
}
return OK;
}
static int execreg_lastc = NUL;
/*
* Execute a yank register: copy it into the stuff buffer.
*
* Return FAIL for failure, OK otherwise.
*/
int
do_execreg(
int regname,
int colon, /* insert ':' before each line */
int addcr, /* always add '\n' to end of line */
int silent) /* set "silent" flag in typeahead buffer */
{
long i;
char_u *p;
int retval = OK;
int remap;
if (regname == '@') /* repeat previous one */
{
if (execreg_lastc == NUL)
{
EMSG(_("E748: No previously used register"));
return FAIL;
}
regname = execreg_lastc;
}
/* check for valid regname */
if (regname == '%' || regname == '#' || !valid_yank_reg(regname, FALSE))
{
emsg_invreg(regname);
return FAIL;
}
execreg_lastc = regname;
#ifdef FEAT_CLIPBOARD
regname = may_get_selection(regname);
#endif
if (regname == '_') /* black hole: don't stuff anything */
return OK;
#ifdef FEAT_CMDHIST
if (regname == ':') /* use last command line */
{
if (last_cmdline == NULL)
{
EMSG(_(e_nolastcmd));
return FAIL;
}
VIM_CLEAR(new_last_cmdline); /* don't keep the cmdline containing @: */
/* Escape all control characters with a CTRL-V */
p = vim_strsave_escaped_ext(last_cmdline,
(char_u *)"\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037", Ctrl_V, FALSE);
if (p != NULL)
{
/* When in Visual mode "'<,'>" will be prepended to the command.
* Remove it when it's already there. */
if (VIsual_active && STRNCMP(p, "'<,'>", 5) == 0)
retval = put_in_typebuf(p + 5, TRUE, TRUE, silent);
else
retval = put_in_typebuf(p, TRUE, TRUE, silent);
}
vim_free(p);
}
#endif
#ifdef FEAT_EVAL
else if (regname == '=')
{
p = get_expr_line();
if (p == NULL)
return FAIL;
retval = put_in_typebuf(p, TRUE, colon, silent);
vim_free(p);
}
#endif
else if (regname == '.') /* use last inserted text */
{
p = get_last_insert_save();
if (p == NULL)
{
EMSG(_(e_noinstext));
return FAIL;
}
retval = put_in_typebuf(p, FALSE, colon, silent);
vim_free(p);
}
else
{
get_yank_register(regname, FALSE);
if (y_current->y_array == NULL)
return FAIL;
/* Disallow remapping for ":@r". */
remap = colon ? REMAP_NONE : REMAP_YES;
/*
* Insert lines into typeahead buffer, from last one to first one.
*/
put_reedit_in_typebuf(silent);
for (i = y_current->y_size; --i >= 0; )
{
char_u *escaped;
/* insert NL between lines and after last line if type is MLINE */
if (y_current->y_type == MLINE || i < y_current->y_size - 1
|| addcr)
{
if (ins_typebuf((char_u *)"\n", remap, 0, TRUE, silent) == FAIL)
return FAIL;
}
escaped = vim_strsave_escape_csi(y_current->y_array[i]);
if (escaped == NULL)
return FAIL;
retval = ins_typebuf(escaped, remap, 0, TRUE, silent);
vim_free(escaped);
if (retval == FAIL)
return FAIL;
if (colon && ins_typebuf((char_u *)":", remap, 0, TRUE, silent)
== FAIL)
return FAIL;
}
reg_executing = regname == 0 ? '"' : regname; // disable "q" command
}
return retval;
}
/*
* If "restart_edit" is not zero, put it in the typeahead buffer, so that it's
* used only after other typeahead has been processed.
*/
static void
put_reedit_in_typebuf(int silent)
{
char_u buf[3];
if (restart_edit != NUL)
{
if (restart_edit == 'V')
{
buf[0] = 'g';
buf[1] = 'R';
buf[2] = NUL;
}
else
{
buf[0] = restart_edit == 'I' ? 'i' : restart_edit;
buf[1] = NUL;
}
if (ins_typebuf(buf, REMAP_NONE, 0, TRUE, silent) == OK)
restart_edit = NUL;
}
}
/*
* Insert register contents "s" into the typeahead buffer, so that it will be
* executed again.
* When "esc" is TRUE it is to be taken literally: Escape CSI characters and
* no remapping.
*/
static int
put_in_typebuf(
char_u *s,
int esc,
int colon, /* add ':' before the line */
int silent)
{
int retval = OK;
put_reedit_in_typebuf(silent);
if (colon)
retval = ins_typebuf((char_u *)"\n", REMAP_NONE, 0, TRUE, silent);
if (retval == OK)
{
char_u *p;
if (esc)
p = vim_strsave_escape_csi(s);
else
p = s;
if (p == NULL)
retval = FAIL;
else
retval = ins_typebuf(p, esc ? REMAP_NONE : REMAP_YES,
0, TRUE, silent);
if (esc)
vim_free(p);
}
if (colon && retval == OK)
retval = ins_typebuf((char_u *)":", REMAP_NONE, 0, TRUE, silent);
return retval;
}
/*
* Insert a yank register: copy it into the Read buffer.
* Used by CTRL-R command and middle mouse button in insert mode.
*
* return FAIL for failure, OK otherwise
*/
int
insert_reg(
int regname,
int literally_arg) /* insert literally, not as if typed */
{
long i;
int retval = OK;
char_u *arg;
int allocated;
int literally = literally_arg;
/*
* It is possible to get into an endless loop by having CTRL-R a in
* register a and then, in insert mode, doing CTRL-R a.
* If you hit CTRL-C, the loop will be broken here.
*/
ui_breakcheck();
if (got_int)
return FAIL;
/* check for valid regname */
if (regname != NUL && !valid_yank_reg(regname, FALSE))
return FAIL;
#ifdef FEAT_CLIPBOARD
regname = may_get_selection(regname);
#endif
if (regname == '.') /* insert last inserted text */
retval = stuff_inserted(NUL, 1L, TRUE);
else if (get_spec_reg(regname, &arg, &allocated, TRUE))
{
if (arg == NULL)
return FAIL;
stuffescaped(arg, literally);
if (allocated)
vim_free(arg);
}
else /* name or number register */
{
if (get_yank_register(regname, FALSE))
literally = TRUE;
if (y_current->y_array == NULL)
retval = FAIL;
else
{
for (i = 0; i < y_current->y_size; ++i)
{
stuffescaped(y_current->y_array[i], literally);
/*
* Insert a newline between lines and after last line if
* y_type is MLINE.
*/
if (y_current->y_type == MLINE || i < y_current->y_size - 1)
stuffcharReadbuff('\n');
}
}
}
return retval;
}
/*
* Stuff a string into the typeahead buffer, such that edit() will insert it
* literally ("literally" TRUE) or interpret is as typed characters.
*/
static void
stuffescaped(char_u *arg, int literally)
{
int c;
char_u *start;
while (*arg != NUL)
{
/* Stuff a sequence of normal ASCII characters, that's fast. Also
* stuff K_SPECIAL to get the effect of a special key when "literally"
* is TRUE. */
start = arg;
while ((*arg >= ' '
#ifndef EBCDIC
&& *arg < DEL /* EBCDIC: chars above space are normal */
#endif
)
|| (*arg == K_SPECIAL && !literally))
++arg;
if (arg > start)
stuffReadbuffLen(start, (long)(arg - start));
/* stuff a single special character */
if (*arg != NUL)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
c = mb_cptr2char_adv(&arg);
else
#endif
c = *arg++;
if (literally && ((c < ' ' && c != TAB) || c == DEL))
stuffcharReadbuff(Ctrl_V);
stuffcharReadbuff(c);
}
}
}
/*
* If "regname" is a special register, return TRUE and store a pointer to its
* value in "argp".
*/
int
get_spec_reg(
int regname,
char_u **argp,
int *allocated, /* return: TRUE when value was allocated */
int errmsg) /* give error message when failing */
{
int cnt;
*argp = NULL;
*allocated = FALSE;
switch (regname)
{
case '%': /* file name */
if (errmsg)
check_fname(); /* will give emsg if not set */
*argp = curbuf->b_fname;
return TRUE;
case '#': /* alternate file name */
*argp = getaltfname(errmsg); /* may give emsg if not set */
return TRUE;
#ifdef FEAT_EVAL
case '=': /* result of expression */
*argp = get_expr_line();
*allocated = TRUE;
return TRUE;
#endif
case ':': /* last command line */
if (last_cmdline == NULL && errmsg)
EMSG(_(e_nolastcmd));
*argp = last_cmdline;
return TRUE;
case '/': /* last search-pattern */
if (last_search_pat() == NULL && errmsg)
EMSG(_(e_noprevre));
*argp = last_search_pat();
return TRUE;
case '.': /* last inserted text */
*argp = get_last_insert_save();
*allocated = TRUE;
if (*argp == NULL && errmsg)
EMSG(_(e_noinstext));
return TRUE;
#ifdef FEAT_SEARCHPATH
case Ctrl_F: /* Filename under cursor */
case Ctrl_P: /* Path under cursor, expand via "path" */
if (!errmsg)
return FALSE;
*argp = file_name_at_cursor(FNAME_MESS | FNAME_HYP
| (regname == Ctrl_P ? FNAME_EXP : 0), 1L, NULL);
*allocated = TRUE;
return TRUE;
#endif
case Ctrl_W: /* word under cursor */
case Ctrl_A: /* WORD (mnemonic All) under cursor */
if (!errmsg)
return FALSE;
cnt = find_ident_under_cursor(argp, regname == Ctrl_W
? (FIND_IDENT|FIND_STRING) : FIND_STRING);
*argp = cnt ? vim_strnsave(*argp, cnt) : NULL;
*allocated = TRUE;
return TRUE;
case Ctrl_L: /* Line under cursor */
if (!errmsg)
return FALSE;
*argp = ml_get_buf(curwin->w_buffer,
curwin->w_cursor.lnum, FALSE);
return TRUE;
case '_': /* black hole: always empty */
*argp = (char_u *)"";
return TRUE;
}
return FALSE;
}
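/*
 * Illustrative sketch added for clarity; not part of the original source.
 * It shows how a caller uses get_spec_reg() and the "allocated" flag. Kept
 * inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_get_spec_reg(void)
{
    char_u  *val;
    int	    allocated;

    /* '%' yields the current file name; "allocated" tells the caller whether
     * the value must be freed with vim_free(). */
    if (get_spec_reg('%', &val, &allocated, FALSE) && val != NULL)
    {
	/* ... use "val" ... */
	if (allocated)
	    vim_free(val);
    }
}
#endif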
/*
* Paste a yank register into the command line.
* Only for non-special registers.
* Used by CTRL-R command in command-line mode
* insert_reg() can't be used here, because special characters from the
* register contents will be interpreted as commands.
*
* return FAIL for failure, OK otherwise
*/
int
cmdline_paste_reg(
int regname,
int literally_arg, /* Insert text literally instead of "as typed" */
int remcr) /* don't add CR characters */
{
long i;
int literally = literally_arg;
if (get_yank_register(regname, FALSE))
literally = TRUE;
if (y_current->y_array == NULL)
return FAIL;
for (i = 0; i < y_current->y_size; ++i)
{
cmdline_paste_str(y_current->y_array[i], literally);
/* Insert ^M between lines and after last line if type is MLINE.
* Don't do this when "remcr" is TRUE. */
if ((y_current->y_type == MLINE || i < y_current->y_size - 1) && !remcr)
cmdline_paste_str((char_u *)"\r", literally);
/* Check for CTRL-C, in case someone tries to paste a few thousand
* lines and gets bored. */
ui_breakcheck();
if (got_int)
return FAIL;
}
return OK;
}
#if defined(FEAT_CLIPBOARD) || defined(PROTO)
/*
* Adjust the register name pointed to by "rp": when 'clipboard' says to
* always use the clipboard, substitute '*' or '+'; fall back to the unnamed
* register when that clipboard is not available.
*/
void
adjust_clip_reg(int *rp)
{
/* If no reg. specified, and "unnamed" or "unnamedplus" is in 'clipboard',
* use '*' or '+' reg, respectively. "unnamedplus" prevails. */
if (*rp == 0 && (clip_unnamed != 0 || clip_unnamed_saved != 0))
{
if (clip_unnamed != 0)
*rp = ((clip_unnamed & CLIP_UNNAMED_PLUS) && clip_plus.available)
? '+' : '*';
else
*rp = ((clip_unnamed_saved & CLIP_UNNAMED_PLUS) && clip_plus.available)
? '+' : '*';
}
if (!clip_star.available && *rp == '*')
*rp = 0;
if (!clip_plus.available && *rp == '+')
*rp = 0;
}
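/*
 * Illustrative sketch added for clarity; not part of the original source.
 * With 'clipboard' containing "unnamed" and the '*' register available, an
 * unspecified register becomes '*'; with "unnamedplus" it becomes '+'. Kept
 * inside "#if 0" so it is never compiled.
 */
#if 0
static void
example_adjust_clip_reg(void)
{
    int regname = 0;		/* no register was given with the command */

    adjust_clip_reg(&regname);	/* may become '*' or '+', or stay 0 when the
				 * clipboard is not available */
}
#endif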
#endif
/*
* Shift the delete registers: "9 is cleared, "8 becomes "9, etc.
*/
void
shift_delete_registers()
{
int n;
y_current = &y_regs[9];
free_yank_all(); /* free register nine */
for (n = 9; n > 1; --n)
y_regs[n] = y_regs[n - 1];
y_current = &y_regs[1];
if (!y_append)
y_previous = y_current;
y_regs[1].y_array = NULL; /* set register one to empty */
}
#if defined(FEAT_EVAL)
static void
yank_do_autocmd(oparg_T *oap, yankreg_T *reg)
{
static int recursive = FALSE;
dict_T *v_event;
list_T *list;
int n;
char_u buf[NUMBUFLEN + 2];
long reglen = 0;
if (recursive)
return;
v_event = get_vim_var_dict(VV_EVENT);
list = list_alloc();
if (list == NULL)
return;
for (n = 0; n < reg->y_size; n++)
list_append_string(list, reg->y_array[n], -1);
list->lv_lock = VAR_FIXED;
dict_add_list(v_event, "regcontents", list);
buf[0] = (char_u)oap->regname;
buf[1] = NUL;
dict_add_nr_str(v_event, "regname", 0, buf);
buf[0] = get_op_char(oap->op_type);
buf[1] = get_extra_op_char(oap->op_type);
buf[2] = NUL;
dict_add_nr_str(v_event, "operator", 0, buf);
buf[0] = NUL;
buf[1] = NUL;
switch (get_reg_type(oap->regname, &reglen))
{
case MLINE: buf[0] = 'V'; break;
case MCHAR: buf[0] = 'v'; break;
case MBLOCK:
vim_snprintf((char *)buf, sizeof(buf), "%c%ld", Ctrl_V,
reglen + 1);
break;
}
dict_add_nr_str(v_event, "regtype", 0, buf);
/* Lock the dictionary and its keys */
dict_set_items_ro(v_event);
recursive = TRUE;
textlock++;
apply_autocmds(EVENT_TEXTYANKPOST, NULL, NULL, FALSE, curbuf);
textlock--;
recursive = FALSE;
/* Empty the dictionary, v:event is still valid */
dict_free_contents(v_event);
hash_init(&v_event->dv_hashtab);
}
#endif
/*
* Handle a delete operation.
*
* Return FAIL if undo failed, OK otherwise.
*/
int
op_delete(oparg_T *oap)
{
int n;
linenr_T lnum;
char_u *ptr;
char_u *newp, *oldp;
struct block_def bd;
linenr_T old_lcount = curbuf->b_ml.ml_line_count;
int did_yank = FALSE;
int orig_regname = oap->regname;
if (curbuf->b_ml.ml_flags & ML_EMPTY) /* nothing to do */
return OK;
/* Nothing to delete, return here. Do prepare undo, for op_change(). */
if (oap->empty)
return u_save_cursor();
if (!curbuf->b_p_ma)
{
EMSG(_(e_modifiable));
return FAIL;
}
#ifdef FEAT_CLIPBOARD
adjust_clip_reg(&oap->regname);
#endif
#ifdef FEAT_MBYTE
if (has_mbyte)
mb_adjust_opend(oap);
#endif
/*
* Imitate the strange Vi behaviour: If the delete spans more than one
* line and motion_type == MCHAR and the result is a blank line, make the
* delete linewise. Don't do this for the change command or Visual mode.
*/
if ( oap->motion_type == MCHAR
&& !oap->is_VIsual
&& !oap->block_mode
&& oap->line_count > 1
&& oap->motion_force == NUL
&& oap->op_type == OP_DELETE)
{
ptr = ml_get(oap->end.lnum) + oap->end.col;
if (*ptr != NUL)
ptr += oap->inclusive;
ptr = skipwhite(ptr);
if (*ptr == NUL && inindent(0))
oap->motion_type = MLINE;
}
/*
* Check for trying to delete (e.g. "D") in an empty line.
* Note: For the change operator it is ok.
*/
if ( oap->motion_type == MCHAR
&& oap->line_count == 1
&& oap->op_type == OP_DELETE
&& *ml_get(oap->start.lnum) == NUL)
{
/*
* It's an error to operate on an empty region when 'E' is included in
* 'cpoptions' (Vi compatible).
*/
#ifdef FEAT_VIRTUALEDIT
if (virtual_op)
/* Virtual editing: Nothing gets deleted, but we set the '[ and ']
* marks as if it happened. */
goto setmarks;
#endif
if (vim_strchr(p_cpo, CPO_EMPTYREGION) != NULL)
beep_flush();
return OK;
}
/*
* Do a yank of whatever we're about to delete.
* If a yank register was specified, put the deleted text into that
* register. For the black hole register '_' don't yank anything.
*/
if (oap->regname != '_')
{
if (oap->regname != 0)
{
/* check for read-only register */
if (!valid_yank_reg(oap->regname, TRUE))
{
beep_flush();
return OK;
}
get_yank_register(oap->regname, TRUE); /* yank into specif'd reg. */
if (op_yank(oap, TRUE, FALSE) == OK) /* yank without message */
did_yank = TRUE;
}
/*
* Put deleted text into register 1 and shift number registers if the
* delete contains a line break, or when a regname has been specified.
* Use the register name from before adjust_clip_reg() may have
* changed it.
*/
if (orig_regname != 0 || oap->motion_type == MLINE
|| oap->line_count > 1 || oap->use_reg_one)
{
shift_delete_registers();
if (op_yank(oap, TRUE, FALSE) == OK)
did_yank = TRUE;
}
/* Yank into small delete register when no named register specified
* and the delete is within one line. */
if ((
#ifdef FEAT_CLIPBOARD
((clip_unnamed & CLIP_UNNAMED) && oap->regname == '*') ||
((clip_unnamed & CLIP_UNNAMED_PLUS) && oap->regname == '+') ||
#endif
oap->regname == 0) && oap->motion_type != MLINE
&& oap->line_count == 1)
{
oap->regname = '-';
get_yank_register(oap->regname, TRUE);
if (op_yank(oap, TRUE, FALSE) == OK)
did_yank = TRUE;
oap->regname = 0;
}
/*
* If there's too much stuff to fit in the yank register, then get a
* confirmation before doing the delete. This is crude, but simple.
* And it avoids doing a delete of something we can't put back if we
* want.
*/
if (!did_yank)
{
int msg_silent_save = msg_silent;
msg_silent = 0; /* must display the prompt */
n = ask_yesno((char_u *)_("cannot yank; delete anyway"), TRUE);
msg_silent = msg_silent_save;
if (n != 'y')
{
EMSG(_(e_abort));
return FAIL;
}
}
#if defined(FEAT_EVAL)
if (did_yank && has_textyankpost())
yank_do_autocmd(oap, y_current);
#endif
}
/*
* block mode delete
*/
if (oap->block_mode)
{
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return FAIL;
for (lnum = curwin->w_cursor.lnum; lnum <= oap->end.lnum; ++lnum)
{
block_prep(oap, &bd, lnum, TRUE);
if (bd.textlen == 0) /* nothing to delete */
continue;
/* Adjust cursor position for tab replaced by spaces and 'lbr'. */
if (lnum == curwin->w_cursor.lnum)
{
curwin->w_cursor.col = bd.textcol + bd.startspaces;
# ifdef FEAT_VIRTUALEDIT
curwin->w_cursor.coladd = 0;
# endif
}
/* n == number of chars deleted
* If we delete a TAB, it may be replaced by several characters.
* Thus the number of characters may increase!
*/
n = bd.textlen - bd.startspaces - bd.endspaces;
oldp = ml_get(lnum);
newp = alloc_check((unsigned)STRLEN(oldp) + 1 - n);
if (newp == NULL)
continue;
/* copy up to deleted part */
mch_memmove(newp, oldp, (size_t)bd.textcol);
/* insert spaces */
vim_memset(newp + bd.textcol, ' ',
(size_t)(bd.startspaces + bd.endspaces));
/* copy the part after the deleted part */
oldp += bd.textcol + bd.textlen;
STRMOVE(newp + bd.textcol + bd.startspaces + bd.endspaces, oldp);
/* replace the line */
ml_replace(lnum, newp, FALSE);
}
check_cursor_col();
changed_lines(curwin->w_cursor.lnum, curwin->w_cursor.col,
oap->end.lnum + 1, 0L);
oap->line_count = 0; /* no lines deleted */
}
else if (oap->motion_type == MLINE)
{
if (oap->op_type == OP_CHANGE)
{
/* Delete the lines except the first one. Temporarily move the
* cursor to the next line. Save the current line number, if the
* last line is deleted it may be changed.
*/
if (oap->line_count > 1)
{
lnum = curwin->w_cursor.lnum;
++curwin->w_cursor.lnum;
del_lines((long)(oap->line_count - 1), TRUE);
curwin->w_cursor.lnum = lnum;
}
if (u_save_cursor() == FAIL)
return FAIL;
if (curbuf->b_p_ai) /* don't delete indent */
{
beginline(BL_WHITE); /* cursor on first non-white */
did_ai = TRUE; /* delete the indent when ESC hit */
ai_col = curwin->w_cursor.col;
}
else
beginline(0); /* cursor in column 0 */
truncate_line(FALSE); /* delete the rest of the line */
/* leave cursor past last char in line */
if (oap->line_count > 1)
u_clearline(); /* "U" command not possible after "2cc" */
}
else
{
del_lines(oap->line_count, TRUE);
beginline(BL_WHITE | BL_FIX);
u_clearline(); /* "U" command not possible after "dd" */
}
}
else
{
#ifdef FEAT_VIRTUALEDIT
if (virtual_op)
{
int endcol = 0;
/* For virtualedit: break the tabs that are partly included. */
if (gchar_pos(&oap->start) == '\t')
{
if (u_save_cursor() == FAIL) /* save first line for undo */
return FAIL;
if (oap->line_count == 1)
endcol = getviscol2(oap->end.col, oap->end.coladd);
coladvance_force(getviscol2(oap->start.col, oap->start.coladd));
oap->start = curwin->w_cursor;
if (oap->line_count == 1)
{
coladvance(endcol);
oap->end.col = curwin->w_cursor.col;
oap->end.coladd = curwin->w_cursor.coladd;
curwin->w_cursor = oap->start;
}
}
/* Break a tab only when it's included in the area. */
if (gchar_pos(&oap->end) == '\t'
&& (int)oap->end.coladd < oap->inclusive)
{
/* save last line for undo */
if (u_save((linenr_T)(oap->end.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return FAIL;
curwin->w_cursor = oap->end;
coladvance_force(getviscol2(oap->end.col, oap->end.coladd));
oap->end = curwin->w_cursor;
curwin->w_cursor = oap->start;
}
}
#endif
if (oap->line_count == 1) /* delete characters within one line */
{
if (u_save_cursor() == FAIL) /* save line for undo */
return FAIL;
/* if 'cpoptions' contains '$', display '$' at end of change */
if ( vim_strchr(p_cpo, CPO_DOLLAR) != NULL
&& oap->op_type == OP_CHANGE
&& oap->end.lnum == curwin->w_cursor.lnum
&& !oap->is_VIsual)
display_dollar(oap->end.col - !oap->inclusive);
n = oap->end.col - oap->start.col + 1 - !oap->inclusive;
#ifdef FEAT_VIRTUALEDIT
if (virtual_op)
{
/* fix up things for virtualedit-delete:
* break the tabs which are going to get in our way
*/
char_u *curline = ml_get_curline();
int len = (int)STRLEN(curline);
if (oap->end.coladd != 0
&& (int)oap->end.col >= len - 1
&& !(oap->start.coladd && (int)oap->end.col >= len - 1))
n++;
/* Delete at least one char (e.g., when on a control char). */
if (n == 0 && oap->start.coladd != oap->end.coladd)
n = 1;
/* When a char in the line was deleted, reset coladd. */
if (gchar_cursor() != NUL)
curwin->w_cursor.coladd = 0;
}
#endif
(void)del_bytes((long)n, !virtual_op,
oap->op_type == OP_DELETE && !oap->is_VIsual);
}
else /* delete characters between lines */
{
pos_T curpos;
/* save deleted and changed lines for undo */
if (u_save((linenr_T)(curwin->w_cursor.lnum - 1),
(linenr_T)(curwin->w_cursor.lnum + oap->line_count)) == FAIL)
return FAIL;
truncate_line(TRUE); /* delete from cursor to end of line */
curpos = curwin->w_cursor; /* remember curwin->w_cursor */
++curwin->w_cursor.lnum;
del_lines((long)(oap->line_count - 2), FALSE);
/* delete from start of line until op_end */
n = (oap->end.col + 1 - !oap->inclusive);
curwin->w_cursor.col = 0;
(void)del_bytes((long)n, !virtual_op,
oap->op_type == OP_DELETE && !oap->is_VIsual);
curwin->w_cursor = curpos; /* restore curwin->w_cursor */
(void)do_join(2, FALSE, FALSE, FALSE, FALSE);
}
}
msgmore(curbuf->b_ml.ml_line_count - old_lcount);
#ifdef FEAT_VIRTUALEDIT
setmarks:
#endif
if (oap->block_mode)
{
curbuf->b_op_end.lnum = oap->end.lnum;
curbuf->b_op_end.col = oap->start.col;
}
else
curbuf->b_op_end = oap->start;
curbuf->b_op_start = oap->start;
return OK;
}
#ifdef FEAT_MBYTE
/*
* Adjust end of operating area for ending on a multi-byte character.
* Used for deletion.
*/
static void
mb_adjust_opend(oparg_T *oap)
{
char_u *p;
if (oap->inclusive)
{
p = ml_get(oap->end.lnum);
oap->end.col += mb_tail_off(p, p + oap->end.col);
}
}
#endif
#if defined(FEAT_VISUALEXTRA) || defined(PROTO)
/*
* Replace a whole area with one character.
*/
int
op_replace(oparg_T *oap, int c)
{
int n, numc;
#ifdef FEAT_MBYTE
int num_chars;
#endif
char_u *newp, *oldp;
size_t oldlen;
struct block_def bd;
char_u *after_p = NULL;
int had_ctrl_v_cr = FALSE;
if ((curbuf->b_ml.ml_flags & ML_EMPTY ) || oap->empty)
return OK; /* nothing to do */
if (c == REPLACE_CR_NCHAR)
{
had_ctrl_v_cr = TRUE;
c = CAR;
}
else if (c == REPLACE_NL_NCHAR)
{
had_ctrl_v_cr = TRUE;
c = NL;
}
#ifdef FEAT_MBYTE
if (has_mbyte)
mb_adjust_opend(oap);
#endif
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return FAIL;
/*
* block mode replace
*/
if (oap->block_mode)
{
bd.is_MAX = (curwin->w_curswant == MAXCOL);
for ( ; curwin->w_cursor.lnum <= oap->end.lnum; ++curwin->w_cursor.lnum)
{
curwin->w_cursor.col = 0; /* make sure cursor position is valid */
block_prep(oap, &bd, curwin->w_cursor.lnum, TRUE);
if (bd.textlen == 0 && (!virtual_op || bd.is_MAX))
continue; /* nothing to replace */
/* n == number of extra chars required
* If we split a TAB, it may be replaced by several characters.
* Thus the number of characters may increase!
*/
#ifdef FEAT_VIRTUALEDIT
/* If the range starts in virtual space, count the initial
* coladd offset as part of "startspaces" */
if (virtual_op && bd.is_short && *bd.textstart == NUL)
{
pos_T vpos;
vpos.lnum = curwin->w_cursor.lnum;
getvpos(&vpos, oap->start_vcol);
bd.startspaces += vpos.coladd;
n = bd.startspaces;
}
else
#endif
/* allow for pre spaces */
n = (bd.startspaces ? bd.start_char_vcols - 1 : 0);
/* allow for post spp */
n += (bd.endspaces
#ifdef FEAT_VIRTUALEDIT
&& !bd.is_oneChar
#endif
&& bd.end_char_vcols > 0) ? bd.end_char_vcols - 1 : 0;
/* Figure out how many characters to replace. */
numc = oap->end_vcol - oap->start_vcol + 1;
if (bd.is_short && (!virtual_op || bd.is_MAX))
numc -= (oap->end_vcol - bd.end_vcol) + 1;
#ifdef FEAT_MBYTE
/* A double-wide character can only be replaced half as many
* times. */
if ((*mb_char2cells)(c) > 1)
{
if ((numc & 1) && !bd.is_short)
{
++bd.endspaces;
++n;
}
numc = numc / 2;
}
/* Compute bytes needed, move character count to num_chars. */
num_chars = numc;
numc *= (*mb_char2len)(c);
#endif
/* oldlen includes textlen, so don't double count */
n += numc - bd.textlen;
oldp = ml_get_curline();
oldlen = STRLEN(oldp);
newp = alloc_check((unsigned)oldlen + 1 + n);
if (newp == NULL)
continue;
vim_memset(newp, NUL, (size_t)(oldlen + 1 + n));
/* copy up to deleted part */
mch_memmove(newp, oldp, (size_t)bd.textcol);
oldp += bd.textcol + bd.textlen;
/* insert pre-spaces */
vim_memset(newp + bd.textcol, ' ', (size_t)bd.startspaces);
/* insert replacement chars CHECK FOR ALLOCATED SPACE */
/* REPLACE_CR_NCHAR/REPLACE_NL_NCHAR is used for entering CR
* literally. */
if (had_ctrl_v_cr || (c != '\r' && c != '\n'))
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
n = (int)STRLEN(newp);
while (--num_chars >= 0)
n += (*mb_char2bytes)(c, newp + n);
}
else
#endif
vim_memset(newp + STRLEN(newp), c, (size_t)numc);
if (!bd.is_short)
{
/* insert post-spaces */
vim_memset(newp + STRLEN(newp), ' ', (size_t)bd.endspaces);
/* copy the part after the changed part */
STRMOVE(newp + STRLEN(newp), oldp);
}
}
else
{
/* Replacing with \r or \n means splitting the line. */
after_p = alloc_check(
(unsigned)(oldlen + 1 + n - STRLEN(newp)));
if (after_p != NULL)
STRMOVE(after_p, oldp);
}
/* replace the line */
ml_replace(curwin->w_cursor.lnum, newp, FALSE);
if (after_p != NULL)
{
ml_append(curwin->w_cursor.lnum++, after_p, 0, FALSE);
appended_lines_mark(curwin->w_cursor.lnum, 1L);
oap->end.lnum++;
vim_free(after_p);
}
}
}
else
{
/*
* MCHAR and MLINE motion replace.
*/
if (oap->motion_type == MLINE)
{
oap->start.col = 0;
curwin->w_cursor.col = 0;
oap->end.col = (colnr_T)STRLEN(ml_get(oap->end.lnum));
if (oap->end.col)
--oap->end.col;
}
else if (!oap->inclusive)
dec(&(oap->end));
while (LTOREQ_POS(curwin->w_cursor, oap->end))
{
n = gchar_cursor();
if (n != NUL)
{
#ifdef FEAT_MBYTE
if ((*mb_char2len)(c) > 1 || (*mb_char2len)(n) > 1)
{
/* This is slow, but it handles replacing a single-byte
* with a multi-byte and the other way around. */
if (curwin->w_cursor.lnum == oap->end.lnum)
oap->end.col += (*mb_char2len)(c) - (*mb_char2len)(n);
n = State;
State = REPLACE;
ins_char(c);
State = n;
/* Backup to the replaced character. */
dec_cursor();
}
else
#endif
{
#ifdef FEAT_VIRTUALEDIT
if (n == TAB)
{
int end_vcol = 0;
if (curwin->w_cursor.lnum == oap->end.lnum)
{
/* oap->end has to be recalculated when
* the tab breaks */
end_vcol = getviscol2(oap->end.col,
oap->end.coladd);
}
coladvance_force(getviscol());
if (curwin->w_cursor.lnum == oap->end.lnum)
getvpos(&oap->end, end_vcol);
}
#endif
PCHAR(curwin->w_cursor, c);
}
}
#ifdef FEAT_VIRTUALEDIT
else if (virtual_op && curwin->w_cursor.lnum == oap->end.lnum)
{
int virtcols = oap->end.coladd;
if (curwin->w_cursor.lnum == oap->start.lnum
&& oap->start.col == oap->end.col && oap->start.coladd)
virtcols -= oap->start.coladd;
/* oap->end has been trimmed so it's effectively inclusive;
* as a result an extra +1 must be counted so we don't
* trample the NUL byte. */
coladvance_force(getviscol2(oap->end.col, oap->end.coladd) + 1);
curwin->w_cursor.col -= (virtcols + 1);
for (; virtcols >= 0; virtcols--)
{
PCHAR(curwin->w_cursor, c);
if (inc(&curwin->w_cursor) == -1)
break;
}
}
#endif
/* Advance to next character, stop at the end of the file. */
if (inc_cursor() == -1)
break;
}
}
curwin->w_cursor = oap->start;
check_cursor();
changed_lines(oap->start.lnum, oap->start.col, oap->end.lnum + 1, 0L);
/* Set "'[" and "']" marks. */
curbuf->b_op_start = oap->start;
curbuf->b_op_end = oap->end;
return OK;
}
#endif
static int swapchars(int op_type, pos_T *pos, int length);
/*
* Handle the (non-standard vi) tilde operator. Also for "gu", "gU" and "g?".
*/
void
op_tilde(oparg_T *oap)
{
pos_T pos;
struct block_def bd;
int did_change = FALSE;
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return;
pos = oap->start;
if (oap->block_mode) /* Visual block mode */
{
for (; pos.lnum <= oap->end.lnum; ++pos.lnum)
{
int one_change;
block_prep(oap, &bd, pos.lnum, FALSE);
pos.col = bd.textcol;
one_change = swapchars(oap->op_type, &pos, bd.textlen);
did_change |= one_change;
#ifdef FEAT_NETBEANS_INTG
if (netbeans_active() && one_change)
{
char_u *ptr = ml_get_buf(curbuf, pos.lnum, FALSE);
netbeans_removed(curbuf, pos.lnum, bd.textcol,
(long)bd.textlen);
netbeans_inserted(curbuf, pos.lnum, bd.textcol,
&ptr[bd.textcol], bd.textlen);
}
#endif
}
if (did_change)
changed_lines(oap->start.lnum, 0, oap->end.lnum + 1, 0L);
}
else /* not block mode */
{
if (oap->motion_type == MLINE)
{
oap->start.col = 0;
pos.col = 0;
oap->end.col = (colnr_T)STRLEN(ml_get(oap->end.lnum));
if (oap->end.col)
--oap->end.col;
}
else if (!oap->inclusive)
dec(&(oap->end));
if (pos.lnum == oap->end.lnum)
did_change = swapchars(oap->op_type, &pos,
oap->end.col - pos.col + 1);
else
for (;;)
{
did_change |= swapchars(oap->op_type, &pos,
pos.lnum == oap->end.lnum ? oap->end.col + 1:
(int)STRLEN(ml_get_pos(&pos)));
if (LTOREQ_POS(oap->end, pos) || inc(&pos) == -1)
break;
}
if (did_change)
{
changed_lines(oap->start.lnum, oap->start.col, oap->end.lnum + 1,
0L);
#ifdef FEAT_NETBEANS_INTG
if (netbeans_active() && did_change)
{
char_u *ptr;
int count;
pos = oap->start;
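                /* Walk the changed region line by line and report each
                 * removal/insertion pair to NetBeans. */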
while (pos.lnum < oap->end.lnum)
{
ptr = ml_get_buf(curbuf, pos.lnum, FALSE);
count = (int)STRLEN(ptr) - pos.col;
netbeans_removed(curbuf, pos.lnum, pos.col, (long)count);
netbeans_inserted(curbuf, pos.lnum, pos.col,
&ptr[pos.col], count);
pos.col = 0;
pos.lnum++;
}
ptr = ml_get_buf(curbuf, pos.lnum, FALSE);
count = oap->end.col - pos.col + 1;
netbeans_removed(curbuf, pos.lnum, pos.col, (long)count);
netbeans_inserted(curbuf, pos.lnum, pos.col,
&ptr[pos.col], count);
}
#endif
}
}
if (!did_change && oap->is_VIsual)
/* No change: need to remove the Visual selection */
redraw_curbuf_later(INVERTED);
/*
* Set '[ and '] marks.
*/
curbuf->b_op_start = oap->start;
curbuf->b_op_end = oap->end;
if (oap->line_count > p_report)
{
if (oap->line_count == 1)
MSG(_("1 line changed"));
else
smsg((char_u *)_("%ld lines changed"), oap->line_count);
}
}
/*
* Invoke swapchar() on "length" bytes at position "pos".
* "pos" is advanced to just after the changed characters.
* "length" is rounded up to include the whole last multi-byte character.
* Also works correctly when the number of bytes changes.
* Returns TRUE if some character was changed.
*/
static int
swapchars(int op_type, pos_T *pos, int length)
{
int todo;
int did_change = 0;
for (todo = length; todo > 0; --todo)
{
# ifdef FEAT_MBYTE
if (has_mbyte)
{
int len = (*mb_ptr2len)(ml_get_pos(pos));
/* we're counting bytes, not characters */
if (len > 0)
todo -= len - 1;
}
# endif
did_change |= swapchar(op_type, pos);
if (inc(pos) == -1) /* at end of file */
break;
}
return did_change;
}
/*
* If op_type == OP_UPPER: make uppercase,
* if op_type == OP_LOWER: make lowercase,
* if op_type == OP_ROT13: do rot13 encoding,
* else swap case of character at 'pos'
* returns TRUE when something actually changed.
*/
int
swapchar(int op_type, pos_T *pos)
{
int c;
int nc;
c = gchar_pos(pos);
/* Only do rot13 encoding for ASCII characters. */
if (c >= 0x80 && op_type == OP_ROT13)
return FALSE;
#ifdef FEAT_MBYTE
if (op_type == OP_UPPER && c == 0xdf
&& (enc_latin1like || STRCMP(p_enc, "iso-8859-2") == 0))
{
pos_T sp = curwin->w_cursor;
/* Special handling of German sharp s: change to "SS". */
curwin->w_cursor = *pos;
del_char(FALSE);
ins_char('S');
ins_char('S');
curwin->w_cursor = sp;
inc(pos);
}
if (enc_dbcs != 0 && c >= 0x100) /* No lower/uppercase letter */
return FALSE;
#endif
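    /* Compute the replacement character: ROT13 rotates within the same case;
     * otherwise a lowercase letter becomes uppercase unless the operator is
     * "gu", and an uppercase one becomes lowercase unless it is "gU". */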
nc = c;
if (MB_ISLOWER(c))
{
if (op_type == OP_ROT13)
nc = ROT13(c, 'a');
else if (op_type != OP_LOWER)
nc = MB_TOUPPER(c);
}
else if (MB_ISUPPER(c))
{
if (op_type == OP_ROT13)
nc = ROT13(c, 'A');
else if (op_type != OP_UPPER)
nc = MB_TOLOWER(c);
}
if (nc != c)
{
#ifdef FEAT_MBYTE
if (enc_utf8 && (c >= 0x80 || nc >= 0x80))
{
pos_T sp = curwin->w_cursor;
curwin->w_cursor = *pos;
/* don't use del_char(), it also removes composing chars */
del_bytes(utf_ptr2len(ml_get_cursor()), FALSE, FALSE);
ins_char(nc);
curwin->w_cursor = sp;
}
else
#endif
PCHAR(*pos, nc);
return TRUE;
}
return FALSE;
}
#if defined(FEAT_VISUALEXTRA) || defined(PROTO)
/*
* op_insert - Insert and append operators for Visual mode.
*/
void
op_insert(oparg_T *oap, long count1)
{
long ins_len, pre_textlen = 0;
char_u *firstline, *ins_text;
colnr_T ind_pre = 0, ind_post;
struct block_def bd;
int i;
pos_T t1;
/* edit() changes this - record it for OP_APPEND */
bd.is_MAX = (curwin->w_curswant == MAXCOL);
/* vis block is still marked. Get rid of it now. */
curwin->w_cursor.lnum = oap->start.lnum;
update_screen(INVERTED);
if (oap->block_mode)
{
#ifdef FEAT_VIRTUALEDIT
/* When 'virtualedit' is used, need to insert the extra spaces before
* doing block_prep(). When only "block" is used, virtual edit is
* already disabled, but still need it when calling
* coladvance_force(). */
if (curwin->w_cursor.coladd > 0)
{
int old_ve_flags = ve_flags;
ve_flags = VE_ALL;
if (u_save_cursor() == FAIL)
return;
coladvance_force(oap->op_type == OP_APPEND
? oap->end_vcol + 1 : getviscol());
if (oap->op_type == OP_APPEND)
--curwin->w_cursor.col;
ve_flags = old_ve_flags;
}
#endif
/* Get the info about the block before entering the text */
block_prep(oap, &bd, oap->start.lnum, TRUE);
/* Get indent information */
ind_pre = (colnr_T)getwhitecols_curline();
firstline = ml_get(oap->start.lnum) + bd.textcol;
if (oap->op_type == OP_APPEND)
firstline += bd.textlen;
pre_textlen = (long)STRLEN(firstline);
}
if (oap->op_type == OP_APPEND)
{
if (oap->block_mode
#ifdef FEAT_VIRTUALEDIT
&& curwin->w_cursor.coladd == 0
#endif
)
{
/* Move the cursor to the character right of the block. */
curwin->w_set_curswant = TRUE;
while (*ml_get_cursor() != NUL
&& (curwin->w_cursor.col < bd.textcol + bd.textlen))
++curwin->w_cursor.col;
if (bd.is_short && !bd.is_MAX)
{
/* First line was too short, make it longer and adjust the
* values in "bd". */
if (u_save_cursor() == FAIL)
return;
for (i = 0; i < bd.endspaces; ++i)
ins_char(' ');
bd.textlen += bd.endspaces;
}
}
else
{
curwin->w_cursor = oap->end;
check_cursor_col();
/* Works just like an 'i'nsert on the next character. */
if (!LINEEMPTY(curwin->w_cursor.lnum)
&& oap->start_vcol != oap->end_vcol)
inc_cursor();
}
}
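    /* Remember where the operator started; after edit() this is compared
     * with "b_op_start_orig" to see if the start of the insert moved. */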
t1 = oap->start;
(void)edit(NUL, FALSE, (linenr_T)count1);
/* When a tab was inserted, and the characters in front of the tab
* have been converted to a tab as well, the column of the cursor
* might have actually been reduced, so need to adjust here. */
if (t1.lnum == curbuf->b_op_start_orig.lnum
&& LT_POS(curbuf->b_op_start_orig, t1))
oap->start = curbuf->b_op_start_orig;
/* If user has moved off this line, we don't know what to do, so do
* nothing.
* Also don't repeat the insert when Insert mode ended with CTRL-C. */
if (curwin->w_cursor.lnum != oap->start.lnum || got_int)
return;
if (oap->block_mode)
{
struct block_def bd2;
int did_indent = FALSE;
size_t len;
int add;
        /* If auto-indent kicked in, the first line may have changed, but only
         * adjust for it if the indent actually increased. */
ind_post = (colnr_T)getwhitecols_curline();
if (curbuf->b_op_start.col > ind_pre && ind_post > ind_pre)
{
bd.textcol += ind_post - ind_pre;
bd.start_vcol += ind_post - ind_pre;
did_indent = TRUE;
}
        /* The user may have moved the cursor before inserting something; try
         * to adjust the block for that.  But only do it if the difference
         * does not come from indent kicking in. */
if (oap->start.lnum == curbuf->b_op_start_orig.lnum
&& !bd.is_MAX && !did_indent)
{
if (oap->op_type == OP_INSERT
&& oap->start.col
#ifdef FEAT_VIRTUALEDIT
+ oap->start.coladd
#endif
!= curbuf->b_op_start_orig.col
#ifdef FEAT_VIRTUALEDIT
+ curbuf->b_op_start_orig.coladd
#endif
)
{
int t = getviscol2(curbuf->b_op_start_orig.col,
curbuf->b_op_start_orig.coladd);
oap->start.col = curbuf->b_op_start_orig.col;
pre_textlen -= t - oap->start_vcol;
oap->start_vcol = t;
}
else if (oap->op_type == OP_APPEND
&& oap->end.col
#ifdef FEAT_VIRTUALEDIT
+ oap->end.coladd
#endif
>= curbuf->b_op_start_orig.col
#ifdef FEAT_VIRTUALEDIT
+ curbuf->b_op_start_orig.coladd
#endif
)
{
int t = getviscol2(curbuf->b_op_start_orig.col,
curbuf->b_op_start_orig.coladd);
oap->start.col = curbuf->b_op_start_orig.col;
/* reset pre_textlen to the value of OP_INSERT */
pre_textlen += bd.textlen;
pre_textlen -= t - oap->start_vcol;
oap->start_vcol = t;
oap->op_type = OP_INSERT;
}
}
/*
* Spaces and tabs in the indent may have changed to other spaces and
* tabs. Get the starting column again and correct the length.
* Don't do this when "$" used, end-of-line will have changed.
*/
block_prep(oap, &bd2, oap->start.lnum, TRUE);
if (!bd.is_MAX || bd2.textlen < bd.textlen)
{
if (oap->op_type == OP_APPEND)
{
pre_textlen += bd2.textlen - bd.textlen;
if (bd2.endspaces)
--bd2.textlen;
}
bd.textcol = bd2.textcol;
bd.textlen = bd2.textlen;
}
/*
* Subsequent calls to ml_get() flush the firstline data - take a
* copy of the required string.
*/
firstline = ml_get(oap->start.lnum);
len = STRLEN(firstline);
add = bd.textcol;
if (oap->op_type == OP_APPEND)
add += bd.textlen;
if ((size_t)add > len)
            firstline += len;   /* short line, point to the NUL */
else
firstline += add;
if (pre_textlen >= 0
&& (ins_len = (long)STRLEN(firstline) - pre_textlen) > 0)
{
ins_text = vim_strnsave(firstline, (int)ins_len);
if (ins_text != NULL)
{
/* block handled here */
if (u_save(oap->start.lnum,
(linenr_T)(oap->end.lnum + 1)) == OK)
block_insert(oap, ins_text, (oap->op_type == OP_INSERT),
&bd);
curwin->w_cursor.col = oap->start.col;
check_cursor();
vim_free(ins_text);
}
}
}
}
#endif
/*
* op_change - handle a change operation
*
* return TRUE if edit() returns because of a CTRL-O command
*/
int
op_change(oparg_T *oap)
{
colnr_T l;
int retval;
#ifdef FEAT_VISUALEXTRA
long offset;
linenr_T linenr;
long ins_len;
long pre_textlen = 0;
long pre_indent = 0;
char_u *firstline;
char_u *ins_text, *newp, *oldp;
struct block_def bd;
#endif
l = oap->start.col;
if (oap->motion_type == MLINE)
{
l = 0;
#ifdef FEAT_SMARTINDENT
if (!p_paste && curbuf->b_p_si
# ifdef FEAT_CINDENT
&& !curbuf->b_p_cin
# endif
)
can_si = TRUE; /* It's like opening a new line, do si */
#endif
}
/* First delete the text in the region. In an empty buffer only need to
* save for undo */
if (curbuf->b_ml.ml_flags & ML_EMPTY)
{
if (u_save_cursor() == FAIL)
return FALSE;
}
else if (op_delete(oap) == FAIL)
return FALSE;
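    /* When the deleted text reached the end of the line the cursor was left
     * on the character before the change; advance it so the new text is
     * inserted after that character. */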
if ((l > curwin->w_cursor.col) && !LINEEMPTY(curwin->w_cursor.lnum)
&& !virtual_op)
inc_cursor();
#ifdef FEAT_VISUALEXTRA
/* check for still on same line (<CR> in inserted text meaningless) */
/* skip blank lines too */
if (oap->block_mode)
{
# ifdef FEAT_VIRTUALEDIT
/* Add spaces before getting the current line length. */
if (virtual_op && (curwin->w_cursor.coladd > 0
|| gchar_cursor() == NUL))
coladvance_force(getviscol());
# endif
firstline = ml_get(oap->start.lnum);
pre_textlen = (long)STRLEN(firstline);
pre_indent = (long)getwhitecols(firstline);
bd.textcol = curwin->w_cursor.col;
}
#endif
#if defined(FEAT_LISP) || defined(FEAT_CINDENT)
if (oap->motion_type == MLINE)
fix_indent();
#endif
retval = edit(NUL, FALSE, (linenr_T)1);
#ifdef FEAT_VISUALEXTRA
/*
* In Visual block mode, handle copying the new text to all lines of the
* block.
* Don't repeat the insert when Insert mode ended with CTRL-C.
*/
if (oap->block_mode && oap->start.lnum != oap->end.lnum && !got_int)
{
/* Auto-indenting may have changed the indent. If the cursor was past
* the indent, exclude that indent change from the inserted text. */
firstline = ml_get(oap->start.lnum);
if (bd.textcol > (colnr_T)pre_indent)
{
long new_indent = (long)getwhitecols(firstline);
pre_textlen += new_indent - pre_indent;
bd.textcol += new_indent - pre_indent;
}
ins_len = (long)STRLEN(firstline) - pre_textlen;
if (ins_len > 0)
{
/* Subsequent calls to ml_get() flush the firstline data - take a
* copy of the inserted text. */
if ((ins_text = alloc_check((unsigned)(ins_len + 1))) != NULL)
{
vim_strncpy(ins_text, firstline + bd.textcol, (size_t)ins_len);
for (linenr = oap->start.lnum + 1; linenr <= oap->end.lnum;
linenr++)
{
block_prep(oap, &bd, linenr, TRUE);
if (!bd.is_short || virtual_op)
{
# ifdef FEAT_VIRTUALEDIT
pos_T vpos;
/* If the block starts in virtual space, count the
* initial coladd offset as part of "startspaces" */
if (bd.is_short)
{
vpos.lnum = linenr;
(void)getvpos(&vpos, oap->start_vcol);
}
else
vpos.coladd = 0;
# endif
oldp = ml_get(linenr);
newp = alloc_check((unsigned)(STRLEN(oldp)
# ifdef FEAT_VIRTUALEDIT
+ vpos.coladd
# endif
+ ins_len + 1));
if (newp == NULL)
continue;
/* copy up to block start */
mch_memmove(newp, oldp, (size_t)bd.textcol);
offset = bd.textcol;
# ifdef FEAT_VIRTUALEDIT
vim_memset(newp + offset, ' ', (size_t)vpos.coladd);
offset += vpos.coladd;
# endif
mch_memmove(newp + offset, ins_text, (size_t)ins_len);
offset += ins_len;
oldp += bd.textcol;
STRMOVE(newp + offset, oldp);
ml_replace(linenr, newp, FALSE);
}
}
check_cursor();
changed_lines(oap->start.lnum + 1, 0, oap->end.lnum + 1, 0L);
}
vim_free(ins_text);
}
}
#endif
return retval;
}
/*
* set all the yank registers to empty (called from main())
*/
void
init_yank(void)
{
int i;
for (i = 0; i < NUM_REGISTERS; ++i)
y_regs[i].y_array = NULL;
}
#if defined(EXITFREE) || defined(PROTO)
void
clear_registers(void)
{
int i;
for (i = 0; i < NUM_REGISTERS; ++i)
{
y_current = &y_regs[i];
if (y_current->y_array != NULL)
free_yank_all();
}
}
#endif
/*
* Free "n" lines from the current yank register.
* Called for normal freeing and in case of error.
*/
static void
free_yank(long n)
{
if (y_current->y_array != NULL)
{
long i;
for (i = n; --i >= 0; )
{
#ifdef AMIGA /* only for very slow machines */
if ((i & 1023) == 1023) /* this may take a while */
{
/*
* This message should never cause a hit-return message.
* Overwrite this message with any next message.
*/
++no_wait_return;
smsg((char_u *)_("freeing %ld lines"), i + 1);
--no_wait_return;
msg_didout = FALSE;
msg_col = 0;
}
#endif
vim_free(y_current->y_array[i]);
}
VIM_CLEAR(y_current->y_array);
#ifdef AMIGA
if (n >= 1000)
MSG("");
#endif
}
}
static void
free_yank_all(void)
{
free_yank(y_current->y_size);
}
/*
* Yank the text between "oap->start" and "oap->end" into a yank register.
* If we are to append (uppercase register), we first yank into a new yank
* register and then concatenate the old and the new one (so we keep the old
* one in case of out-of-memory).
*
* Return FAIL for failure, OK otherwise.
*/
int
op_yank(oparg_T *oap, int deleting, int mess)
{
long y_idx; /* index in y_array[] */
yankreg_T *curr; /* copy of y_current */
yankreg_T newreg; /* new yank register when appending */
char_u **new_ptr;
linenr_T lnum; /* current line number */
long j;
int yanktype = oap->motion_type;
long yanklines = oap->line_count;
linenr_T yankendlnum = oap->end.lnum;
char_u *p;
char_u *pnew;
struct block_def bd;
#if defined(FEAT_CLIPBOARD) && defined(FEAT_X11)
int did_star = FALSE;
#endif
/* check for read-only register */
if (oap->regname != 0 && !valid_yank_reg(oap->regname, TRUE))
{
beep_flush();
return FAIL;
}
if (oap->regname == '_') /* black hole: nothing to do */
return OK;
#ifdef FEAT_CLIPBOARD
if (!clip_star.available && oap->regname == '*')
oap->regname = 0;
else if (!clip_plus.available && oap->regname == '+')
oap->regname = 0;
#endif
if (!deleting) /* op_delete() already set y_current */
get_yank_register(oap->regname, TRUE);
curr = y_current;
/* append to existing contents */
if (y_append && y_current->y_array != NULL)
y_current = &newreg;
else
free_yank_all(); /* free previously yanked lines */
/*
* If the cursor was in column 1 before and after the movement, and the
* operator is not inclusive, the yank is always linewise.
*/
if ( oap->motion_type == MCHAR
&& oap->start.col == 0
&& !oap->inclusive
&& (!oap->is_VIsual || *p_sel == 'o')
&& !oap->block_mode
&& oap->end.col == 0
&& yanklines > 1)
{
yanktype = MLINE;
--yankendlnum;
--yanklines;
}
y_current->y_size = yanklines;
y_current->y_type = yanktype; /* set the yank register type */
y_current->y_width = 0;
y_current->y_array = (char_u **)lalloc_clear((long_u)(sizeof(char_u *) *
yanklines), TRUE);
if (y_current->y_array == NULL)
{
y_current = curr;
return FAIL;
}
#ifdef FEAT_VIMINFO
y_current->y_time_set = vim_time();
#endif
y_idx = 0;
lnum = oap->start.lnum;
if (oap->block_mode)
{
/* Visual block mode */
y_current->y_type = MBLOCK; /* set the yank register type */
y_current->y_width = oap->end_vcol - oap->start_vcol;
if (curwin->w_curswant == MAXCOL && y_current->y_width > 0)
y_current->y_width--;
}
for ( ; lnum <= yankendlnum; lnum++, y_idx++)
{
switch (y_current->y_type)
{
case MBLOCK:
block_prep(oap, &bd, lnum, FALSE);
if (yank_copy_line(&bd, y_idx) == FAIL)
goto fail;
break;
case MLINE:
if ((y_current->y_array[y_idx] =
vim_strsave(ml_get(lnum))) == NULL)
goto fail;
break;
case MCHAR:
{
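                /* Characterwise yank: take the bytes between "startcol" and
                 * "endcol" on this line; with 'virtualedit', startspaces and
                 * endspaces account for partly selected tabs. */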
colnr_T startcol = 0, endcol = MAXCOL;
#ifdef FEAT_VIRTUALEDIT
int is_oneChar = FALSE;
colnr_T cs, ce;
#endif
p = ml_get(lnum);
bd.startspaces = 0;
bd.endspaces = 0;
if (lnum == oap->start.lnum)
{
startcol = oap->start.col;
#ifdef FEAT_VIRTUALEDIT
if (virtual_op)
{
getvcol(curwin, &oap->start, &cs, NULL, &ce);
if (ce != cs && oap->start.coladd > 0)
{
/* Part of a tab selected -- but don't
* double-count it. */
bd.startspaces = (ce - cs + 1)
- oap->start.coladd;
startcol++;
}
}
#endif
}
if (lnum == oap->end.lnum)
{
endcol = oap->end.col;
#ifdef FEAT_VIRTUALEDIT
if (virtual_op)
{
getvcol(curwin, &oap->end, &cs, NULL, &ce);
if (p[endcol] == NUL || (cs + oap->end.coladd < ce
# ifdef FEAT_MBYTE
/* Don't add space for double-wide
* char; endcol will be on last byte
* of multi-byte char. */
&& (*mb_head_off)(p, p + endcol) == 0
# endif
))
{
if (oap->start.lnum == oap->end.lnum
&& oap->start.col == oap->end.col)
{
/* Special case: inside a single char */
is_oneChar = TRUE;
bd.startspaces = oap->end.coladd
- oap->start.coladd + oap->inclusive;
endcol = startcol;
}
else
{
bd.endspaces = oap->end.coladd
+ oap->inclusive;
endcol -= oap->inclusive;
}
}
}
#endif
}
if (endcol == MAXCOL)
endcol = (colnr_T)STRLEN(p);
if (startcol > endcol
#ifdef FEAT_VIRTUALEDIT
|| is_oneChar
#endif
)
bd.textlen = 0;
else
{
bd.textlen = endcol - startcol + oap->inclusive;
}
bd.textstart = p + startcol;
if (yank_copy_line(&bd, y_idx) == FAIL)
goto fail;
break;
}
/* NOTREACHED */
}
}
if (curr != y_current) /* append the new block to the old block */
{
new_ptr = (char_u **)lalloc((long_u)(sizeof(char_u *) *
(curr->y_size + y_current->y_size)), TRUE);
if (new_ptr == NULL)
goto fail;
for (j = 0; j < curr->y_size; ++j)
new_ptr[j] = curr->y_array[j];
vim_free(curr->y_array);
curr->y_array = new_ptr;
#ifdef FEAT_VIMINFO
curr->y_time_set = vim_time();
#endif
if (yanktype == MLINE) /* MLINE overrides MCHAR and MBLOCK */
curr->y_type = MLINE;
/* Concatenate the last line of the old block with the first line of
* the new block, unless being Vi compatible. */
if (curr->y_type == MCHAR && vim_strchr(p_cpo, CPO_REGAPPEND) == NULL)
{
pnew = lalloc((long_u)(STRLEN(curr->y_array[curr->y_size - 1])
+ STRLEN(y_current->y_array[0]) + 1), TRUE);
if (pnew == NULL)
{
y_idx = y_current->y_size - 1;
goto fail;
}
STRCPY(pnew, curr->y_array[--j]);
STRCAT(pnew, y_current->y_array[0]);
vim_free(curr->y_array[j]);
vim_free(y_current->y_array[0]);
curr->y_array[j++] = pnew;
y_idx = 1;
}
else
y_idx = 0;
while (y_idx < y_current->y_size)
curr->y_array[j++] = y_current->y_array[y_idx++];
curr->y_size = j;
vim_free(y_current->y_array);
y_current = curr;
}
if (curwin->w_p_rnu)
redraw_later(SOME_VALID); /* cursor moved to start */
if (mess) /* Display message about yank? */
{
if (yanktype == MCHAR
&& !oap->block_mode
&& yanklines == 1)
yanklines = 0;
/* Some versions of Vi use ">=" here, some don't... */
if (yanklines > p_report)
{
char namebuf[100];
if (oap->regname == NUL)
*namebuf = NUL;
else
vim_snprintf(namebuf, sizeof(namebuf),
_(" into \"%c"), oap->regname);
/* redisplay now, so message is not deleted */
update_topline_redraw();
if (yanklines == 1)
{
if (oap->block_mode)
smsg((char_u *)_("block of 1 line yanked%s"), namebuf);
else
smsg((char_u *)_("1 line yanked%s"), namebuf);
}
else if (oap->block_mode)
smsg((char_u *)_("block of %ld lines yanked%s"),
yanklines, namebuf);
else
smsg((char_u *)_("%ld lines yanked%s"), yanklines,
namebuf);
}
}
/*
* Set "'[" and "']" marks.
*/
curbuf->b_op_start = oap->start;
curbuf->b_op_end = oap->end;
if (yanktype == MLINE && !oap->block_mode)
{
curbuf->b_op_start.col = 0;
curbuf->b_op_end.col = MAXCOL;
}
#ifdef FEAT_CLIPBOARD
/*
* If we were yanking to the '*' register, send result to clipboard.
* If no register was specified, and "unnamed" in 'clipboard', make a copy
* to the '*' register.
*/
if (clip_star.available
&& (curr == &(y_regs[STAR_REGISTER])
|| (!deleting && oap->regname == 0
&& ((clip_unnamed | clip_unnamed_saved) & CLIP_UNNAMED))))
{
if (curr != &(y_regs[STAR_REGISTER]))
/* Copy the text from register 0 to the clipboard register. */
copy_yank_reg(&(y_regs[STAR_REGISTER]));
clip_own_selection(&clip_star);
clip_gen_set_selection(&clip_star);
# ifdef FEAT_X11
did_star = TRUE;
# endif
}
# ifdef FEAT_X11
/*
* If we were yanking to the '+' register, send result to selection.
* Also copy to the '*' register, in case auto-select is off.
*/
if (clip_plus.available
&& (curr == &(y_regs[PLUS_REGISTER])
|| (!deleting && oap->regname == 0
&& ((clip_unnamed | clip_unnamed_saved) &
CLIP_UNNAMED_PLUS))))
{
if (curr != &(y_regs[PLUS_REGISTER]))
/* Copy the text from register 0 to the clipboard register. */
copy_yank_reg(&(y_regs[PLUS_REGISTER]));
clip_own_selection(&clip_plus);
clip_gen_set_selection(&clip_plus);
if (!clip_isautosel_star() && !clip_isautosel_plus()
&& !did_star && curr == &(y_regs[PLUS_REGISTER]))
{
copy_yank_reg(&(y_regs[STAR_REGISTER]));
clip_own_selection(&clip_star);
clip_gen_set_selection(&clip_star);
}
}
# endif
#endif
#if defined(FEAT_EVAL)
if (!deleting && has_textyankpost())
yank_do_autocmd(oap, y_current);
#endif
return OK;
fail: /* free the allocated lines */
free_yank(y_idx + 1);
y_current = curr;
return FAIL;
}
static int
yank_copy_line(struct block_def *bd, long y_idx)
{
char_u *pnew;
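    /* A stored line is built as: "startspaces" spaces, the text,
     * "endspaces" spaces, and a terminating NUL. */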
if ((pnew = alloc(bd->startspaces + bd->endspaces + bd->textlen + 1))
== NULL)
return FAIL;
y_current->y_array[y_idx] = pnew;
vim_memset(pnew, ' ', (size_t)bd->startspaces);
pnew += bd->startspaces;
mch_memmove(pnew, bd->textstart, (size_t)bd->textlen);
pnew += bd->textlen;
vim_memset(pnew, ' ', (size_t)bd->endspaces);
pnew += bd->endspaces;
*pnew = NUL;
return OK;
}
#ifdef FEAT_CLIPBOARD
/*
* Make a copy of the y_current register to register "reg".
*/
static void
copy_yank_reg(yankreg_T *reg)
{
yankreg_T *curr = y_current;
long j;
y_current = reg;
free_yank_all();
*y_current = *curr;
y_current->y_array = (char_u **)lalloc_clear(
(long_u)(sizeof(char_u *) * y_current->y_size), TRUE);
if (y_current->y_array == NULL)
y_current->y_size = 0;
else
for (j = 0; j < y_current->y_size; ++j)
if ((y_current->y_array[j] = vim_strsave(curr->y_array[j])) == NULL)
{
free_yank(j);
y_current->y_size = 0;
break;
}
y_current = curr;
}
#endif
/*
* Put contents of register "regname" into the text.
* Caller must check "regname" to be valid!
* "flags": PUT_FIXINDENT make indent look nice
* PUT_CURSEND leave cursor after end of new text
* PUT_LINE force linewise put (":put")
*/
void
do_put(
int regname,
int dir, /* BACKWARD for 'P', FORWARD for 'p' */
long count,
int flags)
{
char_u *ptr;
char_u *newp, *oldp;
int yanklen;
int totlen = 0; /* init for gcc */
linenr_T lnum;
colnr_T col;
long i; /* index in y_array[] */
int y_type;
long y_size;
int oldlen;
long y_width = 0;
colnr_T vcol;
int delcount;
int incr = 0;
long j;
struct block_def bd;
char_u **y_array = NULL;
long nr_lines = 0;
pos_T new_cursor;
int indent;
int orig_indent = 0; /* init for gcc */
int indent_diff = 0; /* init for gcc */
int first_indent = TRUE;
int lendiff = 0;
pos_T old_pos;
char_u *insert_string = NULL;
int allocated = FALSE;
long cnt;
#ifdef FEAT_CLIPBOARD
/* Adjust register name for "unnamed" in 'clipboard'. */
    adjust_clip_reg(&regname);
(void)may_get_selection(regname);
#endif
if (flags & PUT_FIXINDENT)
orig_indent = get_indent();
curbuf->b_op_start = curwin->w_cursor; /* default for '[ mark */
curbuf->b_op_end = curwin->w_cursor; /* default for '] mark */
/*
* Using inserted text works differently, because the register includes
* special characters (newlines, etc.).
*/
if (regname == '.')
{
if (VIsual_active)
stuffcharReadbuff(VIsual_mode);
(void)stuff_inserted((dir == FORWARD ? (count == -1 ? 'o' : 'a') :
(count == -1 ? 'O' : 'i')), count, FALSE);
/* Putting the text is done later, so can't really move the cursor to
* the next character. Use "l" to simulate it. */
if ((flags & PUT_CURSEND) && gchar_cursor() != NUL)
stuffcharReadbuff('l');
return;
}
/*
* For special registers '%' (file name), '#' (alternate file name) and
* ':' (last command line), etc. we have to create a fake yank register.
*/
if (get_spec_reg(regname, &insert_string, &allocated, TRUE))
{
if (insert_string == NULL)
return;
}
/* Autocommands may be executed when saving lines for undo, which may make
* y_array invalid. Start undo now to avoid that. */
u_save(curwin->w_cursor.lnum, curwin->w_cursor.lnum + 1);
if (insert_string != NULL)
{
y_type = MCHAR;
#ifdef FEAT_EVAL
if (regname == '=')
{
/* For the = register we need to split the string at NL
* characters.
* Loop twice: count the number of lines and save them. */
for (;;)
{
y_size = 0;
ptr = insert_string;
while (ptr != NULL)
{
if (y_array != NULL)
y_array[y_size] = ptr;
++y_size;
ptr = vim_strchr(ptr, '\n');
if (ptr != NULL)
{
if (y_array != NULL)
*ptr = NUL;
++ptr;
/* A trailing '\n' makes the register linewise. */
if (*ptr == NUL)
{
y_type = MLINE;
break;
}
}
}
if (y_array != NULL)
break;
y_array = (char_u **)alloc((unsigned)
(y_size * sizeof(char_u *)));
if (y_array == NULL)
goto end;
}
}
else
#endif
{
y_size = 1; /* use fake one-line yank register */
y_array = &insert_string;
}
}
else
{
get_yank_register(regname, FALSE);
y_type = y_current->y_type;
y_width = y_current->y_width;
y_size = y_current->y_size;
y_array = y_current->y_array;
}
if (y_type == MLINE)
{
if (flags & PUT_LINE_SPLIT)
{
char_u *p;
/* "p" or "P" in Visual mode: split the lines to put the text in
* between. */
if (u_save_cursor() == FAIL)
goto end;
p = ml_get_cursor();
if (dir == FORWARD && *p != NUL)
MB_PTR_ADV(p);
ptr = vim_strsave(p);
if (ptr == NULL)
goto end;
ml_append(curwin->w_cursor.lnum, ptr, (colnr_T)0, FALSE);
vim_free(ptr);
oldp = ml_get_curline();
p = oldp + curwin->w_cursor.col;
if (dir == FORWARD && *p != NUL)
MB_PTR_ADV(p);
ptr = vim_strnsave(oldp, p - oldp);
if (ptr == NULL)
goto end;
ml_replace(curwin->w_cursor.lnum, ptr, FALSE);
++nr_lines;
dir = FORWARD;
}
if (flags & PUT_LINE_FORWARD)
{
/* Must be "p" for a Visual block, put lines below the block. */
curwin->w_cursor = curbuf->b_visual.vi_end;
dir = FORWARD;
}
curbuf->b_op_start = curwin->w_cursor; /* default for '[ mark */
curbuf->b_op_end = curwin->w_cursor; /* default for '] mark */
}
if (flags & PUT_LINE) /* :put command or "p" in Visual line mode. */
y_type = MLINE;
if (y_size == 0 || y_array == NULL)
{
EMSG2(_("E353: Nothing in register %s"),
regname == 0 ? (char_u *)"\"" : transchar(regname));
goto end;
}
if (y_type == MBLOCK)
{
lnum = curwin->w_cursor.lnum + y_size + 1;
if (lnum > curbuf->b_ml.ml_line_count)
lnum = curbuf->b_ml.ml_line_count + 1;
if (u_save(curwin->w_cursor.lnum - 1, lnum) == FAIL)
goto end;
}
else if (y_type == MLINE)
{
lnum = curwin->w_cursor.lnum;
#ifdef FEAT_FOLDING
/* Correct line number for closed fold. Don't move the cursor yet,
* u_save() uses it. */
if (dir == BACKWARD)
(void)hasFolding(lnum, &lnum, NULL);
else
(void)hasFolding(lnum, NULL, &lnum);
#endif
if (dir == FORWARD)
++lnum;
/* In an empty buffer the empty line is going to be replaced, include
* it in the saved lines. */
if ((BUFEMPTY() ? u_save(0, 2) : u_save(lnum - 1, lnum)) == FAIL)
goto end;
#ifdef FEAT_FOLDING
if (dir == FORWARD)
curwin->w_cursor.lnum = lnum - 1;
else
curwin->w_cursor.lnum = lnum;
curbuf->b_op_start = curwin->w_cursor; /* for mark_adjust() */
#endif
}
else if (u_save_cursor() == FAIL)
goto end;
yanklen = (int)STRLEN(y_array[0]);
#ifdef FEAT_VIRTUALEDIT
if (ve_flags == VE_ALL && y_type == MCHAR)
{
if (gchar_cursor() == TAB)
{
/* Don't need to insert spaces when "p" on the last position of a
* tab or "P" on the first position. */
if (dir == FORWARD
? (int)curwin->w_cursor.coladd < curbuf->b_p_ts - 1
: curwin->w_cursor.coladd > 0)
coladvance_force(getviscol());
else
curwin->w_cursor.coladd = 0;
}
else if (curwin->w_cursor.coladd > 0 || gchar_cursor() == NUL)
coladvance_force(getviscol() + (dir == FORWARD));
}
#endif
lnum = curwin->w_cursor.lnum;
col = curwin->w_cursor.col;
/*
* Block mode
*/
if (y_type == MBLOCK)
{
int c = gchar_cursor();
colnr_T endcol2 = 0;
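        /* Compute the virtual column "col" where the block is inserted; for
         * "p" (forward) the text goes after the character under the cursor,
         * so move past it first. */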
if (dir == FORWARD && c != NUL)
{
#ifdef FEAT_VIRTUALEDIT
if (ve_flags == VE_ALL)
getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2);
else
#endif
getvcol(curwin, &curwin->w_cursor, NULL, NULL, &col);
#ifdef FEAT_MBYTE
if (has_mbyte)
/* move to start of next multi-byte character */
curwin->w_cursor.col += (*mb_ptr2len)(ml_get_cursor());
else
#endif
#ifdef FEAT_VIRTUALEDIT
if (c != TAB || ve_flags != VE_ALL)
#endif
++curwin->w_cursor.col;
++col;
}
else
getvcol(curwin, &curwin->w_cursor, &col, NULL, &endcol2);
#ifdef FEAT_VIRTUALEDIT
col += curwin->w_cursor.coladd;
if (ve_flags == VE_ALL
&& (curwin->w_cursor.coladd > 0
|| endcol2 == curwin->w_cursor.col))
{
if (dir == FORWARD && c == NUL)
++col;
if (dir != FORWARD && c != NUL)
++curwin->w_cursor.col;
if (c == TAB)
{
if (dir == BACKWARD && curwin->w_cursor.col)
curwin->w_cursor.col--;
if (dir == FORWARD && col - 1 == endcol2)
curwin->w_cursor.col++;
}
}
curwin->w_cursor.coladd = 0;
#endif
bd.textcol = 0;
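        /* Insert the block on each line: pad with spaces (or split a tab) so
         * the text starts at screen column "col", then append "count" copies
         * of this register line. */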
for (i = 0; i < y_size; ++i)
{
int spaces;
char shortline;
bd.startspaces = 0;
bd.endspaces = 0;
vcol = 0;
delcount = 0;
/* add a new line */
if (curwin->w_cursor.lnum > curbuf->b_ml.ml_line_count)
{
if (ml_append(curbuf->b_ml.ml_line_count, (char_u *)"",
(colnr_T)1, FALSE) == FAIL)
break;
++nr_lines;
}
/* get the old line and advance to the position to insert at */
oldp = ml_get_curline();
oldlen = (int)STRLEN(oldp);
for (ptr = oldp; vcol < col && *ptr; )
{
/* Count a tab for what it's worth (if list mode not on) */
incr = lbr_chartabsize_adv(oldp, &ptr, (colnr_T)vcol);
vcol += incr;
}
bd.textcol = (colnr_T)(ptr - oldp);
            shortline = (vcol < col) || (vcol == col && !*ptr);
            if (vcol < col) /* line too short, pad with spaces */
bd.startspaces = col - vcol;
else if (vcol > col)
{
bd.endspaces = vcol - col;
bd.startspaces = incr - bd.endspaces;
--bd.textcol;
delcount = 1;
#ifdef FEAT_MBYTE
if (has_mbyte)
bd.textcol -= (*mb_head_off)(oldp, oldp + bd.textcol);
#endif
if (oldp[bd.textcol] != TAB)
{
/* Only a Tab can be split into spaces. Other
* characters will have to be moved to after the
* block, causing misalignment. */
delcount = 0;
bd.endspaces = 0;
}
}
yanklen = (int)STRLEN(y_array[i]);
/* calculate number of spaces required to fill right side of block*/
spaces = y_width + 1;
for (j = 0; j < yanklen; j++)
spaces -= lbr_chartabsize(NULL, &y_array[i][j], 0);
if (spaces < 0)
spaces = 0;
/* insert the new text */
totlen = count * (yanklen + spaces) + bd.startspaces + bd.endspaces;
newp = alloc_check((unsigned)totlen + oldlen + 1);
if (newp == NULL)
break;
/* copy part up to cursor to new line */
ptr = newp;
mch_memmove(ptr, oldp, (size_t)bd.textcol);
ptr += bd.textcol;
/* may insert some spaces before the new text */
vim_memset(ptr, ' ', (size_t)bd.startspaces);
ptr += bd.startspaces;
/* insert the new text */
for (j = 0; j < count; ++j)
{
mch_memmove(ptr, y_array[i], (size_t)yanklen);
ptr += yanklen;
/* insert block's trailing spaces only if there's text behind */
if ((j < count - 1 || !shortline) && spaces)
{
vim_memset(ptr, ' ', (size_t)spaces);
ptr += spaces;
}
}
/* may insert some spaces after the new text */
vim_memset(ptr, ' ', (size_t)bd.endspaces);
ptr += bd.endspaces;
/* move the text after the cursor to the end of the line. */
mch_memmove(ptr, oldp + bd.textcol + delcount,
(size_t)(oldlen - bd.textcol - delcount + 1));
ml_replace(curwin->w_cursor.lnum, newp, FALSE);
++curwin->w_cursor.lnum;
if (i == 0)
curwin->w_cursor.col += bd.startspaces;
}
changed_lines(lnum, 0, curwin->w_cursor.lnum, nr_lines);
/* Set '[ mark. */
curbuf->b_op_start = curwin->w_cursor;
curbuf->b_op_start.lnum = lnum;
/* adjust '] mark */
curbuf->b_op_end.lnum = curwin->w_cursor.lnum - 1;
curbuf->b_op_end.col = bd.textcol + totlen - 1;
# ifdef FEAT_VIRTUALEDIT
curbuf->b_op_end.coladd = 0;
# endif
if (flags & PUT_CURSEND)
{
colnr_T len;
curwin->w_cursor = curbuf->b_op_end;
curwin->w_cursor.col++;
/* in Insert mode we might be after the NUL, correct for that */
len = (colnr_T)STRLEN(ml_get_curline());
if (curwin->w_cursor.col > len)
curwin->w_cursor.col = len;
}
else
curwin->w_cursor.lnum = lnum;
}
else
{
/*
* Character or Line mode
*/
if (y_type == MCHAR)
{
/* if type is MCHAR, FORWARD is the same as BACKWARD on the next
* char */
if (dir == FORWARD && gchar_cursor() != NUL)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
int bytelen = (*mb_ptr2len)(ml_get_cursor());
/* put it on the next of the multi-byte character. */
col += bytelen;
if (yanklen)
{
curwin->w_cursor.col += bytelen;
curbuf->b_op_end.col += bytelen;
}
}
else
#endif
{
++col;
if (yanklen)
{
++curwin->w_cursor.col;
++curbuf->b_op_end.col;
}
}
}
curbuf->b_op_start = curwin->w_cursor;
}
/*
* Line mode: BACKWARD is the same as FORWARD on the previous line
*/
else if (dir == BACKWARD)
--lnum;
new_cursor = curwin->w_cursor;
/*
* simple case: insert into current line
*/
if (y_type == MCHAR && y_size == 1)
{
linenr_T end_lnum = 0; /* init for gcc */
if (VIsual_active)
{
end_lnum = curbuf->b_visual.vi_end.lnum;
if (end_lnum < curbuf->b_visual.vi_start.lnum)
end_lnum = curbuf->b_visual.vi_start.lnum;
}
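            /* With an active Visual selection, insert the text in every line
             * of the selection; otherwise only in the cursor line. */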
do {
totlen = count * yanklen;
if (totlen > 0)
{
oldp = ml_get(lnum);
if (VIsual_active && col > (int)STRLEN(oldp))
{
lnum++;
continue;
}
newp = alloc_check((unsigned)(STRLEN(oldp) + totlen + 1));
if (newp == NULL)
goto end; /* alloc() gave an error message */
mch_memmove(newp, oldp, (size_t)col);
ptr = newp + col;
for (i = 0; i < count; ++i)
{
mch_memmove(ptr, y_array[0], (size_t)yanklen);
ptr += yanklen;
}
STRMOVE(ptr, oldp + col);
ml_replace(lnum, newp, FALSE);
/* Place cursor on last putted char. */
if (lnum == curwin->w_cursor.lnum)
{
/* make sure curwin->w_virtcol is updated */
changed_cline_bef_curs();
curwin->w_cursor.col += (colnr_T)(totlen - 1);
}
}
if (VIsual_active)
lnum++;
} while (VIsual_active && lnum <= end_lnum);
if (VIsual_active) /* reset lnum to the last visual line */
lnum--;
curbuf->b_op_end = curwin->w_cursor;
/* For "CTRL-O p" in Insert mode, put cursor after last char */
if (totlen && (restart_edit != 0 || (flags & PUT_CURSEND)))
++curwin->w_cursor.col;
changed_bytes(lnum, col);
}
else
{
/*
* Insert at least one line. When y_type is MCHAR, break the first
* line in two.
*/
for (cnt = 1; cnt <= count; ++cnt)
{
i = 0;
if (y_type == MCHAR)
{
/*
* Split the current line in two at the insert position.
* First insert y_array[size - 1] in front of second line.
* Then append y_array[0] to first line.
*/
lnum = new_cursor.lnum;
ptr = ml_get(lnum) + col;
totlen = (int)STRLEN(y_array[y_size - 1]);
newp = alloc_check((unsigned)(STRLEN(ptr) + totlen + 1));
if (newp == NULL)
goto error;
STRCPY(newp, y_array[y_size - 1]);
STRCAT(newp, ptr);
/* insert second line */
ml_append(lnum, newp, (colnr_T)0, FALSE);
vim_free(newp);
oldp = ml_get(lnum);
newp = alloc_check((unsigned)(col + yanklen + 1));
if (newp == NULL)
goto error;
/* copy first part of line */
mch_memmove(newp, oldp, (size_t)col);
/* append to first line */
mch_memmove(newp + col, y_array[0], (size_t)(yanklen + 1));
ml_replace(lnum, newp, FALSE);
curwin->w_cursor.lnum = lnum;
i = 1;
}
for (; i < y_size; ++i)
{
if ((y_type != MCHAR || i < y_size - 1)
&& ml_append(lnum, y_array[i], (colnr_T)0, FALSE)
== FAIL)
goto error;
lnum++;
++nr_lines;
if (flags & PUT_FIXINDENT)
{
old_pos = curwin->w_cursor;
curwin->w_cursor.lnum = lnum;
ptr = ml_get(lnum);
if (cnt == count && i == y_size - 1)
lendiff = (int)STRLEN(ptr);
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT)
if (*ptr == '#' && preprocs_left())
indent = 0; /* Leave # lines at start */
else
#endif
if (*ptr == NUL)
indent = 0; /* Ignore empty lines */
else if (first_indent)
{
indent_diff = orig_indent - get_indent();
indent = orig_indent;
first_indent = FALSE;
}
else if ((indent = get_indent() + indent_diff) < 0)
indent = 0;
(void)set_indent(indent, 0);
curwin->w_cursor = old_pos;
/* remember how many chars were removed */
if (cnt == count && i == y_size - 1)
lendiff -= (int)STRLEN(ml_get(lnum));
}
}
}
error:
/* Adjust marks. */
if (y_type == MLINE)
{
curbuf->b_op_start.col = 0;
if (dir == FORWARD)
curbuf->b_op_start.lnum++;
}
/* Skip mark_adjust when adding lines after the last one, there
* can't be marks there. But still needed in diff mode. */
if (curbuf->b_op_start.lnum + (y_type == MCHAR) - 1 + nr_lines
< curbuf->b_ml.ml_line_count
#ifdef FEAT_DIFF
|| curwin->w_p_diff
#endif
)
mark_adjust(curbuf->b_op_start.lnum + (y_type == MCHAR),
(linenr_T)MAXLNUM, nr_lines, 0L);
/* note changed text for displaying and folding */
if (y_type == MCHAR)
changed_lines(curwin->w_cursor.lnum, col,
curwin->w_cursor.lnum + 1, nr_lines);
else
changed_lines(curbuf->b_op_start.lnum, 0,
curbuf->b_op_start.lnum, nr_lines);
/* put '] mark at last inserted character */
curbuf->b_op_end.lnum = lnum;
/* correct length for change in indent */
col = (colnr_T)STRLEN(y_array[y_size - 1]) - lendiff;
if (col > 1)
curbuf->b_op_end.col = col - 1;
else
curbuf->b_op_end.col = 0;
if (flags & PUT_CURSLINE)
{
/* ":put": put cursor on last inserted line */
curwin->w_cursor.lnum = lnum;
beginline(BL_WHITE | BL_FIX);
}
else if (flags & PUT_CURSEND)
{
/* put cursor after inserted text */
if (y_type == MLINE)
{
if (lnum >= curbuf->b_ml.ml_line_count)
curwin->w_cursor.lnum = curbuf->b_ml.ml_line_count;
else
curwin->w_cursor.lnum = lnum + 1;
curwin->w_cursor.col = 0;
}
else
{
curwin->w_cursor.lnum = lnum;
curwin->w_cursor.col = col;
}
}
else if (y_type == MLINE)
{
/* put cursor on first non-blank in first inserted line */
curwin->w_cursor.col = 0;
if (dir == FORWARD)
++curwin->w_cursor.lnum;
beginline(BL_WHITE | BL_FIX);
}
else /* put cursor on first inserted character */
curwin->w_cursor = new_cursor;
}
}
msgmore(nr_lines);
curwin->w_set_curswant = TRUE;
end:
if (allocated)
vim_free(insert_string);
if (regname == '=')
vim_free(y_array);
VIsual_active = FALSE;
/* If the cursor is past the end of the line put it at the end. */
adjust_cursor_eol();
}
/*
* When the cursor is on the NUL past the end of the line and it should not be
 * there, move it left.
*/
void
adjust_cursor_eol(void)
{
if (curwin->w_cursor.col > 0
&& gchar_cursor() == NUL
#ifdef FEAT_VIRTUALEDIT
&& (ve_flags & VE_ONEMORE) == 0
#endif
&& !(restart_edit || (State & INSERT)))
{
/* Put the cursor on the last character in the line. */
dec_cursor();
#ifdef FEAT_VIRTUALEDIT
if (ve_flags == VE_ALL)
{
colnr_T scol, ecol;
/* Coladd is set to the width of the last character. */
getvcol(curwin, &curwin->w_cursor, &scol, NULL, &ecol);
curwin->w_cursor.coladd = ecol - scol + 1;
}
#endif
}
}
#if defined(FEAT_SMARTINDENT) || defined(FEAT_CINDENT) || defined(PROTO)
/*
* Return TRUE if lines starting with '#' should be left aligned.
*/
int
preprocs_left(void)
{
return
# ifdef FEAT_SMARTINDENT
# ifdef FEAT_CINDENT
(curbuf->b_p_si && !curbuf->b_p_cin) ||
# else
curbuf->b_p_si
# endif
# endif
# ifdef FEAT_CINDENT
(curbuf->b_p_cin && in_cinkeys('#', ' ', TRUE)
&& curbuf->b_ind_hash_comment == 0)
# endif
;
}
#endif
/*
* Return the character name of the register with the given number.
*/
int
get_register_name(int num)
{
if (num == -1)
return '"';
else if (num < 10)
return num + '0';
else if (num == DELETION_REGISTER)
return '-';
#ifdef FEAT_CLIPBOARD
else if (num == STAR_REGISTER)
return '*';
else if (num == PLUS_REGISTER)
return '+';
#endif
else
{
#ifdef EBCDIC
int i;
/* EBCDIC is really braindead ... */
i = 'a' + (num - 10);
if (i > 'i')
i += 7;
if (i > 'r')
i += 8;
return i;
#else
return num + 'a' - 10;
#endif
}
}
/*
* ":dis" and ":registers": Display the contents of the yank registers.
*/
void
ex_display(exarg_T *eap)
{
int i, n;
long j;
char_u *p;
yankreg_T *yb;
int name;
int attr;
char_u *arg = eap->arg;
#ifdef FEAT_MBYTE
int clen;
#else
# define clen 1
#endif
if (arg != NULL && *arg == NUL)
arg = NULL;
attr = HL_ATTR(HLF_8);
/* Highlight title */
MSG_PUTS_TITLE(_("\n--- Registers ---"));
for (i = -1; i < NUM_REGISTERS && !got_int; ++i)
{
name = get_register_name(i);
if (arg != NULL && vim_strchr(arg, name) == NULL
#ifdef ONE_CLIPBOARD
/* Star register and plus register contain the same thing. */
&& (name != '*' || vim_strchr(arg, '+') == NULL)
#endif
)
continue; /* did not ask for this register */
#ifdef FEAT_CLIPBOARD
/* Adjust register name for "unnamed" in 'clipboard'.
* When it's a clipboard register, fill it with the current contents
* of the clipboard. */
adjust_clip_reg(&name);
(void)may_get_selection(name);
#endif
if (i == -1)
{
if (y_previous != NULL)
yb = y_previous;
else
yb = &(y_regs[0]);
}
else
yb = &(y_regs[i]);
#ifdef FEAT_EVAL
if (name == MB_TOLOWER(redir_reg)
|| (redir_reg == '"' && yb == y_previous))
continue; /* do not list register being written to, the
* pointer can be freed */
#endif
if (yb->y_array != NULL)
{
msg_putchar('\n');
msg_putchar('"');
msg_putchar(name);
MSG_PUTS(" ");
n = (int)Columns - 6;
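            /* Show as much of the register as fits on one screen line; "n"
             * counts the remaining cells, "^J" marks line breaks. */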
for (j = 0; j < yb->y_size && n > 1; ++j)
{
if (j)
{
MSG_PUTS_ATTR("^J", attr);
n -= 2;
}
for (p = yb->y_array[j]; *p && (n -= ptr2cells(p)) >= 0; ++p)
{
#ifdef FEAT_MBYTE
clen = (*mb_ptr2len)(p);
#endif
msg_outtrans_len(p, clen);
#ifdef FEAT_MBYTE
p += clen - 1;
#endif
}
}
if (n > 1 && yb->y_type == MLINE)
MSG_PUTS_ATTR("^J", attr);
out_flush(); /* show one line at a time */
}
ui_breakcheck();
}
/*
* display last inserted text
*/
if ((p = get_last_insert()) != NULL
&& (arg == NULL || vim_strchr(arg, '.') != NULL) && !got_int)
{
MSG_PUTS("\n\". ");
dis_msg(p, TRUE);
}
/*
* display last command line
*/
if (last_cmdline != NULL && (arg == NULL || vim_strchr(arg, ':') != NULL)
&& !got_int)
{
MSG_PUTS("\n\": ");
dis_msg(last_cmdline, FALSE);
}
/*
* display current file name
*/
if (curbuf->b_fname != NULL
&& (arg == NULL || vim_strchr(arg, '%') != NULL) && !got_int)
{
MSG_PUTS("\n\"% ");
dis_msg(curbuf->b_fname, FALSE);
}
/*
* display alternate file name
*/
if ((arg == NULL || vim_strchr(arg, '%') != NULL) && !got_int)
{
char_u *fname;
linenr_T dummy;
if (buflist_name_nr(0, &fname, &dummy) != FAIL)
{
MSG_PUTS("\n\"# ");
dis_msg(fname, FALSE);
}
}
/*
* display last search pattern
*/
if (last_search_pat() != NULL
&& (arg == NULL || vim_strchr(arg, '/') != NULL) && !got_int)
{
MSG_PUTS("\n\"/ ");
dis_msg(last_search_pat(), FALSE);
}
#ifdef FEAT_EVAL
/*
* display last used expression
*/
if (expr_line != NULL && (arg == NULL || vim_strchr(arg, '=') != NULL)
&& !got_int)
{
MSG_PUTS("\n\"= ");
dis_msg(expr_line, FALSE);
}
#endif
}
/*
* display a string for do_dis()
* truncate at end of screen line
*/
static void
dis_msg(
char_u *p,
int skip_esc) /* if TRUE, ignore trailing ESC */
{
int n;
#ifdef FEAT_MBYTE
int l;
#endif
n = (int)Columns - 6;
while (*p != NUL
&& !(*p == ESC && skip_esc && *(p + 1) == NUL)
&& (n -= ptr2cells(p)) >= 0)
{
#ifdef FEAT_MBYTE
if (has_mbyte && (l = (*mb_ptr2len)(p)) > 1)
{
msg_outtrans_len(p, l);
p += l;
}
else
#endif
msg_outtrans_len(p++, 1);
}
ui_breakcheck();
}
#if defined(FEAT_COMMENTS) || defined(PROTO)
/*
* If "process" is TRUE and the line begins with a comment leader (possibly
* after some white space), return a pointer to the text after it. Put a boolean
* value indicating whether the line ends with an unclosed comment in
* "is_comment".
* line - line to be processed,
* process - if FALSE, will only check whether the line ends with an unclosed
* comment,
* include_space - whether to also skip space following the comment leader,
* is_comment - will indicate whether the current line ends with an unclosed
* comment.
*/
char_u *
skip_comment(
char_u *line,
int process,
int include_space,
int *is_comment)
{
char_u *comment_flags = NULL;
int lead_len;
int leader_offset = get_last_leader_offset(line, &comment_flags);
*is_comment = FALSE;
if (leader_offset != -1)
{
/* Let's check whether the line ends with an unclosed comment.
* If the last comment leader has COM_END in flags, there's no comment.
*/
while (*comment_flags)
{
if (*comment_flags == COM_END
|| *comment_flags == ':')
break;
++comment_flags;
}
if (*comment_flags != COM_END)
*is_comment = TRUE;
}
if (process == FALSE)
return line;
lead_len = get_leader_len(line, &comment_flags, FALSE, include_space);
if (lead_len == 0)
return line;
/* Find:
* - COM_END,
* - colon,
* whichever comes first.
*/
while (*comment_flags)
{
if (*comment_flags == COM_END
|| *comment_flags == ':')
{
break;
}
++comment_flags;
}
/* If we found a colon, it means that we are not processing a line
* starting with a closing part of a three-part comment. That's good,
* because we don't want to remove those as this would be annoying.
*/
if (*comment_flags == ':' || *comment_flags == NUL)
line += lead_len;
return line;
}
#endif
/*
* Join 'count' lines (minimal 2) at cursor position.
* When "save_undo" is TRUE save lines for undo first.
* Set "use_formatoptions" to FALSE when e.g. processing backspace and comment
* leaders should not be removed.
* When setmark is TRUE, sets the '[ and '] mark, else, the caller is expected
* to set those marks.
*
* return FAIL for failure, OK otherwise
*/
int
do_join(
long count,
int insert_space,
int save_undo,
int use_formatoptions UNUSED,
int setmark)
{
char_u *curr = NULL;
char_u *curr_start = NULL;
char_u *cend;
char_u *newp;
char_u *spaces; /* number of spaces inserted before a line */
int endcurr1 = NUL;
int endcurr2 = NUL;
int currsize = 0; /* size of the current line */
int sumsize = 0; /* size of the long new line */
linenr_T t;
colnr_T col = 0;
int ret = OK;
#if defined(FEAT_COMMENTS) || defined(PROTO)
int *comments = NULL;
int remove_comments = (use_formatoptions == TRUE)
&& has_format_option(FO_REMOVE_COMS);
int prev_was_comment;
#endif
if (save_undo && u_save((linenr_T)(curwin->w_cursor.lnum - 1),
(linenr_T)(curwin->w_cursor.lnum + count)) == FAIL)
return FAIL;
/* Allocate an array to store the number of spaces inserted before each
* line. We will use it to pre-compute the length of the new line and the
* proper placement of each original line in the new one. */
spaces = lalloc_clear((long_u)count, TRUE);
if (spaces == NULL)
return FAIL;
#if defined(FEAT_COMMENTS) || defined(PROTO)
if (remove_comments)
{
comments = (int *)lalloc_clear((long_u)count * sizeof(int), TRUE);
if (comments == NULL)
{
vim_free(spaces);
return FAIL;
}
}
#endif
/*
* Don't move anything, just compute the final line length
* and setup the array of space strings lengths
*/
for (t = 0; t < count; ++t)
{
curr = curr_start = ml_get((linenr_T)(curwin->w_cursor.lnum + t));
if (t == 0 && setmark)
{
/* Set the '[ mark. */
curwin->w_buffer->b_op_start.lnum = curwin->w_cursor.lnum;
curwin->w_buffer->b_op_start.col = (colnr_T)STRLEN(curr);
}
#if defined(FEAT_COMMENTS) || defined(PROTO)
if (remove_comments)
{
/* We don't want to remove the comment leader if the
* previous line is not a comment. */
if (t > 0 && prev_was_comment)
{
char_u *new_curr = skip_comment(curr, TRUE, insert_space,
&prev_was_comment);
comments[t] = (int)(new_curr - curr);
curr = new_curr;
}
else
curr = skip_comment(curr, FALSE, insert_space,
&prev_was_comment);
}
#endif
if (insert_space && t > 0)
{
curr = skipwhite(curr);
if (*curr != ')' && currsize != 0 && endcurr1 != TAB
#ifdef FEAT_MBYTE
&& (!has_format_option(FO_MBYTE_JOIN)
|| (mb_ptr2char(curr) < 0x100 && endcurr1 < 0x100))
&& (!has_format_option(FO_MBYTE_JOIN2)
|| mb_ptr2char(curr) < 0x100 || endcurr1 < 0x100)
#endif
)
{
/* don't add a space if the line is ending in a space */
if (endcurr1 == ' ')
endcurr1 = endcurr2;
else
++spaces[t];
/* extra space when 'joinspaces' set and line ends in '.' */
if ( p_js
&& (endcurr1 == '.'
|| (vim_strchr(p_cpo, CPO_JOINSP) == NULL
&& (endcurr1 == '?' || endcurr1 == '!'))))
++spaces[t];
}
}
currsize = (int)STRLEN(curr);
sumsize += currsize + spaces[t];
endcurr1 = endcurr2 = NUL;
if (insert_space && currsize > 0)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
cend = curr + currsize;
MB_PTR_BACK(curr, cend);
endcurr1 = (*mb_ptr2char)(cend);
if (cend > curr)
{
MB_PTR_BACK(curr, cend);
endcurr2 = (*mb_ptr2char)(cend);
}
}
else
#endif
{
endcurr1 = *(curr + currsize - 1);
if (currsize > 1)
endcurr2 = *(curr + currsize - 2);
}
}
line_breakcheck();
if (got_int)
{
ret = FAIL;
goto theend;
}
}
/* store the column position before last line */
col = sumsize - currsize - spaces[count - 1];
/* allocate the space for the new line */
    newp = alloc_check((unsigned)(sumsize + 1));
    if (newp == NULL)       /* out of memory; "theend" frees "spaces" */
    {
        ret = FAIL;
        goto theend;
    }
    cend = newp + sumsize;
    *cend = 0;
/*
* Move affected lines to the new long one.
*
* Move marks from each deleted line to the joined line, adjusting the
* column. This is not Vi compatible, but Vi deletes the marks, thus that
* should not really be a problem.
*/
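    /* Work backwards from the last line, so that "curr" and "currsize"
     * (still set up for the last line by the loop above) can be used right
     * away. */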
for (t = count - 1; ; --t)
{
cend -= currsize;
mch_memmove(cend, curr, (size_t)currsize);
if (spaces[t] > 0)
{
cend -= spaces[t];
vim_memset(cend, ' ', (size_t)(spaces[t]));
}
mark_col_adjust(curwin->w_cursor.lnum + t, (colnr_T)0, (linenr_T)-t,
(long)(cend - newp + spaces[t] - (curr - curr_start)));
if (t == 0)
break;
curr = curr_start = ml_get((linenr_T)(curwin->w_cursor.lnum + t - 1));
#if defined(FEAT_COMMENTS) || defined(PROTO)
if (remove_comments)
curr += comments[t - 1];
#endif
if (insert_space && t > 1)
curr = skipwhite(curr);
currsize = (int)STRLEN(curr);
}
ml_replace(curwin->w_cursor.lnum, newp, FALSE);
if (setmark)
{
/* Set the '] mark. */
curwin->w_buffer->b_op_end.lnum = curwin->w_cursor.lnum;
curwin->w_buffer->b_op_end.col = (colnr_T)STRLEN(newp);
}
/* Only report the change in the first line here, del_lines() will report
* the deleted line. */
changed_lines(curwin->w_cursor.lnum, currsize,
curwin->w_cursor.lnum + 1, 0L);
/*
* Delete following lines. To do this we move the cursor there
* briefly, and then move it back. After del_lines() the cursor may
* have moved up (last line deleted), so the current lnum is kept in t.
*/
t = curwin->w_cursor.lnum;
++curwin->w_cursor.lnum;
del_lines(count - 1, FALSE);
curwin->w_cursor.lnum = t;
/*
* Set the cursor column:
* Vi compatible: use the column of the first join
* vim: use the column of the last join
*/
curwin->w_cursor.col =
(vim_strchr(p_cpo, CPO_JOINCOL) != NULL ? currsize : col);
check_cursor_col();
#ifdef FEAT_VIRTUALEDIT
curwin->w_cursor.coladd = 0;
#endif
curwin->w_set_curswant = TRUE;
theend:
vim_free(spaces);
#if defined(FEAT_COMMENTS) || defined(PROTO)
if (remove_comments)
vim_free(comments);
#endif
return ret;
}
#ifdef FEAT_COMMENTS
/*
* Return TRUE if the two comment leaders given are the same. "lnum" is
* the first line. White-space is ignored. Note that the whole of
* 'leader1' must match 'leader2_len' characters from 'leader2' -- webb
*/
static int
same_leader(
linenr_T lnum,
int leader1_len,
char_u *leader1_flags,
int leader2_len,
char_u *leader2_flags)
{
int idx1 = 0, idx2 = 0;
char_u *p;
char_u *line1;
char_u *line2;
if (leader1_len == 0)
return (leader2_len == 0);
/*
* If first leader has 'f' flag, the lines can be joined only if the
* second line does not have a leader.
* If first leader has 'e' flag, the lines can never be joined.
 * If first leader has 's' flag, the lines can only be joined if there is
* some text after it and the second line has the 'm' flag.
*/
if (leader1_flags != NULL)
{
for (p = leader1_flags; *p && *p != ':'; ++p)
{
if (*p == COM_FIRST)
return (leader2_len == 0);
if (*p == COM_END)
return FALSE;
if (*p == COM_START)
{
if (*(ml_get(lnum) + leader1_len) == NUL)
return FALSE;
if (leader2_flags == NULL || leader2_len == 0)
return FALSE;
for (p = leader2_flags; *p && *p != ':'; ++p)
if (*p == COM_MIDDLE)
return TRUE;
return FALSE;
}
}
}
/*
* Get current line and next line, compare the leaders.
* The first line has to be saved, only one line can be locked at a time.
*/
line1 = vim_strsave(ml_get(lnum));
if (line1 != NULL)
{
for (idx1 = 0; VIM_ISWHITE(line1[idx1]); ++idx1)
;
line2 = ml_get(lnum + 1);
for (idx2 = 0; idx2 < leader2_len; ++idx2)
{
if (!VIM_ISWHITE(line2[idx2]))
{
if (line1[idx1++] != line2[idx2])
break;
}
else
while (VIM_ISWHITE(line1[idx1]))
++idx1;
}
vim_free(line1);
}
return (idx2 == leader2_len && idx1 == leader1_len);
}
#endif
/*
* Implementation of the format operator 'gq'.
*/
void
op_format(
oparg_T *oap,
int keep_cursor) /* keep cursor on same text char */
{
long old_line_count = curbuf->b_ml.ml_line_count;
/* Place the cursor where the "gq" or "gw" command was given, so that "u"
* can put it back there. */
curwin->w_cursor = oap->cursor_start;
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return;
curwin->w_cursor = oap->start;
if (oap->is_VIsual)
/* When there is no change: need to remove the Visual selection */
redraw_curbuf_later(INVERTED);
/* Set '[ mark at the start of the formatted area */
curbuf->b_op_start = oap->start;
/* For "gw" remember the cursor position and put it back below (adjusted
* for joined and split lines). */
if (keep_cursor)
saved_cursor = oap->cursor_start;
format_lines(oap->line_count, keep_cursor);
/*
* Leave the cursor at the first non-blank of the last formatted line.
* If the cursor was moved one line back (e.g. with "Q}") go to the next
* line, so "." will do the next lines.
*/
if (oap->end_adjusted && curwin->w_cursor.lnum < curbuf->b_ml.ml_line_count)
++curwin->w_cursor.lnum;
beginline(BL_WHITE | BL_FIX);
old_line_count = curbuf->b_ml.ml_line_count - old_line_count;
msgmore(old_line_count);
/* put '] mark on the end of the formatted area */
curbuf->b_op_end = curwin->w_cursor;
if (keep_cursor)
{
curwin->w_cursor = saved_cursor;
saved_cursor.lnum = 0;
}
if (oap->is_VIsual)
{
win_T *wp;
FOR_ALL_WINDOWS(wp)
{
if (wp->w_old_cursor_lnum != 0)
{
/* When lines have been inserted or deleted, adjust the end of
* the Visual area to be redrawn. */
if (wp->w_old_cursor_lnum > wp->w_old_visual_lnum)
wp->w_old_cursor_lnum += old_line_count;
else
wp->w_old_visual_lnum += old_line_count;
}
}
}
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Implementation of the format operator 'gq' for when using 'formatexpr'.
*/
void
op_formatexpr(oparg_T *oap)
{
if (oap->is_VIsual)
/* When there is no change: need to remove the Visual selection */
redraw_curbuf_later(INVERTED);
if (fex_format(oap->start.lnum, oap->line_count, NUL) != 0)
/* As documented: when 'formatexpr' returns non-zero fall back to
* internal formatting. */
op_format(oap, FALSE);
}
int
fex_format(
linenr_T lnum,
long count,
int c) /* character to be inserted */
{
int use_sandbox = was_set_insecurely((char_u *)"formatexpr",
OPT_LOCAL);
int r;
char_u *fex;
/*
* Set v:lnum to the first line number and v:count to the number of lines.
* Set v:char to the character to be inserted (can be NUL).
*/
set_vim_var_nr(VV_LNUM, lnum);
set_vim_var_nr(VV_COUNT, count);
set_vim_var_char(c);
/* Make a copy, the option could be changed while calling it. */
fex = vim_strsave(curbuf->b_p_fex);
if (fex == NULL)
return 0;
/*
* Evaluate the function.
*/
if (use_sandbox)
++sandbox;
r = (int)eval_to_number(fex);
if (use_sandbox)
--sandbox;
set_vim_var_string(VV_CHAR, NULL, -1);
vim_free(fex);
return r;
}
#endif
/*
* Format "line_count" lines, starting at the cursor position.
* When "line_count" is negative, format until the end of the paragraph.
* Lines after the cursor line are saved for undo, caller must have saved the
* first line.
*/
void
format_lines(
linenr_T line_count,
int avoid_fex) /* don't use 'formatexpr' */
{
int max_len;
int is_not_par; /* current line not part of parag. */
int next_is_not_par; /* next line not part of paragraph */
int is_end_par; /* at end of paragraph */
    int prev_is_end_par = FALSE; /* prev. line was end of paragraph */
int next_is_start_par = FALSE;
#ifdef FEAT_COMMENTS
int leader_len = 0; /* leader len of current line */
int next_leader_len; /* leader len of next line */
char_u *leader_flags = NULL; /* flags for leader of current line */
char_u *next_leader_flags; /* flags for leader of next line */
int do_comments; /* format comments */
int do_comments_list = 0; /* format comments with 'n' or '2' */
#endif
int advance = TRUE;
int second_indent = -1; /* indent for second line (comment
* aware) */
int do_second_indent;
int do_number_indent;
int do_trail_white;
int first_par_line = TRUE;
int smd_save;
long count;
int need_set_indent = TRUE; /* set indent of next paragraph */
int force_format = FALSE;
int old_State = State;
/* length of a line to force formatting: 3 * 'tw' */
max_len = comp_textwidth(TRUE) * 3;
/* check for 'q', '2' and '1' in 'formatoptions' */
#ifdef FEAT_COMMENTS
do_comments = has_format_option(FO_Q_COMS);
#endif
do_second_indent = has_format_option(FO_Q_SECOND);
do_number_indent = has_format_option(FO_Q_NUMBER);
do_trail_white = has_format_option(FO_WHITE_PAR);
/*
* Get info about the previous and current line.
*/
if (curwin->w_cursor.lnum > 1)
is_not_par = fmt_check_par(curwin->w_cursor.lnum - 1
#ifdef FEAT_COMMENTS
, &leader_len, &leader_flags, do_comments
#endif
);
else
is_not_par = TRUE;
next_is_not_par = fmt_check_par(curwin->w_cursor.lnum
#ifdef FEAT_COMMENTS
, &next_leader_len, &next_leader_flags, do_comments
#endif
);
is_end_par = (is_not_par || next_is_not_par);
if (!is_end_par && do_trail_white)
is_end_par = !ends_in_white(curwin->w_cursor.lnum - 1);
curwin->w_cursor.lnum--;
for (count = line_count; count != 0 && !got_int; --count)
{
/*
* Advance to next paragraph.
*/
if (advance)
{
curwin->w_cursor.lnum++;
prev_is_end_par = is_end_par;
is_not_par = next_is_not_par;
#ifdef FEAT_COMMENTS
leader_len = next_leader_len;
leader_flags = next_leader_flags;
#endif
}
/*
* The last line to be formatted.
*/
if (count == 1 || curwin->w_cursor.lnum == curbuf->b_ml.ml_line_count)
{
next_is_not_par = TRUE;
#ifdef FEAT_COMMENTS
next_leader_len = 0;
next_leader_flags = NULL;
#endif
}
else
{
next_is_not_par = fmt_check_par(curwin->w_cursor.lnum + 1
#ifdef FEAT_COMMENTS
, &next_leader_len, &next_leader_flags, do_comments
#endif
);
if (do_number_indent)
next_is_start_par =
(get_number_indent(curwin->w_cursor.lnum + 1) > 0);
}
advance = TRUE;
is_end_par = (is_not_par || next_is_not_par || next_is_start_par);
if (!is_end_par && do_trail_white)
is_end_par = !ends_in_white(curwin->w_cursor.lnum);
/*
* Skip lines that are not in a paragraph.
*/
if (is_not_par)
{
if (line_count < 0)
break;
}
else
{
/*
* For the first line of a paragraph, check indent of second line.
* Don't do this for comments and empty lines.
*/
if (first_par_line
&& (do_second_indent || do_number_indent)
&& prev_is_end_par
&& curwin->w_cursor.lnum < curbuf->b_ml.ml_line_count)
{
if (do_second_indent && !LINEEMPTY(curwin->w_cursor.lnum + 1))
{
#ifdef FEAT_COMMENTS
if (leader_len == 0 && next_leader_len == 0)
{
/* no comment found */
#endif
second_indent =
get_indent_lnum(curwin->w_cursor.lnum + 1);
#ifdef FEAT_COMMENTS
}
else
{
second_indent = next_leader_len;
do_comments_list = 1;
}
#endif
}
else if (do_number_indent)
{
#ifdef FEAT_COMMENTS
if (leader_len == 0 && next_leader_len == 0)
{
/* no comment found */
#endif
second_indent =
get_number_indent(curwin->w_cursor.lnum);
#ifdef FEAT_COMMENTS
}
else
{
/* get_number_indent() is now "comment aware"... */
second_indent =
get_number_indent(curwin->w_cursor.lnum);
do_comments_list = 1;
}
#endif
}
}
/*
* When the comment leader changes, it's the end of the paragraph.
*/
if (curwin->w_cursor.lnum >= curbuf->b_ml.ml_line_count
#ifdef FEAT_COMMENTS
|| !same_leader(curwin->w_cursor.lnum,
leader_len, leader_flags,
next_leader_len, next_leader_flags)
#endif
)
is_end_par = TRUE;
/*
* If we have got to the end of a paragraph, or the line is
* getting long, format it.
*/
if (is_end_par || force_format)
{
if (need_set_indent)
/* replace indent in first line with minimal number of
* tabs and spaces, according to current options */
(void)set_indent(get_indent(), SIN_CHANGED);
/* put cursor on last non-space */
State = NORMAL; /* don't go past end-of-line */
coladvance((colnr_T)MAXCOL);
while (curwin->w_cursor.col && vim_isspace(gchar_cursor()))
dec_cursor();
/* do the formatting, without 'showmode' */
State = INSERT; /* for open_line() */
smd_save = p_smd;
p_smd = FALSE;
insertchar(NUL, INSCHAR_FORMAT
#ifdef FEAT_COMMENTS
+ (do_comments ? INSCHAR_DO_COM : 0)
+ (do_comments && do_comments_list
? INSCHAR_COM_LIST : 0)
#endif
+ (avoid_fex ? INSCHAR_NO_FEX : 0), second_indent);
State = old_State;
p_smd = smd_save;
second_indent = -1;
/* at end of par.: need to set indent of next par. */
need_set_indent = is_end_par;
if (is_end_par)
{
/* When called with a negative line count, break at the
* end of the paragraph. */
if (line_count < 0)
break;
first_par_line = TRUE;
}
force_format = FALSE;
}
/*
* When still in same paragraph, join the lines together. But
* first delete the leader from the second line.
*/
if (!is_end_par)
{
advance = FALSE;
curwin->w_cursor.lnum++;
curwin->w_cursor.col = 0;
if (line_count < 0 && u_save_cursor() == FAIL)
break;
#ifdef FEAT_COMMENTS
if (next_leader_len > 0)
{
(void)del_bytes((long)next_leader_len, FALSE, FALSE);
mark_col_adjust(curwin->w_cursor.lnum, (colnr_T)0, 0L,
(long)-next_leader_len);
} else
#endif
if (second_indent > 0) /* the "leader" for FO_Q_SECOND */
{
int indent = getwhitecols_curline();
if (indent > 0)
{
(void)del_bytes(indent, FALSE, FALSE);
mark_col_adjust(curwin->w_cursor.lnum,
(colnr_T)0, 0L, (long)-indent);
}
}
curwin->w_cursor.lnum--;
if (do_join(2, TRUE, FALSE, FALSE, FALSE) == FAIL)
{
beep_flush();
break;
}
first_par_line = FALSE;
/* If the line is getting long, format it next time */
if (STRLEN(ml_get_curline()) > (size_t)max_len)
force_format = TRUE;
else
force_format = FALSE;
}
}
line_breakcheck();
}
}
/*
* Return TRUE if line "lnum" ends in a white character.
*/
static int
ends_in_white(linenr_T lnum)
{
char_u *s = ml_get(lnum);
size_t l;
if (*s == NUL)
return FALSE;
/* Don't use STRLEN() inside VIM_ISWHITE(), SAS/C complains: "macro
* invocation may call function multiple times". */
l = STRLEN(s) - 1;
return VIM_ISWHITE(s[l]);
}
/*
* Blank lines, and lines containing only the comment leader, are left
* untouched by the formatting. The function returns TRUE in this
* case. It also returns TRUE when a line starts with the end of a comment
* ('e' in comment flags), so that this line is skipped, and not joined to the
* previous line. A new paragraph starts after a blank line, or when the
* comment leader changes -- webb.
*/
#ifdef FEAT_COMMENTS
static int
fmt_check_par(
linenr_T lnum,
int *leader_len,
char_u **leader_flags,
int do_comments)
{
char_u *flags = NULL; /* init for GCC */
char_u *ptr;
ptr = ml_get(lnum);
if (do_comments)
*leader_len = get_leader_len(ptr, leader_flags, FALSE, TRUE);
else
*leader_len = 0;
if (*leader_len > 0)
{
/*
* Search for 'e' flag in comment leader flags.
*/
flags = *leader_flags;
while (*flags && *flags != ':' && *flags != COM_END)
++flags;
}
return (*skipwhite(ptr + *leader_len) == NUL
|| (*leader_len > 0 && *flags == COM_END)
|| startPS(lnum, NUL, FALSE));
}
#else
static int
fmt_check_par(linenr_T lnum)
{
return (*skipwhite(ml_get(lnum)) == NUL || startPS(lnum, NUL, FALSE));
}
#endif
/*
* Return TRUE when a paragraph starts in line "lnum". Return FALSE when the
* previous line is in the same paragraph. Used for auto-formatting.
*/
int
paragraph_start(linenr_T lnum)
{
char_u *p;
#ifdef FEAT_COMMENTS
int leader_len = 0; /* leader len of current line */
char_u *leader_flags = NULL; /* flags for leader of current line */
int next_leader_len; /* leader len of next line */
char_u *next_leader_flags; /* flags for leader of next line */
int do_comments; /* format comments */
#endif
if (lnum <= 1)
return TRUE; /* start of the file */
p = ml_get(lnum - 1);
if (*p == NUL)
return TRUE; /* after empty line */
#ifdef FEAT_COMMENTS
do_comments = has_format_option(FO_Q_COMS);
#endif
if (fmt_check_par(lnum - 1
#ifdef FEAT_COMMENTS
, &leader_len, &leader_flags, do_comments
#endif
))
return TRUE; /* after non-paragraph line */
if (fmt_check_par(lnum
#ifdef FEAT_COMMENTS
, &next_leader_len, &next_leader_flags, do_comments
#endif
))
return TRUE; /* "lnum" is not a paragraph line */
if (has_format_option(FO_WHITE_PAR) && !ends_in_white(lnum - 1))
return TRUE; /* missing trailing space in previous line. */
if (has_format_option(FO_Q_NUMBER) && (get_number_indent(lnum) > 0))
return TRUE; /* numbered item starts in "lnum". */
#ifdef FEAT_COMMENTS
if (!same_leader(lnum - 1, leader_len, leader_flags,
next_leader_len, next_leader_flags))
return TRUE; /* change of comment leader. */
#endif
return FALSE;
}
/*
* prepare a few things for block mode yank/delete/tilde
*
* for delete:
* - textlen includes the first/last char to be (partly) deleted
* - start/endspaces is the number of columns that are taken by the
* first/last deleted char minus the number of columns that have to be
* deleted.
* for yank and tilde:
* - textlen includes the first/last char to be wholly yanked
* - start/endspaces is the number of columns of the first/last yanked char
* that are to be yanked.
*/
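/*
 * Worked example (illustrative): deleting screen columns 3-5 of the line
 * "a<Tab>b" with 'ts'=8.  The <Tab> starts at virtual column 1 and covers 7
 * screen columns, so the block falls entirely inside it: textlen is 1 (just
 * the <Tab> byte), startspaces is 2 (columns 1-2 kept as spaces) and
 * endspaces is 2 (columns 6-7 kept as spaces), which preserves the screen
 * columns outside the block.
 */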
static void
block_prep(
oparg_T *oap,
struct block_def *bdp,
linenr_T lnum,
int is_del)
{
int incr = 0;
char_u *pend;
char_u *pstart;
char_u *line;
char_u *prev_pstart;
char_u *prev_pend;
bdp->startspaces = 0;
bdp->endspaces = 0;
bdp->textlen = 0;
bdp->start_vcol = 0;
bdp->end_vcol = 0;
#ifdef FEAT_VISUALEXTRA
bdp->is_short = FALSE;
bdp->is_oneChar = FALSE;
bdp->pre_whitesp = 0;
bdp->pre_whitesp_c = 0;
bdp->end_char_vcols = 0;
#endif
bdp->start_char_vcols = 0;
line = ml_get(lnum);
pstart = line;
prev_pstart = line;
while (bdp->start_vcol < oap->start_vcol && *pstart)
{
/* Count a tab for what it's worth (if list mode not on) */
incr = lbr_chartabsize(line, pstart, (colnr_T)bdp->start_vcol);
bdp->start_vcol += incr;
#ifdef FEAT_VISUALEXTRA
if (VIM_ISWHITE(*pstart))
{
bdp->pre_whitesp += incr;
bdp->pre_whitesp_c++;
}
else
{
bdp->pre_whitesp = 0;
bdp->pre_whitesp_c = 0;
}
#endif
prev_pstart = pstart;
MB_PTR_ADV(pstart);
}
bdp->start_char_vcols = incr;
if (bdp->start_vcol < oap->start_vcol) /* line too short */
{
bdp->end_vcol = bdp->start_vcol;
#ifdef FEAT_VISUALEXTRA
bdp->is_short = TRUE;
#endif
if (!is_del || oap->op_type == OP_APPEND)
bdp->endspaces = oap->end_vcol - oap->start_vcol + 1;
}
else
{
/* notice: this converts partly selected Multibyte characters to
* spaces, too. */
bdp->startspaces = bdp->start_vcol - oap->start_vcol;
if (is_del && bdp->startspaces)
bdp->startspaces = bdp->start_char_vcols - bdp->startspaces;
pend = pstart;
bdp->end_vcol = bdp->start_vcol;
if (bdp->end_vcol > oap->end_vcol) /* it's all in one character */
{
#ifdef FEAT_VISUALEXTRA
bdp->is_oneChar = TRUE;
#endif
if (oap->op_type == OP_INSERT)
bdp->endspaces = bdp->start_char_vcols - bdp->startspaces;
else if (oap->op_type == OP_APPEND)
{
bdp->startspaces += oap->end_vcol - oap->start_vcol + 1;
bdp->endspaces = bdp->start_char_vcols - bdp->startspaces;
}
else
{
bdp->startspaces = oap->end_vcol - oap->start_vcol + 1;
if (is_del && oap->op_type != OP_LSHIFT)
{
/* just putting the sum of those two into
* bdp->startspaces doesn't work for Visual replace,
* so we have to split the tab in two */
bdp->startspaces = bdp->start_char_vcols
- (bdp->start_vcol - oap->start_vcol);
bdp->endspaces = bdp->end_vcol - oap->end_vcol - 1;
}
}
}
else
{
prev_pend = pend;
while (bdp->end_vcol <= oap->end_vcol && *pend != NUL)
{
/* Count a tab for what it's worth (if list mode not on) */
prev_pend = pend;
incr = lbr_chartabsize_adv(line, &pend, (colnr_T)bdp->end_vcol);
bdp->end_vcol += incr;
}
if (bdp->end_vcol <= oap->end_vcol
&& (!is_del
|| oap->op_type == OP_APPEND
|| oap->op_type == OP_REPLACE)) /* line too short */
{
#ifdef FEAT_VISUALEXTRA
bdp->is_short = TRUE;
#endif
/* Alternative: include spaces to fill up the block.
* Disadvantage: can lead to trailing spaces when the line is
* short where the text is put */
/* if (!is_del || oap->op_type == OP_APPEND) */
if (oap->op_type == OP_APPEND || virtual_op)
bdp->endspaces = oap->end_vcol - bdp->end_vcol
+ oap->inclusive;
else
bdp->endspaces = 0; /* replace doesn't add characters */
}
else if (bdp->end_vcol > oap->end_vcol)
{
bdp->endspaces = bdp->end_vcol - oap->end_vcol - 1;
if (!is_del && bdp->endspaces)
{
bdp->endspaces = incr - bdp->endspaces;
if (pend != pstart)
pend = prev_pend;
}
}
}
#ifdef FEAT_VISUALEXTRA
bdp->end_char_vcols = incr;
#endif
if (is_del && bdp->startspaces)
pstart = prev_pstart;
bdp->textlen = (int)(pend - pstart);
}
bdp->textcol = (colnr_T) (pstart - line);
bdp->textstart = pstart;
}
/*
* Handle the add/subtract operator.
*/
void
op_addsub(
oparg_T *oap,
linenr_T Prenum1, /* Amount of add/subtract */
int g_cmd) /* was g<c-a>/g<c-x> */
{
pos_T pos;
struct block_def bd;
int change_cnt = 0;
linenr_T amount = Prenum1;
if (!VIsual_active)
{
pos = curwin->w_cursor;
if (u_save_cursor() == FAIL)
return;
change_cnt = do_addsub(oap->op_type, &pos, 0, amount);
if (change_cnt)
changed_lines(pos.lnum, 0, pos.lnum + 1, 0L);
}
else
{
int one_change;
int length;
pos_T startpos;
if (u_save((linenr_T)(oap->start.lnum - 1),
(linenr_T)(oap->end.lnum + 1)) == FAIL)
return;
pos = oap->start;
for (; pos.lnum <= oap->end.lnum; ++pos.lnum)
{
if (oap->block_mode) /* Visual block mode */
{
block_prep(oap, &bd, pos.lnum, FALSE);
pos.col = bd.textcol;
length = bd.textlen;
}
else if (oap->motion_type == MLINE)
{
curwin->w_cursor.col = 0;
pos.col = 0;
length = (colnr_T)STRLEN(ml_get(pos.lnum));
}
else /* oap->motion_type == MCHAR */
{
if (pos.lnum == oap->start.lnum && !oap->inclusive)
dec(&(oap->end));
length = (colnr_T)STRLEN(ml_get(pos.lnum));
pos.col = 0;
if (pos.lnum == oap->start.lnum)
{
pos.col += oap->start.col;
length -= oap->start.col;
}
if (pos.lnum == oap->end.lnum)
{
length = (int)STRLEN(ml_get(oap->end.lnum));
if (oap->end.col >= length)
oap->end.col = length - 1;
length = oap->end.col - pos.col + 1;
}
}
one_change = do_addsub(oap->op_type, &pos, length, amount);
if (one_change)
{
/* Remember the start position of the first change. */
if (change_cnt == 0)
startpos = curbuf->b_op_start;
++change_cnt;
}
#ifdef FEAT_NETBEANS_INTG
if (netbeans_active() && one_change)
{
char_u *ptr = ml_get_buf(curbuf, pos.lnum, FALSE);
netbeans_removed(curbuf, pos.lnum, pos.col, (long)length);
netbeans_inserted(curbuf, pos.lnum, pos.col,
&ptr[pos.col], length);
}
#endif
if (g_cmd && one_change)
amount += Prenum1;
}
if (change_cnt)
changed_lines(oap->start.lnum, 0, oap->end.lnum + 1, 0L);
if (!change_cnt && oap->is_VIsual)
/* No change: need to remove the Visual selection */
redraw_curbuf_later(INVERTED);
/* Set '[ mark if something changed. Keep the last end
* position from do_addsub(). */
if (change_cnt > 0)
curbuf->b_op_start = startpos;
if (change_cnt > p_report)
{
if (change_cnt == 1)
MSG(_("1 line changed"));
else
smsg((char_u *)_("%ld lines changed"), change_cnt);
}
}
}
/*
* Add or subtract 'Prenum1' from a number in a line
* op_type is OP_NR_ADD or OP_NR_SUB
*
* Returns TRUE if some character was changed.
*/
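/*
 * Illustrative examples (with 'nrformats' containing "hex" and Prenum1 == 1):
 * CTRL-A on the "9" in "item9" gives "item10", CTRL-A on "0xff" gives
 * "0x100", and CTRL-X on "10" gives "9".
 */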
static int
do_addsub(
int op_type,
pos_T *pos,
int length,
linenr_T Prenum1)
{
int col;
char_u *buf1;
char_u buf2[NUMBUFLEN];
int pre; /* 'X'/'x': hex; '0': octal; 'B'/'b': bin */
static int hexupper = FALSE; /* 0xABC */
uvarnumber_T n;
uvarnumber_T oldn;
char_u *ptr;
int c;
int todel;
int dohex;
int dooct;
int dobin;
int doalp;
int firstdigit;
int subtract;
int negative = FALSE;
int was_positive = TRUE;
int visual = VIsual_active;
int did_change = FALSE;
pos_T save_cursor = curwin->w_cursor;
int maxlen = 0;
pos_T startpos;
pos_T endpos;
dohex = (vim_strchr(curbuf->b_p_nf, 'x') != NULL); /* "heX" */
dooct = (vim_strchr(curbuf->b_p_nf, 'o') != NULL); /* "Octal" */
dobin = (vim_strchr(curbuf->b_p_nf, 'b') != NULL); /* "Bin" */
doalp = (vim_strchr(curbuf->b_p_nf, 'p') != NULL); /* "alPha" */
curwin->w_cursor = *pos;
ptr = ml_get(pos->lnum);
col = pos->col;
if (*ptr == NUL)
goto theend;
/*
* First check if we are on a hexadecimal number, after the "0x".
*/
if (!VIsual_active)
{
if (dobin)
while (col > 0 && vim_isbdigit(ptr[col]))
{
--col;
#ifdef FEAT_MBYTE
if (has_mbyte)
col -= (*mb_head_off)(ptr, ptr + col);
#endif
}
if (dohex)
while (col > 0 && vim_isxdigit(ptr[col]))
{
--col;
#ifdef FEAT_MBYTE
if (has_mbyte)
col -= (*mb_head_off)(ptr, ptr + col);
#endif
}
if ( dobin
&& dohex
&& ! ((col > 0
&& (ptr[col] == 'X'
|| ptr[col] == 'x')
&& ptr[col - 1] == '0'
#ifdef FEAT_MBYTE
&& (!has_mbyte ||
!(*mb_head_off)(ptr, ptr + col - 1))
#endif
&& vim_isxdigit(ptr[col + 1]))))
{
/* In case of binary/hexadecimal pattern overlap match, rescan */
col = pos->col;
while (col > 0 && vim_isdigit(ptr[col]))
{
col--;
#ifdef FEAT_MBYTE
if (has_mbyte)
col -= (*mb_head_off)(ptr, ptr + col);
#endif
}
}
if (( dohex
&& col > 0
&& (ptr[col] == 'X'
|| ptr[col] == 'x')
&& ptr[col - 1] == '0'
#ifdef FEAT_MBYTE
&& (!has_mbyte ||
!(*mb_head_off)(ptr, ptr + col - 1))
#endif
&& vim_isxdigit(ptr[col + 1])) ||
( dobin
&& col > 0
&& (ptr[col] == 'B'
|| ptr[col] == 'b')
&& ptr[col - 1] == '0'
#ifdef FEAT_MBYTE
&& (!has_mbyte ||
!(*mb_head_off)(ptr, ptr + col - 1))
#endif
&& vim_isbdigit(ptr[col + 1])))
{
/* Found hexadecimal or binary number, move to its start. */
--col;
#ifdef FEAT_MBYTE
if (has_mbyte)
col -= (*mb_head_off)(ptr, ptr + col);
#endif
}
else
{
/*
* Search forward and then backward to find the start of number.
*/
col = pos->col;
while (ptr[col] != NUL
&& !vim_isdigit(ptr[col])
&& !(doalp && ASCII_ISALPHA(ptr[col])))
col += MB_PTR2LEN(ptr + col);
while (col > 0
&& vim_isdigit(ptr[col - 1])
&& !(doalp && ASCII_ISALPHA(ptr[col])))
{
--col;
#ifdef FEAT_MBYTE
if (has_mbyte)
col -= (*mb_head_off)(ptr, ptr + col);
#endif
}
}
}
if (visual)
{
while (ptr[col] != NUL && length > 0
&& !vim_isdigit(ptr[col])
&& !(doalp && ASCII_ISALPHA(ptr[col])))
{
int mb_len = MB_PTR2LEN(ptr + col);
col += mb_len;
length -= mb_len;
}
if (length == 0)
goto theend;
if (col > pos->col && ptr[col - 1] == '-'
#ifdef FEAT_MBYTE
&& (!has_mbyte ||
!(*mb_head_off)(ptr, ptr + col - 1))
#endif
)
{
negative = TRUE;
was_positive = FALSE;
}
}
/*
* If a number was found, and saving for undo works, replace the number.
*/
firstdigit = ptr[col];
if (!VIM_ISDIGIT(firstdigit) && !(doalp && ASCII_ISALPHA(firstdigit)))
{
beep_flush();
goto theend;
}
if (doalp && ASCII_ISALPHA(firstdigit))
{
/* decrement or increment alphabetic character */
if (op_type == OP_NR_SUB)
{
if (CharOrd(firstdigit) < Prenum1)
{
if (isupper(firstdigit))
firstdigit = 'A';
else
firstdigit = 'a';
}
else
#ifdef EBCDIC
firstdigit = EBCDIC_CHAR_ADD(firstdigit, -Prenum1);
#else
firstdigit -= Prenum1;
#endif
}
else
{
if (26 - CharOrd(firstdigit) - 1 < Prenum1)
{
if (isupper(firstdigit))
firstdigit = 'Z';
else
firstdigit = 'z';
}
else
#ifdef EBCDIC
firstdigit = EBCDIC_CHAR_ADD(firstdigit, Prenum1);
#else
firstdigit += Prenum1;
#endif
}
curwin->w_cursor.col = col;
if (!did_change)
startpos = curwin->w_cursor;
did_change = TRUE;
(void)del_char(FALSE);
ins_char(firstdigit);
endpos = curwin->w_cursor;
curwin->w_cursor.col = col;
}
else
{
if (col > 0 && ptr[col - 1] == '-'
#ifdef FEAT_MBYTE
&& (!has_mbyte ||
!(*mb_head_off)(ptr, ptr + col - 1))
#endif
&& !visual)
{
/* negative number */
--col;
negative = TRUE;
}
/* get the number value (unsigned) */
if (visual && VIsual_mode != 'V')
maxlen = (curbuf->b_visual.vi_curswant == MAXCOL
? (int)STRLEN(ptr) - col
: length);
vim_str2nr(ptr + col, &pre, &length,
0 + (dobin ? STR2NR_BIN : 0)
+ (dooct ? STR2NR_OCT : 0)
+ (dohex ? STR2NR_HEX : 0),
NULL, &n, maxlen);
/* ignore leading '-' for hex and octal and bin numbers */
if (pre && negative)
{
++col;
--length;
negative = FALSE;
}
/* add or subtract */
subtract = FALSE;
if (op_type == OP_NR_SUB)
subtract ^= TRUE;
if (negative)
subtract ^= TRUE;
oldn = n;
if (subtract)
n -= (uvarnumber_T)Prenum1;
else
n += (uvarnumber_T)Prenum1;
/* handle wraparound for decimal numbers */
if (!pre)
{
if (subtract)
{
if (n > oldn)
{
n = 1 + (n ^ (uvarnumber_T)-1);
negative ^= TRUE;
}
}
else
{
/* add */
if (n < oldn)
{
n = (n ^ (uvarnumber_T)-1);
negative ^= TRUE;
}
}
if (n == 0)
negative = FALSE;
}
if (visual && !was_positive && !negative && col > 0)
{
/* need to remove the '-' */
col--;
length++;
}
/*
* Delete the old number.
*/
curwin->w_cursor.col = col;
if (!did_change)
startpos = curwin->w_cursor;
did_change = TRUE;
todel = length;
c = gchar_cursor();
/*
* Don't include the '-' in the length, only the length of the
* part after it is kept the same.
*/
if (c == '-')
--length;
while (todel-- > 0)
{
if (c < 0x100 && isalpha(c))
{
if (isupper(c))
hexupper = TRUE;
else
hexupper = FALSE;
}
/* del_char() will mark line needing displaying */
(void)del_char(FALSE);
c = gchar_cursor();
}
/*
* Prepare the leading characters in buf1[].
* When there are many leading zeros it could be very long.
* Allocate a bit too much.
*/
buf1 = alloc((unsigned)length + NUMBUFLEN);
if (buf1 == NULL)
goto theend;
ptr = buf1;
if (negative && (!visual || was_positive))
{
*ptr++ = '-';
}
if (pre)
{
*ptr++ = '0';
--length;
}
if (pre == 'b' || pre == 'B' ||
pre == 'x' || pre == 'X')
{
*ptr++ = pre;
--length;
}
/*
* Put the number characters in buf2[].
*/
if (pre == 'b' || pre == 'B')
{
int i;
int bit = 0;
int bits = sizeof(uvarnumber_T) * 8;
/* leading zeros */
for (bit = bits; bit > 0; bit--)
if ((n >> (bit - 1)) & 0x1) break;
for (i = 0; bit > 0; bit--)
buf2[i++] = ((n >> (bit - 1)) & 0x1) ? '1' : '0';
buf2[i] = '\0';
}
else if (pre == 0)
vim_snprintf((char *)buf2, NUMBUFLEN, "%llu",
(long long unsigned)n);
else if (pre == '0')
vim_snprintf((char *)buf2, NUMBUFLEN, "%llo",
(long long unsigned)n);
else if (pre && hexupper)
vim_snprintf((char *)buf2, NUMBUFLEN, "%llX",
(long long unsigned)n);
else
vim_snprintf((char *)buf2, NUMBUFLEN, "%llx",
(long long unsigned)n);
length -= (int)STRLEN(buf2);
/*
* Adjust number of zeros to the new number of digits, so the
* total length of the number remains the same.
* Don't do this when
* the result may look like an octal number.
*/
if (firstdigit == '0' && !(dooct && pre == 0))
while (length-- > 0)
*ptr++ = '0';
*ptr = NUL;
STRCAT(buf1, buf2);
ins_str(buf1); /* insert the new number */
vim_free(buf1);
endpos = curwin->w_cursor;
if (did_change && curwin->w_cursor.col)
--curwin->w_cursor.col;
}
if (did_change)
{
/* set the '[ and '] marks */
curbuf->b_op_start = startpos;
curbuf->b_op_end = endpos;
if (curbuf->b_op_end.col > 0)
--curbuf->b_op_end.col;
}
theend:
if (visual)
curwin->w_cursor = save_cursor;
else if (did_change)
curwin->w_set_curswant = TRUE;
return did_change;
}
#ifdef FEAT_VIMINFO
static yankreg_T *y_read_regs = NULL;
#define REG_PREVIOUS 1
#define REG_EXEC 2
/*
* Prepare for reading viminfo registers when writing viminfo later.
*/
void
prepare_viminfo_registers(void)
{
y_read_regs = (yankreg_T *)alloc_clear(NUM_REGISTERS
* (int)sizeof(yankreg_T));
}
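/*
 * Free the temporary registers that were allocated for merging viminfo
 * (see prepare_viminfo_registers() above).
 */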
void
finish_viminfo_registers(void)
{
int i;
int j;
if (y_read_regs != NULL)
{
for (i = 0; i < NUM_REGISTERS; ++i)
if (y_read_regs[i].y_array != NULL)
{
for (j = 0; j < y_read_regs[i].y_size; j++)
vim_free(y_read_regs[i].y_array[j]);
vim_free(y_read_regs[i].y_array);
}
VIM_CLEAR(y_read_regs);
}
}
int
read_viminfo_register(vir_T *virp, int force)
{
int eof;
int do_it = TRUE;
int size;
int limit;
int i;
int set_prev = FALSE;
char_u *str;
char_u **array = NULL;
int new_type = MCHAR; /* init to shut up compiler */
colnr_T new_width = 0; /* init to shut up compiler */
/* We only get here (hopefully) if line[0] == '"' */
str = virp->vir_line + 1;
/* If the line starts with "" this is the y_previous register. */
if (*str == '"')
{
set_prev = TRUE;
str++;
}
if (!ASCII_ISALNUM(*str) && *str != '-')
{
if (viminfo_error("E577: ", _("Illegal register name"), virp->vir_line))
return TRUE; /* too many errors, pretend end-of-file */
do_it = FALSE;
}
get_yank_register(*str++, FALSE);
if (!force && y_current->y_array != NULL)
do_it = FALSE;
if (*str == '@')
{
/* "x@: register x used for @@ */
if (force || execreg_lastc == NUL)
execreg_lastc = str[-1];
}
size = 0;
limit = 100; /* Optimized for registers containing <= 100 lines */
if (do_it)
{
/*
* Build the new register in array[].
* y_array is kept as-is until done.
* The "do_it" flag is reset when something is wrong, in which case
* array[] needs to be freed.
*/
if (set_prev)
y_previous = y_current;
array = (char_u **)alloc((unsigned)(limit * sizeof(char_u *)));
str = skipwhite(skiptowhite(str));
if (STRNCMP(str, "CHAR", 4) == 0)
new_type = MCHAR;
else if (STRNCMP(str, "BLOCK", 5) == 0)
new_type = MBLOCK;
else
new_type = MLINE;
/* get the block width; if it's missing we get a zero, which is OK */
str = skipwhite(skiptowhite(str));
new_width = getdigits(&str);
}
while (!(eof = viminfo_readline(virp))
&& (virp->vir_line[0] == TAB || virp->vir_line[0] == '<'))
{
if (do_it)
{
if (size == limit)
{
char_u **new_array = (char_u **)
alloc((unsigned)(limit * 2 * sizeof(char_u *)));
if (new_array == NULL)
{
do_it = FALSE;
break;
}
for (i = 0; i < limit; i++)
new_array[i] = array[i];
vim_free(array);
array = new_array;
limit *= 2;
}
str = viminfo_readstring(virp, 1, TRUE);
if (str != NULL)
array[size++] = str;
else
/* error, don't store the result */
do_it = FALSE;
}
}
if (do_it)
{
/* free y_array[] */
for (i = 0; i < y_current->y_size; i++)
vim_free(y_current->y_array[i]);
vim_free(y_current->y_array);
y_current->y_type = new_type;
y_current->y_width = new_width;
y_current->y_size = size;
y_current->y_time_set = 0;
if (size == 0)
{
y_current->y_array = NULL;
}
else
{
/* Move the lines from array[] to y_array[]. */
y_current->y_array =
(char_u **)alloc((unsigned)(size * sizeof(char_u *)));
for (i = 0; i < size; i++)
{
if (y_current->y_array == NULL)
vim_free(array[i]);
else
y_current->y_array[i] = array[i];
}
}
}
else
{
/* Free array[] if it was filled. */
for (i = 0; i < size; i++)
vim_free(array[i]);
}
vim_free(array);
return eof;
}
/*
* Accept a new style register line from the viminfo, store it when it's new.
*/
void
handle_viminfo_register(garray_T *values, int force)
{
bval_T *vp = (bval_T *)values->ga_data;
int flags;
int name;
int type;
int linecount;
int width;
time_t timestamp;
yankreg_T *y_ptr;
int i;
/* Check the format:
* |{bartype},{flags},{name},{type},
* {linecount},{width},{timestamp},"line1","line2"
*/
if (values->ga_len < 6
|| vp[0].bv_type != BVAL_NR
|| vp[1].bv_type != BVAL_NR
|| vp[2].bv_type != BVAL_NR
|| vp[3].bv_type != BVAL_NR
|| vp[4].bv_type != BVAL_NR
|| vp[5].bv_type != BVAL_NR)
return;
flags = vp[0].bv_nr;
name = vp[1].bv_nr;
if (name < 0 || name >= NUM_REGISTERS)
return;
type = vp[2].bv_nr;
if (type != MCHAR && type != MLINE && type != MBLOCK)
return;
linecount = vp[3].bv_nr;
if (values->ga_len < 6 + linecount)
return;
width = vp[4].bv_nr;
if (width < 0)
return;
if (y_read_regs != NULL)
/* Reading viminfo for merging and writing. Store the register
* content, don't update the current registers. */
y_ptr = &y_read_regs[name];
else
y_ptr = &y_regs[name];
/* Do not overwrite unless forced or the timestamp is newer. */
timestamp = (time_t)vp[5].bv_nr;
if (y_ptr->y_array != NULL && !force
&& (timestamp == 0 || y_ptr->y_time_set > timestamp))
return;
if (y_ptr->y_array != NULL)
for (i = 0; i < y_ptr->y_size; i++)
vim_free(y_ptr->y_array[i]);
vim_free(y_ptr->y_array);
if (y_read_regs == NULL)
{
if (flags & REG_PREVIOUS)
y_previous = y_ptr;
if ((flags & REG_EXEC) && (force || execreg_lastc == NUL))
execreg_lastc = get_register_name(name);
}
y_ptr->y_type = type;
y_ptr->y_width = width;
y_ptr->y_size = linecount;
y_ptr->y_time_set = timestamp;
if (linecount == 0)
y_ptr->y_array = NULL;
else
{
y_ptr->y_array =
(char_u **)alloc((unsigned)(linecount * sizeof(char_u *)));
for (i = 0; i < linecount; i++)
{
if (vp[i + 6].bv_allocated)
{
y_ptr->y_array[i] = vp[i + 6].bv_string;
vp[i + 6].bv_string = NULL;
}
else
y_ptr->y_array[i] = vim_strsave(vp[i + 6].bv_string);
}
}
}
void
write_viminfo_registers(FILE *fp)
{
int i, j;
char_u *type;
char_u c;
int num_lines;
int max_num_lines;
int max_kbyte;
long len;
yankreg_T *y_ptr;
fputs(_("\n# Registers:\n"), fp);
/* Get '<' value, use old '"' value if '<' is not found. */
max_num_lines = get_viminfo_parameter('<');
if (max_num_lines < 0)
max_num_lines = get_viminfo_parameter('"');
if (max_num_lines == 0)
return;
max_kbyte = get_viminfo_parameter('s');
if (max_kbyte == 0)
return;
for (i = 0; i < NUM_REGISTERS; i++)
{
#ifdef FEAT_CLIPBOARD
/* Skip '*'/'+' register, we don't want them back next time */
if (i == STAR_REGISTER || i == PLUS_REGISTER)
continue;
#endif
#ifdef FEAT_DND
/* Neither do we want the '~' register */
if (i == TILDE_REGISTER)
continue;
#endif
/* When reading viminfo for merging and writing: Use the register from
* viminfo if it's newer. */
if (y_read_regs != NULL
&& y_read_regs[i].y_array != NULL
&& (y_regs[i].y_array == NULL ||
y_read_regs[i].y_time_set > y_regs[i].y_time_set))
y_ptr = &y_read_regs[i];
else if (y_regs[i].y_array == NULL)
continue;
else
y_ptr = &y_regs[i];
/* Skip empty registers. */
num_lines = y_ptr->y_size;
if (num_lines == 0
|| (num_lines == 1 && y_ptr->y_type == MCHAR
&& *y_ptr->y_array[0] == NUL))
continue;
if (max_kbyte > 0)
{
/* Skip register if there is more text than the maximum size. */
len = 0;
for (j = 0; j < num_lines; j++)
len += (long)STRLEN(y_ptr->y_array[j]) + 1L;
if (len > (long)max_kbyte * 1024L)
continue;
}
switch (y_ptr->y_type)
{
case MLINE:
type = (char_u *)"LINE";
break;
case MCHAR:
type = (char_u *)"CHAR";
break;
case MBLOCK:
type = (char_u *)"BLOCK";
break;
default:
sprintf((char *)IObuff, _("E574: Unknown register type %d"),
y_ptr->y_type);
emsg(IObuff);
type = (char_u *)"LINE";
break;
}
if (y_previous == &y_regs[i])
fprintf(fp, "\"");
c = get_register_name(i);
fprintf(fp, "\"%c", c);
if (c == execreg_lastc)
fprintf(fp, "@");
fprintf(fp, "\t%s\t%d\n", type, (int)y_ptr->y_width);
/* If max_num_lines < 0, then we save ALL the lines in the register */
if (max_num_lines > 0 && num_lines > max_num_lines)
num_lines = max_num_lines;
for (j = 0; j < num_lines; j++)
{
putc('\t', fp);
viminfo_writestring(fp, y_ptr->y_array[j]);
}
{
int flags = 0;
int remaining;
/* New style with a bar line. Format:
* |{bartype},{flags},{name},{type},
* {linecount},{width},{timestamp},"line1","line2"
* flags: REG_PREVIOUS - register is y_previous
* REG_EXEC - used for @@
*/
if (y_previous == &y_regs[i])
flags |= REG_PREVIOUS;
if (c == execreg_lastc)
flags |= REG_EXEC;
fprintf(fp, "|%d,%d,%d,%d,%d,%d,%ld", BARTYPE_REGISTER, flags,
i, y_ptr->y_type, num_lines, (int)y_ptr->y_width,
(long)y_ptr->y_time_set);
/* 11 chars for type/flags/name/type, 3 * 20 for numbers */
remaining = LSIZE - 71;
for (j = 0; j < num_lines; j++)
{
putc(',', fp);
--remaining;
remaining = barline_writestring(fp, y_ptr->y_array[j],
remaining);
}
putc('\n', fp);
}
}
}
#endif /* FEAT_VIMINFO */
#if defined(FEAT_CLIPBOARD) || defined(PROTO)
/*
* SELECTION / PRIMARY ('*')
*
* Text selection stuff that uses the GUI selection register '*'. When using a
* GUI this may be text from another window, otherwise it is the last text we
* had highlighted with VIsual mode. With mouse support, clicking the middle
* button performs the paste, otherwise you will need to do <"*p>. "
* If not under X, it is synonymous with the clipboard register '+'.
*
* X CLIPBOARD ('+')
*
* Text selection stuff that uses the GUI clipboard register '+'.
* Under X, this matches the standard cut/paste buffer CLIPBOARD selection.
* It will be used for unnamed cut/pasting if 'clipboard' contains "unnamed",
* otherwise you will need to do <"+p>. "
* If not under X, it is synonymous with the selection register '*'.
*/
/*
* Routine to export any final X selection we had to the environment
* so that the text is still available after Vim has exited. X selections
* only exist while the owning application exists, so we write to the
* permanent (while X runs) store CUT_BUFFER0.
* Dump the CLIPBOARD selection if we own it (it's logically the more
* 'permanent' of the two), otherwise the PRIMARY one.
* For now, use a hard-coded sanity limit of 1Mb of data.
*/
#if (defined(FEAT_X11) && defined(FEAT_CLIPBOARD)) || defined(PROTO)
void
x11_export_final_selection(void)
{
Display *dpy;
char_u *str = NULL;
long_u len = 0;
int motion_type = -1;
# ifdef FEAT_GUI
if (gui.in_use)
dpy = X_DISPLAY;
else
# endif
# ifdef FEAT_XCLIPBOARD
dpy = xterm_dpy;
# else
return;
# endif
/* Get selection to export */
if (clip_plus.owned)
motion_type = clip_convert_selection(&str, &len, &clip_plus);
else if (clip_star.owned)
motion_type = clip_convert_selection(&str, &len, &clip_star);
/* Check it's OK */
if (dpy != NULL && str != NULL && motion_type >= 0
&& len < 1024*1024 && len > 0)
{
#ifdef FEAT_MBYTE
int ok = TRUE;
/* The CUT_BUFFER0 is supposed to always contain latin1. Convert from
* 'enc' when it is a multi-byte encoding. When 'enc' is an 8-bit
* encoding conversion usually doesn't work, so keep the text as-is.
*/
if (has_mbyte)
{
vimconv_T vc;
vc.vc_type = CONV_NONE;
if (convert_setup(&vc, p_enc, (char_u *)"latin1") == OK)
{
int intlen = len;
char_u *conv_str;
vc.vc_fail = TRUE;
conv_str = string_convert(&vc, str, &intlen);
len = intlen;
if (conv_str != NULL)
{
vim_free(str);
str = conv_str;
}
else
{
ok = FALSE;
}
convert_setup(&vc, NULL, NULL);
}
else
{
ok = FALSE;
}
}
/* Do not store the string if conversion failed. Better to use any
* other selection than garbled text. */
if (ok)
#endif
{
XStoreBuffer(dpy, (char *)str, (int)len, 0);
XFlush(dpy);
}
}
vim_free(str);
}
#endif
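/*
 * Free the contents of the '*' or '+' register backing clipboard "cbd".
 */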
void
clip_free_selection(VimClipboard *cbd)
{
yankreg_T *y_ptr = y_current;
if (cbd == &clip_plus)
y_current = &y_regs[PLUS_REGISTER];
else
y_current = &y_regs[STAR_REGISTER];
free_yank_all();
y_current->y_size = 0;
y_current = y_ptr;
}
/*
* Get the selected text and put it in the gui selection register '*' or '+'.
*/
void
clip_get_selection(VimClipboard *cbd)
{
yankreg_T *old_y_previous, *old_y_current;
pos_T old_cursor;
pos_T old_visual;
int old_visual_mode;
colnr_T old_curswant;
int old_set_curswant;
pos_T old_op_start, old_op_end;
oparg_T oa;
cmdarg_T ca;
if (cbd->owned)
{
if ((cbd == &clip_plus && y_regs[PLUS_REGISTER].y_array != NULL)
|| (cbd == &clip_star && y_regs[STAR_REGISTER].y_array != NULL))
return;
/* Get the text between clip_star.start & clip_star.end */
old_y_previous = y_previous;
old_y_current = y_current;
old_cursor = curwin->w_cursor;
old_curswant = curwin->w_curswant;
old_set_curswant = curwin->w_set_curswant;
old_op_start = curbuf->b_op_start;
old_op_end = curbuf->b_op_end;
old_visual = VIsual;
old_visual_mode = VIsual_mode;
clear_oparg(&oa);
oa.regname = (cbd == &clip_plus ? '+' : '*');
oa.op_type = OP_YANK;
vim_memset(&ca, 0, sizeof(ca));
ca.oap = &oa;
ca.cmdchar = 'y';
ca.count1 = 1;
ca.retval = CA_NO_ADJ_OP_END;
do_pending_operator(&ca, 0, TRUE);
y_previous = old_y_previous;
y_current = old_y_current;
curwin->w_cursor = old_cursor;
changed_cline_bef_curs(); /* need to update w_virtcol et al */
curwin->w_curswant = old_curswant;
curwin->w_set_curswant = old_set_curswant;
curbuf->b_op_start = old_op_start;
curbuf->b_op_end = old_op_end;
VIsual = old_visual;
VIsual_mode = old_visual_mode;
}
else if (!is_clipboard_needs_update())
{
clip_free_selection(cbd);
/* Try to get selected text from another window */
clip_gen_request_selection(cbd);
}
}
/*
* Convert from the GUI selection string into the '*'/'+' register.
*/
void
clip_yank_selection(
int type,
char_u *str,
long len,
VimClipboard *cbd)
{
yankreg_T *y_ptr;
if (cbd == &clip_plus)
y_ptr = &y_regs[PLUS_REGISTER];
else
y_ptr = &y_regs[STAR_REGISTER];
clip_free_selection(cbd);
str_to_reg(y_ptr, type, str, len, 0L, FALSE);
}
/*
* Convert the '*'/'+' register into a GUI selection string returned in *str
* with length *len.
* Returns the motion type, or -1 for failure.
*/
int
clip_convert_selection(char_u **str, long_u *len, VimClipboard *cbd)
{
char_u *p;
int lnum;
int i, j;
int_u eolsize;
yankreg_T *y_ptr;
if (cbd == &clip_plus)
y_ptr = &y_regs[PLUS_REGISTER];
else
y_ptr = &y_regs[STAR_REGISTER];
#ifdef USE_CRNL
eolsize = 2;
#else
eolsize = 1;
#endif
*str = NULL;
*len = 0;
if (y_ptr->y_array == NULL)
return -1;
for (i = 0; i < y_ptr->y_size; i++)
*len += (long_u)STRLEN(y_ptr->y_array[i]) + eolsize;
/*
* Don't want newline character at end of last line if we're in MCHAR mode.
*/
if (y_ptr->y_type == MCHAR && *len >= eolsize)
*len -= eolsize;
p = *str = lalloc(*len + 1, TRUE); /* add one to avoid zero */
if (p == NULL)
return -1;
lnum = 0;
for (i = 0, j = 0; i < (int)*len; i++, j++)
{
if (y_ptr->y_array[lnum][j] == '\n')
p[i] = NUL;
else if (y_ptr->y_array[lnum][j] == NUL)
{
#ifdef USE_CRNL
p[i++] = '\r';
#endif
#ifdef USE_CR
p[i] = '\r';
#else
p[i] = '\n';
#endif
lnum++;
j = -1;
}
else
p[i] = y_ptr->y_array[lnum][j];
}
return y_ptr->y_type;
}
/*
* If we have written to a clipboard register, send the text to the clipboard.
*/
static void
may_set_selection(void)
{
if (y_current == &(y_regs[STAR_REGISTER]) && clip_star.available)
{
clip_own_selection(&clip_star);
clip_gen_set_selection(&clip_star);
}
else if (y_current == &(y_regs[PLUS_REGISTER]) && clip_plus.available)
{
clip_own_selection(&clip_plus);
clip_gen_set_selection(&clip_plus);
}
}
#endif /* FEAT_CLIPBOARD || PROTO */
#if defined(FEAT_DND) || defined(PROTO)
/*
* Replace the contents of the '~' register with str.
*/
void
dnd_yank_drag_data(char_u *str, long len)
{
yankreg_T *curr;
curr = y_current;
y_current = &y_regs[TILDE_REGISTER];
free_yank_all();
str_to_reg(y_current, MCHAR, str, len, 0L, FALSE);
y_current = curr;
}
#endif
#if defined(FEAT_EVAL) || defined(PROTO)
/*
* Return the type of a register.
* Used for getregtype()
* Returns MAUTO for error.
*/
char_u
get_reg_type(int regname, long *reglen)
{
switch (regname)
{
case '%': /* file name */
case '#': /* alternate file name */
case '=': /* expression */
case ':': /* last command line */
case '/': /* last search-pattern */
case '.': /* last inserted text */
#ifdef FEAT_SEARCHPATH
case Ctrl_F: /* Filename under cursor */
case Ctrl_P: /* Path under cursor, expand via "path" */
#endif
case Ctrl_W: /* word under cursor */
case Ctrl_A: /* WORD (mnemonic All) under cursor */
case '_': /* black hole: always empty */
return MCHAR;
}
#ifdef FEAT_CLIPBOARD
regname = may_get_selection(regname);
#endif
if (regname != NUL && !valid_yank_reg(regname, FALSE))
return MAUTO;
get_yank_register(regname, FALSE);
if (y_current->y_array != NULL)
{
if (reglen != NULL && y_current->y_type == MBLOCK)
*reglen = y_current->y_width;
return y_current->y_type;
}
return MAUTO;
}
static char_u *getreg_wrap_one_line(char_u *s, int flags);
/*
* When "flags" has GREG_LIST return a list with text "s".
* Otherwise just return "s".
*/
static char_u *
getreg_wrap_one_line(char_u *s, int flags)
{
if (flags & GREG_LIST)
{
list_T *list = list_alloc();
if (list != NULL)
{
if (list_append_string(list, NULL, -1) == FAIL)
{
list_free(list);
return NULL;
}
list->lv_first->li_tv.vval.v_string = s;
}
return (char_u *)list;
}
return s;
}
/*
* Return the contents of a register as a single allocated string.
* Used for "@r" in expressions and for getreg().
* Returns NULL for error.
* Flags:
* GREG_NO_EXPR Do not allow expression register
* GREG_EXPR_SRC For the expression register: return expression itself,
* not the result of its evaluation.
* GREG_LIST Return a list of lines in place of a single string.
*/
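/*
 * For example, at the Vim script level getreg('a') returns register "a" as a
 * single string (lines joined with NL), while getreg('a', 1, 1) passes
 * GREG_LIST and returns the lines as a List instead.
 */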
char_u *
get_reg_contents(int regname, int flags)
{
long i;
char_u *retval;
int allocated;
long len;
/* Don't allow using an expression register inside an expression */
if (regname == '=')
{
if (flags & GREG_NO_EXPR)
return NULL;
if (flags & GREG_EXPR_SRC)
return getreg_wrap_one_line(get_expr_line_src(), flags);
return getreg_wrap_one_line(get_expr_line(), flags);
}
if (regname == '@') /* "@@" is used for unnamed register */
regname = '"';
/* check for valid regname */
if (regname != NUL && !valid_yank_reg(regname, FALSE))
return NULL;
#ifdef FEAT_CLIPBOARD
regname = may_get_selection(regname);
#endif
if (get_spec_reg(regname, &retval, &allocated, FALSE))
{
if (retval == NULL)
return NULL;
if (allocated)
return getreg_wrap_one_line(retval, flags);
return getreg_wrap_one_line(vim_strsave(retval), flags);
}
get_yank_register(regname, FALSE);
if (y_current->y_array == NULL)
return NULL;
if (flags & GREG_LIST)
{
list_T *list = list_alloc();
int error = FALSE;
if (list == NULL)
return NULL;
for (i = 0; i < y_current->y_size; ++i)
if (list_append_string(list, y_current->y_array[i], -1) == FAIL)
error = TRUE;
if (error)
{
list_free(list);
return NULL;
}
return (char_u *)list;
}
/*
* Compute length of resulting string.
*/
len = 0;
for (i = 0; i < y_current->y_size; ++i)
{
len += (long)STRLEN(y_current->y_array[i]);
/*
* Insert a newline between lines and after last line if
* y_type is MLINE.
*/
if (y_current->y_type == MLINE || i < y_current->y_size - 1)
++len;
}
retval = lalloc(len + 1, TRUE);
/*
* Copy the lines of the yank register into the string.
*/
if (retval != NULL)
{
len = 0;
for (i = 0; i < y_current->y_size; ++i)
{
STRCPY(retval + len, y_current->y_array[i]);
len += (long)STRLEN(retval + len);
/*
* Insert a NL between lines and after the last line if y_type is
* MLINE.
*/
if (y_current->y_type == MLINE || i < y_current->y_size - 1)
retval[len++] = '\n';
}
retval[len] = NUL;
}
return retval;
}
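/*
 * Prepare for writing to register "name": save the previously current
 * registers and clear the target register unless appending.
 * Returns OK or FAIL.
 */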
static int
init_write_reg(
int name,
yankreg_T **old_y_previous,
yankreg_T **old_y_current,
int must_append,
int *yank_type UNUSED)
{
if (!valid_yank_reg(name, TRUE)) /* check for valid reg name */
{
emsg_invreg(name);
return FAIL;
}
/* Don't want to change the current (unnamed) register */
*old_y_previous = y_previous;
*old_y_current = y_current;
get_yank_register(name, TRUE);
if (!y_append && !must_append)
free_yank_all();
return OK;
}
static void
finish_write_reg(
int name,
yankreg_T *old_y_previous,
yankreg_T *old_y_current)
{
# ifdef FEAT_CLIPBOARD
/* Send text of clipboard register to the clipboard. */
may_set_selection();
# endif
/* ':let @" = "val"' should change the meaning of the "" register */
if (name != '"')
y_previous = old_y_previous;
y_current = old_y_current;
}
/*
* Store string "str" in register "name".
* "maxlen" is the maximum number of bytes to use, -1 for all bytes.
* If "must_append" is TRUE, always append to the register. Otherwise append
* if "name" is an uppercase letter.
* Note: "maxlen" and "must_append" don't work for the "/" register.
* Careful: 'str' is modified, you may have to use a copy!
* If "str" ends in '\n' or '\r', use linewise, otherwise use characterwise.
*/
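/*
 * For example, ":let @a = "one\ntwo\n"" stores a linewise register with two
 * lines because the string ends in a newline, ":let @a = "one"" stores a
 * characterwise register, and the uppercase form ":let @A = ..." appends.
 */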
void
write_reg_contents(
int name,
char_u *str,
int maxlen,
int must_append)
{
write_reg_contents_ex(name, str, maxlen, must_append, MAUTO, 0L);
}
void
write_reg_contents_lst(
int name,
char_u **strings,
int maxlen UNUSED,
int must_append,
int yank_type,
long block_len)
{
yankreg_T *old_y_previous, *old_y_current;
if (name == '/'
#ifdef FEAT_EVAL
|| name == '='
#endif
)
{
char_u *s;
if (strings[0] == NULL)
s = (char_u *)"";
else if (strings[1] != NULL)
{
EMSG(_("E883: search pattern and expression register may not "
"contain two or more lines"));
return;
}
else
s = strings[0];
write_reg_contents_ex(name, s, -1, must_append, yank_type, block_len);
return;
}
if (name == '_') /* black hole: nothing to do */
return;
if (init_write_reg(name, &old_y_previous, &old_y_current, must_append,
&yank_type) == FAIL)
return;
str_to_reg(y_current, yank_type, (char_u *) strings, -1, block_len, TRUE);
finish_write_reg(name, old_y_previous, old_y_current);
}
void
write_reg_contents_ex(
int name,
char_u *str,
int maxlen,
int must_append,
int yank_type,
long block_len)
{
yankreg_T *old_y_previous, *old_y_current;
long len;
if (maxlen >= 0)
len = maxlen;
else
len = (long)STRLEN(str);
/* Special case: '/' search pattern */
if (name == '/')
{
set_last_search_pat(str, RE_SEARCH, TRUE, TRUE);
return;
}
if (name == '#')
{
buf_T *buf;
if (VIM_ISDIGIT(*str))
{
int num = atoi((char *)str);
buf = buflist_findnr(num);
if (buf == NULL)
EMSGN(_(e_nobufnr), (long)num);
}
else
buf = buflist_findnr(buflist_findpat(str, str + STRLEN(str),
TRUE, FALSE, FALSE));
if (buf == NULL)
return;
curwin->w_alt_fnum = buf->b_fnum;
return;
}
#ifdef FEAT_EVAL
if (name == '=')
{
char_u *p, *s;
p = vim_strnsave(str, (int)len);
if (p == NULL)
return;
if (must_append)
{
s = concat_str(get_expr_line_src(), p);
vim_free(p);
p = s;
}
set_expr_line(p);
return;
}
#endif
if (name == '_') /* black hole: nothing to do */
return;
if (init_write_reg(name, &old_y_previous, &old_y_current, must_append,
&yank_type) == FAIL)
return;
str_to_reg(y_current, yank_type, str, len, block_len, FALSE);
finish_write_reg(name, old_y_previous, old_y_current);
}
#endif /* FEAT_EVAL */
#if defined(FEAT_CLIPBOARD) || defined(FEAT_EVAL)
/*
* Put a string into a register. When the register is not empty, the string
* is appended.
*/
static void
str_to_reg(
yankreg_T *y_ptr, /* pointer to yank register */
int yank_type, /* MCHAR, MLINE, MBLOCK, MAUTO */
char_u *str, /* string to put in register */
long len, /* length of string */
long blocklen, /* width of Visual block */
int str_list) /* TRUE if str is char_u ** */
{
int type; /* MCHAR, MLINE or MBLOCK */
int lnum;
long start;
long i;
int extra;
int newlines; /* number of lines added */
int extraline = 0; /* extra line at the end */
int append = FALSE; /* append to last line in register */
char_u *s;
char_u **ss;
char_u **pp;
long maxlen;
if (y_ptr->y_array == NULL) /* NULL means empty register */
y_ptr->y_size = 0;
if (yank_type == MAUTO)
type = ((str_list || (len > 0 && (str[len - 1] == NL
|| str[len - 1] == CAR)))
? MLINE : MCHAR);
else
type = yank_type;
/*
* Count the number of lines within the string
*/
newlines = 0;
if (str_list)
{
for (ss = (char_u **) str; *ss != NULL; ++ss)
++newlines;
}
else
{
for (i = 0; i < len; i++)
if (str[i] == '\n')
++newlines;
if (type == MCHAR || len == 0 || str[len - 1] != '\n')
{
extraline = 1;
++newlines; /* count extra newline at the end */
}
if (y_ptr->y_size > 0 && y_ptr->y_type == MCHAR)
{
append = TRUE;
--newlines; /* uncount newline when appending first line */
}
}
/* Without any lines make the register empty. */
if (y_ptr->y_size + newlines == 0)
{
VIM_CLEAR(y_ptr->y_array);
return;
}
/*
* Allocate an array to hold the pointers to the new register lines.
* If the register was not empty, move the existing lines to the new array.
*/
pp = (char_u **)lalloc_clear((y_ptr->y_size + newlines)
* sizeof(char_u *), TRUE);
if (pp == NULL) /* out of memory */
return;
for (lnum = 0; lnum < y_ptr->y_size; ++lnum)
pp[lnum] = y_ptr->y_array[lnum];
vim_free(y_ptr->y_array);
y_ptr->y_array = pp;
maxlen = 0;
/*
* Find the end of each line and save it into the array.
*/
if (str_list)
{
for (ss = (char_u **) str; *ss != NULL; ++ss, ++lnum)
{
i = (long)STRLEN(*ss);
pp[lnum] = vim_strnsave(*ss, i);
if (i > maxlen)
maxlen = i;
}
}
else
{
for (start = 0; start < len + extraline; start += i + 1)
{
for (i = start; i < len; ++i) /* find the end of the line */
if (str[i] == '\n')
break;
i -= start; /* i is now length of line */
if (i > maxlen)
maxlen = i;
if (append)
{
--lnum;
extra = (int)STRLEN(y_ptr->y_array[lnum]);
}
else
extra = 0;
s = alloc((unsigned)(i + extra + 1));
if (s == NULL)
break;
if (extra)
mch_memmove(s, y_ptr->y_array[lnum], (size_t)extra);
if (append)
vim_free(y_ptr->y_array[lnum]);
if (i)
mch_memmove(s + extra, str + start, (size_t)i);
extra += i;
s[extra] = NUL;
y_ptr->y_array[lnum++] = s;
while (--extra >= 0)
{
if (*s == NUL)
*s = '\n'; /* replace NUL with newline */
++s;
}
append = FALSE; /* only first line is appended */
}
}
y_ptr->y_type = type;
y_ptr->y_size = lnum;
if (type == MBLOCK)
y_ptr->y_width = (blocklen < 0 ? maxlen - 1 : blocklen);
else
y_ptr->y_width = 0;
#ifdef FEAT_VIMINFO
y_ptr->y_time_set = vim_time();
#endif
}
#endif /* FEAT_CLIPBOARD || FEAT_EVAL || PROTO */
void
clear_oparg(oparg_T *oap)
{
vim_memset(oap, 0, sizeof(oparg_T));
}
static varnumber_T line_count_info(char_u *line, varnumber_T *wc, varnumber_T *cc, varnumber_T limit, int eol_size);
/*
* Count the number of bytes, characters and "words" in a line.
*
* "Words" are counted by looking for boundaries between non-space and
* space characters. (it seems to produce results that match 'wc'.)
*
* Return value is byte count; word count for the line is added to "*wc".
* Char count is added to "*cc".
*
* The function will only examine the first "limit" characters in the
* line, stopping if it encounters an end-of-line (NUL byte). In that
* case, eol_size will be added to the character count to account for
* the size of the EOL character.
*/
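/*
 * Worked example: for the ASCII line "foo bar" with limit MAXCOL and
 * eol_size 1 the loop finds 2 words and 7 characters, then the EOL is
 * counted, so 2 is added to "*wc", 8 to "*cc" and 8 is returned.
 */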
static varnumber_T
line_count_info(
char_u *line,
varnumber_T *wc,
varnumber_T *cc,
varnumber_T limit,
int eol_size)
{
varnumber_T i;
varnumber_T words = 0;
varnumber_T chars = 0;
int is_word = 0;
for (i = 0; i < limit && line[i] != NUL; )
{
if (is_word)
{
if (vim_isspace(line[i]))
{
words++;
is_word = 0;
}
}
else if (!vim_isspace(line[i]))
is_word = 1;
++chars;
#ifdef FEAT_MBYTE
i += (*mb_ptr2len)(line + i);
#else
++i;
#endif
}
if (is_word)
words++;
*wc += words;
/* Add eol_size if the end of line was reached before hitting limit. */
if (i < limit && line[i] == NUL)
{
i += eol_size;
chars += eol_size;
}
*cc += chars;
return i;
}
/*
* Give some info about the position of the cursor (for "g CTRL-G").
* In Visual mode, give some info about the selected region. (In this case,
* the *_count_cursor variables store running totals for the selection.)
* When "dict" is not NULL store the info there instead of showing it.
*/
void
cursor_pos_info(dict_T *dict)
{
char_u *p;
char_u buf1[50];
char_u buf2[40];
linenr_T lnum;
varnumber_T byte_count = 0;
#ifdef FEAT_MBYTE
varnumber_T bom_count = 0;
#endif
varnumber_T byte_count_cursor = 0;
varnumber_T char_count = 0;
varnumber_T char_count_cursor = 0;
varnumber_T word_count = 0;
varnumber_T word_count_cursor = 0;
int eol_size;
varnumber_T last_check = 100000L;
long line_count_selected = 0;
pos_T min_pos, max_pos;
oparg_T oparg;
struct block_def bd;
/*
* Compute the length of the file in characters.
*/
if (curbuf->b_ml.ml_flags & ML_EMPTY)
{
if (dict == NULL)
{
MSG(_(no_lines_msg));
return;
}
}
else
{
if (get_fileformat(curbuf) == EOL_DOS)
eol_size = 2;
else
eol_size = 1;
if (VIsual_active)
{
if (LT_POS(VIsual, curwin->w_cursor))
{
min_pos = VIsual;
max_pos = curwin->w_cursor;
}
else
{
min_pos = curwin->w_cursor;
max_pos = VIsual;
}
if (*p_sel == 'e' && max_pos.col > 0)
--max_pos.col;
if (VIsual_mode == Ctrl_V)
{
#ifdef FEAT_LINEBREAK
char_u * saved_sbr = p_sbr;
/* Make 'sbr' empty for a moment to get the correct size. */
p_sbr = empty_option;
#endif
oparg.is_VIsual = 1;
oparg.block_mode = TRUE;
oparg.op_type = OP_NOP;
getvcols(curwin, &min_pos, &max_pos,
&oparg.start_vcol, &oparg.end_vcol);
#ifdef FEAT_LINEBREAK
p_sbr = saved_sbr;
#endif
if (curwin->w_curswant == MAXCOL)
oparg.end_vcol = MAXCOL;
/* Swap the start, end vcol if needed */
if (oparg.end_vcol < oparg.start_vcol)
{
oparg.end_vcol += oparg.start_vcol;
oparg.start_vcol = oparg.end_vcol - oparg.start_vcol;
oparg.end_vcol -= oparg.start_vcol;
}
}
line_count_selected = max_pos.lnum - min_pos.lnum + 1;
}
for (lnum = 1; lnum <= curbuf->b_ml.ml_line_count; ++lnum)
{
/* Check for a CTRL-C every 100000 characters. */
if (byte_count > last_check)
{
ui_breakcheck();
if (got_int)
return;
last_check = byte_count + 100000L;
}
/* Do extra processing for VIsual mode. */
if (VIsual_active
&& lnum >= min_pos.lnum && lnum <= max_pos.lnum)
{
char_u *s = NULL;
long len = 0L;
switch (VIsual_mode)
{
case Ctrl_V:
#ifdef FEAT_VIRTUALEDIT
virtual_op = virtual_active();
#endif
block_prep(&oparg, &bd, lnum, 0);
#ifdef FEAT_VIRTUALEDIT
virtual_op = MAYBE;
#endif
s = bd.textstart;
len = (long)bd.textlen;
break;
case 'V':
s = ml_get(lnum);
len = MAXCOL;
break;
case 'v':
{
colnr_T start_col = (lnum == min_pos.lnum)
? min_pos.col : 0;
colnr_T end_col = (lnum == max_pos.lnum)
? max_pos.col - start_col + 1 : MAXCOL;
s = ml_get(lnum) + start_col;
len = end_col;
}
break;
}
if (s != NULL)
{
byte_count_cursor += line_count_info(s, &word_count_cursor,
&char_count_cursor, len, eol_size);
if (lnum == curbuf->b_ml.ml_line_count
&& !curbuf->b_p_eol
&& (curbuf->b_p_bin || !curbuf->b_p_fixeol)
&& (long)STRLEN(s) < len)
byte_count_cursor -= eol_size;
}
}
else
{
/* In non-visual mode, check for the line the cursor is on */
if (lnum == curwin->w_cursor.lnum)
{
word_count_cursor += word_count;
char_count_cursor += char_count;
byte_count_cursor = byte_count +
line_count_info(ml_get(lnum),
&word_count_cursor, &char_count_cursor,
(varnumber_T)(curwin->w_cursor.col + 1),
eol_size);
}
}
/* Add to the running totals */
byte_count += line_count_info(ml_get(lnum), &word_count,
&char_count, (varnumber_T)MAXCOL,
eol_size);
}
/* Correction for when last line doesn't have an EOL. */
if (!curbuf->b_p_eol && (curbuf->b_p_bin || !curbuf->b_p_fixeol))
byte_count -= eol_size;
if (dict == NULL)
{
if (VIsual_active)
{
if (VIsual_mode == Ctrl_V && curwin->w_curswant < MAXCOL)
{
getvcols(curwin, &min_pos, &max_pos, &min_pos.col,
&max_pos.col);
vim_snprintf((char *)buf1, sizeof(buf1), _("%ld Cols; "),
(long)(oparg.end_vcol - oparg.start_vcol + 1));
}
else
buf1[0] = NUL;
if (char_count_cursor == byte_count_cursor
&& char_count == byte_count)
vim_snprintf((char *)IObuff, IOSIZE,
_("Selected %s%ld of %ld Lines; %lld of %lld Words; %lld of %lld Bytes"),
buf1, line_count_selected,
(long)curbuf->b_ml.ml_line_count,
(long long)word_count_cursor,
(long long)word_count,
(long long)byte_count_cursor,
(long long)byte_count);
else
vim_snprintf((char *)IObuff, IOSIZE,
_("Selected %s%ld of %ld Lines; %lld of %lld Words; %lld of %lld Chars; %lld of %lld Bytes"),
buf1, line_count_selected,
(long)curbuf->b_ml.ml_line_count,
(long long)word_count_cursor,
(long long)word_count,
(long long)char_count_cursor,
(long long)char_count,
(long long)byte_count_cursor,
(long long)byte_count);
}
else
{
p = ml_get_curline();
validate_virtcol();
col_print(buf1, sizeof(buf1), (int)curwin->w_cursor.col + 1,
(int)curwin->w_virtcol + 1);
col_print(buf2, sizeof(buf2), (int)STRLEN(p),
linetabsize(p));
if (char_count_cursor == byte_count_cursor
&& char_count == byte_count)
vim_snprintf((char *)IObuff, IOSIZE,
_("Col %s of %s; Line %ld of %ld; Word %lld of %lld; Byte %lld of %lld"),
(char *)buf1, (char *)buf2,
(long)curwin->w_cursor.lnum,
(long)curbuf->b_ml.ml_line_count,
(long long)word_count_cursor, (long long)word_count,
(long long)byte_count_cursor, (long long)byte_count);
else
vim_snprintf((char *)IObuff, IOSIZE,
_("Col %s of %s; Line %ld of %ld; Word %lld of %lld; Char %lld of %lld; Byte %lld of %lld"),
(char *)buf1, (char *)buf2,
(long)curwin->w_cursor.lnum,
(long)curbuf->b_ml.ml_line_count,
(long long)word_count_cursor, (long long)word_count,
(long long)char_count_cursor, (long long)char_count,
(long long)byte_count_cursor, (long long)byte_count);
}
}
#ifdef FEAT_MBYTE
bom_count = bomb_size();
if (bom_count > 0)
vim_snprintf((char *)IObuff + STRLEN(IObuff), IOSIZE,
_("(+%lld for BOM)"), (long long)bom_count);
#endif
if (dict == NULL)
{
/* Don't shorten this message, the user asked for it. */
p = p_shm;
p_shm = (char_u *)"";
msg(IObuff);
p_shm = p;
}
}
#if defined(FEAT_EVAL)
if (dict != NULL)
{
dict_add_nr_str(dict, "words", word_count, NULL);
dict_add_nr_str(dict, "chars", char_count, NULL);
dict_add_nr_str(dict, "bytes", byte_count
# ifdef FEAT_MBYTE
+ bom_count
# endif
, NULL);
dict_add_nr_str(dict, VIsual_active ? "visual_bytes" : "cursor_bytes",
byte_count_cursor, NULL);
dict_add_nr_str(dict, VIsual_active ? "visual_chars" : "cursor_chars",
char_count_cursor, NULL);
dict_add_nr_str(dict, VIsual_active ? "visual_words" : "cursor_words",
word_count_cursor, NULL);
}
#endif
}
|
from models import Song
from random import choice
def random_song(genre):
    # Fetch every song in the requested genre and pick one at random.
    results = Song.query().filter(Song.genre == genre).fetch()
    if not results:
        # No songs in this genre; avoid the IndexError that choice() would raise.
        return None
    song = choice(results)
    return {
        "title": song.song,
        "album": song.album,
        "artist": song.artist.lower(),
        "genre": genre,
    }
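

# Minimal usage sketch (assumes the datastore is already seeded;
# "rock" is a hypothetical genre, not something this module defines):
if __name__ == "__main__":
    print(random_song("rock"))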
|
/**
* Copyright (c) 2013-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule KeyBindingUtil
* @typechecks
* @flow
*/
'use strict';
var UserAgent = require('UserAgent');
var isOSX = UserAgent.isPlatform('Mac OS X');
var KeyBindingUtil = {
/**
* Check whether the ctrlKey modifier is *not* being used in conjunction with
* the altKey modifier. If they are combined, the result is an `altGraph`
* key modifier, which should not be handled by this set of key bindings.
*/
isCtrlKeyCommand: function(e: SyntheticKeyboardEvent): boolean {
return !!e.ctrlKey && !e.altKey;
},
isOptionKeyCommand: function(e: SyntheticKeyboardEvent): boolean {
return isOSX && e.altKey;
},
hasCommandModifier: function(e: SyntheticKeyboardEvent): boolean {
return isOSX ?
(!!e.metaKey && !e.altKey) :
KeyBindingUtil.isCtrlKeyCommand(e);
},
};
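/*
 * Usage sketch (hypothetical command name; getDefaultKeyBinding is the
 * Draft.js helper): a keyBindingFn can combine hasCommandModifier with a
 * key code, e.g.
 *
 *   function keyBindingFn(e) {
 *     if (e.keyCode === 83 && KeyBindingUtil.hasCommandModifier(e)) {
 *       return 'myeditor-save'; // 83 is the key code for "S"
 *     }
 *     return getDefaultKeyBinding(e);
 *   }
 */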
module.exports = KeyBindingUtil;
|
from .job import Job
from .dagman import Dagman
from .visualize import visualize
from . import utils
from .__version__ import __version__
|
# Capture the original matplotlib rcParams
import matplotlib as mpl
_orig_rc_params = mpl.rcParams.copy()
# Import seaborn objects
from .rcmod import *
from .utils import *
from .palettes import *
from .relational import *
from .regression import *
from .categorical import *
from .distributions import *
from .matrix import *
from .miscplot import *
from .axisgrid import *
from .widgets import *
from .colors import xkcd_rgb, crayons
from . import cm
__version__ = "0.10.1"
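# A minimal sketch (assumption: this restore step is illustrative and not shown
# in this file): the snapshot captured above can later undo any rcParams
# changes applied on import, e.g.
#     mpl.rcParams.update(_orig_rc_params)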
|
import '@polymer/iron-flex-layout/iron-flex-layout.js';
import '@polymer/iron-icons/iron-icons.js';
import '@polymer/paper-card/paper-card.js';
import '@polymer/paper-dialog-scrollable/paper-dialog-scrollable.js';
import '@polymer/paper-dialog/paper-dialog.js';
import '@polymer/paper-toast/paper-toast.js';
import '@vaadin/vaadin-upload/vaadin-upload.js';
import '../util-scrollable.js';
import './ncg-asset-file.js';
import * as Polymer from '@polymer/polymer';
import {MutableData} from '@polymer/polymer/lib/mixins/mutable-data';
class NcgAssetCategory extends MutableData(Polymer.PolymerElement) {
static get template() {
return Polymer.html`
<style include="nodecg-theme">
:host {
display: block;
width: 100%;
box-sizing: border-box;
}
#add {
display: flex;
align-items: center;
}
paper-card {
width: 100%;
}
#header {
@apply --layout-vertical;
background-color: #525F78;
padding: 12px 0;
}
#header-main {
@apply --layout-center;
@apply --layout-horizontal;
@apply --layout-justified;
}
#title {
@apply --paper-font-headline;
}
#files {
background-color: #2F3A4F;
max-height: 400px;
margin: 0 -16px;
padding-left: 16px;
--util-scrollable: {
padding: 0;
};
@apply --layout-vertical;
}
vaadin-upload {
width: 400px;
margin: 16px;
}
#acceptsMsg {
margin-top: 8px;
text-align: center;
}
#add {
--nodecg-background-color: #00A651;
}
#add iron-icon {
position: relative;
top: -1px;
}
</style>
<paper-toast id="toast"></paper-toast>
<div id="header">
<div id="header-main">
<span id="title">[[category.title]]</span>
<paper-button id="add" on-click="openUploadDialog">
<iron-icon icon="add"></iron-icon>
Add File(s)
</paper-button>
</div>
<div id="empty">
There are no assets in this category.
</div>
</div>
<util-scrollable id="files">
<template is="dom-repeat" items="[[files]]" as="file" mutable-data="">
<ncg-asset-file file="[[file]]" on-deleted="_handleDeleted" on-deletion-failed="_handleDeletionFailed">
</ncg-asset-file>
</template>
</util-scrollable>
<!-- 2017/03/18: Had to remove with-backdrop during the dashboard re-write -->
<paper-dialog id="uploadDialog">
<paper-dialog-scrollable>
<vaadin-upload id="uploader" target="/assets/[[collectionName]]/[[category.name]]" on-upload-start="refitUploadDialog" on-upload-before="_onUploadBefore" on-file-reject="_onFileReject" on-upload-success="_onUploadSuccess">
<template is="dom-if" if="[[category.allowedTypes.length]]">
<div id="acceptsMsg">[[acceptsMsg]]</div>
</template>
</vaadin-upload>
</paper-dialog-scrollable>
<div class="buttons">
<paper-button dialog-dismiss="">Close</paper-button>
</div>
</paper-dialog>
`;
}
static get is() {
return 'ncg-asset-category';
}
static get properties() {
return {
files: Array,
collectionName: {
type: String,
reflectToAttribute: true
},
category: Object,
categoryName: {
type: String,
reflectToAttribute: true,
computed: '_computeCategoryName(category.name)'
},
acceptsMsg: {
type: String,
computed: '_computeAcceptsMsg(category.allowedTypes)'
},
_successfulUploads: {
type: Number,
value: 0
},
_assetCategoryReplicant: {
type: Object
}
};
}
static get observers() {
return [
'_onAllowedTypesChanged(category.allowedTypes)',
'_computeAssetCategoryReplicant(category.name, collectionName)'
];
}
connectedCallback() {
super.connectedCallback();
this.$.uploadDialog.fitInto = document.body.querySelector('ncg-dashboard').shadowRoot.getElementById('pages');
this.$.uploadDialog.resetFit();
}
refitUploadDialog() {
this.$.uploadDialog.refit();
}
_onAllowedTypesChanged(allowedTypes) {
const prefixed = allowedTypes.map(type => '.' + type);
this.$.uploader.accept = prefixed.join(',');
}
_computeAcceptsMsg(allowedTypes) {
let msg = 'Accepts ';
allowedTypes.forEach((type, index) => {
type = type.toUpperCase();
if (index === 0) {
msg += type;
} else if (index === allowedTypes.length - 1) {
if (index === 1) {
msg += ' and ' + type;
} else {
msg += ', and ' + type;
}
} else {
msg += ', ' + type;
}
});
return msg;
}
_handleDeleted(e) {
this.$.toast.text = `Deleted ${e.target.file.base}`;
this.$.toast.show();
}
_handleDeletionFailed(e) {
this.$.toast.text = `Failed to delete ${e.target.file.base}`;
this.$.toast.show();
}
openUploadDialog() {
this.$.uploadDialog.open();
this.refitUploadDialog();
}
_onUploadBefore(event) {
// Custom upload request url for file
const {file} = event.detail;
file.uploadTarget = `${event.target.target}/${file.name}`;
}
_onFileReject(event) {
this.refitUploadDialog();
this.$.toast.text = `${event.detail.file.name} error: ${event.detail.error}`;
this.$.toast.open();
}
_onUploadSuccess() {
this._successfulUploads++;
}
_computeCategoryName(categoryName) {
return categoryName;
}
_computeAssetCategoryReplicant(categoryName, collectionName) {
const newRep = new NodeCG.Replicant(`assets:${categoryName}`, collectionName);
const oldRep = this._assetCategoryReplicant;
if (oldRep) {
oldRep.removeEventListener('change');
}
newRep.on('change', newVal => {
this.files = newVal;
if (Array.isArray(newVal) && newVal.length > 0) {
this.$.empty.style.display = 'none';
} else {
this.$.empty.style.display = 'block';
}
});
this._assetCategoryReplicant = newRep;
}
}
customElements.define(NcgAssetCategory.is, NcgAssetCategory);
|
/*!
Flatdoc (http://ricostacruz.com/flatdoc)
(c) 2013 Rico Sta. Cruz. MIT licensed.
Also includes:
marked
a markdown parser
(c) 2011-2013, Christopher Jeffrey. (MIT Licensed)
https://github.com/chjj/marked
base64.js
http://github.com/dankogai/js-base64
*/
!function($){var exports=this;var marked;var Flatdoc=exports.Flatdoc={};Flatdoc.run=function(options){$(function(){new Flatdoc.runner(options).run()})};Flatdoc.file=function(url){return function(callback){$.get(url).fail(function(e){callback(e,null)}).done(function(data){callback(null,data)})}};Flatdoc.github=function(repo,filepath){var url;if(filepath){url="https://api.github.com/repos/"+repo+"/contents/"+filepath}else{url="https://api.github.com/repos/"+repo+"/readme"}return function(callback){$.get(url).fail(function(e){callback(e,null)}).done(function(data){var markdown=exports.Base64.decode(data.content);callback(null,markdown)})}};Flatdoc.bitbucket=function(repo,filepath,branch){var url;if(!filepath){filepath="readme.md"}if(!branch){branch="default"}url="https://bitbucket.org/api/1.0/repositories/"+repo+"/src/"+branch+"/"+filepath;return function(callback){$.ajax({url:url,dataType:"jsonp",error:function(xhr,status,error){alert(error)},success:function(response){var markdown=response.data;callback(null,markdown)}})}};var Parser=Flatdoc.parser={};Parser.parse=function(source){marked=exports.marked;Parser.setMarkedOptions();var html=$("<div>"+marked(source));var h1=html.find("h1").eq(0);var title=h1.text();Transformer.mangle(html);var menu=Transformer.getMenu(html);return{title:title,content:html,menu:menu}};Parser.setMarkedOptions=function(){marked.setOptions({highlight:function(code,lang){if(lang){var fn=Flatdoc.highlighters[lang]||Flatdoc.highlighters.generic;return fn(code)}return code}})};var Transformer=Flatdoc.transformer={};Transformer.mangle=function($content){this.addIDs($content);this.buttonize($content);this.smartquotes($content)};Transformer.addIDs=function($content){var slugs=["","",""];$content.find("h1, h2, h3").each(function(){var $el=$(this);var num=parseInt(this.nodeName[1]);var text=$el.text();var slug=slugify(text);if(num>1)slug=slugs[num-2]+"-"+slug;slugs.length=num-1;slugs=slugs.concat([slug,slug]);$el.attr("id",slug)})};Transformer.getMenu=function($content){var root={items:[],id:"",level:0};var cache=[root];function mkdir_p(level){var obj=cache[level];if(!obj){var parent=level>1?mkdir_p(level-1):root;var obj={items:[],level:level};cache.length=level+1;cache=cache.concat([obj,obj]);parent.items.push(obj)}return obj}$content.find("h1, h2, h3").each(function(){var $el=$(this);var level=+this.nodeName.substr(1);parent=mkdir_p(level-1);var obj={section:$el.text(),items:[],level:level,id:$el.attr("id")};parent.items.push(obj);cache[level]=obj});return root};Transformer.buttonize=function($content){$content.find("a").each(function(){var $a=$(this);var m=$a.text().match(/^(.*) >$/);if(m)$a.text(m[1]).addClass("button")})};Transformer.smartquotes=function($content){var nodes=getTextNodesIn($content),len=nodes.length;for(var i=0;i<len;i++){var node=nodes[i];node.nodeValue=quotify(node.nodeValue)}};var Highlighters=Flatdoc.highlighters={};Highlighters.js=Highlighters.javascript=function(code){return code.replace(/</g,"<").replace(/>/g,">").replace(/("[^\"]*?")/g,'<span class="string">$1</span>').replace(/('[^\']*?')/g,'<span class="string">$1</span>').replace(/\/\/(.*)/gm,'<span class="comment">//$1</span>').replace(/\/\*(.*)\*\//gm,'<span class="comment">/*$1*/</span>').replace(/(\d+\.\d+)/gm,'<span class="number">$1</span>').replace(/(\d+)/gm,'<span class="number">$1</span>').replace(/\bnew *(\w+)/gm,'<span class="keyword">new</span> <span class="init">$1</span>').replace(/\b(function|new|throw|return|var|if|else)\b/gm,'<span 
class="keyword">$1</span>')};Highlighters.html=function(code){return code.replace(/</g,"<").replace(/>/g,">").replace(/("[^\"]*?")/g,'<span class="string">$1</span>').replace(/('[^\']*?')/g,'<span class="string">$1</span>').replace(/<!--(.*)-->/g,'<span class="comment"><!--$1--></span>').replace(/<([^!][^\s&]*)/g,'<<span class="keyword">$1</span>')};Highlighters.generic=function(code){return code.replace(/</g,"<").replace(/>/g,">").replace(/("[^\"]*?")/g,'<span class="string">$1</span>').replace(/('[^\']*?')/g,'<span class="string">$1</span>').replace(/(\/\/|#)(.*)/gm,'<span class="comment">$1$2</span>').replace(/(\d+\.\d+)/gm,'<span class="number">$1</span>').replace(/(\d+)/gm,'<span class="number">$1</span>')};var MenuView=Flatdoc.menuView=function(menu){var $el=$("<ul>");function process(node,$parent){var id=node.id||"root";var $li=$("<li>").attr("id",id+"-item").addClass("level-"+node.level).appendTo($parent);if(node.section){var $a=$("<a>").html(node.section).attr("id",id+"-link").attr("href","#"+node.id).addClass("level-"+node.level).appendTo($li)}if(node.items.length>0){var $ul=$("<ul>").addClass("level-"+(node.level+1)).attr("id",id+"-list").appendTo($li);node.items.forEach(function(item){process(item,$ul)})}}process(menu,$el);return $el};var Runner=Flatdoc.runner=function(options){this.initialize(options)};Runner.prototype.root='[role~="flatdoc"]';Runner.prototype.menu='[role~="flatdoc-menu"]';Runner.prototype.title='[role~="flatdoc-title"]';Runner.prototype.content='[role~="flatdoc-content"]';Runner.prototype.initialize=function(options){$.extend(this,options)};Runner.prototype.run=function(){var doc=this;$(doc.root).trigger("flatdoc:loading");doc.fetcher(function(err,markdown){if(err){console.error("[Flatdoc] fetching Markdown data failed.",err);return}var data=Flatdoc.parser.parse(markdown);doc.applyData(data,doc);$(doc.root).trigger("flatdoc:ready")})};Runner.prototype.applyData=function(data){var elements=this;elements.el("title").html(data.title);elements.el("content").html(data.content.find(">*"));elements.el("menu").html(MenuView(data.menu))};Runner.prototype.el=function(aspect){return $(this[aspect],this.root)};function getTextNodesIn(el){var exclude="iframe,pre,code";return $(el).find(":not("+exclude+")").andSelf().contents().filter(function(){return this.nodeType==3&&$(this).closest(exclude).length===0})}function quotify(a){a=a.replace(/(^|[\-\u2014\s(\["])'/g,"$1‘");a=a.replace(/'/g,"’");a=a.replace(/(^|[\-\u2014\/\[(\u2018\s])"/g,"$1“");a=a.replace(/"/g,"”");a=a.replace(/\.\.\./g,"…");a=a.replace(/--/g,"—");return a}function slugify(text){if(typeof text!=="string")return"";return text.toLowerCase().match(/[a-z0-9]+/g).join("-")}}(jQuery);/*!
* marked - a markdown parser
* Copyright (c) 2011-2013, Christopher Jeffrey. (MIT Licensed)
* https://github.com/chjj/marked
*/
!function(){var t={newline:/^\n+/,code:/^( {4}[^\n]+\n*)+/,fences:o,hr:/^( *[-*_]){3,} *(?:\n+|$)/,heading:/^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)/,nptable:o,lheading:/^([^\n]+)\n *(=|-){3,} *\n*/,blockquote:/^( *>[^\n]+(\n[^\n]+)*\n*)+/,list:/^( *)(bull) [\s\S]+?(?:hr|\n{2,}(?! )(?!\1bull )\n*|\s*$)/,html:/^ *(?:comment|closed|closing) *(?:\n{2,}|\s*$)/,def:/^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$)/,table:o,paragraph:/^((?:[^\n]+\n?(?!hr|heading|lheading|blockquote|tag|def))+)\n*/,text:/^[^\n]+/};t.bullet=/(?:[*+-]|\d+\.)/;t.item=/^( *)(bull) [^\n]*(?:\n(?!\1bull )[^\n]*)*/;t.item=l(t.item,"gm")(/bull/g,t.bullet)();t.list=l(t.list)(/bull/g,t.bullet)("hr",/\n+(?=(?: *[-*_]){3,} *(?:\n+|$))/)();t._tag="(?!(?:"+"a|em|strong|small|s|cite|q|dfn|abbr|data|time|code"+"|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo"+"|span|br|wbr|ins|del|img)\\b)\\w+(?!:/|@)\\b";t.html=l(t.html)("comment",/<!--[\s\S]*?-->/)("closed",/<(tag)[\s\S]+?<\/\1>/)("closing",/<tag(?:"[^"]*"|'[^']*'|[^'">])*?>/)(/tag/g,t._tag)();t.paragraph=l(t.paragraph)("hr",t.hr)("heading",t.heading)("lheading",t.lheading)("blockquote",t.blockquote)("tag","<"+t._tag)("def",t.def)();t.normal=h({},t);t.gfm=h({},t.normal,{fences:/^ *(`{3,}|~{3,}) *(\S+)? *\n([\s\S]+?)\s*\1 *(?:\n+|$)/,paragraph:/^/});t.gfm.paragraph=l(t.paragraph)("(?!","(?!"+t.gfm.fences.source.replace("\\1","\\2")+"|")();t.tables=h({},t.gfm,{nptable:/^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*/,table:/^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*/});function e(e){this.tokens=[];this.tokens.links={};this.options=e||a.defaults;this.rules=t.normal;if(this.options.gfm){if(this.options.tables){this.rules=t.tables}else{this.rules=t.gfm}}}e.rules=t;e.lex=function(t,n){var s=new e(n);return s.lex(t)};e.prototype.lex=function(t){t=t.replace(/\r\n|\r/g,"\n").replace(/\t/g," ").replace(/\u00a0/g," ").replace(/\u2424/g,"\n");return this.token(t,true)};e.prototype.token=function(e,n){var e=e.replace(/^ +$/gm,""),s,i,r,l,o,h,a,u,p;while(e){if(r=this.rules.newline.exec(e)){e=e.substring(r[0].length);if(r[0].length>1){this.tokens.push({type:"space"})}}if(r=this.rules.code.exec(e)){e=e.substring(r[0].length);r=r[0].replace(/^ {4}/gm,"");this.tokens.push({type:"code",text:!this.options.pedantic?r.replace(/\n+$/,""):r});continue}if(r=this.rules.fences.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"code",lang:r[2],text:r[3]});continue}if(r=this.rules.heading.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"heading",depth:r[1].length,text:r[2]});continue}if(n&&(r=this.rules.nptable.exec(e))){e=e.substring(r[0].length);h={type:"table",header:r[1].replace(/^ *| *\| *$/g,"").split(/ *\| */),align:r[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:r[3].replace(/\n$/,"").split("\n")};for(u=0;u<h.align.length;u++){if(/^ *-+: *$/.test(h.align[u])){h.align[u]="right"}else if(/^ *:-+: *$/.test(h.align[u])){h.align[u]="center"}else if(/^ *:-+ *$/.test(h.align[u])){h.align[u]="left"}else{h.align[u]=null}}for(u=0;u<h.cells.length;u++){h.cells[u]=h.cells[u].split(/ *\| */)}this.tokens.push(h);continue}if(r=this.rules.lheading.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"heading",depth:r[2]==="="?1:2,text:r[1]});continue}if(r=this.rules.hr.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"hr"});continue}if(r=this.rules.blockquote.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"blockquote_start"});r=r[0].replace(/^ *> 
?/gm,"");this.token(r,n);this.tokens.push({type:"blockquote_end"});continue}if(r=this.rules.list.exec(e)){e=e.substring(r[0].length);l=r[2];this.tokens.push({type:"list_start",ordered:l.length>1});r=r[0].match(this.rules.item);s=false;p=r.length;u=0;for(;u<p;u++){h=r[u];a=h.length;h=h.replace(/^ *([*+-]|\d+\.) +/,"");if(~h.indexOf("\n ")){a-=h.length;h=!this.options.pedantic?h.replace(new RegExp("^ {1,"+a+"}","gm"),""):h.replace(/^ {1,4}/gm,"")}if(this.options.smartLists&&u!==p-1){o=t.bullet.exec(r[u+1])[0];if(l!==o&&!(l.length>1&&o.length>1)){e=r.slice(u+1).join("\n")+e;u=p-1}}i=s||/\n\n(?!\s*$)/.test(h);if(u!==p-1){s=h[h.length-1]==="\n";if(!i)i=s}this.tokens.push({type:i?"loose_item_start":"list_item_start"});this.token(h,false);this.tokens.push({type:"list_item_end"})}this.tokens.push({type:"list_end"});continue}if(r=this.rules.html.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:this.options.sanitize?"paragraph":"html",pre:r[1]==="pre"||r[1]==="script",text:r[0]});continue}if(n&&(r=this.rules.def.exec(e))){e=e.substring(r[0].length);this.tokens.links[r[1].toLowerCase()]={href:r[2],title:r[3]};continue}if(n&&(r=this.rules.table.exec(e))){e=e.substring(r[0].length);h={type:"table",header:r[1].replace(/^ *| *\| *$/g,"").split(/ *\| */),align:r[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:r[3].replace(/(?: *\| *)?\n$/,"").split("\n")};for(u=0;u<h.align.length;u++){if(/^ *-+: *$/.test(h.align[u])){h.align[u]="right"}else if(/^ *:-+: *$/.test(h.align[u])){h.align[u]="center"}else if(/^ *:-+ *$/.test(h.align[u])){h.align[u]="left"}else{h.align[u]=null}}for(u=0;u<h.cells.length;u++){h.cells[u]=h.cells[u].replace(/^ *\| *| *\| *$/g,"").split(/ *\| */)}this.tokens.push(h);continue}if(n&&(r=this.rules.paragraph.exec(e))){e=e.substring(r[0].length);this.tokens.push({type:"paragraph",text:r[1][r[1].length-1]==="\n"?r[1].slice(0,-1):r[1]});continue}if(r=this.rules.text.exec(e)){e=e.substring(r[0].length);this.tokens.push({type:"text",text:r[0]});continue}if(e){throw new Error("Infinite loop on byte: "+e.charCodeAt(0))}}return this.tokens};var n={escape:/^\\([\\`*{}\[\]()#+\-.!_>])/,autolink:/^<([^ >]+(@|:\/)[^ >]+)>/,url:o,tag:/^<!--[\s\S]*?-->|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>/,link:/^!?\[(inside)\]\(href\)/,reflink:/^!?\[(inside)\]\s*\[([^\]]*)\]/,nolink:/^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]/,strong:/^__([\s\S]+?)__(?!_)|^\*\*([\s\S]+?)\*\*(?!\*)/,em:/^\b_((?:__|[\s\S])+?)_\b|^\*((?:\*\*|[\s\S])+?)\*(?!\*)/,code:/^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)/,br:/^ {2,}\n(?!\s*$)/,del:o,text:/^[\s\S]+?(?=[\\<!\[_*`]| {2,}\n|$)/};n._inside=/(?:\[[^\]]*\]|[^\]]|\](?=[^\[]*\]))*/;n._href=/\s*<?([^\s]*?)>?(?:\s+['"]([\s\S]*?)['"])?\s*/;n.link=l(n.link)("inside",n._inside)("href",n._href)();n.reflink=l(n.reflink)("inside",n._inside)();n.normal=h({},n);n.pedantic=h({},n.normal,{strong:/^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,em:/^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/});n.gfm=h({},n.normal,{escape:l(n.escape)("])","~|])")(),url:/^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])/,del:/^~~(?=\S)([\s\S]*?\S)~~/,text:l(n.text)("]|","~]|")("|","|https?://|")()});n.breaks=h({},n.gfm,{br:l(n.br)("{2,}","*")(),text:l(n.gfm.text)("{2,}","*")()});function s(t,e){this.options=e||a.defaults;this.links=t;this.rules=n.normal;if(!this.links){throw new Error("Tokens array requires a `links` property.")}if(this.options.gfm){if(this.options.breaks){this.rules=n.breaks}else{this.rules=n.gfm}}else if(this.options.pedantic){this.rules=n.pedantic}}s.rules=n;s.output=function(t,e,n){var i=new 
s(e,n);return i.output(t)};s.prototype.output=function(t){var e="",n,s,i,l;while(t){if(l=this.rules.escape.exec(t)){t=t.substring(l[0].length);e+=l[1];continue}if(l=this.rules.autolink.exec(t)){t=t.substring(l[0].length);if(l[2]==="@"){s=l[1][6]===":"?this.mangle(l[1].substring(7)):this.mangle(l[1]);i=this.mangle("mailto:")+s}else{s=r(l[1]);i=s}e+='<a href="'+i+'">'+s+"</a>";continue}if(l=this.rules.url.exec(t)){t=t.substring(l[0].length);s=r(l[1]);i=s;e+='<a href="'+i+'">'+s+"</a>";continue}if(l=this.rules.tag.exec(t)){t=t.substring(l[0].length);e+=this.options.sanitize?r(l[0]):l[0];continue}if(l=this.rules.link.exec(t)){t=t.substring(l[0].length);e+=this.outputLink(l,{href:l[2],title:l[3]});continue}if((l=this.rules.reflink.exec(t))||(l=this.rules.nolink.exec(t))){t=t.substring(l[0].length);n=(l[2]||l[1]).replace(/\s+/g," ");n=this.links[n.toLowerCase()];if(!n||!n.href){e+=l[0][0];t=l[0].substring(1)+t;continue}e+=this.outputLink(l,n);continue}if(l=this.rules.strong.exec(t)){t=t.substring(l[0].length);e+="<strong>"+this.output(l[2]||l[1])+"</strong>";continue}if(l=this.rules.em.exec(t)){t=t.substring(l[0].length);e+="<em>"+this.output(l[2]||l[1])+"</em>";continue}if(l=this.rules.code.exec(t)){t=t.substring(l[0].length);e+="<code>"+r(l[2],true)+"</code>";continue}if(l=this.rules.br.exec(t)){t=t.substring(l[0].length);e+="<br>";continue}if(l=this.rules.del.exec(t)){t=t.substring(l[0].length);e+="<del>"+this.output(l[1])+"</del>";continue}if(l=this.rules.text.exec(t)){t=t.substring(l[0].length);e+=r(l[0]);continue}if(t){throw new Error("Infinite loop on byte: "+t.charCodeAt(0))}}return e};s.prototype.outputLink=function(t,e){if(t[0][0]!=="!"){return'<a href="'+r(e.href)+'"'+(e.title?' title="'+r(e.title)+'"':"")+">"+this.output(t[1])+"</a>"}else{return'<img src="'+r(e.href)+'" alt="'+r(t[1])+'"'+(e.title?' title="'+r(e.title)+'"':"")+">"}};s.prototype.smartypants=function(t){if(!this.options.smartypants)return t;return t.replace(/--/g,"—").replace(/'([^']*)'/g,"‘$1’").replace(/"([^"]*)"/g,"“$1”").replace(/\.{3}/g,"…")};s.prototype.mangle=function(t){var e="",n=t.length,s=0,i;for(;s<n;s++){i=t.charCodeAt(s);if(Math.random()>.5){i="x"+i.toString(16)}e+="&#"+i+";"}return e};function i(t){this.tokens=[];this.token=null;this.options=t||a.defaults}i.parse=function(t,e){var n=new i(e);return n.parse(t)};i.prototype.parse=function(t){this.inline=new s(t.links,this.options);this.tokens=t.reverse();var e="";while(this.next()){e+=this.tok()}return e};i.prototype.next=function(){return this.token=this.tokens.pop()};i.prototype.peek=function(){return this.tokens[this.tokens.length-1]||0};i.prototype.parseText=function(){var t=this.token.text;while(this.peek().type==="text"){t+="\n"+this.next().text}return this.inline.output(t)};i.prototype.tok=function(){switch(this.token.type){case"space":{return""}case"hr":{return"<hr>\n"}case"heading":{return"<h"+this.token.depth+">"+this.inline.output(this.token.text)+"</h"+this.token.depth+">\n"}case"code":{if(this.options.highlight){var t=this.options.highlight(this.token.text,this.token.lang);if(t!=null&&t!==this.token.text){this.token.escaped=true;this.token.text=t}}if(!this.token.escaped){this.token.text=r(this.token.text,true)}return"<pre><code"+(this.token.lang?' 
class="'+this.options.langPrefix+this.token.lang+'"':"")+">"+this.token.text+"</code></pre>\n"}case"table":{var e="",n,s,i,l,o;e+="<thead>\n<tr>\n";for(s=0;s<this.token.header.length;s++){n=this.inline.output(this.token.header[s]);e+=this.token.align[s]?'<th align="'+this.token.align[s]+'">'+n+"</th>\n":"<th>"+n+"</th>\n"}e+="</tr>\n</thead>\n";e+="<tbody>\n";for(s=0;s<this.token.cells.length;s++){i=this.token.cells[s];e+="<tr>\n";for(o=0;o<i.length;o++){l=this.inline.output(i[o]);e+=this.token.align[o]?'<td align="'+this.token.align[o]+'">'+l+"</td>\n":"<td>"+l+"</td>\n"}e+="</tr>\n"}e+="</tbody>\n";return"<table>\n"+e+"</table>\n"}case"blockquote_start":{var e="";while(this.next().type!=="blockquote_end"){e+=this.tok()}return"<blockquote>\n"+e+"</blockquote>\n"}case"list_start":{var h=this.token.ordered?"ol":"ul",e="";while(this.next().type!=="list_end"){e+=this.tok()}return"<"+h+">\n"+e+"</"+h+">\n"}case"list_item_start":{var e="";while(this.next().type!=="list_item_end"){e+=this.token.type==="text"?this.parseText():this.tok()}return"<li>"+e+"</li>\n"}case"loose_item_start":{var e="";while(this.next().type!=="list_item_end"){e+=this.tok()}return"<li>"+e+"</li>\n"}case"html":{return!this.token.pre&&!this.options.pedantic?this.inline.output(this.token.text):this.token.text}case"paragraph":{return"<p>"+this.inline.output(this.token.text)+"</p>\n"}case"text":{return"<p>"+this.parseText()+"</p>\n"}}};function r(t,e){return t.replace(!e?/&(?!#?\w+;)/g:/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/'/g,"'")}function l(t,e){t=t.source;e=e||"";return function n(s,i){if(!s)return new RegExp(t,e);i=i.source||i;i=i.replace(/(^|[^\[])\^/g,"$1");t=t.replace(s,i);return n}}function o(){}o.exec=o;function h(t){var e=1,n,s;for(;e<arguments.length;e++){n=arguments[e];for(s in n){if(Object.prototype.hasOwnProperty.call(n,s)){t[s]=n[s]}}}return t}function a(t,n,s){if(s||typeof n==="function"){if(!s){s=n;n=null}if(n)n=h({},a.defaults,n);var l=e.lex(l,n),o=n.highlight,u=0,p=l.length,g=0;if(!o||o.length<3){return s(null,i.parse(l,n))}var c=function(){delete n.highlight;var t=i.parse(l,n);n.highlight=o;return s(null,t)};for(;g<p;g++){!function(t){if(t.type!=="code")return;u++;return o(t.text,t.lang,function(e,n){if(n==null||n===t.text){return--u||c()}t.text=n;t.escaped=true;--u||c()})}(l[g])}return}try{if(n)n=h({},a.defaults,n);return i.parse(e.lex(t,n),n)}catch(f){f.message+="\nPlease report this to https://github.com/chjj/marked.";if((n||a.defaults).silent){return"<p>An error occured:</p><pre>"+r(f.message+"",true)+"</pre>"}throw f}}a.options=a.setOptions=function(t){h(a.defaults,t);return a};a.defaults={gfm:true,tables:true,breaks:false,pedantic:false,sanitize:false,smartLists:false,silent:false,highlight:null,langPrefix:"lang-"};a.Parser=i;a.parser=i.parse;a.Lexer=e;a.lexer=e.lex;a.InlineLexer=s;a.inlineLexer=s.output;a.parse=a;if(typeof exports==="object"){module.exports=a}else if(typeof define==="function"&&define.amd){define(function(){return a})}else{this.marked=a}}.call(function(){return this||(typeof window!=="undefined"?window:global)}());/*!
 * base64.js
*/
!function(r){"use strict";if(r.Base64)return;var e="2.1.2";var t;if(typeof module!=="undefined"&&module.exports){t=require("buffer").Buffer}var n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";var a=function(r){var e={};for(var t=0,n=r.length;t<n;t++)e[r.charAt(t)]=t;return e}(n);var o=String.fromCharCode;var u=function(r){if(r.length<2){var e=r.charCodeAt(0);return e<128?r:e<2048?o(192|e>>>6)+o(128|e&63):o(224|e>>>12&15)+o(128|e>>>6&63)+o(128|e&63)}else{var e=65536+(r.charCodeAt(0)-55296)*1024+(r.charCodeAt(1)-56320);return o(240|e>>>18&7)+o(128|e>>>12&63)+o(128|e>>>6&63)+o(128|e&63)}};var c=/[\uD800-\uDBFF][\uDC00-\uDFFFF]|[^\x00-\x7F]/g;var i=function(r){return r.replace(c,u)};var f=function(r){var e=[0,2,1][r.length%3],t=r.charCodeAt(0)<<16|(r.length>1?r.charCodeAt(1):0)<<8|(r.length>2?r.charCodeAt(2):0),a=[n.charAt(t>>>18),n.charAt(t>>>12&63),e>=2?"=":n.charAt(t>>>6&63),e>=1?"=":n.charAt(t&63)];return a.join("")};var h=r.btoa||function(r){return r.replace(/[\s\S]{1,3}/g,f)};var d=t?function(r){return new t(r).toString("base64")}:function(r){return h(i(r))};var v=function(r,e){return!e?d(r):d(r).replace(/[+\/]/g,function(r){return r=="+"?"-":"_"}).replace(/=/g,"")};var g=function(r){return v(r,true)};var l=new RegExp(["[À-ß][-¿]","[à-ï][-¿]{2}","[ð-÷][-¿]{3}"].join("|"),"g");var A=function(r){switch(r.length){case 4:var e=(7&r.charCodeAt(0))<<18|(63&r.charCodeAt(1))<<12|(63&r.charCodeAt(2))<<6|63&r.charCodeAt(3),t=e-65536;return o((t>>>10)+55296)+o((t&1023)+56320);case 3:return o((15&r.charCodeAt(0))<<12|(63&r.charCodeAt(1))<<6|63&r.charCodeAt(2));default:return o((31&r.charCodeAt(0))<<6|63&r.charCodeAt(1))}};var s=function(r){return r.replace(l,A)};var p=function(r){var e=r.length,t=e%4,n=(e>0?a[r.charAt(0)]<<18:0)|(e>1?a[r.charAt(1)]<<12:0)|(e>2?a[r.charAt(2)]<<6:0)|(e>3?a[r.charAt(3)]:0),u=[o(n>>>16),o(n>>>8&255),o(n&255)];u.length-=[0,0,2,1][t];return u.join("")};var C=r.atob||function(r){return r.replace(/[\s\S]{1,4}/g,p)};var b=t?function(r){return new t(r,"base64").toString()}:function(r){return s(C(r))};var B=function(r){return b(r.replace(/[-_]/g,function(r){return r=="-"?"+":"/"}).replace(/[^A-Za-z0-9\+\/]/g,""))};r.Base64={VERSION:e,atob:C,btoa:h,fromBase64:B,toBase64:v,utob:i,encode:v,encodeURI:g,btou:s,decode:B};if(typeof Object.defineProperty==="function"){var S=function(r){return{value:r,enumerable:false,writable:true,configurable:true}};r.Base64.extendString=function(){Object.defineProperty(String.prototype,"fromBase64",S(function(){return B(this)}));Object.defineProperty(String.prototype,"toBase64",S(function(r){return v(this,r)}));Object.defineProperty(String.prototype,"toBase64URI",S(function(){return v(this,true)}))}}}(this);
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from codecs import open
here = os.path.abspath(os.path.dirname(__file__))
execfile(os.path.join(here, "src/fileseq/__version__.py"))
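# Note: execfile() exists only on Python 2 (consistent with the 2.7 classifier
# below); a rough Python 3 equivalent, shown here only as an illustration, would be
#     exec(open(os.path.join(here, "src/fileseq/__version__.py")).read())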
# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
descript = 'A Python library for parsing frame ranges and file ' \
'sequences based on a similar library found in Katana.'
setup(name='Fileseq',
version=__version__,
package_dir = {'': 'src'},
packages=find_packages('src'),
test_suite="test.run",
author='Matt Chambers',
author_email='yougotrooted@gmail.com',
maintainer='Justin Israel',
maintainer_email='justinisrael@gmail.com',
url='https://github.com/sqlboy/fileseq',
description=descript,
long_description=long_description,
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='vfx visual effects file sequence frames image',
)
|
import { mount } from '@vue/test-utils'
import { leftClick, findLabelContainerByNodeId } from './shared'
import Treeselect from '@src/components/Treeselect'
import { UNCHECKED, INDETERMINATE, CHECKED } from '@src/constants'
describe('Single-select', () => {
it('basic', () => {
const wrapper = mount(Treeselect, {
propsData: {
multiple: false,
options: [ {
id: 'a',
label: 'a',
children: [ {
id: 'aa',
label: 'aa',
}, {
id: 'ab',
label: 'ab',
}, {
id: 'ac',
label: 'ac',
} ],
}, {
id: 'b',
label: 'b',
} ],
},
})
const { vm } = wrapper
const { a, aa } = vm.forest.nodeMap
expect(vm.forest.selectedNodeIds).toBeEmptyArray()
vm.select(a) // select one
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
expect(vm.forest.selectedNodeMap).toEqual({ a: true })
vm.select(aa) // select another
expect(vm.forest.selectedNodeIds).toEqual([ 'aa' ])
expect(vm.forest.selectedNodeMap).toEqual({ aa: true })
vm.select(aa) // select again
expect(vm.forest.selectedNodeIds).toEqual([ 'aa' ])
expect(vm.forest.selectedNodeMap).toEqual({ aa: true })
})
it('should blur the input after selecting an option when closeOnSelect=true & searchable=true', async () => {
const wrapper = mount(Treeselect, {
sync: false,
propsData: {
options: [ {
id: 'a',
label: 'a',
} ],
multiple: false,
searchable: true,
closeOnSelect: true,
},
})
const { vm } = wrapper
vm.openMenu()
await vm.$nextTick()
const labelContainer = findLabelContainerByNodeId(wrapper, 'a')
leftClick(labelContainer)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
expect(vm.trigger.isFocused).toEqual(false)
expect(vm.menu.isOpen).toEqual(false)
})
})
describe('Multi-select', () => {
let wrapper, vm
beforeEach(() => {
wrapper = mount(Treeselect, {
propsData: {
multiple: true,
sortValueBy: 'ORDER_SELECTED',
options: [ {
id: 'a',
label: 'a',
children: [ {
id: 'aa',
label: 'aa',
children: [ {
id: 'aaa',
label: 'aaa',
}, {
id: 'aab',
label: 'aab',
} ],
}, {
id: 'ab',
label: 'ab',
} ],
}, {
id: 'b',
label: 'b',
} ],
},
})
vm = wrapper.vm
})
it('case #1', () => {
// current:
// [ ] a <- select
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [ ] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'a', 'aa', 'ab', 'aaa', 'aab' ])
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: UNCHECKED,
})
// current:
// [v] a
// |--[v] aa <- deselect
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [ ] b
// expected result:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab' ])
expect(vm.forest.selectedNodeMap).toEqual({ ab: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: CHECKED,
b: UNCHECKED,
})
// current:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [ ] b <- select
// expected result:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'b' ])
expect(vm.forest.selectedNodeMap).toEqual({ ab: true, b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [-] a
// |--[ ] aa <- select again
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [v] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'b', 'aa', 'aaa', 'aab', 'a' ]) // a should be after b
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
aaa: true,
aab: true,
ab: true,
b: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [v] a <- deselect
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
// expected result:
// [ ] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [v] b
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
expect(vm.forest.selectedNodeMap).toEqual({ b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: UNCHECKED,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: UNCHECKED,
b: CHECKED,
})
// current:
// [ ] a
// |--[ ] aa <- select
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [v] b
// expected result:
// [-] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[ ] ab
// [v] b
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'b', 'aa', 'aaa', 'aab' ])
expect(vm.forest.selectedNodeMap).toEqual({
aa: true,
aaa: true,
aab: true,
b: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: UNCHECKED,
b: CHECKED,
})
// current:
// [ ] a
// |--[v] aa <- deselect
// | |--[v] aaa
// | |--[v] aab
// |--[ ] ab
// [v] b
// expected result:
// [ ] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [v] b
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
expect(vm.forest.selectedNodeMap).toEqual({ b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: UNCHECKED,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: UNCHECKED,
b: CHECKED,
})
})
it('case #2', () => {
// current:
// [ ] a <- select
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [ ] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'a', 'aa', 'ab', 'aaa', 'aab' ])
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: UNCHECKED,
})
// current:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [ ] b <- select
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'a', 'aa', 'ab', 'aaa', 'aab', 'b' ])
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
b: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [v] a
// |--[v] aa
// | |--[v] aaa <- deselect
// | |--[v] aab
// |--[v] ab
// [v] b
// expected result:
// [-] a
// |--[-] aa
// | |--[ ] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.aaa)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'aab', 'b' ]) // keep order
expect(vm.forest.selectedNodeMap).toEqual({ aab: true, ab: true, b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: INDETERMINATE,
aaa: UNCHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [-] a
// |--[-] aa
// | |--[ ] aaa <- select again
// | |--[v] aab
// |--[v] ab
// [v] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.aaa)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'aab', 'b', 'aaa', 'aa', 'a' ]) // keep order
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
b: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
})
it('case #3', () => {
// current:
// [ ] a
// |--[ ] aa
// | |--[ ] aaa <- select
// | |--[ ] aab
// |--[ ] ab
// [ ] b
// expected result:
// [-] a
// |--[-] aa
// | |--[v] aaa
// | |--[ ] aab
// |--[ ] ab
// [ ] b
vm.select(vm.forest.nodeMap.aaa)
expect(vm.forest.selectedNodeIds).toEqual([ 'aaa' ])
expect(vm.forest.selectedNodeMap).toEqual({ aaa: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: INDETERMINATE,
aaa: CHECKED,
aab: UNCHECKED,
ab: UNCHECKED,
b: UNCHECKED,
})
// current:
// [-] a
// |--[-] aa
// | |--[v] aaa
// | |--[ ] aab
// |--[ ] ab <- select
// [ ] b
// expected result:
// [-] a
// |--[-] aa
// | |--[v] aaa
// | |--[ ] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.ab)
expect(vm.forest.selectedNodeIds).toEqual([ 'aaa', 'ab' ])
expect(vm.forest.selectedNodeMap).toEqual({ aaa: true, ab: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: INDETERMINATE,
aaa: CHECKED,
aab: UNCHECKED,
ab: CHECKED,
b: UNCHECKED,
})
// current:
// [-] a
// |--[-] aa
// | |--[v] aaa
// | |--[ ] aab <- select
// |--[v] ab
// [ ] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.aab)
expect(vm.forest.selectedNodeIds).toEqual([ 'aaa', 'ab', 'aab', 'aa', 'a' ])
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: UNCHECKED,
})
})
it('case #4', () => {
// current:
// [ ] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab <- select
// [ ] b
// expected result:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [ ] b
vm.select(vm.forest.nodeMap.ab)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab' ])
expect(vm.forest.selectedNodeMap).toEqual({ ab: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: CHECKED,
b: UNCHECKED,
})
// current:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [ ] b <- select
// expected result:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'b' ])
expect(vm.forest.selectedNodeMap).toEqual({ ab: true, b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [-] a
// |--[ ] aa
// | |--[ ] aaa
// | |--[ ] aab <- select
// |--[v] ab
// [v] b
// expected result:
// [-] a
// |--[-] aa
// | |--[ ] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.aab)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'b', 'aab' ])
expect(vm.forest.selectedNodeMap).toEqual({ aab: true, ab: true, b: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: INDETERMINATE,
aaa: UNCHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
// current:
// [-] a
// |--[-] aa
// | |--[ ] aaa <- select
// | |--[v] aab
// |--[v] ab
// [v] b
// expected result:
// [v] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[v] ab
// [v] b
vm.select(vm.forest.nodeMap.aaa)
expect(vm.forest.selectedNodeIds).toEqual([ 'ab', 'b', 'aab', 'aaa', 'aa', 'a' ]) // keep order
expect(vm.forest.selectedNodeMap).toEqual({
a: true,
aa: true,
ab: true,
aaa: true,
aab: true,
b: true,
})
expect(vm.forest.checkedStateMap).toEqual({
a: CHECKED,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: CHECKED,
b: CHECKED,
})
})
it('case #5', () => {
// current:
// [ ] a
// |--[ ] aa <- select
// | |--[ ] aaa
// | |--[ ] aab
// |--[ ] ab
// [ ] b
// expected result:
// [-] a
// |--[v] aa
// | |--[v] aaa
// | |--[v] aab
// |--[ ] ab
// [ ] b
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'aaa', 'aab' ])
expect(vm.forest.selectedNodeMap).toEqual({ aa: true, aaa: true, aab: true })
expect(vm.forest.checkedStateMap).toEqual({
a: INDETERMINATE,
aa: CHECKED,
aaa: CHECKED,
aab: CHECKED,
ab: UNCHECKED,
b: UNCHECKED,
})
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([])
expect(vm.forest.selectedNodeMap).toEqual({})
expect(vm.forest.checkedStateMap).toEqual({
a: UNCHECKED,
aa: UNCHECKED,
aaa: UNCHECKED,
aab: UNCHECKED,
ab: UNCHECKED,
b: UNCHECKED,
})
})
it('case #6', () => {
wrapper.setProps({
options: [ {
id: 'a',
label: 'a',
children: [],
} ],
})
// current:
// [ ] a <- select
// |-- (no children options)
// expected result:
// [v] a
// |-- (no children options)
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
expect(vm.forest.selectedNodeMap).toEqual({ a: true })
expect(vm.forest.checkedStateMap).toEqual({ a: CHECKED })
// current:
// [v] a <- deselect
// |-- (no children options)
// expected result:
// [ ] a
// |-- (no children options)
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([])
expect(vm.forest.selectedNodeMap).toEqual({})
expect(vm.forest.checkedStateMap).toEqual({ a: UNCHECKED })
})
})
describe('Disable Item Selection', () => {
describe('Single-select', () => {
it('basic', () => {
const wrapper = mount(Treeselect, {
propsData: {
multiple: false,
options: [ {
id: 'a',
label: 'a',
isDisabled: true,
}, {
id: 'b',
label: 'b',
} ],
value: 'a',
},
})
const { vm } = wrapper
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
})
it('nested', () => {
const wrapper = mount(Treeselect, {
propsData: {
multiple: false,
options: [ {
id: 'a',
label: 'a',
isDisabled: true,
children: [ {
id: 'aa',
label: 'aa',
} ],
}, {
id: 'b',
label: 'b',
children: [ {
id: 'ba',
label: 'ba',
isDisabled: true,
}, {
id: 'bb',
label: 'bb',
} ],
}, {
id: 'c',
label: 'c',
children: [ {
id: 'ca',
label: 'ca',
isDisabled: true,
}, {
id: 'cb',
label: 'cb',
children: [ {
id: 'cba',
label: 'cba',
isDisabled: true,
}, {
id: 'cbb',
label: 'cbb',
} ],
} ],
} ],
},
})
const { vm } = wrapper
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([])
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([])
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
vm.select(vm.forest.nodeMap.ba)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
vm.select(vm.forest.nodeMap.bb)
expect(vm.forest.selectedNodeIds).toEqual([ 'bb' ])
vm.select(vm.forest.nodeMap.c)
expect(vm.forest.selectedNodeIds).toEqual([ 'c' ])
vm.select(vm.forest.nodeMap.ca)
expect(vm.forest.selectedNodeIds).toEqual([ 'c' ])
vm.select(vm.forest.nodeMap.cb)
expect(vm.forest.selectedNodeIds).toEqual([ 'cb' ])
vm.select(vm.forest.nodeMap.cba)
expect(vm.forest.selectedNodeIds).toEqual([ 'cb' ])
vm.select(vm.forest.nodeMap.cbb)
expect(vm.forest.selectedNodeIds).toEqual([ 'cbb' ])
})
})
describe('Multi-select', () => {
describe('flat=false', () => {
it('basic', () => {
const wrapper = mount(Treeselect, {
propsData: {
options: [ {
id: 'a',
label: 'a',
isDisabled: true,
}, {
id: 'b',
label: 'b',
isDisabled: true,
}, {
id: 'c',
label: 'c',
} ],
multiple: true,
value: [ 'a' ],
},
})
const { vm } = wrapper
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
vm.select(vm.forest.nodeMap.c)
expect(vm.forest.selectedNodeIds).toEqual([ 'a', 'c' ])
})
it('disabled parent node', () => {
const wrapper = mount(Treeselect, {
propsData: {
options: [ {
id: 'a',
label: 'a',
isDisabled: true,
children: [ {
id: 'aa',
label: 'aa',
}, {
id: 'ab',
label: 'ab',
} ],
}, {
id: 'b',
label: 'b',
isDisabled: true,
children: [ {
id: 'ba',
label: 'ba',
}, {
id: 'bb',
label: 'bb',
} ],
}, {
id: 'c',
label: 'c',
isDisabled: true,
children: [ {
id: 'ca',
label: 'ca',
}, {
id: 'cb',
label: 'cb',
} ],
} ],
multiple: true,
value: [ 'ba', 'c' ],
},
})
const { vm } = wrapper
// current:
// { } a <- select
// |--{ } aa
// |--{ } ab
// {-} b
// |--{v} ba
// |--{ } bb
// {v} c
// |--{v} ca
// |--{v} cb
// expected result:
// { } a
// |--{ } aa
// |--{ } ab
// {-} b
// |--{v} ba
// |--{ } bb
// {v} c
// |--{v} ca
// |--{v} cb
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'ba', 'c', 'ca', 'cb' ])
// current:
// { } a
// |--{ } aa
// |--{ } ab
// {-} b <- deselect
// |--{v} ba
// |--{ } bb
// {v} c
// |--{v} ca
// |--{v} cb
// expected result:
// { } a
// |--{ } aa
// |--{ } ab
// {-} b
// |--{v} ba
// |--{ } bb
// {v} c
// |--{v} ca
// |--{v} cb
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'ba', 'c', 'ca', 'cb' ])
// current:
// { } a
// |--{ } aa
// |--{ } ab
// {-} b
// |--{v} ba
// |--{ } bb
// {v} c <- deselect
// |--{v} ca
// |--{v} cb
// expected result:
// { } a
// |--{ } aa
// |--{ } ab
// {-} b
// |--{v} ba
// |--{ } bb
// {v} c
// |--{v} ca
// |--{v} cb
vm.select(vm.forest.nodeMap.c)
expect(vm.forest.selectedNodeIds).toEqual([ 'ba', 'c', 'ca', 'cb' ])
})
it('disabled child node', () => {
const wrapper = mount(Treeselect, {
propsData: {
options: [ {
id: 'a',
label: 'a',
children: [ {
id: 'aa',
label: 'aa',
isDisabled: true,
}, {
id: 'ab',
label: 'ab',
} ],
}, {
id: 'b',
label: 'b',
children: [ {
id: 'ba',
label: 'ba',
isDisabled: true,
}, {
id: 'bb',
label: 'bb',
isDisabled: true,
} ],
}, {
id: 'c',
label: 'c',
children: [ {
id: 'ca',
label: 'ca',
isDisabled: true,
}, {
id: 'cb',
label: 'cb',
isDisabled: true,
} ],
}, {
id: 'd',
label: 'd',
children: [ {
id: 'da',
label: 'da',
isDisabled: true,
}, {
id: 'db',
label: 'db',
isDisabled: true,
}, {
id: 'dc',
label: 'dc',
} ],
} ],
multiple: true,
value: [ 'aa', 'b', 'da' ],
},
})
const { vm } = wrapper
// current:
// [-] a <- deselect
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab <- select
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [v] a
// |--{v} aa
// |--[v] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.ab)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
// current:
// [v] a <- deselect
// |--{v} aa
// |--[v] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab <- select
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.ab)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b <- deselect
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c <- select
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.c)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d <- deselect
// |--{v} da
// |--{ } db
// |--[ ] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.d)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc <- select
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[v] dc
vm.select(vm.forest.nodeMap.dc)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a', 'dc' ])
// current:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d <- deselect
// |--{v} da
// |--{ } db
// |--[v] dc
// expected result:
// [-] a
// |--{v} aa
// |--[ ] ab
// [v] b
// |--{v} ba
// |--{v} bb
// [ ] c
// |--{ } ca
// |--{ } cb
// [-] d
// |--{v} da
// |--{ } db
// |--[ ] dc
vm.select(vm.forest.nodeMap.d)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'b', 'ba', 'bb', 'da', 'ab', 'a' ])
})
it('nested', () => {
const wrapper = mount(Treeselect, {
propsData: {
options: [ {
id: 'a',
label: 'a',
children: [ {
id: 'aa',
label: 'aa',
isDisabled: true,
children: [ {
id: 'aaa',
label: 'aaa',
}, {
id: 'aab',
label: 'aab',
} ],
}, {
id: 'ab',
label: 'ab',
children: [ {
id: 'aba',
label: 'aba',
isDisabled: true,
}, {
id: 'abb',
label: 'abb',
} ],
} ],
} ],
multiple: true,
value: [ 'aa', 'aba' ],
},
})
const { vm } = wrapper
// current:
// [-] a <- deselect
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[-] ab
// | |--{v} aba
// | |--[ ] abb
// expected result:
// [-] a
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[-] ab
// | |--{v} aba
// | |--[ ] abb
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'aaa', 'aab', 'aba' ])
// current:
// [-] a
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[-] ab <- deselect
// | |--{v} aba
// | |--[ ] abb
// expected result:
// [-] a
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[-] ab
// | |--{v} aba
// | |--[ ] abb
vm.select(vm.forest.nodeMap.ab)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'aaa', 'aab', 'aba' ])
// current:
// [-] a
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[-] ab
// | |--{v} aba
// | |--[ ] abb <- select
// expected result:
// [v] a
// |--{v} aa
// | |--{v} aaa
// | |--{v} aab
// |--[v] ab
// | |--{v} aba
// | |--[v] abb
vm.select(vm.forest.nodeMap.abb)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa', 'aaa', 'aab', 'aba', 'abb', 'ab', 'a' ])
})
})
describe('flat=true', () => {
it('basic', () => {
const wrapper = mount(Treeselect, {
propsData: {
flat: true,
multiple: true,
options: [ {
id: 'a',
label: 'a',
isDisabled: true,
children: [ {
id: 'aa',
label: 'aa',
} ],
}, {
id: 'b',
label: 'b',
children: [ {
id: 'ba',
label: 'ba',
isDisabled: true,
}, {
id: 'bb',
label: 'bb',
} ],
} ],
},
})
const { vm } = wrapper
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([])
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'aa' ])
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([])
vm.select(vm.forest.nodeMap.b)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
vm.select(vm.forest.nodeMap.ba)
expect(vm.forest.selectedNodeIds).toEqual([ 'b' ])
vm.select(vm.forest.nodeMap.bb)
expect(vm.forest.selectedNodeIds).toEqual([ 'b', 'bb' ])
})
it('nested', () => {
const wrapper = mount(Treeselect, {
propsData: {
flat: true,
multiple: true,
options: [ {
id: 'a',
label: 'a',
children: [ {
id: 'aa',
label: 'aa',
isDisabled: true,
children: [ {
id: 'aaa',
label: 'aaa',
isDisabled: true,
}, {
id: 'aab',
label: 'aab',
} ],
} ],
} ],
},
})
const { vm } = wrapper
vm.select(vm.forest.nodeMap.a)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
vm.select(vm.forest.nodeMap.aa)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
vm.select(vm.forest.nodeMap.aaa)
expect(vm.forest.selectedNodeIds).toEqual([ 'a' ])
vm.select(vm.forest.nodeMap.aab)
expect(vm.forest.selectedNodeIds).toEqual([ 'a', 'aab' ])
})
})
})
})
|
"""
Orders in Number Fields
AUTHORS:
- William Stein and Robert Bradshaw (2007-09): initial version
EXAMPLES:
We define an absolute order::
sage: K.<a> = NumberField(x^2 + 1); O = K.order(2*a)
sage: O.basis()
[1, 2*a]
We compute a basis for an order in a relative extension
that is generated by 2 elements::
sage: K.<a,b> = NumberField([x^2 + 1, x^2 - 3]); O = K.order([3*a,2*b])
sage: O.basis()
[1, 3*a - 2*b, -6*b*a + 6, 3*a]
We compute a maximal order of a degree 10 field::
sage: K.<a> = NumberField((x+1)^10 + 17)
sage: K.maximal_order()
Maximal Order in Number Field in a with defining polynomial x^10 + 10*x^9 + 45*x^8 + 120*x^7 + 210*x^6 + 252*x^5 + 210*x^4 + 120*x^3 + 45*x^2 + 10*x + 18
We compute a suborder, which has index a power of 17 in the maximal order::
sage: O = K.order(17*a); O
Order in Number Field in a with defining polynomial x^10 + 10*x^9 + 45*x^8 + 120*x^7 + 210*x^6 + 252*x^5 + 210*x^4 + 120*x^3 + 45*x^2 + 10*x + 18
sage: m = O.index_in(K.maximal_order()); m
23453165165327788911665591944416226304630809183732482257
sage: factor(m)
17^45
"""
# ****************************************************************************
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import absolute_import
import six
from sage.misc.cachefunc import cached_method
from sage.rings.ring import IntegralDomain
from sage.structure.sequence import Sequence
from sage.rings.integer_ring import ZZ
from sage.structure.element import is_Element
from .number_field_element import OrderElement_absolute, OrderElement_relative
from .number_field_element_quadratic import OrderElement_quadratic
from sage.rings.monomials import monomials
from sage.libs.pari.all import pari
def is_NumberFieldOrder(R):
r"""
Return True if R is either an order in a number field or the ring `\ZZ` of integers.
EXAMPLES::
sage: from sage.rings.number_field.order import is_NumberFieldOrder
sage: is_NumberFieldOrder(NumberField(x^2+1,'a').maximal_order())
True
sage: is_NumberFieldOrder(ZZ)
True
sage: is_NumberFieldOrder(QQ)
False
sage: is_NumberFieldOrder(45)
False
"""
return isinstance(R, Order) or R == ZZ
def EquationOrder(f, names, **kwds):
r"""
Return the equation order generated by a root of the irreducible
polynomial `f`, or by roots of the polynomials in a list `f` (to construct
a relative equation order).
IMPORTANT: Note that the generators of the returned order need
*not* be roots of `f`, since the generators of an order are -- in
Sage -- module generators.
EXAMPLES::
sage: O.<a,b> = EquationOrder([x^2+1, x^2+2])
sage: O
Relative Order in Number Field in a with defining polynomial x^2 + 1 over its base field
sage: O.0
-b*a - 1
sage: O.1
-3*a + 2*b
Of course the input polynomial must be integral::
sage: R = EquationOrder(x^3 + x + 1/3, 'alpha'); R
Traceback (most recent call last):
...
ValueError: each generator must be integral
sage: R = EquationOrder( [x^3 + x + 1, x^2 + 1/2], 'alpha'); R
Traceback (most recent call last):
...
ValueError: each generator must be integral
"""
from .number_field import NumberField
R = ZZ['x']
if isinstance(f, (list, tuple)):
for g in f:
try:
R(g)
except TypeError:
raise ValueError('each generator must be integral')
else:
try:
R(f)
except TypeError:
raise ValueError('each generator must be integral')
K = NumberField(f, names=names, **kwds)
return K.order(K.gens())
class Order(IntegralDomain):
r"""
An order in a number field.
An order is a subring of the number field that has `\ZZ`-rank equal
to the degree of the number field over `\QQ`.
EXAMPLES::
sage: K.<theta> = NumberField(x^4 + x + 17)
sage: K.maximal_order()
Maximal Order in Number Field in theta with defining polynomial x^4 + x + 17
sage: R = K.order(17*theta); R
Order in Number Field in theta with defining polynomial x^4 + x + 17
sage: R.basis()
[1, 17*theta, 289*theta^2, 4913*theta^3]
sage: R = K.order(17*theta, 13*theta); R
Order in Number Field in theta with defining polynomial x^4 + x + 17
sage: R.basis()
[1, theta, theta^2, theta^3]
sage: R = K.order([34*theta, 17*theta + 17]); R
Order in Number Field in theta with defining polynomial x^4 + x + 17
sage: K.<b> = NumberField(x^4 + x^2 + 2)
sage: (b^2).charpoly().factor()
(x^2 + x + 2)^2
sage: K.order(b^2)
Traceback (most recent call last):
...
ValueError: the rank of the span of gens is wrong
"""
def __init__(self, K, is_maximal):
"""
This is called when creating an order to set the ambient field.
EXAMPLES::
sage: k = CyclotomicField(5)
sage: k.maximal_order()
Maximal Order in Cyclotomic Field of order 5 and degree 4
TESTS::
sage: k.<alg> = NumberField(x^7+3*x+1, embedding=CC(0,1))
sage: O = k.order(alg)
sage: ordelt = O(alg)
sage: CC(ordelt)
0.0535229072603327 + 1.20934552493846*I
"""
self._K = K
self._is_maximal = is_maximal
IntegralDomain.__init__(self, ZZ, names=K.variable_names(),
normalize=False)
self._populate_coercion_lists_(embedding=self.number_field())
def fractional_ideal(self, *args, **kwds):
"""
Return the fractional ideal of the maximal order with given
generators.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 2)
sage: R = K.maximal_order()
sage: R.fractional_ideal(2/3 + 7*a, a)
Fractional ideal (1/3*a)
"""
return self.number_field().fractional_ideal(*args, **kwds)
def ideal(self, *args, **kwds):
"""
Return the integral ideal with given generators.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 7)
sage: R = K.maximal_order()
sage: R.ideal(2/3 + 7*a, a)
Traceback (most recent call last):
...
ValueError: ideal must be integral; use fractional_ideal to create a non-integral ideal.
sage: R.ideal(7*a, 77 + 28*a)
Fractional ideal (7)
sage: R = K.order(4*a)
sage: R.ideal(8)
Traceback (most recent call last):
...
NotImplementedError: ideals of non-maximal orders not yet supported.
This function is called implicitly below::
sage: R = EquationOrder(x^2 + 2, 'a'); R
Order in Number Field in a with defining polynomial x^2 + 2
sage: (3,15)*R
Fractional ideal (3)
The zero ideal is handled properly::
sage: R.ideal(0)
Ideal (0) of Number Field in a with defining polynomial x^2 + 2
"""
if not self.is_maximal():
raise NotImplementedError("ideals of non-maximal orders not yet supported.")
I = self.number_field().ideal(*args, **kwds)
if not I.is_integral():
raise ValueError("ideal must be integral; use fractional_ideal to create a non-integral ideal.")
return I
def _coerce_map_from_(self, R):
"""
Orders currently only have coerce maps from the integers.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5077)
sage: Ok = k.maximal_order()
sage: Ok.has_coerce_map_from(k) #indirect doctest
False
sage: Ok.has_coerce_map_from(ZZ)
True
"""
return R is ZZ or R in six.integer_types
def __mul__(self, right):
"""
Create an ideal in this order using the notation ``Ok*gens``
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5077); G = k.class_group(); G
Class group of order 22 with structure C22 of Number Field in a with defining polynomial x^2 + 5077
sage: G.0 ^ -9
Fractional ideal class (11, a + 7)
sage: Ok = k.maximal_order(); Ok
Maximal Order in Number Field in a with defining polynomial x^2 + 5077
sage: Ok * (11, a + 7)
Fractional ideal (11, a + 7)
sage: (11, a + 7) * Ok
Fractional ideal (11, a + 7)
"""
if self.is_maximal():
return self._K.ideal(right)
raise TypeError
def __rmul__(self, left):
"""
Create an ideal in this order using the notation ``gens*Ok``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 431); G = k.class_group(); G
Class group of order 21 with structure C21 of Number Field in a with defining polynomial x^2 + 431
sage: G.0 # random output
Fractional ideal class (6, 1/2*a + 11/2)
sage: Ok = k.maximal_order(); Ok
Maximal Order in Number Field in a with defining polynomial x^2 + 431
sage: (6, 1/2*a + 11/2)*Ok # random output
Fractional ideal (6, 1/2*a + 11/2)
sage: 17*Ok
Fractional ideal (17)
"""
return self * left
def is_maximal(self):
"""
Return ``True`` if this is the maximal order.
EXAMPLES::
sage: k.<i> = NumberField(x^2 + 1)
sage: O3 = k.order(3*i); O5 = k.order(5*i); Ok = k.maximal_order(); Osum = O3 + O5
sage: Osum.is_maximal()
True
sage: O3.is_maximal()
False
sage: O5.is_maximal()
False
sage: Ok.is_maximal()
True
An example involving a relative order::
sage: K.<a,b> = NumberField([x^2 + 1, x^2 - 3]); O = K.order([3*a,2*b]); O
Relative Order in Number Field in a with defining polynomial x^2 + 1 over its base field
sage: O.is_maximal()
False
"""
if self._is_maximal is None:
self._is_maximal = (self.absolute_discriminant() == self._K.absolute_discriminant())
return self._is_maximal
def is_field(self, proof=True):
r"""
Return ``False`` (because an order is never a field).
EXAMPLES::
sage: L.<alpha> = NumberField(x**4 - x**2 + 7)
sage: O = L.maximal_order() ; O.is_field()
False
sage: CyclotomicField(12).ring_of_integers().is_field()
False
"""
return False
def is_noetherian(self):
r"""
Return ``True`` (because orders are always Noetherian).
EXAMPLES::
sage: L.<alpha> = NumberField(x**4 - x**2 + 7)
sage: O = L.maximal_order() ; O.is_noetherian()
True
sage: E.<w> = NumberField(x^2 - x + 2)
sage: OE = E.ring_of_integers(); OE.is_noetherian()
True
"""
return True
def is_integrally_closed(self):
"""
Return ``True`` if this ring is integrally closed, i.e., is equal
to the maximal order.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 189*x + 394)
sage: R = K.order(2*a)
sage: R.is_integrally_closed()
False
sage: R
Order in Number Field in a with defining polynomial x^2 + 189*x + 394
sage: S = K.maximal_order(); S
Maximal Order in Number Field in a with defining polynomial x^2 + 189*x + 394
sage: S.is_integrally_closed()
True
"""
return self.is_maximal()
def krull_dimension(self):
"""
Return the Krull dimension of this order, which is 1.
EXAMPLES::
sage: K.<a> = QuadraticField(5)
sage: OK = K.maximal_order()
sage: OK.krull_dimension()
1
sage: O2 = K.order(2*a)
sage: O2.krull_dimension()
1
"""
return ZZ(1)
def integral_closure(self):
"""
Return the integral closure of this order.
EXAMPLES::
sage: K.<a> = QuadraticField(5)
sage: O2 = K.order(2*a); O2
Order in Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790?
sage: O2.integral_closure()
Maximal Order in Number Field in a with defining polynomial x^2 - 5 with a = 2.236067977499790?
sage: OK = K.maximal_order()
sage: OK is OK.integral_closure()
True
"""
if self.is_maximal():
return self
else:
return self.number_field().maximal_order()
def gen(self, i):
r"""
Return the `i`-th module generator of this order.
EXAMPLES::
sage: K.<c> = NumberField(x^3 + 2*x + 17)
sage: O = K.maximal_order(); O
Maximal Order in Number Field in c with defining polynomial x^3 + 2*x + 17
sage: O.basis()
[1, c, c^2]
sage: O.gen(1)
c
sage: O.gen(2)
c^2
sage: O.gen(5)
Traceback (most recent call last):
...
IndexError: no 5th generator
sage: O.gen(-1)
Traceback (most recent call last):
...
IndexError: no -1th generator
"""
b = self.basis()
if i < 0 or i >= len(b):
raise IndexError("no %sth generator" % i)
return self.basis()[i]
def ngens(self):
"""
Return the number of module generators of this order.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + x^2 - 2*x + 8)
sage: O = K.maximal_order()
sage: O.ngens()
3
"""
return self.absolute_degree()
def basis(self): # this must be defined in derived class
r"""
Return a basis over `\ZZ` of this order.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + x^2 - 16*x + 16)
sage: O = K.maximal_order(); O
Maximal Order in Number Field in a with defining polynomial x^3 + x^2 - 16*x + 16
sage: O.basis()
[1, 1/4*a^2 + 1/4*a, a^2]
"""
raise NotImplementedError
def coordinates(self, x):
r"""
Return the coordinate vector of `x` with respect to this order.
INPUT:
- ``x`` -- an element of the number field of this order.
OUTPUT:
A vector of length `n` (the degree of the field) giving
the coordinates of `x` with respect to the integral basis
of the order. In general this will be a vector of
rationals; it will consist of integers if and only if `x`
is in the order.
AUTHOR: John Cremona 2008-11-15
ALGORITHM:
Uses linear algebra. The change-of-basis matrix is
cached. Provides simpler implementations for
``_contains_()``, ``is_integral()`` and ``smallest_integer()``.
EXAMPLES::
sage: K.<i> = QuadraticField(-1)
sage: OK = K.ring_of_integers()
sage: OK_basis = OK.basis(); OK_basis
[1, i]
sage: a = 23-14*i
sage: acoords = OK.coordinates(a); acoords
(23, -14)
sage: sum([OK_basis[j]*acoords[j] for j in range(2)]) == a
True
sage: OK.coordinates((120+340*i)/8)
(15, 85/2)
sage: O = K.order(3*i)
sage: O.is_maximal()
False
sage: O.index_in(OK)
3
sage: acoords = O.coordinates(a); acoords
(23, -14/3)
sage: sum([O.basis()[j]*acoords[j] for j in range(2)]) == a
True
"""
K = self.number_field()
V, from_V, to_V = K.absolute_vector_space()
try:
M = self.__basis_matrix_inverse
except AttributeError:
from sage.matrix.constructor import Matrix
self.__basis_matrix_inverse = Matrix([to_V(b) for b in self.basis()]).inverse()
M = self.__basis_matrix_inverse
return to_V(K(x))*M
def free_module(self):
r"""
Return the free `\ZZ`-module contained in the vector space
associated to the ambient number field, that corresponds
to this order.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + x^2 - 2*x + 8)
sage: O = K.maximal_order(); O.basis()
[1, 1/2*a^2 + 1/2*a, a^2]
sage: O.free_module()
Free module of degree 3 and rank 3 over Integer Ring
User basis matrix:
[ 1 0 0]
[ 0 1/2 1/2]
[ 0 0 1]
An example in a relative extension. Notice that the module is
a `\ZZ`-module in the absolute_field associated to the relative
field::
sage: K.<a,b> = NumberField([x^2 + 1, x^2 + 2])
sage: O = K.maximal_order(); O.basis()
[(-3/2*b - 5)*a + 7/2*b - 2, -3*a + 2*b, -2*b*a - 3, -7*a + 5*b]
sage: O.free_module()
Free module of degree 4 and rank 4 over Integer Ring
User basis matrix:
[1/4 1/4 3/4 3/4]
[ 0 1/2 0 1/2]
[ 0 0 1 0]
[ 0 0 0 1]
"""
try:
return self.__free_module
except AttributeError:
pass
from .number_field_ideal import basis_to_module
M = basis_to_module(self.basis(), self.number_field())
self.__free_module = M
return M
@cached_method
def ring_generators(self):
"""
Return generators for self as a ring.
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: O = K.maximal_order(); O
Gaussian Integers in Number Field in i with defining polynomial x^2 + 1
sage: O.ring_generators()
[i]
This is an example where 2 generators are required (because 2 is an essential
discriminant divisor).::
sage: K.<a> = NumberField(x^3 + x^2 - 2*x + 8)
sage: O = K.maximal_order(); O.basis()
[1, 1/2*a^2 + 1/2*a, a^2]
sage: O.ring_generators()
[1/2*a^2 + 1/2*a, a^2]
An example in a relative number field::
sage: K.<a, b> = NumberField([x^2 + x + 1, x^3 - 3])
sage: O = K.maximal_order()
sage: O.ring_generators()
[(-5/3*b^2 + 3*b - 2)*a - 7/3*b^2 + b + 3, (-5*b^2 - 9)*a - 5*b^2 - b, (-6*b^2 - 11)*a - 6*b^2 - b]
"""
K = self._K
n = []
V, from_V, to_V = self._K.absolute_vector_space()
A = ZZ**K.absolute_degree()
remaining = [x for x in self.basis() if x != 1]
gens = []
while remaining:
g = remaining.pop(0)
gens.append(g)
n.append(g.absolute_minpoly().degree())
W = A.span([to_V(x) for x in monomials(gens, n)])
remaining = [x for x in remaining if not to_V(x) in W]
return Sequence(gens,immutable=True)
@cached_method
def _defining_names(self):
"""
Return the generators of the ambient number field, but with
this order as parent.
EXAMPLES::
sage: B.<z> = EquationOrder(x^2 + 3)
sage: B._defining_names()
(z,)
For relative extensions::
sage: O.<a,b> = EquationOrder([x^2 + 1, x^2 + 2])
sage: O._defining_names()
(a, b)
"""
gens = self.number_field().gens()
return tuple(self(g) for g in gens)
def zeta(self, n=2, all=False):
r"""
Return a primitive n-th root of unity in this order, if it
contains one. If all is True, return all of them.
EXAMPLES::
sage: F.<alpha> = NumberField(x**2+3)
sage: F.ring_of_integers().zeta(6)
1/2*alpha + 1/2
sage: O = F.order([3*alpha])
sage: O.zeta(3)
Traceback (most recent call last):
...
ArithmeticError: There are no 3rd roots of unity in self.
"""
roots_in_field = self.number_field().zeta(n, True)
roots_in_self = [self(x) for x in roots_in_field if x in self]
if not roots_in_self:
if all:
return []
else:
raise ArithmeticError("There are no %s roots of unity in self." % n.ordinal_str())
if all:
return roots_in_self
else:
return roots_in_self[0]
def number_field(self):
"""
Return the number field of this order, which is the ambient
number field that this order is embedded in.
EXAMPLES::
sage: K.<b> = NumberField(x^4 + x^2 + 2)
sage: O = K.order(2*b); O
Order in Number Field in b with defining polynomial x^4 + x^2 + 2
sage: O.basis()
[1, 2*b, 4*b^2, 8*b^3]
sage: O.number_field()
Number Field in b with defining polynomial x^4 + x^2 + 2
sage: O.number_field() is K
True
"""
return self._K
def ambient(self):
r"""
Return the ambient number field that contains self.
This is the same as ``self.number_field()`` and
``self.fraction_field()``
EXAMPLES::
sage: k.<z> = NumberField(x^2 - 389)
sage: o = k.order(389*z + 1)
sage: o
Order in Number Field in z with defining polynomial x^2 - 389
sage: o.basis()
[1, 389*z]
sage: o.ambient()
Number Field in z with defining polynomial x^2 - 389
"""
return self._K
def residue_field(self, prime, names=None, check=False):
"""
Return the residue field of this order at a given prime, i.e., `O/pO`.
INPUT:
- ``prime`` -- a prime ideal of the maximal order in this number field.
- ``names`` -- the name of the variable in the residue field
- ``check`` -- whether or not to check the primality of prime.
OUTPUT:
The residue field at this prime.
EXAMPLES::
sage: R.<x> = QQ[]
sage: K.<a> = NumberField(x^4+3*x^2-17)
sage: P = K.ideal(61).factor()[0][0]
sage: OK = K.maximal_order()
sage: OK.residue_field(P)
Residue field in abar of Fractional ideal (61, a^2 + 30)
sage: Fp.<b> = OK.residue_field(P)
sage: Fp
Residue field in b of Fractional ideal (61, a^2 + 30)
"""
if self.is_maximal():
return self.number_field().residue_field(prime, names, check)
raise NotImplementedError("Residue fields of non-maximal orders "
"are not yet supported.")
def fraction_field(self):
"""
Return the fraction field of this order, which is the
ambient number field.
EXAMPLES::
sage: K.<b> = NumberField(x^4 + 17*x^2 + 17)
sage: O = K.order(17*b); O
Order in Number Field in b with defining polynomial x^4 + 17*x^2 + 17
sage: O.fraction_field()
Number Field in b with defining polynomial x^4 + 17*x^2 + 17
"""
return self._K
def degree(self):
r"""
Return the degree of this order, which is the rank of this order as a
`\ZZ`-module.
EXAMPLES::
sage: k.<c> = NumberField(x^3 + x^2 - 2*x+8)
sage: o = k.maximal_order()
sage: o.degree()
3
sage: o.rank()
3
"""
return self._K.degree()
def rank(self):
r"""
Return the rank of this order, which is the rank of the underlying
`\ZZ`-module, or the degree of the ambient number field that contains
this order.
This is a synonym for ``degree()``.
EXAMPLES::
sage: k.<c> = NumberField(x^5 + x^2 + 1)
sage: o = k.maximal_order(); o
Maximal Order in Number Field in c with defining polynomial x^5 + x^2 + 1
sage: o.rank()
5
"""
return self.degree()
def class_number(self, proof=None):
r"""
Return the class number of this order.
EXAMPLES::
sage: ZZ[2^(1/3)].class_number()
1
sage: QQ[sqrt(-23)].maximal_order().class_number()
3
sage: ZZ[120*sqrt(-23)].class_number()
288
Note that non-maximal orders are only supported in quadratic fields::
sage: ZZ[120*sqrt(-23)].class_number()
288
sage: ZZ[100*sqrt(3)].class_number()
4
sage: ZZ[11*2^(1/3)].class_number()
Traceback (most recent call last):
...
NotImplementedError: computation of class numbers of non-maximal orders not in quadratic fields is not implemented
"""
if not self.is_maximal():
K = self.number_field()
if K.degree() != 2:
raise NotImplementedError("computation of class numbers of non-maximal orders not in quadratic fields is not implemented")
return ZZ(pari.qfbclassno(self.discriminant()))
return self.number_field().class_number(proof=proof)
def class_group(self, proof=None, names='c'):
r"""
Return the class group of this order.
(Currently only implemented for the maximal order.)
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5077)
sage: O = k.maximal_order(); O
Maximal Order in Number Field in a with defining polynomial x^2 + 5077
sage: O.class_group()
Class group of order 22 with structure C22 of Number Field in a with defining polynomial x^2 + 5077
"""
if self.is_maximal():
return self.number_field().class_group(proof=proof, names=names)
else:
raise NotImplementedError
def is_suborder(self, other):
"""
Return True if self and other are both orders in the
same ambient number field and self is a subset of other.
EXAMPLES::
sage: W.<i> = NumberField(x^2 + 1)
sage: O5 = W.order(5*i)
sage: O10 = W.order(10*i)
sage: O15 = W.order(15*i)
sage: O15.is_suborder(O5)
True
sage: O5.is_suborder(O15)
False
sage: O10.is_suborder(O15)
False
We create another isomorphic but different field::
sage: W2.<j> = NumberField(x^2 + 1)
sage: P5 = W2.order(5*j)
This is False because the ambient number fields are not equal.::
sage: O5.is_suborder(P5)
False
We create a field that contains (in no natural way!) W,
and of course again is_suborder returns False::
sage: K.<z> = NumberField(x^4 + 1)
sage: M = K.order(5*z)
sage: O5.is_suborder(M)
False
"""
if not isinstance(other, Order):
return False
if other.number_field() != self.number_field():
return False
return self.module().is_submodule(other.module())
def __eq__(self, other):
r"""
Check whether the order ``self`` is equal to ``other``.
.. NOTE::
This method tests equality only. To check whether ``self`` is
contained in ``other``, use ``self.is_suborder(other)`` instead.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: O1 = K.order(a); O1
Order in Number Field in a with defining polynomial x^3 + 2
sage: O2 = K.order(a^2); O2
Order in Number Field in a with defining polynomial x^3 + 2
sage: O1 == O2
False
sage: O1 == K
False
sage: K == O1
False
Here is how to check for inclusion::
sage: O2.is_suborder(O1)
True
"""
if not isinstance(other, Order):
return False
if self._K != other._K:
return False
if self is other:
return True
return self._module_rep == other._module_rep
def __ne__(self, other):
"""
Check whether the order ``self`` is not equal to ``other``.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: O1 = K.order(a); O1
Order in Number Field in a with defining polynomial x^3 + 2
sage: O2 = K.order(a^2); O2
Order in Number Field in a with defining polynomial x^3 + 2
sage: O1 != O2
True
"""
return not (self == other)
def __hash__(self):
"""
Compute the hash of ``self``.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: L.<b> = NumberField(x^3 + 3)
sage: O1 = K.order(a)
sage: hash(O1) == hash(K.order(a))
True
sage: hash(O1) == hash(K.order(a^2))
False
sage: hash(O1) == hash(L.order(b))
False
"""
return hash((self._K, self._module_rep))
def random_element(self, *args, **kwds):
r"""
Return a random element of this order.
INPUT:
- ``args``, ``kwds`` -- parameters passed to the random
integer function. See the documentation for
``ZZ.random_element()`` for details.
OUTPUT:
A random element of this order, computed as a random
`\ZZ`-linear combination of the basis.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: OK = K.ring_of_integers()
sage: OK.random_element() # random output
-2*a^2 - a - 2
sage: OK.random_element(distribution="uniform") # random output
-a^2 - 1
sage: OK.random_element(-10,10) # random output
-10*a^2 - 9*a - 2
sage: K.order(a).random_element() # random output
a^2 - a - 3
::
sage: K.<z> = CyclotomicField(17)
sage: OK = K.ring_of_integers()
sage: OK.random_element() # random output
z^15 - z^11 - z^10 - 4*z^9 + z^8 + 2*z^7 + z^6 - 2*z^5 - z^4 - 445*z^3 - 2*z^2 - 15*z - 2
sage: OK.random_element().is_integral()
True
sage: OK.random_element().parent() is OK
True
A relative example::
sage: K.<a, b> = NumberField([x^2 + 2, x^2 + 1000*x + 1])
sage: OK = K.ring_of_integers()
sage: OK.random_element() # random output
(42221/2*b + 61/2)*a + 7037384*b + 7041
sage: OK.random_element().is_integral() # random output
True
sage: OK.random_element().parent() is OK # random output
True
An example in a non-maximal order::
sage: K.<a> = QuadraticField(-3)
sage: R = K.ring_of_integers()
sage: A = K.order(a)
sage: A.index_in(R)
2
sage: R.random_element() # random output
-39/2*a - 1/2
sage: A.random_element() # random output
2*a - 1
sage: A.random_element().is_integral()
True
sage: A.random_element().parent() is A
True
"""
return sum([ZZ.random_element(*args, **kwds)*a for a in self.basis()])
def absolute_degree(self):
r"""
Return the absolute degree of this order, i.e., the rank of this order as a `\ZZ`-module.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: O = K.maximal_order()
sage: O.absolute_degree()
3
"""
return self.number_field().absolute_degree()
def valuation(self, p):
r"""
Return the ``p``-adic valuation on this order.
EXAMPLES:
The valuation can be specified with an integer ``prime`` that is
completely ramified or unramified::
sage: K.<a> = NumberField(x^2 + 1)
sage: O = K.order(2*a)
sage: valuations.pAdicValuation(O, 2)
2-adic valuation
sage: GaussianIntegers().valuation(2)
2-adic valuation
::
sage: GaussianIntegers().valuation(3)
3-adic valuation
A ``prime`` that factors into pairwise distinct factors, results in an error::
sage: GaussianIntegers().valuation(5)
Traceback (most recent call last):
...
ValueError: The valuation Gauss valuation induced by 5-adic valuation does not approximate a unique extension of 5-adic valuation with respect to x^2 + 1
The valuation can also be selected by giving a valuation on the base
ring that extends uniquely::
sage: CyclotomicField(5).ring_of_integers().valuation(ZZ.valuation(5))
5-adic valuation
When the extension is not unique, this does not work::
sage: GaussianIntegers().valuation(ZZ.valuation(5))
Traceback (most recent call last):
...
ValueError: The valuation Gauss valuation induced by 5-adic valuation does not approximate a unique extension of 5-adic valuation with respect to x^2 + 1
If the fraction field is of the form `K[x]/(G)`, you can specify a
valuation by providing a discrete pseudo-valuation on `K[x]` which
sends `G` to infinity::
sage: R.<x> = QQ[]
sage: v = GaussianIntegers().valuation(GaussValuation(R, QQ.valuation(5)).augmentation(x + 2, infinity))
sage: w = GaussianIntegers().valuation(GaussValuation(R, QQ.valuation(5)).augmentation(x + 1/2, infinity))
sage: v == w
False
.. SEEALSO::
:meth:`NumberField_generic.valuation() <sage.rings.number_field.number_field.NumberField_generic.valuation>`,
:meth:`pAdicGeneric.valuation() <sage.rings.padics.padic_generic.pAdicGeneric.valuation>`
"""
from sage.rings.padics.padic_valuation import pAdicValuation
return pAdicValuation(self, p)
def some_elements(self):
"""
Return a list of elements of the given order.
EXAMPLES::
sage: G = GaussianIntegers(); G
Gaussian Integers in Number Field in I with defining polynomial x^2 + 1 with I = 1*I
sage: G.some_elements()
[1, I, 2*I, -1, 0, -I, 2, 4*I, -2, -2*I, -4]
sage: R.<t> = QQ[]
sage: K.<a> = QQ.extension(t^3 - 2); K
Number Field in a with defining polynomial t^3 - 2
sage: Z = K.ring_of_integers(); Z
Maximal Order in Number Field in a with defining polynomial t^3 - 2
sage: Z.some_elements()
[1, a, a^2, 2*a, 0, 2, a^2 + 2*a + 1, ..., a^2 + 1, 2*a^2 + 2, a^2 + 2*a, 4*a^2 + 4]
TESTS:
This also works for trivial extensions::
sage: R.<t> = QQ[]
sage: K.<a> = QQ.extension(t); K
Number Field in a with defining polynomial t
sage: Z = K.ring_of_integers(); Z
Maximal Order in Number Field in a with defining polynomial t
sage: Z.some_elements()
[1, 0, 2, -1, -2, 4]
"""
elements = list(self.basis())
for a in self.fraction_field().some_elements():
if a in self and a not in elements:
elements.append(self(a))
return elements
## def absolute_polynomial(self):
## """
## Return the absolute polynomial of this order, which is just the absolute polynomial of the number field.
## EXAMPLES::
## sage: K.<a, b> = NumberField([x^2 + 1, x^3 + x + 1]); OK = K.maximal_order()
## Traceback (most recent call last):
## ...
## NotImplementedError
## #sage: OK.absolute_polynomial()
## #x^6 + 5*x^4 - 2*x^3 + 4*x^2 + 4*x + 1
## """
## return self.number_field().absolute_polynomial()
## def polynomial(self):
## """
## Return the polynomial defining the number field that contains self.
## """
## return self.number_field().polynomial()
## def polynomial_ntl(self):
## """
## Return defining polynomial of the parent number field as a
## pair, an ntl polynomial and a denominator.
## This is used mainly to implement some internal arithmetic.
## EXAMPLES::
## sage: NumberField(x^2 + 1,'a').maximal_order().polynomial_ntl()
## ([1 0 1], 1)
## """
## return self.number_field().polynomial_ntl()
class AbsoluteOrder(Order):
def __init__(self, K, module_rep, is_maximal=None, check=True):
"""
EXAMPLES::
sage: from sage.rings.number_field.order import *
sage: x = polygen(QQ)
sage: K.<a> = NumberField(x^3+2)
sage: V, from_v, to_v = K.vector_space()
sage: M = span([to_v(a^2), to_v(a), to_v(1)],ZZ)
sage: O = AbsoluteOrder(K, M); O
Order in Number Field in a with defining polynomial x^3 + 2
sage: M = span([to_v(a^2), to_v(a), to_v(2)],ZZ)
sage: O = AbsoluteOrder(K, M); O
Traceback (most recent call last):
...
ValueError: 1 is not in the span of the module, hence not an order.
sage: loads(dumps(O)) == O
True
Quadratic elements have a special optimized type:
"""
if K.degree() == 2:
self._element_type = OrderElement_quadratic
# adding the following attribute makes the comparison of elements
# faster.
self._standard_embedding = K._standard_embedding
else:
self._element_type = OrderElement_absolute
self._module_rep = module_rep
V, from_v, to_v = K.vector_space()
Order.__init__(self, K, is_maximal=is_maximal)
if check:
if not K.is_absolute():
raise ValueError("AbsoluteOrder must be called with an absolute number field.")
if to_v(1) not in module_rep:
raise ValueError("1 is not in the span of the module, hence not an order.")
if module_rep.rank() != self._K.degree():
raise ValueError("the module must have full rank.")
def _element_constructor_(self, x):
r"""
Coerce ``x`` into this order.
EXAMPLES::
sage: x = polygen(QQ)
sage: k.<z> = NumberField(x^2 - 389)
sage: m = k.order(3*z); m
Order in Number Field in z with defining polynomial x^2 - 389
sage: m(6*z)
6*z
sage: k(m(6*z))
6*z
If ``x`` is a list or tuple the element constructed is the
linear combination of the generators with these coefficients
(see :trac:`10017`)::
sage: x = polygen(QQ)
sage: K.<a> = NumberField(x^3-10)
sage: ZK = K.ring_of_integers()
sage: ZK.basis()
[1/3*a^2 + 1/3*a + 1/3, a, a^2]
sage: ZK([1,2,3])
10/3*a^2 + 7/3*a + 1/3
sage: K([1,2,3])
3*a^2 + 2*a + 1
"""
if isinstance(x, (tuple, list)):
x = sum(xi*gi for xi,gi in zip(x,self.gens()))
if not is_Element(x) or x.parent() is not self._K:
x = self._K(x)
V, _, embedding = self._K.vector_space()
if not embedding(x) in self._module_rep:
raise TypeError("Not an element of the order.")
return self._element_type(self, x)
def __reduce__(self):
r"""
Used in pickling.
We test that :trac:`6462` is fixed. This used to fail because
pickling the order also pickled the cached results of the
``basis`` call, which were elements of the order.
::
sage: L.<a> = QuadraticField(-1)
sage: OL = L.maximal_order()
sage: _ = OL.basis()
sage: loads(dumps(OL)) == OL
True
"""
return (AbsoluteOrder, (self.number_field(), self.free_module(), self._is_maximal, False))
def __add__(left, right):
"""
Add two orders.
EXAMPLES::
sage: K.<a> = NumberField(polygen(QQ,'z')^3 - 2)
sage: O6 = K.order(6*a); O6
Order in Number Field in a with defining polynomial z^3 - 2
sage: O6.basis()
[1, 6*a, 36*a^2]
sage: O15 = K.order(15*a^2); O15.basis()
[1, 450*a, 15*a^2]
sage: R = O6 + O15; R
Order in Number Field in a with defining polynomial z^3 - 2
sage: R.basis()
[1, 6*a, 3*a^2]
"""
if not isinstance(left, AbsoluteOrder) or not isinstance(right, AbsoluteOrder):
raise NotImplementedError
if left.number_field() != right.number_field():
raise TypeError("Number fields don't match.")
if left._is_maximal:
return left
elif right._is_maximal:
return right
return AbsoluteOrder(left._K, left._module_rep + right._module_rep, None)
def __and__(left, right):
"""
Intersect orders.
EXAMPLES::
sage: K.<i> = QuadraticField(-1)
sage: O3 = K.order(3*i); O5 = K.order(5*i)
sage: R = O3 & O5; R
Order in Number Field in i with defining polynomial x^2 + 1 with i = 1*I
sage: R.basis()
[1, 15*i]
sage: O3.intersection(O5).basis()
[1, 15*i]
"""
if not isinstance(left, AbsoluteOrder) or not isinstance(right, AbsoluteOrder):
raise NotImplementedError
if left.number_field() != right.number_field():
raise TypeError("Number fields don't match.")
return AbsoluteOrder(left._K, left._module_rep.intersection(right._module_rep), False)
def _magma_init_(self, magma):
"""
Return Magma version of this absolute order.
INPUT:
- ``magma`` -- a magma interpreter
OUTPUT:
a MagmaElement, the magma version of this absolute order
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2) # optional - magma
sage: magma(K.maximal_order()) # optional - magma
Equation Order with defining polynomial x^3 + 2 over its ground order
_magma_init_ was called implicitly by the above call::
sage: K.maximal_order()._magma_init_(magma) # optional - magma
'Order([(_sage_[...]![1, 0, 0]),(_sage_[...]![0, 1, 0]),(_sage_[...]![0, 0, 1])])'
"""
K = self.number_field()
v = (K(a)._magma_init_(magma) for a in self.basis())
return 'Order([{}])'.format(','.join(v))
def discriminant(self):
"""
Return the discriminant of this order.
EXAMPLES::
sage: K.<a> = NumberField(x^8 + x^3 - 13*x + 26)
sage: O = K.maximal_order()
sage: factor(O.discriminant())
3 * 11 * 13^2 * 613 * 1575917857
sage: L = K.order(13*a^2)
sage: factor(L.discriminant())
3^3 * 5^2 * 11 * 13^60 * 613 * 733^2 * 1575917857
sage: factor(L.index_in(O))
3 * 5 * 13^29 * 733
sage: L.discriminant() / O.discriminant() == L.index_in(O)^2
True
"""
try:
return self.__discriminant
except AttributeError:
if self._is_maximal:
D = self._K.discriminant()
else:
D = self._K.discriminant(self.basis())
self.__discriminant = D
return D
absolute_discriminant = discriminant
def change_names(self, names):
"""
Return a new order isomorphic to this one in the number field with
given variable names.
EXAMPLES::
sage: R = EquationOrder(x^3 + x + 1, 'alpha'); R
Order in Number Field in alpha with defining polynomial x^3 + x + 1
sage: R.basis()
[1, alpha, alpha^2]
sage: S = R.change_names('gamma'); S
Order in Number Field in gamma with defining polynomial x^3 + x + 1
sage: S.basis()
[1, gamma, gamma^2]
"""
K = self.number_field().change_names(names)
_, to_K = K.structure()
B = [to_K(a) for a in self.basis()]
return K.order(B, check_is_integral=False, check_rank=False, allow_subfield=True)
def index_in(self, other):
"""
Return the index of self in other.
This is a lattice index,
so it is a rational number if self is not contained in other.
INPUT:
- ``other`` -- another absolute order with the same ambient number field.
OUTPUT:
a rational number
EXAMPLES::
sage: k.<i> = NumberField(x^2 + 1)
sage: O1 = k.order(i)
sage: O5 = k.order(5*i)
sage: O5.index_in(O1)
5
sage: k.<a> = NumberField(x^3 + x^2 - 2*x+8)
sage: o = k.maximal_order()
sage: o
Maximal Order in Number Field in a with defining polynomial x^3 + x^2 - 2*x + 8
sage: O1 = k.order(a); O1
Order in Number Field in a with defining polynomial x^3 + x^2 - 2*x + 8
sage: O1.index_in(o)
2
sage: O2 = k.order(1+2*a); O2
Order in Number Field in a with defining polynomial x^3 + x^2 - 2*x + 8
sage: O1.basis()
[1, a, a^2]
sage: O2.basis()
[1, 2*a, 4*a^2]
sage: o.index_in(O2)
1/16
"""
if not isinstance(other, AbsoluteOrder):
raise TypeError("other must be an absolute order.")
if other.ambient() != self.ambient():
raise ValueError("other must have the same ambient number field as self.")
return self._module_rep.index_in(other._module_rep)
def module(self):
"""
Return the underlying free module corresponding to this
order, embedded in the vector space corresponding to the
ambient number field.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + x + 3)
sage: m = k.order(3*a); m
Order in Number Field in a with defining polynomial x^3 + x + 3
sage: m.module()
Free module of degree 3 and rank 3 over Integer Ring
Echelon basis matrix:
[1 0 0]
[0 3 0]
[0 0 9]
"""
return self._module_rep
def intersection(self, other):
"""
Return the intersection of this order with another order.
EXAMPLES::
sage: k.<i> = NumberField(x^2 + 1)
sage: O6 = k.order(6*i)
sage: O9 = k.order(9*i)
sage: O6.basis()
[1, 6*i]
sage: O9.basis()
[1, 9*i]
sage: O6.intersection(O9).basis()
[1, 18*i]
sage: (O6 & O9).basis()
[1, 18*i]
sage: (O6 + O9).basis()
[1, 3*i]
"""
return self & other
def _repr_(self):
"""
Return print representation of this absolute order.
EXAMPLES::
sage: K.<a> = NumberField(x^4 - 5)
sage: K.maximal_order()._repr_()
'Maximal Order in Number Field in a with defining polynomial x^4 - 5'
sage: K.order(a)._repr_()
'Order in Number Field in a with defining polynomial x^4 - 5'
We have special cases for Gaussian and Eisenstein integers::
sage: K = CyclotomicField(4)
sage: K.ring_of_integers()
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: K = QuadraticField(-3)
sage: K.ring_of_integers()
Eisenstein Integers in Number Field in a with defining polynomial x^2 + 3 with a = 1.732050807568878?*I
"""
if self._is_maximal:
s = "Maximal Order"
if self.degree() == 2:
D = self.discriminant()
if D == -3:
s = "Eisenstein Integers"
if D == -4:
s = "Gaussian Integers"
else:
s = "Order"
return s + " in " + repr(self._K)
def basis(self):
r"""
Return the basis over `\ZZ` for this order.
EXAMPLES::
sage: k.<c> = NumberField(x^3 + x^2 + 1)
sage: O = k.maximal_order(); O
Maximal Order in Number Field in c with defining polynomial x^3 + x^2 + 1
sage: O.basis()
[1, c, c^2]
The basis is an immutable sequence::
sage: type(O.basis())
<class 'sage.structure.sequence.Sequence_generic'>
The generator functionality uses the basis method::
sage: O.0
1
sage: O.1
c
sage: O.basis()
[1, c, c^2]
sage: O.ngens()
3
"""
try:
return self.__basis
except AttributeError:
V, from_V, to_V = self._K.vector_space()
B = Sequence([self(from_V(b)) for b in self._module_rep.basis()], immutable=True)
self.__basis = B
return B
def absolute_order(self):
"""
Return the absolute order associated to this order, which is
just this order again since this is an absolute order.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: O1 = K.order(a); O1
Order in Number Field in a with defining polynomial x^3 + 2
sage: O1.absolute_order() is O1
True
"""
return self
class RelativeOrder(Order):
"""
A relative order in a number field.
A relative order is an order in some relative number field.
Invariants of this order may be computed with respect to the
contained order.
"""
def __init__(self, K, absolute_order, is_maximal=None, check=True):
"""
Create the relative order.
EXAMPLES::
sage: k.<a,b> = NumberFieldTower([x^2 - 3, x^2 + 1])
sage: O = k.maximal_order(); O # indirect doctest
Maximal Relative Order in Number Field in a with defining polynomial x^2 - 3 over its base field
sage: _ = O.basis()
sage: loads(dumps(O)) == O
True
"""
self._absolute_order = absolute_order
self._module_rep = absolute_order._module_rep
Order.__init__(self, K, is_maximal=is_maximal)
def _element_constructor_(self, x):
"""
Coerce an element into this relative order.
EXAMPLES::
sage: K.<a, b> = NumberField([x^2 + 2, x^2 + 1000*x + 1])
sage: OK = K.ring_of_integers()
sage: OK(a)
a
sage: OK([3, 4])
4*a + 3
The following used to fail; see :trac:`5276`::
sage: S.<y> = OK[]; S
Univariate Polynomial Ring in y over Maximal Relative Order in Number Field in a with defining polynomial x^2 + 2 over its base field
We test that :trac:`4193` is also fixed::
sage: K1.<a> = NumberField(x^3 - 2)
sage: R.<y> = PolynomialRing(K1)
sage: K2.<b> = K1.extension(y^2 - a)
sage: R = K2.order(b)
sage: b in R
True
sage: bb = R.basis()[1] # b by any other name
sage: bb == b
True
sage: bb.parent() is R
True
sage: bb in R # this used to return False
True
sage: R(bb) == bb # this used to raise an error
True
"""
x = self._K(x)
abs_order = self._absolute_order
to_abs = abs_order._K.structure()[1]
x = abs_order(to_abs(x)) # will test membership
return OrderElement_relative(self, x)
def _repr_(self):
"""
Return print representation of this relative order.
EXAMPLES::
sage: O = EquationOrder([x^2 + x + 1, x^3 - 2],'a,b')
sage: O._repr_()
'Relative Order in Number Field in a with defining polynomial x^2 + x + 1 over its base field'
"""
return "%sRelative Order in %r" % ("Maximal " if self._is_maximal else "", self._K)
def absolute_order(self, names='z'):
"""
Return underlying absolute order associated to this relative
order.
INPUT:
- ``names`` -- string (default: 'z'); name of generator of absolute extension.
.. note::
There *is* a default variable name, since this absolute
order is frequently used for internal algorithms.
EXAMPLES::
sage: R = EquationOrder([x^2 + 1, x^2 - 5], 'i,g'); R
Relative Order in Number Field in i with defining polynomial x^2 + 1 over its base field
sage: R.basis()
[1, 6*i - g, -g*i + 2, 7*i - g]
sage: S = R.absolute_order(); S
Order in Number Field in z with defining polynomial x^4 - 8*x^2 + 36
sage: S.basis()
[1, 5/12*z^3 + 1/6*z, 1/2*z^2, 1/2*z^3]
We compute a relative order in alpha0, alpha1, then make the
number field that contains the absolute order be called
gamma.::
sage: R = EquationOrder( [x^2 + 2, x^2 - 3], 'alpha'); R
Relative Order in Number Field in alpha0 with defining polynomial x^2 + 2 over its base field
sage: R.absolute_order('gamma')
Order in Number Field in gamma with defining polynomial x^4 - 2*x^2 + 25
sage: R.absolute_order('gamma').basis()
[1/2*gamma^2 + 1/2, 7/10*gamma^3 + 1/10*gamma, gamma^2, gamma^3]
"""
if names == 'z' or names == ('z',):
return self._absolute_order
else:
return self._absolute_order.change_names(names)
def __reduce__(self):
r"""
Used for pickling.
EXAMPLES::
sage: L.<a, b> = NumberField([x^2 + 1, x^2 - 5])
sage: O = L.maximal_order()
sage: _ = O.basis()
sage: O == loads(dumps(O))
True
"""
return (RelativeOrder, (self.number_field(), self.absolute_order(), self._is_maximal, False))
def basis(self):
r"""
Return a basis for this order as `\ZZ`-module.
EXAMPLES::
sage: K.<a,b> = NumberField([x^2+1, x^2+3])
sage: O = K.order([a,b])
sage: O.basis()
[1, -2*a + b, -b*a - 2, -5*a + 3*b]
sage: z = O.1; z
-2*a + b
sage: z.absolute_minpoly()
x^4 + 14*x^2 + 1
"""
try:
return self.__basis
except AttributeError:
pass
O = self._absolute_order
K = O.number_field()
from_K, _ = K.structure()
self.__basis = [OrderElement_relative(self, from_K(a)) for a in O.basis()]
return self.__basis
def __add__(left, right):
"""
Add two relative orders or a relative order to an absolute
order (which always results in an absolute order).
EXAMPLES::
sage: K.<a,b> = NumberField([x^2+1, x^2+3])
sage: O2 = K.order([2*a, b]); O2.absolute_discriminant()
36864
sage: O3 = K.order([3*a, 2*b]); O3.absolute_discriminant()
2985984
sage: R = O2 + O3; R
Relative Order in Number Field in a with defining polynomial x^2 + 1 over its base field
sage: R.absolute_discriminant()
9216
sage: R.is_suborder(O2)
False
sage: O2.is_suborder(R)
True
sage: O3.is_suborder(R)
True
"""
if isinstance(left, AbsoluteOrder):
return left + right._absolute_order
elif isinstance(right, AbsoluteOrder):
return left._absolute_order + right
elif isinstance(left, RelativeOrder) and isinstance(right, RelativeOrder):
if left._K != right._K:
raise TypeError("Number fields don't match.")
return RelativeOrder(left._K, left._absolute_order + right._absolute_order,
check=False)
else:
raise NotImplementedError
def __and__(left, right):
"""
Intersect two relative orders or a relative and absolute order
(which always results in an absolute order).
EXAMPLES::
sage: L.<a, b> = NumberField([x^2 + 1, x^2 - 5])
sage: O1 = L.order([a, 2*b])
sage: O2 = L.order([2*a, b])
sage: O3 = O1 & O2; O3
Relative Order in Number Field in a with defining polynomial x^2 + 1 over its base field
sage: O3.index_in(L.maximal_order())
32
"""
if isinstance(left, AbsoluteOrder):
return left & right._absolute_order
elif isinstance(right, AbsoluteOrder):
return left._absolute_order & right
elif isinstance(left, RelativeOrder) and isinstance(right, RelativeOrder):
if left._K != right._K:
raise TypeError("Number fields don't match.")
return RelativeOrder(left._K,
left._absolute_order & right._absolute_order,
check=False)
else:
raise NotImplementedError
def absolute_discriminant(self):
"""
Return the absolute discriminant of self, which is the discriminant
of the absolute order associated to self.
OUTPUT:
an integer
EXAMPLES::
sage: R = EquationOrder([x^2 + 1, x^3 + 2], 'a,b')
sage: d = R.absolute_discriminant(); d
-746496
sage: d is R.absolute_discriminant()
True
sage: factor(d)
-1 * 2^10 * 3^6
"""
return self.absolute_order().discriminant()
def is_suborder(self, other):
"""
Return True if self is a subset of the order other.
EXAMPLES::
sage: K.<a,b> = NumberField([x^2 + 1, x^3 + 2])
sage: R1 = K.order([a,b])
sage: R2 = K.order([2*a,b])
sage: R3 = K.order([a + b, b + 2*a])
sage: R1.is_suborder(R2)
False
sage: R2.is_suborder(R1)
True
sage: R3.is_suborder(R1)
True
sage: R1.is_suborder(R3)
True
sage: R1 == R3
True
"""
return self.absolute_order().is_suborder(other.absolute_order())
def index_in(self, other):
"""
Return the index of self in other.
This is a lattice index,
so it is a rational number if self is not contained in other.
INPUT:
- ``other`` -- another order with the same ambient absolute number field.
OUTPUT:
a rational number
EXAMPLES::
sage: K.<a,b> = NumberField([x^3 + x + 3, x^2 + 1])
sage: R1 = K.order([3*a, 2*b])
sage: R2 = K.order([a, 4*b])
sage: R1.index_in(R2)
729/8
sage: R2.index_in(R1)
8/729
"""
if not isinstance(other, Order):
raise TypeError("other must be an absolute order.")
return self.absolute_order().index_in(other.absolute_order())
def each_is_integral(v):
"""
Return whether every element of the list ``v`` of elements of a number
field is integral.
EXAMPLES::
sage: W.<sqrt5> = NumberField(x^2 - 5)
sage: from sage.rings.number_field.order import each_is_integral
sage: each_is_integral([sqrt5, 2, (1+sqrt5)/2])
True
sage: each_is_integral([sqrt5, (1+sqrt5)/3])
False
"""
return all(x.is_integral() for x in v)
def absolute_order_from_ring_generators(gens, check_is_integral=True,
check_rank=True, is_maximal=None,
allow_subfield=False):
"""
INPUT:
- ``gens`` -- list of integral elements of an absolute order.
- ``check_is_integral`` -- bool (default: True), whether to check that each
generator is integral.
- ``check_rank`` -- bool (default: True), whether to check that the ring
generated by gens is of full rank.
- ``is_maximal`` -- bool (or None); set if maximality of the generated order is
known
- ``allow_subfield`` -- bool (default: False), if True and the generators do
not generate an order, i.e., they generate a subring of smaller rank,
instead of raising an error, return an order in a smaller number field.
EXAMPLES::
sage: K.<a> = NumberField(x^4 - 5)
sage: K.order(a)
Order in Number Field in a with defining polynomial x^4 - 5
We have to explicitly import this function, since typically it is called
with ``K.order`` as above.::
sage: from sage.rings.number_field.order import absolute_order_from_ring_generators
sage: absolute_order_from_ring_generators([a])
Order in Number Field in a with defining polynomial x^4 - 5
sage: absolute_order_from_ring_generators([3*a, 2, 6*a+1])
Order in Number Field in a with defining polynomial x^4 - 5
If one of the inputs is non-integral, it is an error.::
sage: absolute_order_from_ring_generators([a/2])
Traceback (most recent call last):
...
ValueError: each generator must be integral
If the gens do not generate an order, i.e., the ring they generate does
not have full rank, then it is an error.::
sage: absolute_order_from_ring_generators([a^2])
Traceback (most recent call last):
...
ValueError: the rank of the span of gens is wrong
Both checking for integrality and checking for full rank can be
turned off in order to save time, though one can get nonsense as
illustrated below.::
sage: absolute_order_from_ring_generators([a/2], check_is_integral=False)
Order in Number Field in a with defining polynomial x^4 - 5
sage: absolute_order_from_ring_generators([a^2], check_rank=False)
Order in Number Field in a with defining polynomial x^4 - 5
"""
if check_is_integral and not each_is_integral(gens):
raise ValueError("each generator must be integral")
gens = Sequence(gens)
n = [x.absolute_minpoly().degree() for x in gens]
module_gens = monomials(gens, n)
return absolute_order_from_module_generators(module_gens,
check_integral=False,
check_is_ring=False,
check_rank=check_rank,
is_maximal=is_maximal,
allow_subfield=allow_subfield)
def absolute_order_from_module_generators(gens,
check_integral=True, check_rank=True,
check_is_ring=True, is_maximal=None,
allow_subfield=False):
"""
INPUT:
- ``gens`` -- list of elements of an absolute number field that generates an
order in that number field as a ZZ *module*.
- ``check_integral`` -- check that each gen is integral
- ``check_rank`` -- check that the gens span a module of the correct rank
- ``check_is_ring`` -- check that the module is closed under multiplication
(this is very expensive)
- ``is_maximal`` -- bool (or None); set if maximality of the generated order is known
OUTPUT:
an absolute order
EXAMPLES:
We have to explicitly import the function, since it is not meant
for regular usage::
sage: from sage.rings.number_field.order import absolute_order_from_module_generators
sage: K.<a> = NumberField(x^4 - 5)
sage: O = K.maximal_order(); O
Maximal Order in Number Field in a with defining polynomial x^4 - 5
sage: O.basis()
[1/2*a^2 + 1/2, 1/2*a^3 + 1/2*a, a^2, a^3]
sage: O.module()
Free module of degree 4 and rank 4 over Integer Ring
Echelon basis matrix:
[1/2 0 1/2 0]
[ 0 1/2 0 1/2]
[ 0 0 1 0]
[ 0 0 0 1]
sage: g = O.basis(); g
[1/2*a^2 + 1/2, 1/2*a^3 + 1/2*a, a^2, a^3]
sage: absolute_order_from_module_generators(g)
Order in Number Field in a with defining polynomial x^4 - 5
We illustrate each check flag -- the output is the same, but turning a
check off may make the function run ever so slightly faster::
sage: absolute_order_from_module_generators(g, check_is_ring=False)
Order in Number Field in a with defining polynomial x^4 - 5
sage: absolute_order_from_module_generators(g, check_rank=False)
Order in Number Field in a with defining polynomial x^4 - 5
sage: absolute_order_from_module_generators(g, check_integral=False)
Order in Number Field in a with defining polynomial x^4 - 5
Next we illustrate constructing "fake" orders to illustrate turning
off various check flags::
sage: k.<i> = NumberField(x^2 + 1)
sage: R = absolute_order_from_module_generators([2, 2*i], check_is_ring=False); R
Order in Number Field in i with defining polynomial x^2 + 1
sage: R.basis()
[2, 2*i]
sage: R = absolute_order_from_module_generators([k(1)], check_rank=False); R
Order in Number Field in i with defining polynomial x^2 + 1
sage: R.basis()
[1]
If the order contains a non-integral element, even if we do not check
that, we will find that the rank is wrong or that the order is not closed
under multiplication::
sage: absolute_order_from_module_generators([1/2, i], check_integral=False)
Traceback (most recent call last):
...
ValueError: the module span of the gens is not closed under multiplication.
sage: R = absolute_order_from_module_generators([1/2, i], check_is_ring=False, check_integral=False); R
Order in Number Field in i with defining polynomial x^2 + 1
sage: R.basis()
[1/2, i]
We turn off all check flags and make a really messed up order::
sage: R = absolute_order_from_module_generators([1/2, i], check_is_ring=False, check_integral=False, check_rank=False); R
Order in Number Field in i with defining polynomial x^2 + 1
sage: R.basis()
[1/2, i]
An order that lives in a subfield::
sage: F.<alpha> = NumberField(x**4+3)
sage: F.order([alpha**2], allow_subfield=True)
Order in Number Field in beta with defining polynomial x^2 + 2*x + 13 with beta = 2*alpha^2 - 1
"""
if not gens:
raise ValueError("gens must span an order over ZZ")
gens = Sequence(gens)
if check_integral and not each_is_integral(gens):
raise ValueError("each generator must be integral")
K = gens.universe()
if is_NumberFieldOrder(K):
K = K.number_field()
V, from_V, to_V = K.vector_space()
mod_gens = [to_V(x) for x in gens]
ambient = ZZ**V.dimension()
W = ambient.span(mod_gens)
if allow_subfield:
if W.rank() < K.degree():
# We have to make the order in a smaller field.
# We do this by choosing a random element of W,
# moving it back to K, and checking that it defines
# a field of degree equal to the degree of W.
# Then we move everything into that field, where
# W does define an order.
while True:
alpha = from_V(W.random_element())
if alpha.minpoly().degree() == W.rank():
break
# Now alpha generates a subfield where W is an order
# (with the right rank).
# We move each generator of W to this subfield.
K, _ = K.subfield(alpha, 'beta')
gens = [K(x) for x in gens]
V, from_V, to_V = K.vector_space()
mod_gens = [to_V(x) for x in gens]
ambient = ZZ**V.dimension()
W = ambient.span(mod_gens)
elif check_rank:
if W.rank() != K.degree():
raise ValueError("the rank of the span of gens is wrong")
if check_is_ring:
if any(to_V(x * y) not in W for x in gens for y in gens):
raise ValueError("the module span of the gens is not closed under multiplication.")
return AbsoluteOrder(K, W, check=False, is_maximal=is_maximal) # we have already checked everything
def relative_order_from_ring_generators(gens,
check_is_integral=True,
check_rank=True,
is_maximal=None,
allow_subfield=False):
"""
INPUT:
- ``gens`` -- list of integral elements of an absolute order.
- ``check_is_integral`` -- bool (default: True), whether to check that each
generator is integral.
- ``check_rank`` -- bool (default: True), whether to check that the ring
generated by gens is of full rank.
- ``is_maximal`` -- bool (or None); set if maximality of the generated order is
known
EXAMPLES:
We have to explicitly import this function, since it is not meant
for regular usage::
sage: from sage.rings.number_field.order import relative_order_from_ring_generators
sage: K.<i, a> = NumberField([x^2 + 1, x^2 - 17])
sage: R = K.base_field().maximal_order()
sage: S = relative_order_from_ring_generators([i,a]); S
Relative Order in Number Field in i with defining polynomial x^2 + 1 over its base field
Basis for the relative order, which is obtained by computing the algebra generated
by i and a::
sage: S.basis()
[1, 7*i - 2*a, -a*i + 8, 25*i - 7*a]
"""
if check_is_integral and not each_is_integral(gens):
raise ValueError("each generator must be integral")
gens = Sequence(gens)
# The top number field that contains the order.
K = gens.universe()
# The absolute version of that field.
Kabs = K.absolute_field('z')
from_Kabs, to_Kabs = Kabs.structure()
module_gens = [to_Kabs(a) for a in gens]
n = [a.absolute_minpoly().degree() for a in gens]
absolute_order_module_gens = monomials(module_gens, n)
abs_order = absolute_order_from_module_generators(absolute_order_module_gens,
check_integral=False,
check_is_ring=False,
check_rank=check_rank)
return RelativeOrder(K, abs_order, check=False, is_maximal=is_maximal)
def GaussianIntegers(names="I"):
r"""
Return the ring of Gaussian integers.
This is the ring of all complex numbers
of the form `a + b I` with `a` and `b` integers and `I = \sqrt{-1}`.
EXAMPLES::
sage: ZZI.<I> = GaussianIntegers()
sage: ZZI
Gaussian Integers in Number Field in I with defining polynomial x^2 + 1 with I = 1*I
sage: factor(3 + I)
(-I) * (I + 1) * (2*I + 1)
sage: CC(I)
1.00000000000000*I
sage: I.minpoly()
x^2 + 1
sage: GaussianIntegers().basis()
[1, I]
"""
from sage.rings.all import CDF, NumberField
f = ZZ['x']([1, 0, 1])
nf = NumberField(f, names, embedding=CDF(0, 1))
return nf.ring_of_integers()
def EisensteinIntegers(names="omega"):
r"""
Return the ring of Eisenstein integers.
This is the ring of all complex numbers
of the form `a + b \omega` with `a` and `b` integers and
`\omega = (-1 + \sqrt{-3})/2`.
EXAMPLES::
sage: R.<omega> = EisensteinIntegers()
sage: R
Eisenstein Integers in Number Field in omega with defining polynomial x^2 + x + 1 with omega = -0.50000000000000000? + 0.866025403784439?*I
sage: factor(3 + omega)
(omega) * (-3*omega - 2)
sage: CC(omega)
-0.500000000000000 + 0.866025403784439*I
sage: omega.minpoly()
x^2 + x + 1
sage: EisensteinIntegers().basis()
[1, omega]
"""
from sage.rings.all import CDF, NumberField
f = ZZ['x']([1, 1, 1])
nf = NumberField(f, names, embedding=CDF(-0.5, 0.8660254037844386))
return nf.ring_of_integers()
|
from typing import Optional, Set
import click
import copy
from datetime import datetime
import json
import logging
import os
import subprocess
import sys
import time
import urllib
import urllib.parse
import yaml
from socket import socket
import ray
import psutil
import ray._private.services as services
import ray.ray_constants as ray_constants
import ray._private.utils
from ray.autoscaler._private.commands import (
attach_cluster, exec_cluster, create_or_update_cluster, monitor_cluster,
rsync, teardown_cluster, get_head_node_ip, kill_node, get_worker_node_ips,
get_local_dump_archive, get_cluster_dump_archive, debug_status,
RUN_ENV_TYPES)
from ray.autoscaler._private.constants import RAY_PROCESSES
from ray.autoscaler._private.fake_multi_node.node_provider import \
FAKE_HEAD_NODE_ID
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR, \
DEBUG_AUTOSCALING_STATUS
from ray.internal.internal_api import memory_summary
from ray.autoscaler._private.cli_logger import cli_logger, cf
from ray.core.generated import gcs_service_pb2
from ray.core.generated import gcs_service_pb2_grpc
from distutils.dir_util import copy_tree
logger = logging.getLogger(__name__)
logging_options = [
click.option(
"--log-style",
required=False,
type=click.Choice(cli_logger.VALID_LOG_STYLES, case_sensitive=False),
default="auto",
help=("If 'pretty', outputs with formatting and color. If 'record', "
"outputs record-style without formatting. "
"'auto' defaults to 'pretty', and disables pretty logging "
"if stdin is *not* a TTY.")),
click.option(
"--log-color",
required=False,
type=click.Choice(["auto", "false", "true"], case_sensitive=False),
default="auto",
help=("Use color logging. "
"Auto enables color logging if stdout is a TTY.")),
click.option("-v", "--verbose", default=None, count=True)
]
def add_click_options(options):
def wrapper(f):
for option in reversed(options):
f = option(f)
return f
return wrapper
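# A minimal usage sketch (illustrative, not from the original source):
# ``add_click_options`` is a decorator factory, typically applied to a click
# command so that the shared logging options defined above are attached to it.
# The command name ``my_command`` below is purely hypothetical; the parameter
# names match the ``--log-style``, ``--log-color`` and ``--verbose`` options
# declared in ``logging_options``.
#
#     @cli.command()
#     @add_click_options(logging_options)
#     def my_command(log_style, log_color, verbose):
#         ...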
@click.group()
@click.option(
"--logging-level",
required=False,
default=ray_constants.LOGGER_LEVEL,
type=str,
help=ray_constants.LOGGER_LEVEL_HELP)
@click.option(
"--logging-format",
required=False,
default=ray_constants.LOGGER_FORMAT,
type=str,
help=ray_constants.LOGGER_FORMAT_HELP)
@click.version_option()
def cli(logging_level, logging_format):
level = logging.getLevelName(logging_level.upper())
ray._private.ray_logging.setup_logger(level, logging_format)
cli_logger.set_format(format_tmpl=logging_format)
@click.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--port",
"-p",
required=False,
type=int,
default=ray_constants.DEFAULT_DASHBOARD_PORT,
help="The local port to forward to the dashboard")
@click.option(
"--remote-port",
required=False,
type=int,
default=ray_constants.DEFAULT_DASHBOARD_PORT,
help="The remote port your dashboard runs on")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
def dashboard(cluster_config_file, cluster_name, port, remote_port,
no_config_cache):
"""Port-forward a Ray cluster's dashboard to the local machine."""
# Sleeping in a loop is preferable to `sleep infinity` because the latter
# only works on linux.
# Find the first open port sequentially from `remote_port`.
try:
port_forward = [
(port, remote_port),
]
click.echo("Attempting to establish dashboard locally at"
" localhost:{} connected to"
" remote port {}".format(port, remote_port))
# We want to probe with a no-op that returns quickly to avoid
# exceptions caused by network errors.
exec_cluster(
cluster_config_file,
override_cluster_name=cluster_name,
port_forward=port_forward,
no_config_cache=no_config_cache)
click.echo("Successfully established connection.")
except Exception as e:
raise click.ClickException(
"Failed to forward dashboard from remote port {1} to local port "
"{0}. There are a couple possibilities: \n 1. The remote port is "
"incorrectly specified \n 2. The local port {0} is already in "
"use.\n The exception is: {2}".format(port, remote_port, e)) \
from None
def continue_debug_session(live_jobs: Set[str]):
"""Continue active debugging session.
This function will connect 'ray debug' to the right debugger
when a user is stepping between Ray tasks.
"""
active_sessions = ray.experimental.internal_kv._internal_kv_list(
"RAY_PDB_")
for active_session in active_sessions:
if active_session.startswith(b"RAY_PDB_CONTINUE"):
# Check to see that the relevant job is still alive.
data = ray.experimental.internal_kv._internal_kv_get(
active_session)
if json.loads(data)["job_id"] not in live_jobs:
ray.experimental.internal_kv._internal_kv_del(active_session)
continue
print("Continuing pdb session in different process...")
key = b"RAY_PDB_" + active_session[len("RAY_PDB_CONTINUE_"):]
while True:
data = ray.experimental.internal_kv._internal_kv_get(key)
if data:
session = json.loads(data)
if ("exit_debugger" in session
or session["job_id"] not in live_jobs):
ray.experimental.internal_kv._internal_kv_del(key)
return
host, port = session["pdb_address"].split(":")
ray.util.rpdb.connect_pdb_client(host, int(port))
ray.experimental.internal_kv._internal_kv_del(key)
continue_debug_session(live_jobs)
return
time.sleep(1.0)
def format_table(table):
"""Format a table as a list of lines with aligned columns."""
result = []
col_width = [max(len(x) for x in col) for col in zip(*table)]
for line in table:
result.append(" | ".join(
"{0:{1}}".format(x, col_width[i]) for i, x in enumerate(line)))
return result
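# Hypothetical example: format_table([["a", "bb"], ["ccc", "d"]]) returns
# ["a   | bb", "ccc | d "] -- each column is padded to its widest cell.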
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
def debug(address):
"""Show all active breakpoints and exceptions in the Ray debugger."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address, log_to_driver=False)
while True:
# Used to filter out and clean up entries from dead jobs.
live_jobs = {
job["JobID"]
for job in ray.state.jobs() if not job["IsDead"]
}
continue_debug_session(live_jobs)
active_sessions = ray.experimental.internal_kv._internal_kv_list(
"RAY_PDB_")
print("Active breakpoints:")
sessions_data = []
for active_session in active_sessions:
data = json.loads(
ray.experimental.internal_kv._internal_kv_get(active_session))
# Check that the relevant job is alive, else clean up the entry.
if data["job_id"] in live_jobs:
sessions_data.append(data)
else:
ray.experimental.internal_kv._internal_kv_del(active_session)
sessions_data = sorted(
sessions_data, key=lambda data: data["timestamp"], reverse=True)
table = [["index", "timestamp", "Ray task", "filename:lineno"]]
for i, data in enumerate(sessions_data):
date = datetime.utcfromtimestamp(
data["timestamp"]).strftime("%Y-%m-%d %H:%M:%S")
table.append([
str(i), date, data["proctitle"],
data["filename"] + ":" + str(data["lineno"])
])
for i, line in enumerate(format_table(table)):
print(line)
if i >= 1 and not sessions_data[i - 1]["traceback"].startswith(
"NoneType: None"):
print(sessions_data[i - 1]["traceback"])
inp = input("Enter breakpoint index or press enter to refresh: ")
if inp == "":
print()
continue
else:
index = int(inp)
session = json.loads(
ray.experimental.internal_kv._internal_kv_get(
active_sessions[index]))
host, port = session["pdb_address"].split(":")
ray.util.rpdb.connect_pdb_client(host, int(port))
@cli.command()
@click.option(
"--node-ip-address",
required=False,
type=str,
help="the IP address of this node")
@click.option(
"--address", required=False, type=str, help="the address to use for Ray")
@click.option(
"--port",
type=int,
required=False,
help=f"the port of the head ray process. If not provided, defaults to "
f"{ray_constants.DEFAULT_PORT}; if port is set to 0, we will"
f" allocate an available port.")
@click.option(
"--redis-password",
required=False,
hidden=True,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="If provided, secure Redis ports with this password")
@click.option(
"--redis-shard-ports",
required=False,
hidden=True,
type=str,
help="the port to use for the Redis shards other than the "
"primary Redis shard")
@click.option(
"--object-manager-port",
required=False,
type=int,
help="the port to use for starting the object manager")
@click.option(
"--node-manager-port",
required=False,
type=int,
default=0,
help="the port to use for starting the node manager")
@click.option(
"--gcs-server-port",
required=False,
type=int,
help="Port number for the GCS server.")
@click.option(
"--min-worker-port",
required=False,
type=int,
default=10002,
help="the lowest port number that workers will bind on. If not set, "
"random ports will be chosen.")
@click.option(
"--max-worker-port",
required=False,
type=int,
default=10999,
help="the highest port number that workers will bind on. If set, "
"'--min-worker-port' must also be set.")
@click.option(
"--worker-port-list",
required=False,
help="a comma-separated list of open ports for workers to bind on. "
"Overrides '--min-worker-port' and '--max-worker-port'.")
@click.option(
"--ray-client-server-port",
required=False,
type=int,
default=10001,
help="the port number the ray client server will bind on. If not set, "
"the ray client server will not be started.")
@click.option(
"--memory",
required=False,
hidden=True,
type=int,
help="The amount of memory (in bytes) to make available to workers. "
"By default, this is set to the available memory on the node.")
@click.option(
"--object-store-memory",
required=False,
type=int,
help="The amount of memory (in bytes) to start the object store with. "
"By default, this is capped at 20GB but can be set higher.")
@click.option(
"--redis-max-memory",
required=False,
hidden=True,
type=int,
help="The max amount of memory (in bytes) to allow redis to use. Once the "
"limit is exceeded, redis will start LRU eviction of entries. This only "
"applies to the sharded redis tables (task, object, and profile tables). "
"By default this is capped at 10GB but can be set higher.")
@click.option(
"--num-cpus",
required=False,
type=int,
help="the number of CPUs on this node")
@click.option(
"--num-gpus",
required=False,
type=int,
help="the number of GPUs on this node")
@click.option(
"--resources",
required=False,
default="{}",
type=str,
help="a JSON serialized dictionary mapping resource name to "
"resource quantity")
@click.option(
"--head",
is_flag=True,
default=False,
help="provide this argument for the head node")
@click.option(
"--include-dashboard",
default=None,
type=bool,
help="provide this argument to start the Ray dashboard GUI")
@click.option(
"--dashboard-host",
required=False,
default="localhost",
help="the host to bind the dashboard server to, either localhost "
"(127.0.0.1) or 0.0.0.0 (available from all interfaces). By default, this"
"is localhost.")
@click.option(
"--dashboard-port",
required=False,
type=int,
default=ray_constants.DEFAULT_DASHBOARD_PORT,
help="the port to bind the dashboard server to--defaults to {}".format(
ray_constants.DEFAULT_DASHBOARD_PORT))
@click.option(
"--dashboard-agent-listen-port",
type=int,
hidden=True,
default=0,
help="the port for dashboard agents to listen for http on.")
@click.option(
"--block",
is_flag=True,
default=False,
help="provide this argument to block forever in this command")
@click.option(
"--plasma-directory",
required=False,
type=str,
help="object store directory for memory mapped files")
@click.option(
"--autoscaling-config",
required=False,
type=str,
help="the file that contains the autoscaling config")
@click.option(
"--no-redirect-worker-output",
is_flag=True,
default=False,
help="do not redirect worker stdout and stderr to files")
@click.option(
"--no-redirect-output",
is_flag=True,
default=False,
help="do not redirect non-worker stdout and stderr to files")
@click.option(
"--plasma-store-socket-name",
default=None,
help="manually specify the socket name of the plasma store")
@click.option(
"--raylet-socket-name",
default=None,
help="manually specify the socket path of the raylet process")
@click.option(
"--temp-dir",
hidden=True,
default=None,
help="manually specify the root temporary dir of the Ray process")
@click.option(
"--system-config",
default=None,
hidden=True,
type=json.loads,
help="Override system configuration defaults.")
@click.option(
"--enable-object-reconstruction",
is_flag=True,
default=False,
hidden=True,
help="Specify whether object reconstruction will be used for this cluster."
)
@click.option(
"--metrics-export-port",
type=int,
hidden=True,
default=None,
help="the port to use to expose Ray metrics through a "
"Prometheus endpoint.")
@click.option(
"--no-monitor",
is_flag=True,
hidden=True,
default=False,
help="If True, the ray autoscaler monitor for this cluster will not be "
"started.")
@click.option(
"--tracing-startup-hook",
type=str,
hidden=True,
default=None,
help="The function that sets up tracing with a tracing provider, remote "
"span processor, and additional instruments. See docs.ray.io/tracing.html "
"for more info.")
@click.option(
"--ray-debugger-external",
is_flag=True,
default=False,
help="Make the Ray debugger available externally to the node. This is only"
"safe to activate if the node is behind a firewall.")
@add_click_options(logging_options)
def start(node_ip_address, address, port, redis_password, redis_shard_ports,
object_manager_port, node_manager_port, gcs_server_port,
min_worker_port, max_worker_port, worker_port_list,
ray_client_server_port, memory, object_store_memory,
redis_max_memory, num_cpus, num_gpus, resources, head,
include_dashboard, dashboard_host, dashboard_port,
dashboard_agent_listen_port, block, plasma_directory,
autoscaling_config, no_redirect_worker_output, no_redirect_output,
plasma_store_socket_name, raylet_socket_name, temp_dir,
system_config, enable_object_reconstruction, metrics_export_port,
no_monitor, tracing_startup_hook, ray_debugger_external, log_style,
log_color, verbose):
"""Start Ray processes manually on the local machine."""
cli_logger.configure(log_style, log_color, verbose)
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head.")
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.",
cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold("--resources='{\"CustomResource3\": 1, "
"\"CustomResource2\": 2}'"))
raise Exception("Unable to parse the --resources argument using "
"json.loads. Try using a format like\n\n"
" --resources='{\"CustomResource1\": 3, "
"\"CustomReseource2\": 2}'")
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
worker_port_list=worker_port_list,
ray_client_server_port=ray_client_server_port,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=False,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
dashboard_agent_listen_port=dashboard_agent_listen_port,
_system_config=system_config,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port,
no_monitor=no_monitor,
tracing_startup_hook=tracing_startup_hook,
ray_debugger_external=ray_debugger_external)
if head:
# Use default if port is none, allocate an available port if port is 0
if port is None:
port = ray_constants.DEFAULT_PORT
if port == 0:
with socket() as s:
s.bind(("", 0))
port = s.getsockname()[1]
if os.environ.get("RAY_FAKE_CLUSTER"):
ray_params.env_vars = {
"RAY_OVERRIDE_NODE_ID_FOR_TESTING": FAKE_HEAD_NODE_ID
}
num_redis_shards = None
# Start Ray on the head node.
if redis_shard_ports is not None and address is None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
num_redis_shards = len(redis_shard_ports)
if address is not None:
cli_logger.print(
"Will use value of `{}` as remote Redis server address(es). "
"If the primary one is not reachable, we starts new one(s) "
"with `{}` in local.", cf.bold("--address"), cf.bold("--port"))
external_addresses = address.split(",")
# We reuse primary redis as sharding when there's only one
# instance provided.
if len(external_addresses) == 1:
external_addresses.append(external_addresses[0])
reachable = False
try:
[primary_redis_ip, port] = external_addresses[0].split(":")
ray._private.services.wait_for_redis_to_start(
primary_redis_ip, port, password=redis_password)
reachable = True
# We catch a generic Exception here in case someone later changes
# the type of the exception.
except Exception:
cli_logger.print(
"The primary external redis server `{}` is not reachable. "
"Will starts new one(s) with `{}` in local.",
cf.bold(external_addresses[0]), cf.bold("--port"))
if reachable:
ray_params.update_if_absent(
external_addresses=external_addresses)
num_redis_shards = len(external_addresses) - 1
if redis_password == ray_constants.REDIS_DEFAULT_PASSWORD:
cli_logger.warning(
"`{}` should not be specified as empty string if "
"external redis server(s) `{}` points to requires "
"password.", cf.bold("--redis-password"),
cf.bold("--address"))
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address())
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
ray_params.update_if_absent(
redis_port=port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=None,
autoscaling_config=autoscaling_config,
)
# Fail early when starting a new cluster when one is already running
if address is None:
default_address = f"{ray_params.node_ip_address}:{port}"
redis_addresses = services.find_redis_address(default_address)
if len(redis_addresses) > 0:
raise ConnectionError(
f"Ray is already running at {default_address}. "
f"Please specify a different port using the `--port`"
f" command to `ray start`.")
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block)
redis_address = node.redis_address
if temp_dir is None:
# Default temp directory.
temp_dir = ray._private.utils.get_user_temp_dir()
# Using the user-supplied temp dir unblocks on-prem
# users who can't write to the default temp.
current_cluster_path = os.path.join(temp_dir, "ray_current_cluster")
# TODO: Consider using the custom temp_dir for this file across the
# code base. (https://github.com/ray-project/ray/issues/16458)
with open(current_cluster_path, "w") as f:
print(redis_address, file=f)
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print(
"To connect to this Ray runtime from another node, run")
# NOTE(kfstorm): Java driver relies on this line to get the address
# of the cluster. Please be careful when updating this line.
cli_logger.print(
cf.bold(" ray start --address='{}'{}"), redis_address,
f" --redis-password='{redis_password}'"
if redis_password else "")
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})", c.magenta("."),
c.magenta("="), c.yellow("'auto'"),
", _redis_password{}{}".format(
c.magenta("="),
c.yellow("'" + redis_password + "'"))
if redis_password else "")
cli_logger.newline()
cli_logger.print("To connect to this Ray runtime from outside of "
"the cluster, for example to")
cli_logger.print("connect to a remote cluster from your laptop "
"directly, use the following")
cli_logger.print("Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{})", c.magenta("."),
c.magenta("="),
c.yellow("'ray://<head_node_ip_address>:"
f"{ray_client_server_port}'"))
cli_logger.newline()
cli_logger.print(
cf.underlined("If connection fails, check your "
"firewall settings and "
"network configuration."))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
else:
# Start Ray on a non-head node.
redis_address = None
if address is not None:
(redis_address, redis_address_ip,
redis_address_port) = services.validate_redis_address(address)
if port is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--port"), cf.bold("--head"))
raise Exception("If --head is not passed in, --port is not "
"allowed.")
if redis_shard_ports is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-shard-ports "
"is not allowed.")
if redis_address is None:
cli_logger.abort("`{}` is required unless starting with `{}`.",
cf.bold("--address"), cf.bold("--head"))
raise Exception("If --head is not passed in, --address must "
"be provided.")
if include_dashboard:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"), cf.bold("--head"))
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant.")
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address))
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.flush()
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal.")
cli_logger.print(
"Running subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly.")
cli_logger.flush()
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)})
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
sys.exit(1)
@cli.command()
@click.option(
"-f",
"--force",
is_flag=True,
help="If set, ray will send SIGKILL instead of SIGTERM.")
@add_click_options(logging_options)
def stop(force, verbose, log_style, log_color):
"""Stop Ray processes manually on the local machine."""
cli_logger.configure(log_style, log_color, verbose)
# Note that raylet needs to exit before object store, otherwise
# it cannot exit gracefully.
is_linux = sys.platform.startswith("linux")
processes_to_kill = RAY_PROCESSES
process_infos = []
for proc in psutil.process_iter(["name", "cmdline"]):
try:
process_infos.append((proc, proc.name(), proc.cmdline()))
except psutil.Error:
pass
total_found = 0
total_stopped = 0
stopped = []
for keyword, filter_by_cmd in processes_to_kill:
if filter_by_cmd and is_linux and len(keyword) > 15:
# getting here is an internal bug, so we do not use cli_logger
msg = ("The filter string should not be more than {} "
"characters. Actual length: {}. Filter: {}").format(
15, len(keyword), keyword)
raise ValueError(msg)
found = []
for candidate in process_infos:
proc, proc_cmd, proc_args = candidate
corpus = (proc_cmd
if filter_by_cmd else subprocess.list2cmdline(proc_args))
if keyword in corpus:
found.append(candidate)
for proc, proc_cmd, proc_args in found:
total_found += 1
proc_string = str(subprocess.list2cmdline(proc_args))
try:
if force:
proc.kill()
else:
# TODO(mehrdadn): On Windows, this is forceful termination.
# We don't want CTRL_BREAK_EVENT, because that would
# terminate the entire process group. What to do?
proc.terminate()
if force:
cli_logger.verbose("Killed `{}` {} ", cf.bold(proc_string),
cf.dimmed("(via SIGKILL)"))
else:
cli_logger.verbose("Send termination request to `{}` {}",
cf.bold(proc_string),
cf.dimmed("(via SIGTERM)"))
total_stopped += 1
stopped.append(proc)
except psutil.NoSuchProcess:
cli_logger.verbose(
"Attempted to stop `{}`, but process was already dead.",
cf.bold(proc_string))
total_stopped += 1
except (psutil.Error, OSError) as ex:
cli_logger.error("Could not terminate `{}` due to {}",
cf.bold(proc_string), str(ex))
if total_found == 0:
cli_logger.print("Did not find any active Ray processes.")
else:
if total_stopped == total_found:
cli_logger.success("Stopped all {} Ray processes.", total_stopped)
else:
cli_logger.warning(
"Stopped only {} out of {} Ray processes. "
"Set `{}` to see more details.", total_stopped, total_found,
cf.bold("-v"))
cli_logger.warning("Try running the command again, or use `{}`.",
cf.bold("--force"))
try:
os.remove(
os.path.join(ray._private.utils.get_user_temp_dir(),
"ray_current_cluster"))
except OSError:
# This just means the file doesn't exist.
pass
# Wait for the processes to actually stop.
psutil.wait_procs(stopped, timeout=2)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--min-workers",
required=False,
type=int,
help="Override the configured min worker node count for the cluster.")
@click.option(
"--max-workers",
required=False,
type=int,
help="Override the configured max worker node count for the cluster.")
@click.option(
"--no-restart",
is_flag=True,
default=False,
help=("Whether to skip restarting Ray services during the update. "
"This avoids interrupting running jobs."))
@click.option(
"--restart-only",
is_flag=True,
default=False,
help=("Whether to skip running setup commands and only restart Ray. "
"This cannot be used with 'no-restart'."))
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--redirect-command-output",
is_flag=True,
default=False,
help="Whether to redirect command output to a file.")
@click.option(
"--use-login-shells/--use-normal-shells",
is_flag=True,
default=True,
help=("Ray uses login shells (bash --login -i) to run cluster commands "
"by default. If your workflow is compatible with normal shells, "
"this can be disabled for a better user experience."))
@add_click_options(logging_options)
def up(cluster_config_file, min_workers, max_workers, no_restart, restart_only,
yes, cluster_name, no_config_cache, redirect_command_output,
use_login_shells, log_style, log_color, verbose):
"""Create or update a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
if restart_only or no_restart:
cli_logger.doassert(restart_only != no_restart,
"`{}` is incompatible with `{}`.",
cf.bold("--restart-only"), cf.bold("--no-restart"))
assert restart_only != no_restart, "Cannot set both 'restart_only' " \
"and 'no_restart' at the same time!"
if urllib.parse.urlparse(cluster_config_file).scheme in ("http", "https"):
try:
response = urllib.request.urlopen(cluster_config_file, timeout=5)
content = response.read()
file_name = cluster_config_file.split("/")[-1]
with open(file_name, "wb") as f:
f.write(content)
cluster_config_file = file_name
except urllib.error.HTTPError as e:
cli_logger.warning("{}", str(e))
cli_logger.warning(
"Could not download remote cluster configuration file.")
create_or_update_cluster(
config_file=cluster_config_file,
override_min_workers=min_workers,
override_max_workers=max_workers,
no_restart=no_restart,
restart_only=restart_only,
yes=yes,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
redirect_command_output=redirect_command_output,
use_login_shells=use_login_shells)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--workers-only",
is_flag=True,
default=False,
help="Only destroy the workers.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--keep-min-workers",
is_flag=True,
default=False,
help="Retain the minimal amount of workers specified in the config.")
@add_click_options(logging_options)
def down(cluster_config_file, yes, workers_only, cluster_name,
keep_min_workers, log_style, log_color, verbose):
"""Tear down a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
teardown_cluster(cluster_config_file, yes, workers_only, cluster_name,
keep_min_workers)
@cli.command(hidden=True)
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Don't ask for confirmation.")
@click.option(
"--hard",
is_flag=True,
default=False,
help="Terminates the node via node provider (defaults to a 'soft kill'"
" which terminates Ray but does not actually delete the instances).")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def kill_random_node(cluster_config_file, yes, hard, cluster_name):
"""Kills a random Ray node. For testing purposes only."""
click.echo("Killed node with IP " +
kill_node(cluster_config_file, yes, hard, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--lines",
required=False,
default=100,
type=int,
help="Number of lines to tail.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@add_click_options(logging_options)
def monitor(cluster_config_file, lines, cluster_name, log_style, log_color,
verbose):
"""Tails the autoscaler logs of a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
monitor_cluster(cluster_config_file, lines, cluster_name)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen", is_flag=True, default=False, help="Run the command in screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--new", "-N", is_flag=True, help="Force creation of a new screen.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@add_click_options(logging_options)
def attach(cluster_config_file, start, screen, tmux, cluster_name,
no_config_cache, new, port_forward, log_style, log_color, verbose):
"""Create or attach to a SSH session to a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
port_forward = [(port, port) for port in list(port_forward)]
attach_cluster(
cluster_config_file,
start,
screen,
tmux,
cluster_name,
no_config_cache=no_config_cache,
new=new,
port_forward=port_forward)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@add_click_options(logging_options)
def rsync_down(cluster_config_file, source, target, cluster_name, log_style,
log_color, verbose):
"""Download specific files from a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
rsync(cluster_config_file, source, target, cluster_name, down=True)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("source", required=False, type=str)
@click.argument("target", required=False, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--all-nodes",
"-A",
is_flag=True,
required=False,
help="Upload to all nodes (workers and head).")
@add_click_options(logging_options)
def rsync_up(cluster_config_file, source, target, cluster_name, all_nodes,
log_style, log_color, verbose):
"""Upload specific files to a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
if all_nodes:
cli_logger.warning(
"WARNING: the `all_nodes` option is deprecated and will be "
"removed in the future. "
"Rsync to worker nodes is not reliable since workers may be "
"added during autoscaling. Please use the `file_mounts` "
"feature instead for consistent file sync in autoscaling clusters")
rsync(
cluster_config_file,
source,
target,
cluster_name,
down=False,
all_nodes=all_nodes)
@cli.command(context_settings={"ignore_unknown_options": True})
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--stop",
is_flag=True,
default=False,
help="Stop the cluster after the command finishes running.")
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen",
is_flag=True,
default=False,
help="Run the command in a screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@click.argument("script", required=True, type=str)
@click.option(
"--args",
required=False,
type=str,
help="(deprecated) Use '-- --arg1 --arg2' for script args.")
@click.argument("script_args", nargs=-1)
@add_click_options(logging_options)
def submit(cluster_config_file, screen, tmux, stop, start, cluster_name,
no_config_cache, port_forward, script, args, script_args, log_style,
log_color, verbose):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
cli_logger.configure(log_style, log_color, verbose)
cli_logger.doassert(not (screen and tmux),
"`{}` and `{}` are incompatible.", cf.bold("--screen"),
cf.bold("--tmux"))
cli_logger.doassert(
not (script_args and args),
"`{0}` and `{1}` are incompatible. Use only `{1}`.\n"
"Example: `{2}`", cf.bold("--args"), cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"))
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
cli_logger.warning(
"`{}` is deprecated and will be removed in the future.",
cf.bold("--args"))
cli_logger.warning("Use `{}` instead. Example: `{}`.",
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"))
cli_logger.newline()
if start:
create_or_update_cluster(
config_file=cluster_config_file,
override_min_workers=None,
override_max_workers=None,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
redirect_command_output=False,
use_login_shells=True)
target = os.path.basename(script)
target = os.path.join("~", target)
rsync(
cluster_config_file,
script,
target,
cluster_name,
no_config_cache=no_config_cache,
down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env="docker",
screen=screen,
tmux=tmux,
stop=stop,
start=False,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
port_forward=port_forward)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.argument("cmd", required=True, type=str)
@click.option(
"--run-env",
required=False,
type=click.Choice(RUN_ENV_TYPES),
default="auto",
help="Choose whether to execute this command in a container or directly on"
" the cluster head. Only applies when docker is configured in the YAML.")
@click.option(
"--stop",
is_flag=True,
default=False,
help="Stop the cluster after the command finishes running.")
@click.option(
"--start",
is_flag=True,
default=False,
help="Start the cluster if needed.")
@click.option(
"--screen",
is_flag=True,
default=False,
help="Run the command in a screen.")
@click.option(
"--tmux", is_flag=True, default=False, help="Run the command in tmux.")
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
@click.option(
"--no-config-cache",
is_flag=True,
default=False,
help="Disable the local cluster config cache.")
@click.option(
"--port-forward",
"-p",
required=False,
multiple=True,
type=int,
help="Port to forward. Use this multiple times to forward multiple ports.")
@add_click_options(logging_options)
def exec(cluster_config_file, cmd, run_env, screen, tmux, stop, start,
cluster_name, no_config_cache, port_forward, log_style, log_color,
verbose):
"""Execute a command via SSH on a Ray cluster."""
cli_logger.configure(log_style, log_color, verbose)
port_forward = [(port, port) for port in list(port_forward)]
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env=run_env,
screen=screen,
tmux=tmux,
stop=stop,
start=start,
override_cluster_name=cluster_name,
no_config_cache=no_config_cache,
port_forward=port_forward,
_allow_uninitialized_state=True)
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def get_head_ip(cluster_config_file, cluster_name):
"""Return the head node IP of a Ray cluster."""
click.echo(get_head_node_ip(cluster_config_file, cluster_name))
@cli.command()
@click.argument("cluster_config_file", required=True, type=str)
@click.option(
"--cluster-name",
"-n",
required=False,
type=str,
help="Override the configured cluster name.")
def get_worker_ips(cluster_config_file, cluster_name):
"""Return the list of worker IPs of a Ray cluster."""
worker_ips = get_worker_node_ips(cluster_config_file, cluster_name)
click.echo("\n".join(worker_ips))
@cli.command()
def stack():
"""Take a stack dump of all Python workers on the local machine."""
COMMAND = """
pyspy=`which py-spy`
if [ ! -e "$pyspy" ]; then
echo "ERROR: Please 'pip install py-spy'" \
"or 'pip install ray[default]' first."
exit 1
fi
# Set IFS to iterate over lines instead of over words.
export IFS="
"
# Call sudo to prompt for password before anything has been printed.
sudo true
workers=$(
ps aux | grep -E ' ray::|default_worker.py' | grep -v raylet | grep -v grep
)
for worker in $workers; do
echo "Stack dump for $worker";
pid=`echo $worker | awk '{print $2}'`;
sudo $pyspy dump --pid $pid --native;
echo;
done
"""
subprocess.call(COMMAND, shell=True)
@cli.command()
def microbenchmark():
"""Run a local Ray microbenchmark on the current machine."""
from ray._private.ray_perf import main
main()
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the redis address to connect to.")
def timeline(address):
"""Take a Chrome tracing timeline for a Ray cluster."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address)
time = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
filename = os.path.join(ray._private.utils.get_user_temp_dir(),
f"ray-timeline-{time}.json")
ray.timeline(filename=filename)
size = os.path.getsize(filename)
logger.info(f"Trace file written to {filename} ({size} bytes).")
logger.info(
"You can open this with chrome://tracing in the Chrome browser.")
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
@click.option(
"--redis_password",
required=False,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="Connect to ray with redis_password.")
@click.option(
"--group-by",
type=click.Choice(["NODE_ADDRESS", "STACK_TRACE"]),
default="NODE_ADDRESS",
help="Group object references by a GroupByType \
(e.g. NODE_ADDRESS or STACK_TRACE).")
@click.option(
"--sort-by",
type=click.Choice(["PID", "OBJECT_SIZE", "REFERENCE_TYPE"]),
default="OBJECT_SIZE",
help="Sort object references in ascending order by a SortingType \
(e.g. PID, OBJECT_SIZE, or REFERENCE_TYPE).")
@click.option(
"--units",
type=click.Choice(["B", "KB", "MB", "GB"]),
default="B",
help="Specify unit metrics for displaying object sizes \
(e.g. B, KB, MB, GB).")
@click.option(
"--no-format",
is_flag=True,
type=bool,
default=True,
help="Display unformatted results. Defaults to true when \
terminal width is less than 137 characters.")
@click.option(
"--stats-only",
is_flag=True,
default=False,
help="Display plasma store stats only.")
@click.option(
"--num-entries",
"--n",
type=int,
default=None,
help="Specify number of sorted entries per group.")
def memory(address, redis_password, group_by, sort_by, units, no_format,
stats_only, num_entries):
"""Print object references held in a Ray cluster."""
if not address:
address = services.get_ray_address_to_use_or_die()
time = datetime.now()
header = "=" * 8 + f" Object references status: {time} " + "=" * 8
mem_stats = memory_summary(address, redis_password, group_by, sort_by,
units, no_format, stats_only, num_entries)
print(f"{header}\n{mem_stats}")
@cli.command()
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
@click.option(
"--redis_password",
required=False,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="Connect to ray with redis_password.")
def status(address, redis_password):
"""Print cluster status, including autoscaling info."""
if not address:
address = services.get_ray_address_to_use_or_die()
redis_client = ray._private.services.create_redis_client(
address, redis_password)
status = redis_client.hget(DEBUG_AUTOSCALING_STATUS, "value")
error = redis_client.hget(DEBUG_AUTOSCALING_ERROR, "value")
print(debug_status(status, error))
@cli.command(hidden=True)
@click.option(
"--stream",
"-S",
required=False,
type=bool,
is_flag=True,
default=False,
help="If True, will stream the binary archive contents to stdout")
@click.option(
"--output",
"-o",
required=False,
type=str,
default=None,
help="Output file.")
@click.option(
"--logs/--no-logs",
is_flag=True,
default=True,
help="Collect logs from ray session dir")
@click.option(
"--debug-state/--no-debug-state",
is_flag=True,
default=True,
help="Collect debug_state.txt from ray session dir")
@click.option(
"--pip/--no-pip",
is_flag=True,
default=True,
help="Collect installed pip packages")
@click.option(
"--processes/--no-processes",
is_flag=True,
default=True,
help="Collect info on running processes")
@click.option(
"--processes-verbose/--no-processes-verbose",
is_flag=True,
default=True,
help="Increase process information verbosity")
@click.option(
"--tempfile",
"-T",
required=False,
type=str,
default=None,
help="Temporary file to use")
def local_dump(stream: bool = False,
output: Optional[str] = None,
logs: bool = True,
debug_state: bool = True,
pip: bool = True,
processes: bool = True,
processes_verbose: bool = False,
tempfile: Optional[str] = None):
"""Collect local data and package into an archive.
Usage:
ray local-dump [--stream/--output file]
This script is called on remote nodes to fetch their data.
"""
# This may stream data to stdout, so no printing here
get_local_dump_archive(
stream=stream,
output=output,
logs=logs,
debug_state=debug_state,
pip=pip,
processes=processes,
processes_verbose=processes_verbose,
tempfile=tempfile)
@cli.command()
@click.argument("cluster_config_file", required=False, type=str)
@click.option(
"--host",
"-h",
required=False,
type=str,
help="Single or list of hosts, separated by comma.")
@click.option(
"--ssh-user",
"-U",
required=False,
type=str,
default=None,
help="Username of the SSH user.")
@click.option(
"--ssh-key",
"-K",
required=False,
type=str,
default=None,
help="Path to the SSH key file.")
@click.option(
"--docker",
"-d",
required=False,
type=str,
default=None,
help="Name of the docker container, if applicable.")
@click.option(
"--local",
"-L",
required=False,
type=bool,
is_flag=True,
default=None,
help="Also include information about the local node.")
@click.option(
"--output",
"-o",
required=False,
type=str,
default=None,
help="Output file.")
@click.option(
"--logs/--no-logs",
is_flag=True,
default=True,
help="Collect logs from ray session dir")
@click.option(
"--debug-state/--no-debug-state",
is_flag=True,
default=True,
help="Collect debug_state.txt from ray session dir")
@click.option(
"--pip/--no-pip",
is_flag=True,
default=True,
help="Collect installed pip packages")
@click.option(
"--processes/--no-processes",
is_flag=True,
default=True,
help="Collect info on running processes")
@click.option(
"--processes-verbose/--no-processes-verbose",
is_flag=True,
default=True,
help="Increase process information verbosity")
@click.option(
"--tempfile",
"-T",
required=False,
type=str,
default=None,
help="Temporary file to use")
def cluster_dump(cluster_config_file: Optional[str] = None,
host: Optional[str] = None,
ssh_user: Optional[str] = None,
ssh_key: Optional[str] = None,
docker: Optional[str] = None,
local: Optional[bool] = None,
output: Optional[str] = None,
logs: bool = True,
debug_state: bool = True,
pip: bool = True,
processes: bool = True,
processes_verbose: bool = False,
tempfile: Optional[str] = None):
"""Get log data from one or more nodes.
Best used with Ray cluster configs:
ray cluster-dump [cluster.yaml]
Include the --local flag to also collect and include data from the
local node.
Missing fields will be auto-filled where possible.
You can also manually specify a list of hosts using the
``--host <host1,host2,...>`` parameter.
"""
archive_path = get_cluster_dump_archive(
cluster_config_file=cluster_config_file,
host=host,
ssh_user=ssh_user,
ssh_key=ssh_key,
docker=docker,
local=local,
output=output,
logs=logs,
debug_state=debug_state,
pip=pip,
processes=processes,
processes_verbose=processes_verbose,
tempfile=tempfile)
if archive_path:
click.echo(f"Created archive: {archive_path}")
else:
click.echo("Could not create archive.")
@cli.command(hidden=True)
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
def global_gc(address):
"""Trigger Python garbage collection on all cluster workers."""
if not address:
address = services.get_ray_address_to_use_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address)
ray.internal.internal_api.global_gc()
print("Triggered gc.collect() on all workers.")
@cli.command(name="health-check", hidden=True)
@click.option(
"--address",
required=False,
type=str,
help="Override the address to connect to.")
@click.option(
"--redis_password",
required=False,
type=str,
default=ray_constants.REDIS_DEFAULT_PASSWORD,
help="Connect to ray with redis_password.")
@click.option(
"--component",
required=False,
type=str,
help="Health check for a specific component. Currently supports: "
"[ray_client_server]")
def healthcheck(address, redis_password, component):
"""
This is NOT a public api.
Health check a Ray cluster or a specific component. Exit code 0 means healthy.
"""
if not address:
address = services.get_ray_address_to_use_or_die()
else:
address = services.address_to_ip(address)
redis_client = ray._private.services.create_redis_client(
address, redis_password)
if not component:
# If no component is specified, we are health checking the core. If
# client creation or ping fails, we will still exit with a non-zero
# exit code.
redis_client.ping()
try:
gcs_address = redis_client.get("GcsServerAddress").decode("utf-8")
options = (("grpc.enable_http_proxy", 0), )
channel = ray._private.utils.init_grpc_channel(
gcs_address, options)
stub = gcs_service_pb2_grpc.HeartbeatInfoGcsServiceStub(channel)
request = gcs_service_pb2.CheckAliveRequest()
reply = stub.CheckAlive(
request, timeout=ray.ray_constants.HEALTHCHECK_EXPIRATION_S)
if reply.status.code == 0:
sys.exit(0)
except Exception:
pass
sys.exit(1)
report_str = redis_client.hget(f"healthcheck:{component}", "value")
if not report_str:
# Status was never updated
sys.exit(1)
report = json.loads(report_str)
# TODO (Alex): We probably shouldn't rely on time here, but cloud providers
# have very well synchronized NTP servers, so this should be fine in
# practice.
cur_time = time.time()
report_time = float(report["time"])
# If the status is too old, the service has probably already died.
delta = cur_time - report_time
time_ok = delta < ray.ray_constants.HEALTHCHECK_EXPIRATION_S
if time_ok:
sys.exit(0)
else:
sys.exit(1)
@cli.command()
@click.option("-v", "--verbose", is_flag=True)
@click.option(
"--dryrun",
is_flag=True,
help="Identifies the wheel but does not execute the installation.")
def install_nightly(verbose, dryrun):
"""Install the latest wheels for Ray.
This uses the same python environment as the one that Ray is currently
installed in. Make sure that no Ray processes are running on this
machine (`ray stop`) before running this command.
"""
raydir = os.path.abspath(os.path.dirname(ray.__file__))
all_wheels_path = os.path.join(raydir, "nightly-wheels.yaml")
wheels = None
if os.path.exists(all_wheels_path):
with open(all_wheels_path) as f:
wheels = yaml.safe_load(f)
if not wheels:
raise click.ClickException(
f"Wheels not found in '{all_wheels_path}'! "
"Please visit https://docs.ray.io/en/master/installation.html to "
"obtain the latest wheels.")
platform = sys.platform
py_version = "{0}.{1}".format(*sys.version_info[:2])
matching_wheel = None
for target_platform, wheel_map in wheels.items():
if verbose:
print(f"Evaluating os={target_platform}, python={list(wheel_map)}")
if platform.startswith(target_platform):
if py_version in wheel_map:
matching_wheel = wheel_map[py_version]
break
if verbose:
print("Not matched.")
if matching_wheel is None:
raise click.ClickException(
"Unable to identify a matching platform. "
"Please visit https://docs.ray.io/en/master/installation.html to "
"obtain the latest wheels.")
if dryrun:
print(f"Found wheel: {matching_wheel}")
else:
cmd = [sys.executable, "-m", "pip", "install", "-U", matching_wheel]
print(f"Running: {' '.join(cmd)}.")
subprocess.check_call(cmd)
@cli.command()
@click.option(
"--show-library-path",
"-show",
required=False,
is_flag=True,
help="Show the cpp include path and library path, if provided.")
@click.option(
"--generate-bazel-project-template-to",
"-gen",
required=False,
type=str,
help="The directory to generate the bazel project template to,"
" if provided.")
@add_click_options(logging_options)
def cpp(show_library_path, generate_bazel_project_template_to, log_style,
log_color, verbose):
"""Show the cpp library path and generate the bazel project template."""
if not show_library_path and not generate_bazel_project_template_to:
raise ValueError(
"Please input at least one option of '--show-library-path'"
" and '--generate-bazel-project-template-to'.")
cli_logger.configure(log_style, log_color, verbose)
raydir = os.path.abspath(os.path.dirname(ray.__file__))
cpp_dir = os.path.join(raydir, "cpp")
cpp_template_dir = os.path.join(cpp_dir, "example")
include_dir = os.path.join(cpp_dir, "include")
lib_dir = os.path.join(cpp_dir, "lib")
if not os.path.isdir(cpp_dir):
raise ValueError(
"Please install ray with C++ API by \"pip install ray[cpp]\".")
if show_library_path:
cli_logger.print("Ray C++ include path {} ", cf.bold(f"{include_dir}"))
cli_logger.print("Ray C++ library path {} ", cf.bold(f"{lib_dir}"))
if generate_bazel_project_template_to:
if not os.path.isdir(generate_bazel_project_template_to):
cli_logger.abort(
"The provided directory "
f"{generate_bazel_project_template_to} doesn't exist.")
copy_tree(cpp_template_dir, generate_bazel_project_template_to)
out_include_dir = os.path.join(generate_bazel_project_template_to,
"thirdparty/include")
if not os.path.exists(out_include_dir):
os.makedirs(out_include_dir)
copy_tree(include_dir, out_include_dir)
out_lib_dir = os.path.join(generate_bazel_project_template_to,
"thirdparty/lib")
if not os.path.exists(out_lib_dir):
os.makedirs(out_lib_dir)
copy_tree(lib_dir, out_lib_dir)
cli_logger.print(
"Project template generated to {}",
cf.bold(f"{os.path.abspath(generate_bazel_project_template_to)}"))
cli_logger.print("To build and run this template, run")
cli_logger.print(
cf.bold(
f" cd {os.path.abspath(generate_bazel_project_template_to)}"
" && bash run.sh"))
def add_command_alias(command, name, hidden):
new_command = copy.deepcopy(command)
new_command.hidden = hidden
cli.add_command(new_command, name=name)
cli.add_command(dashboard)
cli.add_command(debug)
cli.add_command(start)
cli.add_command(stop)
cli.add_command(up)
add_command_alias(up, name="create_or_update", hidden=True)
cli.add_command(attach)
cli.add_command(exec)
add_command_alias(exec, name="exec_cmd", hidden=True)
add_command_alias(rsync_down, name="rsync_down", hidden=True)
add_command_alias(rsync_up, name="rsync_up", hidden=True)
cli.add_command(submit)
cli.add_command(down)
add_command_alias(down, name="teardown", hidden=True)
cli.add_command(kill_random_node)
add_command_alias(get_head_ip, name="get_head_ip", hidden=True)
cli.add_command(get_worker_ips)
cli.add_command(microbenchmark)
cli.add_command(stack)
cli.add_command(status)
cli.add_command(memory)
cli.add_command(local_dump)
cli.add_command(cluster_dump)
cli.add_command(global_gc)
cli.add_command(timeline)
cli.add_command(install_nightly)
cli.add_command(cpp)
try:
from ray.serve.scripts import serve_cli
cli.add_command(serve_cli)
except Exception as e:
logger.debug(f"Integrating ray serve command line tool failed with {e}")
def main():
return cli()
if __name__ == "__main__":
main()
|
"""
Feature processing backbones
"""
import torch.nn as nn
from .. import model_util
class FeatureMLP(nn.Module):
def __init__(self, input_size=16, output_size=16, n_layers=2):
super().__init__()
assert n_layers >= 2, "Need at least 2 layers"
layers = [nn.Linear(input_size, output_size)]
for _ in range(n_layers - 1):
layers.append(nn.ReLU())
layers.append(nn.Linear(output_size, output_size))
self.trunk = nn.Sequential(*layers)
self.n_layers = n_layers
self.input_size = input_size
self.output_size = output_size
self.final_feat_dim = self.output_size
def forward(self, x):
return self.trunk(x)
def reset_parameters(self):
model_util.reset_sequential(self.trunk)
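# Hypothetical usage sketch (assumes the model_util import above provides
# reset_sequential): a 3-layer MLP mapping 16-dim features to 32-dim features.
#   import torch
#   mlp = FeatureMLP(input_size=16, output_size=32, n_layers=3)
#   out = mlp(torch.randn(8, 16))   # -> shape (8, 32)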
|
'use strict';
$(document).ready(function() {
setTimeout(function() {
floatchart()
}, 700);
});
function floatchart() {
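// Each self-invoking block below builds an ApexCharts sparkline or bar chart
// from static demo data and renders it into the matching DOM element
// (e.g. "#amount-processed"); only the colors, heights and labels differ.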
// [ amount-processed ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 3,
opacity: 0.9,
colors: "#fff",
strokeColor: "#7267EF",
strokeWidth: 2,
hover: {
size: 7,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Amount Processed :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#amount-processed"), options);
chart.render();
});
// [ amount-processed ] end
// [ amount-spent ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Amount Spent :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#amount-spent"), options);
chart.render();
});
// [ amount-spent ] end
// [ profit-processed ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#ffa21d"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 3,
opacity: 0.9,
colors: "#fff",
strokeColor: "#ffa21d",
strokeWidth: 2,
hover: {
size: 7,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Profit Processed :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#profit-processed"), options);
chart.render();
});
// [ profit-processed ] end
// [ sec-ecommerce-chart-line ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#fff"],
fill: {
type: 'solid',
opacity: 0,
},
markers: {
size: 3,
opacity: 0.9,
colors: "#fff",
strokeColor: "#fff",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Referral :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sec-ecommerce-chart-line"), options);
chart.render();
});
// [ sec-ecommerce-chart-line ] end
// [ sec-ecommerce-chart-bar ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 80,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#079e4b"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Affiliate :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sec-ecommerce-chart-bar"), options);
chart.render();
});
// [ sec-ecommerce-chart-bar ] end
// [ seo-ecommerce-barchart ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 170,
zoom: {
enabled: false
},
toolbar: {
show: false,
},
},
dataLabels: {
enabled: false,
},
colors: ["#7267EF"],
plotOptions: {
bar: {
color: '#7267EF',
columnWidth: '60%',
}
},
fill: {
type: 'solid',
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 25, 44, 12]
}],
xaxis: {
crosshairs: {
width: 1
},
labels: {
show: false,
},
},
grid: {
padding: {
bottom: 0,
left: 10,
}
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Active Users :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-ecommerce-barchart"), options);
chart.render();
});
// [ seo-ecommerce-barchart ] end
// [ sal-income ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 0,
opacity: 0.9,
colors: "#fff",
strokeColor: "#7267EF",
strokeWidth: 2,
hover: {
size: 7,
}
},
stroke: {
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 25, 44, 12, 36, 9, 54, 25, 66, 41, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Sale Income :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sal-income"), options);
chart.render();
});
// [ sal-income ] end
// [ rent-income ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 0,
opacity: 0.9,
colors: "#fff",
strokeColor: "#17C666",
strokeWidth: 2,
hover: {
size: 7,
}
},
stroke: {
width: 3,
},
series: [{
name: 'series1',
data: [9, 54, 25, 66, 41, 66, 41, 89, 25, 66, 41, 89, 25, 44, 12, 36]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Rent Income :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#rent-income"), options);
chart.render();
});
// [ rent-income ] end
// [ income-analysis ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 0,
opacity: 0.9,
colors: "#fff",
strokeColor: "#EA4D4D",
strokeWidth: 2,
hover: {
size: 7,
}
},
stroke: {
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 25, 44, 12, 36, 9, 54, 25, 66, 41, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Income Analysis :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#income-analysis"), options);
chart.render();
});
// [ income-analysis ] end
// [ sale-report ] start
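// four sparkline bar charts (daily / weekly / monthly / yearly sales) sharing the same options apart from colour and tooltip label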
$(function() {
var options = {
chart: {
type: 'bar',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 9, 54, 25, 66, 41, 69, 23]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Daily Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-report-1"), options);
chart.render();
});
$(function() {
var options = {
chart: {
type: 'bar',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 9, 54, 25, 66, 41, 69, 23]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Weekly Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-report-2"), options);
chart.render();
});
$(function() {
var options = {
chart: {
type: 'bar',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 9, 54, 25, 66, 41, 69, 23]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Monthly Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-report-3"), options);
chart.render();
});
$(function() {
var options = {
chart: {
type: 'bar',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#ffa21d"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 9, 54, 25, 66, 41, 69, 23]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Yearly Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-report-4"), options);
chart.render();
});
// [ sale-report ] end
// [ this-month ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 9, 54, 25, 66, 41, 69, 23]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Income in $'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#this-month"), options);
chart.render();
});
// [ this-month ] end
// [ sale-chart1 ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 117,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#fff"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [55, 35, 75, 25, 90, 50]
}],
yaxis: {
min: 20,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Sales Per Day'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-chart1"), options);
chart.render();
});
// [ sale-chart1 ] end
// [ sale-chart3 ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 117,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#fff"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [55, 35, 75, 50, 90, 50]
}],
yaxis: {
min: 20,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Orders'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#sale-chart3"), options);
chart.render();
});
// [ sale-chart3 ] end
// [ power-card-chart1 ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 75,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [55, 35, 75, 50, 90, 50]
}],
yaxis: {
min: 10,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Power'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#power-card-chart1"), options);
chart.render();
});
// [ power-card-chart1 ] end
// [ power-card-chart2 ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 75,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [50, 90, 50, 75, 55, 80]
}],
yaxis: {
min: 10,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Water'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#power-card-chart2"), options);
chart.render();
});
// [ power-card-chart2 ] end
// [ power-card-chart3 ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 75,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#ffa21d"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [55, 35, 75, 50, 90, 50]
}],
yaxis: {
min: 10,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Temperature'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#power-card-chart3"), options);
chart.render();
});
// [ power-card-chart3 ] end
// [ revenue-map ] start
$(function() {
var options = {
chart: {
height: 220,
type: 'line',
toolbar: {
show: false,
},
},
dataLabels: {
enabled: false
},
stroke: {
width: 2,
curve: 'smooth'
},
series: [{
name: 'Market Days',
data: [20, 50, 30, 60, 30, 50, 40]
}, {
name: 'Market Days ALL',
data: [40, 20, 50, 15, 40, 65, 20]
}],
xaxis: {
type: 'datetime',
categories: ['1/11/2000', '2/11/2000', '3/11/2000', '4/11/2000', '5/11/2000', '6/11/2000', '7/11/2000'],
},
colors: ['#448aff', '#17C666'],
fill: {
type: 'solid',
},
markers: {
size: 5,
colors: ['#448aff', '#17C666'],
opacity: 0.9,
strokeWidth: 2,
hover: {
size: 7,
}
},
grid: {
borderColor: '#e2e5e885',
},
yaxis: {
title: {
text: 'Revenue Market'
},
min: 10,
max: 70,
}
};
var chart = new ApexCharts(document.querySelector("#revenue-map"), options);
chart.render();
});
// [ revenue-map ] end
// [ proj-earning ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 200,
zoom: {
enabled: false
},
toolbar: {
show: false,
},
},
dataLabels: {
enabled: false,
},
colors: ["#fff"],
plotOptions: {
bar: {
color: '#fff',
columnWidth: '60%',
}
},
fill: {
type: 'solid',
opacity: 1,
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36]
}],
xaxis: {
crosshairs: {
width: 1
},
labels: {
show: false,
},
},
yaxis: {
labels: {
style: {
color: '#fff',
}
},
},
grid: {
borderColor: '#ffffff85',
padding: {
bottom: 0,
left: 10,
}
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Earnings'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#proj-earning"), options);
chart.render();
});
// [ proj-earning ] end
// [ realtime-visit-chart ] start
$(function() {
var lastDate = 0;
var data = [];
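// rolling real-time series: getDayWiseTimeSeries() seeds 10 daily points,
// getNewSeries() appends one random point per tick, and resetData()
// trims the buffer back to the latest 10 points (see the intervals below)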
function getDayWiseTimeSeries(baseval, count, yrange) {
var i = 0;
while (i < count) {
var x = baseval;
var y = Math.floor(Math.random() * (yrange.max - yrange.min + 1)) + yrange.min;
data.push({
x,
y
});
lastDate = baseval
baseval += 86400000;
i++;
}
}
getDayWiseTimeSeries(new Date('11 Feb 2017 GMT').getTime(), 10, {
min: 10,
max: 90
})
function getNewSeries(baseval, yrange) {
var newDate = baseval + 86400000;
lastDate = newDate
data.push({
x: newDate,
y: Math.floor(Math.random() * (yrange.max - yrange.min + 1)) + yrange.min
})
}
function resetData() {
data = data.slice(data.length - 10, data.length);
}
var options = {
chart: {
height: 230,
type: 'area',
animations: {
enabled: true,
easing: 'linear',
dynamicAnimation: {
speed: 2000
}
},
toolbar: {
show: false
},
zoom: {
enabled: false
}
},
dataLabels: {
enabled: false
},
stroke: {
curve: 'smooth'
},
series: [{
name: 'Active Users :',
data: data
}],
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0,
},
markers: {
size: 0
},
xaxis: {
type: 'datetime',
range: 777600000,
},
yaxis: {
max: 100
},
legend: {
show: false
},
}
var chart = new ApexCharts(
document.querySelector("#realtime-visit-chart"),
options
);
chart.render();
var dataPointsLength = 10;
window.setInterval(function() {
getNewSeries(lastDate, {
min: 10,
max: 90
})
chart.updateSeries([{
data: data
}])
}, 2000)
window.setInterval(function() {
resetData()
chart.updateSeries([{
data
}], false, true)
}, 60000)
});
// [ realtime-visit-chart ] end
// [ seo-anlytics1 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0,
},
grid: {
padding: {
left: 5,
right: 5
}
},
markers: {
size: 3,
opacity: 0.9,
colors: "#7267EF",
strokeColor: "#7267EF",
strokeWidth: 1,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Site Analysis :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-anlytics1"), options);
chart.render();
});
// [ seo-anlytics1 ] end
// [ seo-anlytics2 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
fill: {
type: 'solid',
opacity: 0,
},
grid: {
padding: {
left: 5,
right: 5
}
},
markers: {
size: 3,
opacity: 0.9,
colors: "#17C666",
strokeColor: "#17C666",
strokeWidth: 1,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
name: 'series1',
data: [12, 25, 36, 9, 54, 25, 66, 66, 41, 89, 63, 25, 44, 89, 41]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-anlytics2"), options);
chart.render();
});
// [ seo-anlytics2 ] end
// [ seo-anlytics3 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0,
},
grid: {
padding: {
left: 5,
right: 5
}
},
markers: {
size: 3,
opacity: 0.9,
colors: "#EA4D4D",
strokeColor: "#EA4D4D",
strokeWidth: 1,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Visits :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-anlytics3"), options);
chart.render();
});
// [ seo-anlytics3 ] end
// [ seo-anlytics4 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 50,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#ffa21d"],
fill: {
type: 'solid',
opacity: 0,
},
grid: {
padding: {
left: 5,
right: 5
}
},
markers: {
size: 3,
opacity: 0.9,
colors: "#ffa21d",
strokeColor: "#ffa21d",
strokeWidth: 1,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
name: 'series1',
data: [12, 25, 36, 9, 54, 25, 66, 66, 41, 89, 63, 25, 44, 89, 41]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Usage :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-anlytics4"), options);
chart.render();
});
// [ seo-anlytics4 ] end
// [ total-value-graph-1 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#FFF"],
fill: {
type: 'solid',
opacity: 0.4,
},
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [20, 10, 18, 12, 25, 10, 20]
}],
yaxis: {
min: 0,
max: 30,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Sales'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#total-value-graph-1"), options);
chart.render();
});
// [ total-value-graph-1 ] end
// [ total-value-graph-2 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#FFF"],
fill: {
type: 'solid',
opacity: 0.4,
},
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [10, 20, 18, 25, 12, 10, 20]
}],
yaxis: {
min: 0,
max: 30,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Comment'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#total-value-graph-2"), options);
chart.render();
});
// [ total-value-graph-2 ] end
// [ total-value-graph-3 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#FFF"],
fill: {
type: 'solid',
opacity: 0.4,
},
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [20, 10, 25, 18, 18, 10, 12]
}],
yaxis: {
min: 0,
max: 30,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Income Status'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#total-value-graph-3"), options);
chart.render();
});
// [ total-value-graph-3 ] end
// [ total-value-graph-4 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 100,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#FFF"],
fill: {
type: 'solid',
opacity: 0.4,
},
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [18, 10, 20, 10, 12, 25, 20]
}],
yaxis: {
min: 0,
max: 30,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Visitors'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#total-value-graph-4"), options);
chart.render();
});
// [ total-value-graph-4 ] end
// [ monthlyprofit-1 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 2,
opacity: 0.9,
colors: "#7267EF",
strokeColor: "#7267EF",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [9, 66, 41, 89, 63, 25, 44, 12, 36, 20, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Monthly Profit :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#monthlyprofit-1"), options);
chart.render();
});
// [ monthlyprofit-1 ] end
// [ monthlyprofit-2 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 2,
opacity: 0.9,
colors: "#17C666",
strokeColor: "#17C666",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [9, 66, 41, 36, 20, 54, 25, 66, 41, 89, 63, 89, 63, 25, 44, 12, 54, 25, 66, 41, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Sales :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#monthlyprofit-2"), options);
chart.render();
});
// [ monthlyprofit-2 ] end
// [ monthlyprofit-3 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 2,
opacity: 0.9,
colors: "#EA4D4D",
strokeColor: "#EA4D4D",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [9, 66, 41, 89, 63, 25, 44, 12, 36, 20, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Unique Visitors :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#monthlyprofit-3"), options);
chart.render();
});
// [ monthlyprofit-3 ] end
// [ seo-chart1 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 2,
opacity: 0.9,
colors: "#7267EF",
strokeColor: "#7267EF",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [9, 66, 41, 89, 63, 25, 44, 12, 36, 20, 54, 25, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Visits :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-chart1"), options);
chart.render();
});
// [ seo-chart1 ] end
// [ seo-chart2 ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Bounce Rate :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-chart2"), options);
chart.render();
});
// [ seo-chart2 ] end
// [ seo-chart3 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 40,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0,
},
markers: {
size: 2,
opacity: 0.9,
colors: "#EA4D4D",
strokeColor: "#EA4D4D",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [9, 66, 41, 89, 63, 25, 44, 12, 36, 20, 54, 25, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Products :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#seo-chart3"), options);
chart.render();
});
// [ seo-chart3 ] end
// [ client-map-1 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 70,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.4,
},
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [20, 10, 18, 12, 25, 10, 20]
}],
yaxis: {
min: 0,
max: 30,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return 'Activity'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#client-map-1"), options);
chart.render();
});
// [ client-map-1 ] end
// [ client-map-2 ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 70,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0.3,
},
markers: {
size: 3,
opacity: 0.9,
colors: "#EA4D4D",
strokeColor: "#EA4D4D",
strokeWidth: 2,
hover: {
size: 4,
}
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
name: 'series1',
data: [9, 66, 41, 89, 63, 25, 44, 12, 36, 20, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 9]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Activity :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#client-map-2"), options);
chart.render();
});
// [ client-map-2 ] end
// [ client-map-3 ] start
$(function() {
var options = {
chart: {
type: 'bar',
height: 70,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
plotOptions: {
bar: {
columnWidth: '60%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Activity :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#client-map-3"), options);
chart.render();
});
// [ client-map-3 ] end
// [ tot-lead ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
fill: {
type: 'solid',
opacity: 0.3,
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Leads :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#tot-lead"), options);
chart.render();
});
// [ tot-lead ] end
// [ tot-vendor ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#17C666"],
fill: {
type: 'solid',
opacity: 0.3,
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 25, 66, 41, 50]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Total Vendors :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#tot-vendor"), options);
chart.render();
});
// [ tot-vendor ] end
// [ invoice-gen ] start
$(function() {
var options = {
chart: {
type: 'area',
height: 150,
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
colors: ["#EA4D4D"],
fill: {
type: 'solid',
opacity: 0.3,
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Invoice Generate :'
}
}
},
marker: {
show: false
}
}
};
var chart = new ApexCharts(document.querySelector("#invoice-gen"), options);
chart.render();
});
// [ invoice-gen ] end
// ===================================================================
// ===================================================================
// ===================================================================
// [ peity-chart ] start
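// Peity turns the numeric contents of the matched elements into tiny inline pie/donut charts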
$(function() {
$("span.pie_1").peity("pie", {
fill: ["#7267EF", "#eff3f6"]
});
$("span.pie_2").peity("pie", {
fill: ["#eff3f6", "#7267EF"]
});
$("span.pie_3").peity("pie", {
fill: ["#eff3f6", "#7267EF"]
});
$(".data-attributes").peity("donut");
});
// [ peity-chart ] end
// [ Support tracker ] start
$(function() {
var options = {
chart: {
type: 'line',
height: 80,
sparkline: {
enabled: true
}
},
stroke: {
width: 3,
curve: "smooth",
},
series: [{
data: [45, 66, 41, 89, 25, 44, 9, 54]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Completed Tickets :'
}
}
},
marker: {
show: false
}
}
}
var chart = new ApexCharts(document.querySelector("#hd-complited-ticket"), options);
chart.render()
});
// [ Support tracker ] end
// [ account-chart ] start
$(function() {
var options1 = {
chart: {
type: 'area',
height: 215,
sparkline: {
enabled: true
}
},
colors: ["#7267EF", "#0e9e4a", "#EA4D4D"],
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'series1',
data: [20, 90, 65, 85, 20, 80, 30]
}, {
name: 'series2',
data: [70, 30, 40, 15, 60, 40, 95]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#account-chart"), options1).render();
});
// [ account-chart ] end
// [ Support tracker ] start
$(function() {
var options = {
chart: {
height: 120,
type: 'bar',
sparkline: {
enabled: true
},
},
colors: ["#7267EF", "#0e9e4a", "#EA4D4D"],
plotOptions: {
bar: {
columnWidth: '55%',
distributed: true
}
},
dataLabels: {
enabled: false,
},
stroke: {
width: 0
},
series: [{
name: 'Requests',
data: [66.6, 29.7, 32.8]
}],
xaxis: {
categories: ['Desktop', 'Tablet', 'Mobile'],
}
}
var chart = new ApexCharts(
document.querySelector("#chart-percent"),
options
);
chart.render()
});
// [ Support tracker ] end
// [ Cloud Computing ] start
$(function() {
var options1 = {
chart: {
type: 'area',
height: 95,
sparkline: {
enabled: true
}
},
colors: ["#7267EF", "#3ec9d6"],
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'Storage',
data: [100, 40, 28, 51, 42, 109, 100]
}, {
name: 'Bandwidth',
data: [41, 109, 45, 109, 34, 52, 41]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#storage-chart"), options1).render();
});
// [ Cloud Computing ] end
// [ Transaction ] start
$(function() {
var options1 = {
chart: {
type: 'bar',
height: 60,
sparkline: {
enabled: true
}
},
colors: ["#7267EF"],
plotOptions: {
bar: {
columnWidth: '80%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54]
}],
labels: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Inbound'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#transactions1"), options1).render();
var options2 = {
chart: {
type: 'bar',
height: 60,
sparkline: {
enabled: true
}
},
colors: ["#EA4D4D"],
plotOptions: {
bar: {
columnWidth: '80%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54]
}],
labels: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Outbound'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#transactions2"), options2).render();
});
// [ Transaction ] end
// [ Support tracker ] start
$(function() {
var options = {
chart: {
height: 135,
type: 'donut',
sparkline: {
enabled: true
}
},
dataLabels: {
enabled: false
},
series: [66.6, 29.7, 38.6],
labels: ['Desktop', 'Mobile', 'Tablet'],
grid: {
padding: {
top: 20,
right: 0,
bottom: 0,
left: 0
},
},
legend: {
show: false
}
}
var chart = new ApexCharts(
document.querySelector("#device-chart"),
options
);
chart.render()
});
// [ Support tracker ] end
// [ order join chart ] start
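// six small line sparklines (#real1-chart .. #real6-chart) that differ only in colour and sample data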
$(function() {
var spark1 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [3, 0, 1, 2, 1, 1, 2]
}],
yaxis: {
min: -2,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#FF9800'],
}
var chart = new ApexCharts(document.querySelector("#real4-chart"), spark1);
chart.render()
var spark2 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [2, 1, 2, 1, 1, 3, 0]
}],
yaxis: {
min: -3,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#dc6788'],
}
var chart = new ApexCharts(document.querySelector("#real6-chart"), spark2);
chart.render()
var spark3 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [3, 0, 1, 2, 1, 1, 2]
}],
yaxis: {
min: -3,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#EA4D4D'],
}
var chart = new ApexCharts(document.querySelector("#real1-chart"), spark3);
chart.render()
var spark4 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [2, 1, 2, 1, 1, 3, 0]
}],
yaxis: {
min: -3,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#7759de'],
}
var chart = new ApexCharts(document.querySelector("#real5-chart"), spark4);
chart.render()
var spark5 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [3, 0, 1, 2, 1, 1, 2]
}],
yaxis: {
min: -3,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#7267EF'],
}
var chart = new ApexCharts(document.querySelector("#real2-chart"), spark5);
chart.render()
var spark6 = {
chart: {
type: 'line',
height: 30,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 2,
},
series: [{
data: [2, 1, 2, 1, 1, 3, 0]
}],
yaxis: {
min: -3,
max: 5
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
},
colors: ['#17C666'],
}
var chart = new ApexCharts(document.querySelector("#real3-chart"), spark6);
chart.render()
});
// [ order join chart ] end
// [ Session chart ] start
$(function() {
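// helper: returns `count` random {x, y} points labelled w1..wN for one heatmap row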
function generateDatasehratheat(count, yrange) {
var i = 0;
var series = [];
while (i < count) {
var x = 'w' + (i + 1).toString();
var y = Math.floor(Math.random() * (yrange.max - yrange.min + 1)) + yrange.min;
series.push({
x: x,
y: y
});
i++;
}
return series;
}
var options = {
chart: {
height: 400,
type: 'heatmap',
},
dataLabels: {
enabled: false
},
colors: ["#7267EF"],
// build the 14 heatmap rows (Metric1..Metric14) in a loop rather than repeating the literal block 14 times
series: (function() {
var rows = [];
for (var m = 1; m <= 14; m++) {
rows.push({
name: 'Metric' + m,
data: generateDatasehratheat(12, {
min: 0,
max: 90
})
});
}
return rows;
})()
}
var chart = new ApexCharts(
document.querySelector("#time-user"),
options
);
chart.render();
});
// [ Session chart ] end
// [ horizontal-bar-chart ] start
$(function() {
var options = {
chart: {
height: 350,
type: 'bar',
},
plotOptions: {
bar: {
horizontal: true,
dataLabels: {
position: 'top',
},
}
},
colors: ["#7267EF", "#0e9e4a", "#EA4D4D"],
dataLabels: {
enabled: true,
offsetX: -6,
style: {
fontSize: '12px',
colors: ['#fff']
}
},
stroke: {
show: true,
width: 1,
colors: ['#fff']
},
series: [{
name: 'India',
data: [44, 55, 41, 64, 22]
}, {
name: 'Japan',
data: [53, 32, 33, 52, 13]
}, {
name: 'London',
data: [44, 33, 52, 13, 22]
}],
xaxis: {
categories: [2001, 2002, 2003, 2004, 2005],
},
}
var chart = new ApexCharts(
document.querySelector("#horizontal-bar-chart"),
options
);
chart.render();
});
// [ horizontal-bar-chart ] end
// [ coversions-chart ] start
$(function() {
var options1 = {
chart: {
type: 'bar',
height: 65,
sparkline: {
enabled: true
}
},
colors: ["#7267EF"],
plotOptions: {
bar: {
columnWidth: '80%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 25, 44, 12, 36, 9, 54]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#coversions-chart"), options1).render();
});
// [ coversions-chart ] end
// [ site-chart ] start
$(function() {
var optionst = {
chart: {
type: 'line',
height: 135,
sparkline: {
enabled: true
},
},
stroke: {
curve: 'straight',
width: 3,
},
series: [{
data: [135, 187, 180, 222, 185, 195, 158]
}],
yaxis: {
min: 100
},
colors: ['#7267EF'],
}
var chart = new ApexCharts(document.querySelector("#site-chart"), optionst);
chart.render()
});
// [ site-chart ] end
// [ satisfaction-chart ] start
$(function() {
var options = {
chart: {
height: 260,
type: 'pie',
},
series: [66, 50, 40, 30],
labels: ["Very Poor", "Satisfied", "Very Satisfied", "Poor"],
legend: {
show: true,
offsetY: 50,
},
dataLabels: {
enabled: true,
dropShadow: {
enabled: false,
}
},
theme: {
monochrome: {
enabled: true,
color: '#7267EF',
}
},
responsive: [{
breakpoint: 768,
options: {
chart: {
height: 320,
},
legend: {
position: 'bottom',
offsetY: 0,
}
}
}]
}
var chart = new ApexCharts(document.querySelector("#satisfaction-chart"), options);
chart.render();
});
// [ satisfaction-chart ] end
// [ traffic-chart1 ] start
$(function() {
var options = {
chart: {
height: 325,
type: 'donut',
},
dataLabels: {
enabled: true,
dropShadow: {
enabled: false,
}
},
series: [85.7, 77.56, 20.9, 10.9, 15.8, 86.7],
colors: ["#7267EF", "#0e9e4a", "#3ec9d6", "#ffa21d", "#EA4D4D", "#7759de"],
labels: ["Facebook ads", "Amazon ads", "Youtube videos", "Google adsense", "Twitter ads", "News ads"],
legend: {
show: true,
position: 'bottom',
}
}
var chart = new ApexCharts(
document.querySelector("#traffic-chart1"),
options
);
chart.render();
});
// [ traffic-chart1 ] end
// [ view-chart ] start
$(function() {
var options1 = {
chart: {
type: 'area',
height: 87,
sparkline: {
enabled: true
}
},
colors: ["#EA4D4D"],
stroke: {
curve: 'straight',
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Page view:'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#view-chart"), options1).render();
var options2 = {
chart: {
type: 'area',
height: 87,
sparkline: {
enabled: true
}
},
colors: ["#3ec9d6"],
stroke: {
curve: 'smooth',
width: 3,
},
series: [{
name: 'series1',
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Users:'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#view-chart1"), options2).render();
});
// [ view-chart ] end
// [ time-chart ] start
$(function() {
var options = {
chart: {
height: 225,
type: 'line',
zoom: {
enabled: false
},
toolbar: {
show: false,
}
},
dataLabels: {
enabled: false
},
stroke: {
width: 3,
curve: 'straight',
},
xaxis: {
categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep'],
},
colors: ["#0e9e4a"],
series: [{
name: "Hour.",
data: [10, 41, 35, 51, 49, 52, 58, 71, 89]
}],
grid: {
row: {
colors: ['#f3f6ff', 'transparent'],
opacity: 0.5
}
},
}
var chart = new ApexCharts(document.querySelector("#time-chart"), options);
chart.render();
});
// [ time-chart ] end
// [ sale-chart ] start
$(function() {
var options1 = {
chart: {
type: 'bar',
height: 230,
sparkline: {
enabled: true
}
},
colors: ["#7267EF"],
plotOptions: {
bar: {
columnWidth: '80%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#sale-chart"), options1).render();
});
// [ sale-chart ] end
// [ coversions-chart1 ] start
$(function() {
var options1 = {
chart: {
type: 'bar',
height: 65,
sparkline: {
enabled: true
}
},
colors: ["#3ec9d6"],
plotOptions: {
bar: {
columnWidth: '80%'
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 25, 44, 12, 36, 9, 54]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return ''
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#coversions-chart1"), options1).render();
});
// [ coversions-chart1 ] end
// [ revenue-chart ] start
$(function() {
var options = {
chart: {
height: 200,
type: 'donut',
},
dataLabels: {
enabled: false
},
labels: ['Target', 'Last week', 'Last day'],
series: [1258, 975, 500],
legend: {
show: false
},
colors: ["#3ec9d6", "#ffa21d", "#7267EF"],
}
var chart = new ApexCharts(document.querySelector("#revenue-chart"), options);
chart.render();
});
// [ revenue-chart ] end
// [ market-chart ] start
$(function() {
var options = {
chart: {
height: 200,
type: 'bar',
stacked: true,
toolbar: {
show: false
},
zoom: {
enabled: false
},
sparkline: {
enabled: true
}
},
colors: ["#E0291D", "#3C5A99", "#42C0FB"],
plotOptions: {
bar: {
horizontal: false,
},
},
series: [{
name: 'Youtube',
data: [44, 50, 41, 67, 22, 43, 44, 50, 41, 52, 22, 43]
}, {
name: 'Facebook',
data: [13, 23, 20, 8, 13, 27, 13, 23, 20, 8, 13, 27]
}, {
name: 'Twitter',
data: [11, 17, 15, 15, 21, 14, 11, 17, 15, 15, 21, 14]
}],
xaxis: {
type: 'datetime',
categories: ['01/01/2011 GMT', '01/02/2011 GMT', '01/03/2011 GMT', '01/04/2011 GMT', '01/05/2011 GMT', '01/06/2011 GMT', '01/07/2011 GMT', '01/08/2011 GMT', '01/09/2011 GMT', '01/10/2011 GMT', '01/11/2011 GMT', '01/12/2011 GMT'],
},
legend: {
show: false,
},
fill: {
opacity: 1
},
}
var chart = new ApexCharts(document.querySelector("#market-chart"), options);
chart.render();
});
// [ market-chart ] end
// [ type-chart ] start
$(function() {
var options = {
chart: {
height: 200,
type: 'donut',
},
dataLabels: {
enabled: false
},
plotOptions: {
pie: {
donut: {
size: '65%'
}
}
},
labels: ['Desktop Computers', 'Smartphones', 'Tablets'],
series: [76.7, 15, 30],
legend: {
show: false
},
colors: ["#EA4D4D", "#ffa21d", "#3ec9d6"],
}
var chart = new ApexCharts(document.querySelector("#type-chart"), options);
chart.render();
});
// [ type-chart ] end
// [ customer-chart ] start
$(function() {
var options = {
chart: {
height: 195,
type: 'donut',
},
dataLabels: {
enabled: false
},
plotOptions: {
pie: {
donut: {
size: '65%'
}
}
},
labels: ['New', 'Return', 'Custom'],
series: [76.7, 15, 30],
legend: {
show: false
},
grid: {
padding: {
top: 20,
right: 0,
bottom: 0,
left: 0
},
},
colors: ["#7267EF", "#0e9e4a", "#EA4D4D"],
}
var chart = new ApexCharts(document.querySelector("#customer-chart"), options);
chart.render();
});
// [ customer-chart ] end
// [ site-visitor-chart ] start
$(function() {
var lastDate = 0;
var data = [];
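// same rolling time-series helpers as the realtime-visit-chart above: seed 10 points, append every 2s, trim every 60s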
function getDayWiseTimeSeries(baseval, count, yrange) {
var i = 0;
while (i < count) {
var x = baseval;
var y = Math.floor(Math.random() * (yrange.max - yrange.min + 1)) + yrange.min;
data.push({
x,
y
});
lastDate = baseval
baseval += 86400000;
i++;
}
}
getDayWiseTimeSeries(new Date('11 Feb 2017 GMT').getTime(), 10, {
min: 10,
max: 90
})
function getNewSeries(baseval, yrange) {
var newDate = baseval + 86400000;
lastDate = newDate
data.push({
x: newDate,
y: Math.floor(Math.random() * (yrange.max - yrange.min + 1)) + yrange.min
})
}
function resetData() {
data = data.slice(data.length - 10, data.length);
}
var options = {
chart: {
height: 300,
type: 'area',
animations: {
enabled: true,
easing: 'linear',
dynamicAnimation: {
speed: 2000
}
},
toolbar: {
show: false
},
zoom: {
enabled: false
}
},
dataLabels: {
enabled: false
},
stroke: {
curve: 'smooth'
},
series: [{
name: 'Active Users :',
data: data
}],
colors: ["#EA4D4D"],
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
type: 'horizontal',
opacityFrom: 0.8,
opacityTo: 0,
stops: [0, 100]
}
},
markers: {
size: 0
},
xaxis: {
type: 'datetime',
range: 777600000,
},
yaxis: {
max: 100
},
legend: {
show: false
},
}
var chart = new ApexCharts(
document.querySelector("#site-visitor-chart"),
options
);
chart.render();
var dataPointsLength = 10;
window.setInterval(function() {
getNewSeries(lastDate, {
min: 10,
max: 90
})
chart.updateSeries([{
data: data
}])
}, 2000)
window.setInterval(function() {
resetData()
chart.updateSeries([{
data
}], false, true)
}, 60000)
});
// [ site-visitor-chart ] end
// [ traffic-chart ] start
$(function() {
var options1 = {
chart: {
type: 'bar',
height: 400,
zoom: {
enabled: false
},
},
colors: ["#7267EF"],
plotOptions: {
bar: {
colors: {
ranges: [{
from: 0,
to: 15,
color: '#EA4D4D'
}, {
from: 16,
to: 30,
color: '#ffa21d'
}, {
from: 31,
to: 50,
color: '#7267EF'
}, {
from: 51,
to: 100,
color: '#0e9e4a'
}]
},
columnWidth: '80%',
}
},
series: [{
data: [25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 54, 25, 66, 41, 89, 63, 25, 44, 12, 36, 9, 25, 44, 12, 36, 9, 54]
}],
xaxis: {
crosshairs: {
width: 1
},
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Click '
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#traffic-chart"), options1).render();
});
// [ traffic-chart ] end
// [ support-chart ] start
$(function() {
var options1 = {
chart: {
type: 'area',
height: 95,
sparkline: {
enabled: true
}
},
colors: ["#7267EF"],
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
data: [0, 20, 10, 45, 30, 55, 20, 30, 0]
}],
tooltip: {
fixed: {
enabled: false
},
x: {
show: false
},
y: {
title: {
formatter: function(seriesName) {
return 'Ticket '
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#support-chart"), options1).render();
});
// [ support-chart ] end
// [ average-chart ] start
$(function() {
var btcchartoption1 = {
chart: {
type: 'area',
height: 145,
width: '100%',
sparkline: {
enabled: true
}
},
colors: ["#7267EF"],
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
opacityFrom: 0.8,
opacityTo: 0.4,
stops: [0, 80, 100]
}
},
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'series1',
data: [40, 60, 35, 55, 35, 75, 50]
}],
yaxis: {
min: 0,
max: 100,
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return '$'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#average-chart11"), btcchartoption1).render();
var btcchartoption2 = {
chart: {
type: 'area',
height: 145,
sparkline: {
enabled: true
}
},
colors: ["#0e9e4a"],
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
opacityFrom: 0.8,
opacityTo: 0.4,
stops: [0, 90, 100]
}
},
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'series1',
data: [40, 55, 35, 75, 50, 90, 50]
}],
yaxis: {
min: 0,
max: 100,
},
tooltip: {
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return '$'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#average-chart12"), btcchartoption2).render();
var btcchartoption7 = {
chart: {
type: 'area',
height: 145,
sparkline: {
enabled: true
}
},
colors: ["#FFF"],
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
opacityFrom: 0.5,
opacityTo: 0.4,
stops: [0, 100]
}
},
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'series1',
data: [40, 60, 35, 70, 50]
}],
yaxis: {
min: 0,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return '$'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#average-chart3"), btcchartoption7).render();
var btcchartoption8 = {
chart: {
type: 'area',
height: 145,
sparkline: {
enabled: true
}
},
colors: ["#FFF"],
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
opacityFrom: 0.5,
opacityTo: 0.4,
stops: [0, 100]
}
},
stroke: {
curve: 'smooth',
width: 2,
},
series: [{
name: 'series1',
data: [65, 45, 60, 40, 80]
}],
yaxis: {
min: 0,
max: 100,
},
tooltip: {
theme: 'dark',
fixed: {
enabled: false
},
x: {
show: false,
},
y: {
title: {
formatter: function(seriesName) {
return '$'
}
}
},
marker: {
show: false
}
}
}
new ApexCharts(document.querySelector("#average-chart4"), btcchartoption8).render();
});
// [ average-chart ] end
// [ crypto-chart ] start
$(function() {
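// crypto area chart with a "Support" y-axis annotation, a "Rally" x-axis marker, and range-selector buttons (1M / 6M / 1Y / YTD / ALL) wired up below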
var options = {
annotations: {
yaxis: [{
y: 30,
borderColor: '#999',
label: {
show: true,
text: 'Support',
style: {
color: "#fff",
background: '#00E396'
}
}
}],
xaxis: [{
x: new Date('14 Nov 2012').getTime(),
borderColor: '#999',
yAxisIndex: 0,
label: {
show: true,
text: 'Rally',
style: {
color: "#fff",
background: '#775DD0'
}
}
}]
},
chart: {
type: 'area',
height: 320,
},
dataLabels: {
enabled: false
},
series: [{
name: 'Active Users',
data: [
[1327359600000, 30.95],
[1327446000000, 31.34],
[1327532400000, 31.18],
[1327618800000, 31.05],
[1327878000000, 31.00],
[1327964400000, 30.95],
[1328050800000, 31.24],
[1328137200000, 31.29],
[1328223600000, 31.85],
[1328482800000, 31.86],
[1328569200000, 32.28],
[1328655600000, 32.10],
[1328742000000, 32.65],
[1328828400000, 32.21],
[1329087600000, 32.35],
[1329174000000, 32.44],
[1329260400000, 32.46],
[1329346800000, 32.86],
[1329433200000, 32.75],
[1329778800000, 32.54],
[1329865200000, 32.33],
[1329951600000, 32.97],
[1330038000000, 33.41],
[1330297200000, 33.27],
[1330383600000, 33.27],
[1330470000000, 32.89],
[1330556400000, 33.10],
[1330642800000, 33.73],
[1330902000000, 33.22],
[1330988400000, 31.99],
[1331074800000, 32.41],
[1331161200000, 33.05],
[1331247600000, 33.64],
[1331506800000, 33.56],
[1331593200000, 34.22],
[1331679600000, 33.77],
[1331766000000, 34.17],
[1331852400000, 33.82],
[1332111600000, 34.51],
[1332198000000, 33.16],
[1332284400000, 33.56],
[1332370800000, 33.71],
[1332457200000, 33.81],
[1332712800000, 34.40],
[1332799200000, 34.63],
[1332885600000, 34.46],
[1332972000000, 34.48],
[1333058400000, 34.31],
[1333317600000, 34.70],
[1333404000000, 34.31],
[1333490400000, 33.46],
[1333576800000, 33.59],
[1333922400000, 33.22],
[1334008800000, 32.61],
[1334095200000, 33.01],
[1334181600000, 33.55],
[1334268000000, 33.18],
[1334527200000, 32.84],
[1334613600000, 33.84],
[1334700000000, 33.39],
[1334786400000, 32.91],
[1334872800000, 33.06],
[1335132000000, 32.62],
[1335218400000, 32.40],
[1335304800000, 33.13],
[1335391200000, 33.26],
[1335477600000, 33.58],
[1335736800000, 33.55],
[1335823200000, 33.77],
[1335909600000, 33.76],
[1335996000000, 33.32],
[1336082400000, 32.61],
[1336341600000, 32.52],
[1336428000000, 32.67],
[1336514400000, 32.52],
[1336600800000, 31.92],
[1336687200000, 32.20],
[1336946400000, 32.23],
[1337032800000, 32.33],
[1337119200000, 32.36],
[1337205600000, 32.01],
[1337292000000, 31.31],
[1337551200000, 32.01],
[1337637600000, 32.01],
[1337724000000, 32.18],
[1337810400000, 31.54],
[1337896800000, 31.60],
[1338242400000, 32.05],
[1338328800000, 31.29],
[1338415200000, 31.05],
[1338501600000, 29.82],
[1338760800000, 30.31],
[1338847200000, 30.70],
[1338933600000, 31.69],
[1339020000000, 31.32],
[1339106400000, 31.65],
[1339365600000, 31.13],
[1339452000000, 31.77],
[1339538400000, 31.79],
[1339624800000, 31.67],
[1339711200000, 32.39],
[1339970400000, 32.63],
[1340056800000, 32.89],
[1340143200000, 31.99],
[1340229600000, 31.23],
[1340316000000, 31.57],
[1340575200000, 30.84],
[1340661600000, 31.07],
[1340748000000, 31.41],
[1340834400000, 31.17],
[1340920800000, 32.37],
[1341180000000, 32.19],
[1341266400000, 32.51],
[1341439200000, 32.53],
[1341525600000, 31.37],
[1341784800000, 30.43],
[1341871200000, 30.44],
[1341957600000, 30.20],
[1342044000000, 30.14],
[1342130400000, 30.65],
[1342389600000, 30.40],
[1342476000000, 30.65],
[1342562400000, 31.43],
[1342648800000, 31.89],
[1342735200000, 31.38],
[1342994400000, 30.64],
[1343080800000, 30.02],
[1343167200000, 30.33],
[1343253600000, 30.95],
[1343340000000, 31.89],
[1343599200000, 31.01],
[1343685600000, 30.88],
[1343772000000, 30.69],
[1343858400000, 30.58],
[1343944800000, 32.02],
[1344204000000, 32.14],
[1344290400000, 32.37],
[1344376800000, 32.51],
[1344463200000, 32.65],
[1344549600000, 32.64],
[1344808800000, 32.27],
[1344895200000, 32.10],
[1344981600000, 32.91],
[1345068000000, 33.65],
[1345154400000, 33.80],
[1345413600000, 33.92],
[1345500000000, 33.75],
[1345586400000, 33.84],
[1345672800000, 33.50],
[1345759200000, 32.26],
[1346018400000, 32.32],
[1346104800000, 32.06],
[1346191200000, 31.96],
[1346277600000, 31.46],
[1346364000000, 31.27],
[1346709600000, 31.43],
[1346796000000, 32.26],
[1346882400000, 32.79],
[1346968800000, 32.46],
[1347228000000, 32.13],
[1347314400000, 32.43],
[1347400800000, 32.42],
[1347487200000, 32.81],
[1347573600000, 33.34],
[1347832800000, 33.41],
[1347919200000, 32.57],
[1348005600000, 33.12],
[1348092000000, 34.53],
[1348178400000, 33.83],
[1348437600000, 33.41],
[1348524000000, 32.90],
[1348610400000, 32.53],
[1348696800000, 32.80],
[1348783200000, 32.44],
[1349042400000, 32.62],
[1349128800000, 32.57],
[1349215200000, 32.60],
[1349301600000, 32.68],
[1349388000000, 32.47],
[1349647200000, 32.23],
[1349733600000, 31.68],
[1349820000000, 31.51],
[1349906400000, 31.78],
[1349992800000, 31.94],
[1350252000000, 32.33],
[1350338400000, 33.24],
[1350424800000, 33.44],
[1350511200000, 33.48],
[1350597600000, 33.24],
[1350856800000, 33.49],
[1350943200000, 33.31],
[1351029600000, 33.36],
[1351116000000, 33.40],
[1351202400000, 34.01],
[1351638000000, 34.02],
[1351724400000, 34.36],
[1351810800000, 34.39],
[1352070000000, 34.24],
[1352156400000, 34.39],
[1352242800000, 33.47],
[1352329200000, 32.98],
[1352415600000, 32.90],
[1352674800000, 32.70],
[1352761200000, 32.54],
[1352847600000, 32.23],
[1352934000000, 32.64],
[1353020400000, 32.65],
[1353279600000, 32.92],
[1353366000000, 32.64],
[1353452400000, 32.84],
[1353625200000, 33.40],
[1353884400000, 33.30],
[1353970800000, 33.18],
[1354057200000, 33.88],
[1354143600000, 34.09],
[1354230000000, 34.61],
[1354489200000, 34.70],
[1354575600000, 35.30],
[1354662000000, 35.40],
[1354748400000, 35.14],
[1354834800000, 35.48],
[1355094000000, 35.75],
[1355180400000, 35.54],
[1355266800000, 35.96],
[1355353200000, 35.53],
[1355439600000, 37.56],
[1355698800000, 37.42],
[1355785200000, 37.49],
[1355871600000, 38.09],
[1355958000000, 37.87],
[1356044400000, 37.71],
[1356303600000, 37.53],
[1356476400000, 37.55],
[1356562800000, 37.30],
[1356649200000, 36.90],
[1356908400000, 37.68],
[1357081200000, 38.34],
[1357167600000, 37.75],
[1357254000000, 38.13],
[1357513200000, 37.94],
[1357599600000, 38.14],
[1357686000000, 38.66],
[1357772400000, 38.62],
[1357858800000, 38.09],
[1358118000000, 38.16],
[1358204400000, 38.15],
[1358290800000, 37.88],
[1358377200000, 37.73],
[1358463600000, 37.98],
[1358809200000, 37.95],
[1358895600000, 38.25],
[1358982000000, 38.10],
[1359068400000, 38.32],
[1359327600000, 38.24],
[1359414000000, 38.52],
[1359500400000, 37.94],
[1359586800000, 37.83],
[1359673200000, 38.34],
[1359932400000, 38.10],
[1360018800000, 38.51],
[1360105200000, 38.40],
[1360191600000, 38.07],
[1360278000000, 39.12],
[1360537200000, 38.64],
[1360623600000, 38.89],
[1360710000000, 38.81],
[1360796400000, 38.61],
[1360882800000, 38.63],
[1361228400000, 38.99],
[1361314800000, 38.77],
[1361401200000, 38.34],
[1361487600000, 38.55],
[1361746800000, 38.11],
[1361833200000, 38.59],
[1361919600000, 39.60],
]
}, ],
stroke: {
curve: 'straight',
width: 2,
},
markers: {
size: 0,
style: 'hollow',
},
colors: ["#7267EF"],
xaxis: {
type: 'datetime',
min: new Date('01 Mar 2012').getTime(),
tickAmount: 6,
},
tooltip: {
x: {
format: 'dd MMM yyyy'
}
},
fill: {
type: 'gradient',
gradient: {
shadeIntensity: 1,
opacityFrom: 0.7,
opacityTo: 0.9,
stops: [0, 100]
}
},
}
var chart = new ApexCharts(
document.querySelector("#crypto-chart"),
options
);
chart.render();
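// Range-selector buttons below: each click highlights the active button via
// resetCssClasses and narrows the chart with updateOptions({ xaxis: { min, max } }).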
var resetCssClasses = function(activeEl) {
var els = document.querySelectorAll("button");
Array.prototype.forEach.call(els, function(el) {
el.classList.remove('active');
});
activeEl.target.classList.add('active');
}
document.querySelector("#one_month").addEventListener('click', function(e) {
resetCssClasses(e)
chart.updateOptions({
xaxis: {
min: new Date('28 Jan 2013').getTime(),
max: new Date('27 Feb 2013').getTime(),
}
})
})
document.querySelector("#six_months").addEventListener('click', function(e) {
resetCssClasses(e)
chart.updateOptions({
xaxis: {
min: new Date('27 Sep 2012').getTime(),
max: new Date('27 Feb 2013').getTime(),
}
})
})
document.querySelector("#one_year").addEventListener('click', function(e) {
resetCssClasses(e)
chart.updateOptions({
xaxis: {
min: new Date('27 Feb 2012').getTime(),
max: new Date('27 Feb 2013').getTime(),
}
})
})
document.querySelector("#ytd").addEventListener('click', function(e) {
resetCssClasses(e)
chart.updateOptions({
xaxis: {
min: new Date('01 Jan 2013').getTime(),
max: new Date('27 Feb 2013').getTime(),
}
})
})
document.querySelector("#all").addEventListener('click', function(e) {
resetCssClasses(e)
chart.updateOptions({
xaxis: {
min: undefined,
max: undefined,
}
})
})
});
// [ crypto-chart ] end
// [ rating ] start
$('#example-1to10').barrating('show', {
theme: 'bars-1to10',
readonly: true,
showSelectedRating: false
});
// [ rating ] end
}
|
const emailRegex = /[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?/;
function validate(email) {
email = email.trim();
if (/[ ,]/.test(email)) {
return false;
}
if (email.split(/@/).length > 2) {
return false;
}
const segments = email.split('.');
const TLD = segments[segments.length - 1];
const validTLD = /^[A-Za-z]+$/.test(TLD); // [A-z] would also match [ \ ] ^ _ and backtick
if (!validTLD) {
return false;
}
return emailRegex.test(email);
}
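// Illustrative usage (examples only, not part of the module):
//   validate('user.name@example.com')  // true
//   validate('two words@example.com')  // false (contains a space)
//   validate('a@b@example.com')        // false (more than one @)
//   validate('user@example.123')       // false (non-alphabetic TLD)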
export default {
validate,
};
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import operator
import time
import typing
from unittest import mock
from first import first
from freezegun import freeze_time
import pytest
import yaml
from mergify_engine import config
from mergify_engine import constants
from mergify_engine import context
from mergify_engine import date
from mergify_engine import github_types
from mergify_engine import queue
from mergify_engine import rules
from mergify_engine.queue import merge_train
from mergify_engine.rules import conditions
from mergify_engine.tests.functional import base
TEMPLATE_GITHUB_ACTION = """
name: Continuous Integration
on:
pull_request:
branches:
- main
jobs:
unit-tests:
timeout-minutes: 5
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- run: %s
"""
class TrainCarMatcher(typing.NamedTuple):
user_pull_request_numbers: typing.List[github_types.GitHubPullRequestNumber]
parent_pull_request_numbers: typing.List[github_types.GitHubPullRequestNumber]
initial_current_base_sha: github_types.SHAType
creation_state: merge_train.TrainCarState
queue_pull_request_number: typing.Optional[github_types.GitHubPullRequestNumber]
class TestQueueAction(base.FunctionalTestBase):
SUBSCRIPTION_ACTIVE = True
@staticmethod
def _assert_car(car: merge_train.TrainCar, expected_car: TrainCarMatcher) -> None:
for i, ep in enumerate(car.still_queued_embarked_pulls):
assert (
ep.user_pull_request_number == expected_car.user_pull_request_numbers[i]
)
assert (
car.parent_pull_request_numbers == expected_car.parent_pull_request_numbers
)
assert car.initial_current_base_sha == expected_car.initial_current_base_sha
assert car.creation_state == expected_car.creation_state
assert car.queue_pull_request_number == expected_car.queue_pull_request_number
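# Assert the whole train: its current base sha, the expected cars in order
# and, optionally, the pulls still waiting to be embarked.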
@classmethod
async def _assert_cars_contents(
cls,
q: merge_train.Train,
expected_base_sha: github_types.SHAType,
expected_cars: typing.List[TrainCarMatcher],
expected_waiting_pulls: typing.Optional[
typing.List[github_types.GitHubPullRequestNumber]
] = None,
) -> None:
if expected_waiting_pulls is None:
expected_waiting_pulls = []
await q.load()
assert q._current_base_sha == expected_base_sha
pulls_in_queue = await q.get_pulls()
assert (
pulls_in_queue
== list(
itertools.chain.from_iterable(
[p.user_pull_request_numbers for p in expected_cars]
)
)
+ expected_waiting_pulls
)
assert len(q._cars) == len(expected_cars)
for i, expected_car in enumerate(expected_cars):
car = q._cars[i]
cls._assert_car(car, expected_car)
assert len(q._waiting_pulls) == len(expected_waiting_pulls)
for i, expected_waiting_pull in enumerate(expected_waiting_pulls):
wp = q._waiting_pulls[i]
assert wp.user_pull_request_number == expected_waiting_pull
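# Delete the queue rule used by a queued PR: its queue check must be
# cancelled and the train emptied.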
async def test_queue_rule_deleted(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge me",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"queue": {"name": "default"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 1
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
assert len(await q.get_pulls()) == 1
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge me (queue)",
)
assert check["conclusion"] is None
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
updated_rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
"allow_inplace_speculative_checks": False,
}
],
"pull_request_rules": [
{
"name": "Merge only if label is present",
"conditions": [f"base={self.main_branch_name}", "label=automerge"],
"actions": {"queue": {"name": "default"}},
},
],
}
p2, _ = await self.create_pr(files={".mergify.yml": yaml.dump(updated_rules)})
await self.merge_pull(p2["number"])
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
p = await self.get_pull(p["number"])
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge me (queue)",
)
assert check["conclusion"] == "cancelled"
assert check["output"]["title"] == "The rule/action does not exists anymore"
q = await merge_train.Train.from_context(ctxt)
assert len(await q.get_pulls()) == 0
async def test_basic_queue(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
async def assert_queued():
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.run_engine()
await assert_queued()
assert tmp_pull["commits"] == 6
await self.create_status(tmp_pull)
await self.run_engine()
await assert_queued()
await self.create_comment(p1["number"], "@mergifyio refresh")
await self.run_engine()
await assert_queued()
await self.create_status(p1, state="pending")
await self.run_engine()
await assert_queued()
await self.create_status(p1)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_queue_with_rebase_update_method(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {
"queue": {
"name": "default",
"priority": "high",
"update_method": "rebase",
"update_bot_account": "mergify-test4",
}
},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
commits = await self.get_commits(p1["number"])
assert len(commits) == 1
assert commits[0]["commit"]["committer"]["name"] == "mergify-test4"
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
assert tmp_pull["commits"] == 5
await self.create_status(p1)
await self.run_engine()
await self.create_status(tmp_pull)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_queue_no_inplace(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
"allow_inplace_speculative_checks": False,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 2
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"]]
# No parent PR, but the car is created instead of updated
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
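# batch_size=2 with speculative_checks=2: embarked PRs are checked two at a
# time through shared tmp merge-queue pull requests.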
async def test_batch_queue(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 2,
"batch_size": 2,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
p3, _ = await self.create_pr()
p4, _ = await self.create_pr()
p5, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.add_label(p3["number"], "queue")
await self.add_label(p4["number"], "queue")
await self.add_label(p5["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 7
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p3["number"],
p4["number"],
p5["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"], p2["number"]],
[],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
TrainCarMatcher(
[p3["number"], p4["number"]],
[p1["number"], p2["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[1]["number"],
),
],
[p5["number"]],
)
await self.create_status(tmp_pulls[0])
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 5
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p3["number"],
p4["number"],
p5["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
p2 = await self.get_pull(p2["number"])
await self._assert_cars_contents(
q,
p2["merge_commit_sha"],
[
TrainCarMatcher(
[p3["number"], p4["number"]],
[p1["number"], p2["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
TrainCarMatcher(
[p5["number"]],
[p3["number"], p4["number"]],
p2["merge_commit_sha"],
"created",
tmp_pulls[1]["number"],
),
],
)
await self.create_status(tmp_pulls[0])
await self.run_engine()
await self.create_status(tmp_pulls[1])
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_first_batch_split_queue(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 2,
"batch_size": 3,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"], p2["number"]],
[],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
],
)
await self.create_status(tmp_pulls[0], state="failure")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
# The train car has been split
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
],
[],
)
# Merge p1, p2 should be marked as failure
p1 = await self.get_pull(p1["number"])
await self.create_status(p1)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 1
await self._assert_cars_contents(q, None, [])
async def test_batch_split_queue(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 2,
"batch_size": 3,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
p3, _ = await self.create_pr()
p4, _ = await self.create_pr()
p5, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.add_label(p3["number"], "queue")
await self.add_label(p4["number"], "queue")
await self.add_label(p5["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 7
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p3["number"],
p4["number"],
p5["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"], p2["number"], p3["number"]],
[],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
TrainCarMatcher(
[p4["number"], p5["number"]],
[p1["number"], p2["number"], p3["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[1]["number"],
),
],
)
await self.create_status(tmp_pulls[0], state="failure")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 7
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p3["number"],
p4["number"],
p5["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
# The train car has been split
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[1]["number"],
),
TrainCarMatcher(
[p3["number"]],
[p1["number"], p2["number"]],
p["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
],
[p4["number"], p5["number"]],
)
# Merge p1 and p2; p3 should be dropped and p4 and p5 checked
p1 = await self.get_pull(p1["number"])
await self.create_status(p1)
await self.create_status(tmp_pulls[1])
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 4
tmp_pulls = sorted(
[
tmp
for tmp in pulls
if tmp["number"]
not in (
p1["number"],
p2["number"],
p3["number"],
p4["number"],
p5["number"],
p["number"],
)
],
key=operator.itemgetter("number"),
)
p2 = await self.get_pull(p2["number"])
await self._assert_cars_contents(
q,
p2["merge_commit_sha"],
[
TrainCarMatcher(
[p4["number"], p5["number"]],
[],
p2["merge_commit_sha"],
"created",
tmp_pulls[0]["number"],
),
],
)
await self.create_status(tmp_pulls[0])
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 1
await self._assert_cars_contents(q, None, [])
async def test_queue_just_rebase(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": ["label=queue"],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
p_other, _ = await self.create_pr()
await self.merge_pull(p_other["number"])
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
await self.add_label(p["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(q, None, [])
pulls = await self.get_pulls()
assert len(pulls) == 0
async def test_queue_already_ready(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": ["label=queue"],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.add_label(p["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(q, None, [])
pulls = await self.get_pulls()
assert len(pulls) == 0
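# The queue rule also requires a "foobar" label: PRs stay queued, even with a
# green CI, until that label is added.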
async def test_queue_with_labels(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
"label=foobar",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
# Depending on the timing this can have 4 or 5 commits, because the merge
# commit due to the update of the first PR may or may not appear
assert tmp_pull["commits"] >= 4
await self.create_status(tmp_pull)
await self.run_engine()
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
async def assert_queued(pull):
check = first(
await context.Context(
self.repository_ctxt, pull
).pull_engine_check_runs,
key=lambda c: c["name"] == constants.MERGE_QUEUE_SUMMARY_NAME,
)
assert check["conclusion"] is None
await assert_queued(p1)
await assert_queued(p2)
await self.create_status(p1, state="pending")
await self.create_status(p2, state="pending")
await self.run_engine()
await assert_queued(p1)
await assert_queued(p2)
pulls = await self.get_pulls()
assert len(pulls) == 3
await self.create_status(p1)
await self.create_status(p2)
await self.run_engine()
await assert_queued(p1)
await assert_queued(p2)
pulls = await self.get_pulls()
assert len(pulls) == 3
await self.add_label(p1["number"], "foobar")
await self.add_label(p2["number"], "foobar")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_queue_with_ci_in_pull_request_rules(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
"status-success=continuous-integration/fake-ci",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.create_status(p1)
await self.add_label(p1["number"], "queue")
await self.create_status(p2)
await self.add_label(p2["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
assert tmp_pull["commits"] == 6
await self.create_status(tmp_pull)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.create_status(p1, state="pending")
await self.run_engine()
# Ensure it has not been cancelled on the pending event
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.create_status(p1, state="success")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
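# Commands posted on a tmp merge-queue PR: "update" is refused, "refresh" is
# accepted.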
async def test_merge_queue_refresh(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
ctxt = context.Context(self.repository_ctxt, p1)
q = await merge_train.Train.from_context(ctxt)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == [p1["number"], p2["number"]]
mq_pr_number = q._cars[1].queue_pull_request_number
await self.create_comment(mq_pr_number, "@mergifyio update")
await self.run_engine()
await self.wait_for("issue_comment", {"action": "created"})
comments = await self.get_issue_comments(mq_pr_number)
assert (
"Command not allowed on merge queue pull request." == comments[-1]["body"]
)
await self.create_comment(mq_pr_number, "@mergifyio refresh")
await self.run_engine()
await self.wait_for("issue_comment", {"action": "created"})
comments = await self.get_issue_comments(mq_pr_number)
assert (
"""> refresh
#### ✅ Pull request refreshed
<!--
DO NOT EDIT
-*- Mergify Payload -*-
{"command": "refresh", "conclusion": "success"}
-*- Mergify Payload End -*-
-->
"""
== comments[-1]["body"]
)
async def test_ongoing_train_basic(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
p3, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
# Queue PRs
await self.add_label(p1["number"], "queue")
await self.run_engine()
await self.add_label(p2["number"], "queue")
await self.run_engine()
# Check Queue
pulls = await self.get_pulls()
# 1 queued and rebased PR + 1 queued PR with its tmp PR + 1 not-queued PR
assert len(pulls) == 4
tmp_mq_p2 = pulls[0]
assert tmp_mq_p2["number"] not in [p1["number"], p2["number"], p3["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# ensure it has been rebased
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha
# Merge p1
await self.create_status(p1)
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine(3)
pulls = await self.get_pulls()
assert len(pulls) == 3
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
# ensure the base is p; it's tested with p1, but current_base_sha has changed since
# we created the tmp pull request
await self._assert_cars_contents(
q,
p1["merge_commit_sha"],
[
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# Queue p3
await self.add_label(p3["number"], "queue")
await self.run_engine(3)
# Check train state
pulls = await self.get_pulls()
assert len(pulls) == 4
tmp_mq_p3 = pulls[0]
assert tmp_mq_p3["number"] not in [
p1["number"],
p2["number"],
p3["number"],
tmp_mq_p2["number"],
]
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p1["merge_commit_sha"],
[
# Ensure p2 car is still the same
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
# Ensure base is p1 and only p2 is tested with p3
TrainCarMatcher(
[p3["number"]],
[p2["number"]],
p1["merge_commit_sha"],
"created",
tmp_mq_p3["number"],
),
],
)
async def test_ongoing_train_second_pr_ready_first(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
# Queue two pulls
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p2 = pulls[0]
assert tmp_mq_p2["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# p2 is ready first, ensure it's not merged
await self.create_status(tmp_mq_p2)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 2
# Nothing change
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# TODO(sileht): look at the state of the p2 merge-queue check-run
# p1 is ready, check both are merged in a row
p1 = await self.get_pull(p1["number"])
await self.create_status(p1)
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine(3)
pulls = await self.get_pulls()
assert len(pulls) == 0
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
p2 = await self.get_pull(p2["number"])
assert p2["merged"]
await self._assert_cars_contents(q, None, [])
async def test_queue_ci_failure(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p2 = pulls[0]
assert tmp_mq_p2["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# tmp merge-queue pull p2 fails
await self.create_status(tmp_mq_p2, state="failure")
await self.run_engine()
# then p1 fails too
p1 = await self.get_pull(p1["number"])
await self.create_status(p1, state="failure")
await self.run_engine()
# TODO(sileht): Add some assertion on check-runs content
# the tmp merge-queue pull of p2 has been closed and p2 has been updated/rebased
pulls = await self.get_pulls()
assert len(pulls) == 2
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p2["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
],
)
# Merge p2
p2 = await self.get_pull(p2["number"])
await self.create_status(p2)
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
# Only p1 is still there and the queue is empty
pulls = await self.get_pulls()
assert len(pulls) == 1
assert pulls[0]["number"] == p1["number"]
await self._assert_cars_contents(q, None, [])
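# A queued PR that conflicts with the PR ahead of it cannot get a tmp
# merge-queue PR: it is reported as unembarkable while the rest of the train
# proceeds.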
async def test_queue_cant_create_tmp_pull_request(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr(files={"conflicts": "well"})
p2, _ = await self.create_pr(files={"conflicts": "boom"})
p3, _ = await self.create_pr()
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.add_label(p3["number"], "queue")
await self.run_engine(3)
pulls = await self.get_pulls()
assert len(pulls) == 4, [p["number"] for p in pulls]
tmp_mq_p3 = pulls[0]
assert tmp_mq_p3["number"] not in [p1["number"], p2["number"], p3["number"]]
# Check only p1 and p3 are in the train
ctxt_p1 = context.Context(self.repository_ctxt, p1)
q = await merge_train.Train.from_context(ctxt_p1)
await self._assert_cars_contents(
q,
p1["base"]["sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p1["base"]["sha"],
"updated",
None,
),
TrainCarMatcher(
[p3["number"]],
[p1["number"]],
p1["base"]["sha"],
"created",
tmp_mq_p3["number"],
),
],
)
# Ensure p2 status is updated with the failure
p2 = await self.get_pull(p2["number"])
ctxt_p2 = context.Context(self.repository_ctxt, p2)
check = first(
await ctxt_p2.pull_engine_check_runs,
key=lambda c: c["name"] == constants.MERGE_QUEUE_SUMMARY_NAME,
)
assert (
check["output"]["title"] == "This pull request cannot be embarked for merge"
)
assert (
check["output"]["summary"]
== "The merge-queue pull request can't be created\nDetails: `Merge conflict`"
)
# Merge the train
await self.create_status(p1)
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.create_status(tmp_mq_p3)
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
# Only p2 remains and it is not in the train
pulls = await self.get_pulls()
assert len(pulls) == 1
assert pulls[0]["number"] == p2["number"]
await self._assert_cars_contents(q, None, [])
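# CI failure on a tmp merge-queue PR drops that car and rebuilds the ones
# behind it; re-requesting the merge-queue check suite puts the dropped PR
# back at the end of the queue.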
async def test_queue_cancel_and_refresh(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Tchou tchou",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
p3, _ = await self.create_pr()
# Queue PRs
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.add_label(p3["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 5
tmp_mq_p3 = pulls[0]
tmp_mq_p2 = pulls[1]
assert tmp_mq_p3["number"] not in [p1["number"], p2["number"], p3["number"]]
assert tmp_mq_p2["number"] not in [p1["number"], p2["number"], p3["number"]]
ctxt_p_merged = context.Context(self.repository_ctxt, p1)
q = await merge_train.Train.from_context(ctxt_p_merged)
await self._assert_cars_contents(
q,
p1["base"]["sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p1["base"]["sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p1["base"]["sha"],
"created",
tmp_mq_p2["number"],
),
TrainCarMatcher(
[p3["number"]],
[p1["number"], p2["number"]],
p1["base"]["sha"],
"created",
tmp_mq_p3["number"],
),
],
)
await self.create_status(p1)
await self.run_engine()
# Ensure p1 is removed and current["head"]["sha"] has been updated on p2 and p3
p1 = await self.get_pull(p1["number"])
await self._assert_cars_contents(
q,
p1["merge_commit_sha"],
[
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p1["base"]["sha"],
"created",
tmp_mq_p2["number"],
),
TrainCarMatcher(
[p3["number"]],
[p1["number"], p2["number"]],
p1["base"]["sha"],
"created",
tmp_mq_p3["number"],
),
],
)
# tmp merge-queue pr p2, CI fails
await self.create_status(tmp_mq_p2, state="failure")
await self.run_engine()
# tmp merge-queue pr p2 and p3 have been closed
pulls = await self.get_pulls()
assert len(pulls) == 2
# p3 is now rebased instead of having a tmp merge-queue pr
await self._assert_cars_contents(
q,
p1["merge_commit_sha"],
[
TrainCarMatcher(
[p3["number"]],
[],
p1["merge_commit_sha"],
"updated",
None,
),
],
)
# refresh to add it back into the queue
check = typing.cast(
github_types.GitHubCheckRun,
await self.client_admin.items(
f"{self.url_origin}/commits/{p2['head']['sha']}/check-runs",
api_version="antiope",
list_items="check_runs",
params={"name": constants.MERGE_QUEUE_SUMMARY_NAME},
).__anext__(),
)
check_suite_id = check["check_suite"]["id"]
# click on the refresh button
await self.installation_ctxt.client.post(
f"{self.repository_ctxt.base_url}/check-suites/{check_suite_id}/rerequest",
api_version="antiope",
)
await self.wait_for("check_suite", {"action": "rerequested"})
await self.run_engine()
# Check the pull is back in the queue and the tmp pull has been recreated
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p2_bis = pulls[0]
assert tmp_mq_p2_bis["number"] not in [p1["number"], p2["number"], p3["number"]]
await self._assert_cars_contents(
q,
p1["merge_commit_sha"],
[
TrainCarMatcher(
[p3["number"]],
[],
p1["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p3["number"]],
p1["merge_commit_sha"],
"created",
tmp_mq_p2_bis["number"],
),
],
)
async def test_queue_manual_merge(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
# Queue PRs
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p2 = pulls[0]
assert tmp_mq_p2["number"] not in [p1["number"], p2["number"]]
ctxt_p_merged = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt_p_merged)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_mq_p2["number"],
),
],
)
# Ensure p1 has been rebased
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha
# Merge a non-queued PR manually
p_merged_in_meantime, _ = await self.create_pr()
await self.merge_pull(p_merged_in_meantime["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
p_merged_in_meantime = await self.get_pull(p_merged_in_meantime["number"])
await self.run_engine(3)
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p2_bis = await self.get_pull(pulls[0]["number"])
assert tmp_mq_p2_bis["number"] not in [p1["number"], p2["number"]]
await self._assert_cars_contents(
q,
p_merged_in_meantime["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p_merged_in_meantime["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p_merged_in_meantime["merge_commit_sha"],
"created",
tmp_mq_p2_bis["number"],
),
],
)
# Check the train has been reset on top of the new sha
# Ensure p1 has been rebased again and p2 has been recreated with more commits
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha
assert tmp_mq_p2_bis["commits"] == 6
# Merge the train
await self.create_status(p1)
await self.create_status(tmp_mq_p2_bis)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
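# Two queues with different priorities: PRs queued in "default" are put on
# hold when a PR enters the "urgent" queue, and re-embarked once it is merged.
# Also exercises the queue listing endpoints.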
async def test_queue_priority(self):
rules = {
"queue_rules": [
{
"name": "urgent",
"conditions": [
"status-success=continuous-integration/fast-ci",
],
"speculative_checks": 5,
},
{
"name": "default",
"conditions": [
"status-success=continuous-integration/slow-ci",
],
"speculative_checks": 5,
},
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue-urgent",
],
"actions": {"queue": {"name": "urgent"}},
},
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
p3, _ = await self.create_pr()
# To force others to be rebased
p_merged, _ = await self.create_pr()
await self.merge_pull(p_merged["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p_merged = await self.get_pull(p_merged["number"])
# Put first PR in queue
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
ctxt_p_merged = context.Context(self.repository_ctxt, p_merged)
q = await merge_train.Train.from_context(ctxt_p_merged)
# my 3 PRs + 1 merge-queue PR
pulls = await self.get_pulls()
assert len(pulls) == 4
tmp_mq_p1 = pulls[0]
assert tmp_mq_p1["number"] not in [p1["number"], p2["number"], p3["number"]]
await self._assert_cars_contents(
q,
p_merged["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p_merged["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p_merged["merge_commit_sha"],
"created",
tmp_mq_p1["number"],
),
],
)
# ensure it has been rebased
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha
assert p1["commits"] == 2
# Put the second PR at the beginning of the queue via queue priority
await self.add_label(p3["number"], "queue-urgent")
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 3
# p3 is now the only car in the train, as its queue is not the same as p1's and p2's
await self._assert_cars_contents(
q,
p_merged["merge_commit_sha"],
[
TrainCarMatcher(
[p3["number"]],
[],
p_merged["merge_commit_sha"],
"updated",
None,
),
],
[p1["number"], p2["number"]],
)
r = await self.app.get(
f"/queues/{config.TESTING_ORGANIZATION_ID}",
headers={
"X-Hub-Signature": "sha1=whatever",
"Content-type": "application/json",
},
)
assert r.json() == {
f"{self.RECORD_CONFIG['repository_id']}": {
self.main_branch_name: [p3["number"], p1["number"], p2["number"]]
}
}
# Queue API with token
r = await self.app.get(
f"/v1/repos/{config.TESTING_ORGANIZATION_NAME}/{self.RECORD_CONFIG['repository_name']}/queues",
headers={
"Authorization": f"bearer {self.api_key_admin}",
"Content-type": "application/json",
},
)
assert r.status_code == 200
assert r.json() == {
"queues": [
{
"branch": {"name": self.main_branch_name},
"pull_requests": [
{
"number": p3["number"],
"position": 0,
"queued_at": mock.ANY,
"priority": 2000,
"queue_rule": {
"config": {
"allow_inplace_speculative_checks": True,
"allow_speculative_checks_interruption": True,
"batch_size": 1,
"checks_timeout": None,
"priority": 1,
"speculative_checks": 5,
},
"name": "urgent",
},
"speculative_check_pull_request": {
"in_place": True,
"number": p3["number"],
"started_at": mock.ANY,
},
},
{
"number": p1["number"],
"position": 1,
"priority": 2000,
"queue_rule": {
"config": {
"allow_inplace_speculative_checks": True,
"allow_speculative_checks_interruption": True,
"batch_size": 1,
"checks_timeout": None,
"priority": 0,
"speculative_checks": 5,
},
"name": "default",
},
"queued_at": mock.ANY,
"speculative_check_pull_request": None,
},
{
"number": p2["number"],
"position": 2,
"priority": 2000,
"queue_rule": {
"config": {
"allow_inplace_speculative_checks": True,
"allow_speculative_checks_interruption": True,
"batch_size": 1,
"checks_timeout": None,
"priority": 0,
"speculative_checks": 5,
},
"name": "default",
},
"queued_at": mock.ANY,
"speculative_check_pull_request": None,
},
],
}
],
}
# ensure it has been rebased and the tmp merge-queue pr of p1 has all commits
head_sha = p3["head"]["sha"]
p3 = await self.get_pull(p3["number"])
assert p3["head"]["sha"] != head_sha
# Merge p3
await self.create_status(p3, context="continuous-integration/fast-ci")
await self.run_engine()
p3 = await self.get_pull(p3["number"])
assert p3["merged"]
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
await self.run_engine()
# ensure p1 and p2 are back in the queue
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_mq_p1 = pulls[0]
assert tmp_mq_p1["number"] not in [p1["number"], p2["number"], p3["number"]]
await self._assert_cars_contents(
q,
p3["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p3["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p3["merge_commit_sha"],
"created",
tmp_mq_p1["number"],
),
],
)
# ensure it has been rebased
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha
assert p1["commits"] == 3
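# A PR whose queue conditions already pass when it is queued is merged
# directly, without a train car or tmp pull request.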
async def test_queue_no_tmp_pull_request(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
},
],
"pull_request_rules": [
{
"name": "Merge train",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
await self.create_status(p1)
await self.add_label(p1["number"], "queue")
await self.run_engine()
ctxt_p1 = context.Context(self.repository_ctxt, p1)
q = await merge_train.Train.from_context(ctxt_p1)
pulls_in_queue = await q.get_pulls()
assert pulls_in_queue == []
# the pull is merged without needing a train car
p1 = await self.get_pull(p1["number"])
assert p1["merged"]
# FIXME(sileht): Provide a tool to generate an oauth_token without
# needing the dashboard
# @pytest.mark.skipif(
# config.GITHUB_URL != "https://github.com",
# reason="We use a PAT token instead of an OAUTH_TOKEN",
# )
# MRGFY-472 should fix that
@pytest.mark.skip(
reason="This test is not reliable, GitHub doeesn't always allow to create the tmp pr"
)
async def test_pull_have_base_branch_merged_commit_with_changed_workflow(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(
yaml.dump(rules),
files={
".github/workflows/ci.yml": TEMPLATE_GITHUB_ACTION % "echo Default CI"
},
)
p1, _ = await self.create_pr()
p2, _ = await self.create_pr(two_commits=True)
# To force others to be rebased
p, _ = await self.create_pr(
files={
".github/workflows/ci.yml": TEMPLATE_GITHUB_ACTION % "echo Changed CI"
}
)
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
# Merge base branch into p2
await self.client_admin.put(
f"{self.url_origin}/pulls/{p2['number']}/update-branch",
api_version="lydian",
json={"expected_head_sha": p2["head"]["sha"]},
)
await self.add_label(p1["number"], "queue")
await self.add_label(p2["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.wait_for("pull_request", {"action": "opened"})
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = await self.get_pull(pulls[0]["number"])
assert tmp_pull["number"] not in [p1["number"], p2["number"]]
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
p["merge_commit_sha"],
q,
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
TrainCarMatcher(
[p2["number"]],
[p1["number"]],
p["merge_commit_sha"],
"created",
tmp_pull["number"],
),
],
)
assert tmp_pull["commits"] == 7
await self.create_status(tmp_pull)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.create_status(p1)
await self.run_engine()
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_more_ci_in_pull_request_rules_succeed(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"status-success=continuous-integration/fake-ci",
"status-success=very-long-ci",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.create_status(p1)
await self.create_status(p1, context="very-long-ci")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
],
)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
assert p1["head"]["sha"] != head_sha # ensure it has been rebased
async def assert_queued():
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await assert_queued()
await self.create_status(p1)
await self.run_engine()
await assert_queued()
await self.create_status(p1, context="very-long-ci")
await self.run_engine()
await self.wait_for("push", {"ref": f"refs/heads/{self.main_branch_name}"})
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_more_ci_in_pull_request_rules_failure(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [
"status-success=continuous-integration/fake-ci",
],
"speculative_checks": 5,
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"status-success=continuous-integration/fake-ci",
"status-success=very-long-ci",
"label=queue",
],
"actions": {"queue": {"name": "default", "priority": "high"}},
},
],
}
await self.setup_repo(yaml.dump(rules))
p1, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.create_status(p1)
await self.create_status(p1, context="very-long-ci")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
],
)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
        assert p1["head"]["sha"] != head_sha  # ensure it has been rebased
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.remove_label(p1["number"], "queue")
await self.run_engine()
# not merged and unqueued
pulls = await self.get_pulls()
assert len(pulls) == 1
await self._assert_cars_contents(q, None, [])
async def test_queue_ci_timeout(self):
config = {
"queue_rules": [
{
"name": "default",
"conditions": [
"check-success=continuous-integration/fake-ci",
],
"checks_timeout": "10 m",
}
],
"pull_request_rules": [
{
"name": "queue",
"conditions": [f"base={self.main_branch_name}"],
"actions": {"queue": {"name": "default"}},
},
],
}
with freeze_time("2021-05-30T10:00:00", tick=True):
await self.setup_repo(yaml.dump(config))
p, _ = await self.create_pr()
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: queue (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
pulls_to_refresh = await self.redis_cache.zrangebyscore(
"delayed-refresh", "-inf", "+inf", withscores=True
)
assert len(pulls_to_refresh) == 1
with freeze_time("2021-05-30T10:12:00", tick=True):
await self.run_engine()
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: queue (queue)",
)
assert (
check["output"]["title"]
== "The pull request has been removed from the queue"
)
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Queue: Embarked in merge train",
)
assert "checks have timed out" in check["output"]["summary"]
async def test_queue_without_branch_protection_for_queueing(self):
rules = {
"queue_rules": [
{
"name": "default",
"conditions": [],
}
],
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=queue",
],
"actions": {
"queue": {
"method": "squash",
"name": "default",
"require_branch_protection": False,
}
},
},
],
}
await self.setup_repo(yaml.dump(rules))
protection = {
"required_status_checks": {
"strict": False,
"contexts": [
"continuous-integration/fake-ci",
],
},
"required_linear_history": True,
"required_pull_request_reviews": None,
"restrictions": None,
"enforce_admins": False,
}
await self.branch_protection_protect(self.main_branch_name, protection)
p1, _ = await self.create_pr()
# To force others to be rebased
p, _ = await self.create_pr()
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p = await self.get_pull(p["number"])
await self.add_label(p1["number"], "queue")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p["merge_commit_sha"],
[
TrainCarMatcher(
[p1["number"]],
[],
p["merge_commit_sha"],
"updated",
None,
),
],
)
head_sha = p1["head"]["sha"]
p1 = await self.get_pull(p1["number"])
        assert p1["head"]["sha"] != head_sha  # ensure it has been rebased
check = first(
await context.Context(self.repository_ctxt, p1).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: Merge priority high (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.create_status(p1)
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
async def test_queue_checks_and_branch(self):
rules = f"""
queue_rules:
- name: default
conditions:
- "check-success=Summary"
- "check-success=ci/status"
- "check-success=ci/service-test"
- "check-success=ci/pipelines"
- "#approved-reviews-by>=1"
- "-label=flag:wait"
pull_request_rules:
- name: merge
conditions:
- "-draft"
- "-closed"
- "-merged"
- "-conflict"
- "base={self.main_branch_name}"
- "label=flag:merge"
actions:
queue:
name: default
priority: medium
update_method: rebase
require_branch_protection: false
"""
await self.setup_repo(rules)
protection = {
"required_status_checks": {
"strict": False,
"contexts": [
"ci/status",
"ci/service-test",
"ci/pipelines",
],
},
"required_linear_history": False,
"required_pull_request_reviews": None,
"restrictions": None,
"enforce_admins": False,
}
await self.branch_protection_protect(self.main_branch_name, protection)
p, _ = await self.create_pr()
# To force others to be rebased
p_other, _ = await self.create_pr()
await self.merge_pull(p_other["number"])
await self.wait_for("pull_request", {"action": "closed"})
await self.run_engine()
p_other = await self.get_pull(p_other["number"])
await self.create_review(p["number"])
await self.add_label(p["number"], "flag:merge")
await self.run_engine()
await self.wait_for("pull_request", {"action": "synchronize"})
await self.run_engine()
ctxt = context.Context(self.repository_ctxt, p_other)
q = await merge_train.Train.from_context(ctxt)
await self._assert_cars_contents(
q,
p_other["merge_commit_sha"],
[
TrainCarMatcher(
[p["number"]],
[],
p_other["merge_commit_sha"],
"updated",
None,
),
],
)
head_sha = p["head"]["sha"]
p = await self.get_pull(p["number"])
        assert p["head"]["sha"] != head_sha  # ensure it has been rebased
check = first(
await context.Context(self.repository_ctxt, p).pull_engine_check_runs,
key=lambda c: c["name"] == "Rule: merge (queue)",
)
assert (
check["output"]["title"]
== "The pull request is the 1st in the queue to be merged"
)
await self.create_status(p, "ci/status", state="pending")
await self.run_engine()
await self.create_status(p, "ci/status")
await self.create_status(p, "ci/service-test")
await self.run_engine()
await self.create_status(p, "ci/pipelines")
await self.run_engine()
await self.wait_for("pull_request", {"action": "closed"})
pulls = await self.get_pulls()
assert len(pulls) == 0
await self._assert_cars_contents(q, None, [])
class TestTrainApiCalls(base.FunctionalTestBase):
SUBSCRIPTION_ACTIVE = True
async def test_create_pull_basic(self):
await self.setup_repo(yaml.dump({}))
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
ctxt = context.Context(self.repository_ctxt, p1)
q = await merge_train.Train.from_context(ctxt)
base_sha = await q.get_base_sha()
queue_config = rules.QueueConfig(
priority=0,
speculative_checks=5,
batch_size=1,
allow_inplace_speculative_checks=True,
allow_speculative_checks_interruption=True,
checks_timeout=None,
)
config = queue.PullQueueConfig(
name="foo",
strict_method="merge",
update_method="merge",
priority=0,
effective_priority=0,
bot_account=None,
update_bot_account=None,
queue_config=queue_config,
)
car = merge_train.TrainCar(
q,
[merge_train.EmbarkedPull(p2["number"], config, date.utcnow())],
[merge_train.EmbarkedPull(p2["number"], config, date.utcnow())],
[p1["number"]],
base_sha,
)
q._cars.append(car)
queue_rule = rules.QueueRule(
name="foo",
conditions=conditions.QueueRuleConditions([]),
config=queue_config,
)
await car.create_pull(queue_rule)
assert car.queue_pull_request_number is not None
pulls = await self.get_pulls()
assert len(pulls) == 3
tmp_pull = [p for p in pulls if p["number"] == car.queue_pull_request_number][0]
assert tmp_pull["draft"]
expected_table = f"| 1 | test_create_pull_basic: pull request n2 from fork ([#{p2['number']}]({p2['html_url']})) | foo/0 | #{tmp_pull['number']} | <fake_pretty_datetime()>|"
assert expected_table in await car.generate_merge_queue_summary(queue_rule)
await car.delete_pull(reason="testing deleted reason")
ctxt = context.Context(self.repository_ctxt, tmp_pull)
summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)
assert summary is not None
assert summary["conclusion"] == "cancelled"
assert "testing deleted reason" in summary["output"]["summary"]
# NOTE(sileht): When branch is deleted the associated Pull is deleted in an async
# fashion on GitHub side.
time.sleep(1)
pulls = await self.get_pulls()
assert len(pulls) == 2
async def test_create_pull_conflicts(self):
await self.setup_repo(yaml.dump({}), files={"conflicts": "foobar"})
p, _ = await self.create_pr(files={"conflicts": "well"})
p1, _ = await self.create_pr()
p2, _ = await self.create_pr()
p3, _ = await self.create_pr(files={"conflicts": "boom"})
await self.merge_pull(p["number"])
await self.wait_for("pull_request", {"action": "closed"})
ctxt = context.Context(self.repository_ctxt, p)
q = await merge_train.Train.from_context(ctxt)
base_sha = await q.get_base_sha()
queue_config = rules.QueueConfig(
priority=0,
speculative_checks=5,
batch_size=1,
allow_inplace_speculative_checks=True,
allow_speculative_checks_interruption=True,
checks_timeout=None,
)
config = queue.PullQueueConfig(
name="foo",
strict_method="merge",
update_method="merge",
priority=0,
effective_priority=0,
bot_account=None,
update_bot_account=None,
queue_config=queue_config,
)
car = merge_train.TrainCar(
q,
[merge_train.EmbarkedPull(p3["number"], config, date.utcnow())],
[merge_train.EmbarkedPull(p3["number"], config, date.utcnow())],
[p1["number"], p2["number"]],
base_sha,
)
with pytest.raises(merge_train.TrainCarPullRequestCreationFailure) as exc_info:
await car.create_pull(
rules.QueueRule(
name="foo",
conditions=conditions.QueueRuleConditions([]),
config=queue_config,
)
)
assert exc_info.value.car == car
assert car.queue_pull_request_number is None
p3 = await self.get_pull(p3["number"])
ctxt_p3 = context.Context(self.repository_ctxt, p3)
check = first(
await ctxt_p3.pull_engine_check_runs,
key=lambda c: c["name"] == constants.MERGE_QUEUE_SUMMARY_NAME,
)
assert (
check["output"]["title"] == "This pull request cannot be embarked for merge"
)
assert (
check["output"]["summary"]
== "The merge-queue pull request can't be created\nDetails: `Merge conflict`"
)
@mock.patch.object(config, "ALLOW_COMMIT_MESSAGE_OPTION", False)
async def test_commit_message_queue_brownout(self):
rules = {
"pull_request_rules": [
{
"name": "Merge priority high",
"conditions": [
f"base={self.main_branch_name}",
"label=high",
"status-success=continuous-integration/fake-ci",
],
"actions": {
"queue": {"commit_message": "title+body", "priority": "high"}
},
},
]
}
await self.setup_repo(yaml.dump(rules))
p, _ = await self.create_pr()
await self.run_engine()
checks = await context.Context(self.repository_ctxt, p).pull_engine_check_runs
assert len(checks) == 1
assert "failure" == checks[0]["conclusion"]
assert "The Mergify configuration is invalid" == checks[0]["output"]["title"]
assert (
"extra keys not allowed @ pull_request_rules → item 0 → actions → queue → commit_message"
== checks[0]["output"]["summary"]
)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GalleryArtifactPublishingProfileBase(Model):
"""Describes the basic gallery artifact publishing profile.
All required parameters must be populated in order to send to Azure.
:param target_regions: The target regions where the artifact is going to
be published.
:type target_regions:
list[~azure.mgmt.compute.v2018_06_01.models.TargetRegion]
:param source: Required.
:type source: ~azure.mgmt.compute.v2018_06_01.models.GalleryArtifactSource
"""
_validation = {
'source': {'required': True},
}
_attribute_map = {
'target_regions': {'key': 'targetRegions', 'type': '[TargetRegion]'},
'source': {'key': 'source', 'type': 'GalleryArtifactSource'},
}
def __init__(self, **kwargs):
super(GalleryArtifactPublishingProfileBase, self).__init__(**kwargs)
self.target_regions = kwargs.get('target_regions', None)
self.source = kwargs.get('source', None)
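# Illustrative usage sketch, not part of the generated SDK model: the class
# takes its fields as keyword arguments, with `source` required per
# `_validation` and `target_regions` optional.  Plain dicts stand in here for
# the real GalleryArtifactSource / TargetRegion model objects.
if __name__ == '__main__':
    profile = GalleryArtifactPublishingProfileBase(
        source={'id': 'example-gallery-artifact-source'},
        target_regions=[{'name': 'West US'}],
    )
    print(profile.source, profile.target_regions)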
|
/*
Copyright (C) 2008-2010 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <assert.h>
#include <string.h>
#include "lib/polynomial.h"
typedef int (*basis_function_t)(
const size_t,
const size_t,
const coord_t* const,
const int,
const double,
const double,
double* const,
stimage_error_t* const);
int
eval_1dpoly(
const int order,
const double* const coeff,
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
double* const zfit,
stimage_error_t* const error) {
size_t i = 0;
size_t j = 0;
const double* x = (double *)ref + axis;
double* tmp = NULL;
int status = 1;
assert(coeff);
assert(ref);
assert(zfit);
assert(error);
for (i = 0; i < ncoord; ++i) {
zfit[i] = coeff[0];
}
if (order == 1) {
return 0;
}
for (i = 0; i < ncoord; ++i) {
zfit[i] += (x[i<<1] * coeff[1]);
}
if (order == 2) {
return 0;
}
tmp = malloc_with_error(ncoord * sizeof(double), error);
if (tmp == NULL) goto exit;
for (i = 0; i < ncoord; ++i) {
tmp[i] = x[i<<1];
}
for (j = 2; j < order; ++j) {
for (i = 0; i < ncoord; ++i) {
tmp[i] *= x[i<<1];
zfit[i] += tmp[i] * coeff[j];
}
}
status = 0;
exit:
free(tmp);
    return status;
}
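/*
 * Descriptive note: the Chebyshev and Legendre evaluators below apply the
 * standard three-term recurrences to the normalized coordinate
 * x' = (x + k1) * k2:
 *
 *     Chebyshev:  T_n(x') = 2 x' T_{n-1}(x') - T_{n-2}(x')
 *     Legendre:   n P_n(x') = (2n - 1) x' P_{n-1}(x') - (n - 1) P_{n-2}(x')
 *
 * which is what the pn/pnm1/pnm2 loops compute term by term before scaling
 * each term by its coefficient.
 */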
int
eval_1dchebyshev(
const int order,
const double* const coeff,
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
const double k1,
const double k2,
double* const zfit,
stimage_error_t* const error) {
size_t i = 0;
size_t j = 0;
const double* x = (double *)ref + axis;
double c1 = 0.0;
double c2 = 0.0;
double* sx = NULL;
double* pn = NULL;
double* pnm1 = NULL;
double* pnm2 = NULL;
int status = 1;
assert(coeff);
assert(ref);
assert(zfit);
assert(error);
for (i = 0; i < ncoord; ++i) {
zfit[i] = coeff[0];
}
if (order == 1) {
return 0;
}
c1 = k2 * coeff[1];
c2 = c1 * k1 + coeff[0];
for (i = 0; i < ncoord; ++i) {
zfit[i] = x[i<<1] * c1 + c2;
}
if (order == 2) {
return 0;
}
sx = malloc_with_error(ncoord * sizeof(double), error);
if (sx == NULL) goto exit;
pn = malloc_with_error(ncoord * sizeof(double), error);
if (pn == NULL) goto exit;
pnm1 = malloc_with_error(ncoord * sizeof(double), error);
if (pnm1 == NULL) goto exit;
pnm2 = malloc_with_error(ncoord * sizeof(double), error);
if (pnm2 == NULL) goto exit;
for (i = 0; i < ncoord; ++i) {
pnm2[i] = 1.0;
pnm1[i] = sx[i] = (x[i<<1] + k1) * k2;
sx[i] *= 2;
}
for (j = 2; j < order; ++j) {
for (i = 0; i < ncoord; ++i) {
pn[i] = (sx[i] * pnm1[i]) - pnm2[i];
}
if (j < order - 1) {
for (i = 0; i < ncoord; ++i) {
pnm2[i] = pnm1[i];
pnm1[i] = pn[i];
}
}
for (i = 0; i < ncoord; ++i) {
pn[i] *= coeff[j];
zfit[i] += pn[i];
}
}
status = 0;
exit:
free(sx);
free(pn);
free(pnm1);
free(pnm2);
    return status;
}
int
eval_1dlegendre(
const int order,
const double* const coeff,
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
const double k1,
const double k2,
double* const zfit,
stimage_error_t* const error) {
size_t i = 0;
size_t j = 0;
const double* x = (double *)ref + axis;
double ri = 0.0;
double ri1 = 0.0;
double ri2 = 0.0;
double* sx = NULL;
double* pn = NULL;
double* pnm1 = NULL;
double* pnm2 = NULL;
int status = 1;
assert(coeff);
assert(ref);
assert(zfit);
assert(error);
for (i = 0; i < ncoord; ++i) {
zfit[i] = coeff[0];
}
if (order == 1) {
return 0;
}
ri1 = k2 * coeff[1];
ri2 = ri1 * k1 + coeff[0];
for (i = 0; i < ncoord; ++i) {
zfit[i] = (x[i<<1] * ri1) + ri2;
}
if (order == 2) {
return 0;
}
sx = malloc_with_error(ncoord * sizeof(double), error);
if (sx == NULL) goto exit;
pn = malloc_with_error(ncoord * sizeof(double), error);
if (pn == NULL) goto exit;
pnm1 = malloc_with_error(ncoord * sizeof(double), error);
if (pnm1 == NULL) goto exit;
pnm2 = malloc_with_error(ncoord * sizeof(double), error);
if (pnm2 == NULL) goto exit;
for (i = 0; i < ncoord; ++i) {
pnm2[i] = 1.0;
pnm1[i] = sx[i] = (x[i<<1] + k1) * k2;
}
for (j = 2; j < order; ++j) {
ri = (double)j + 1.0;
ri1 = (2.0 * ri - 3.0) / (ri - 1.0);
ri2 = -(ri - 2.0) / (ri - 1.0);
for (i = 0; i < ncoord; ++i) {
pn[i] = sx[i] * pnm1[i];
pn[i] = pn[i] * ri1 + pnm2[i] * ri2;
}
if (j < order - 1) {
for (i = 0; i < ncoord; ++i) {
pnm2[i] = pnm1[i];
pnm1[i] = pn[i];
}
}
for (i = 0; i < ncoord; ++i) {
pn[i] *= coeff[j];
zfit[i] += pn[i];
}
}
status = 0;
exit:
free(sx);
free(pn);
free(pnm1);
free(pnm2);
return status;
}
int
basis_poly(
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
const int order,
const double k1, /* Ignored */
const double k2, /* Ignored */
double* const basis,
stimage_error_t* const error) {
size_t i = 0;
size_t k = 0;
const double* const x = (double*)ref + axis;
double* bp = basis;
assert(ref);
assert(basis);
assert(error);
for (k = 0; k < order; ++k) {
assert((bp - basis) >= 0);
if (k == 0) {
for (i = 0; i < ncoord; ++i) {
bp[i] = 1.0;
}
} else if (k == 1) {
for (i = 0; i < ncoord; ++i) {
bp[i] = x[i<<1];
}
} else {
for (i = 0; i < ncoord; ++i) {
assert(((bp - basis) + i - ncoord) > 0);
assert(((bp - basis) + i - ncoord) < ncoord * order);
bp[i] = x[i<<1] * bp[i - ncoord];
}
}
bp += ncoord;
}
return 0;
}
int
basis_chebyshev(
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
const int order,
const double k1,
const double k2,
double* const basis,
stimage_error_t* const error) {
size_t i = 0;
size_t k = 0;
const double* const x = (double*)ref + axis;
double* bp = basis;
assert(ref);
assert(basis);
assert(error);
for (k = 0; k < order; ++k) {
if (k == 0) {
for (i = 0; i < ncoord; ++i) {
bp[i] = 1.0;
}
} else if (k == 1) {
for (i = 0; i < ncoord; ++i) {
bp[i] = (x[i<<1] + k1) * k2;
}
} else {
for (i = 0; i < ncoord; ++i) {
assert(((bp - basis) + i - ncoord) >= 0);
assert(((bp - basis) + i - ncoord) < ncoord * order);
assert(((bp - basis) + i - (2 * ncoord)) >= 0);
assert(((bp - basis) + i - (2 * ncoord)) < ncoord * order);
bp[i] = (basis[ncoord+i] * bp[i-ncoord]);
bp[i] *= 2.0;
bp[i] -= bp[i-(2 * ncoord)];
}
}
bp += ncoord;
}
return 0;
}
int
basis_legendre(
const size_t ncoord,
const size_t axis,
const coord_t* const ref,
const int order,
const double k1,
const double k2,
double* const basis,
stimage_error_t* const error) {
size_t i = 0;
size_t k = 0;
const double* const x = (double*)ref + axis;
double* bp = basis;
double ri = 0.0;
double ri1 = 0.0;
double ri2 = 0.0;
assert(ref);
assert(basis);
assert(error);
for (k = 0; k < order; ++k) {
if (k == 0) {
for (i = 0; i < ncoord; ++i) {
bp[i] = 1.0;
}
} else if (k == 1) {
for (i = 0; i < ncoord; ++i) {
bp[i] = (x[i<<1] + k1) * k2;
}
} else {
assert(((bp - basis) + i - ncoord) >= 0);
assert(((bp - basis) + i - ncoord) < ncoord * order);
assert(((bp - basis) + i - (2 * ncoord)) >= 0);
assert(((bp - basis) + i - (2 * ncoord)) < ncoord * order);
ri = k + 1;
ri1 = (2.0 * ri - 3.0) / (ri - 1.0);
ri2 = -(ri - 2.0) / (ri - 1.0);
for (i = 0; i < ncoord; ++i) {
bp[i] = (basis[ncoord+i] * bp[i-ncoord]);
bp[i] = bp[i] * ri1 + bp[i-(2 * ncoord)] * ri2;
}
}
bp += ncoord;
}
return 0;
}
static int
eval_poly_generic(
const int xorder,
const int yorder,
const double* const coeff,
const size_t ncoord,
const coord_t* const ref,
const xterms_e xterms,
const double k1x,
const double k2x,
const double k1y,
const double k2y,
basis_function_t basis_function,
/* Output */
double* const zfit,
stimage_error_t* const error) {
size_t i = 0;
size_t j = 0;
size_t k = 0;
double* xb = NULL;
double* yb = NULL;
double* accum = NULL;
size_t cp = 0;
const size_t maxorder = MAX(xorder + 1, yorder + 1);
size_t xincr = 0;
double* xbp = xb;
double* ybp = yb;
int status = 1;
assert(coeff);
assert(ref);
assert(zfit);
assert(error);
/* Fit a constant */
if (xorder == 1 && yorder == 1) {
for (i = 0; i < ncoord; ++i) {
zfit[i] = coeff[0];
}
return 0;
}
/* Fit first order in x and y */
if (xorder == 2 && yorder == 1) {
for (i = 0; i < ncoord; ++i) {
zfit[i] += ref[i].x * coeff[1];
}
return 0;
}
if (yorder == 2 && xorder == 1) {
for (i = 0; i < ncoord; ++i) {
zfit[i] += ref[i].y * coeff[1];
}
return 0;
}
if (yorder == 2 && xorder == 2 && xterms == xterms_none) {
for (i = 0; i < ncoord; ++i) {
zfit[i] += ref[i].x * coeff[1] + ref[i].y * coeff[2];
}
return 0;
}
xb = malloc_with_error(xorder * ncoord * sizeof(double), error);
if (xb == NULL) goto exit;
yb = malloc_with_error(yorder * ncoord * sizeof(double), error);
if (yb == NULL) goto exit;
accum = malloc_with_error(ncoord * sizeof(double), error);
if (accum == NULL) goto exit;
/* Calculate basis functions */
if (basis_function(ncoord, 0, ref, xorder, k1x, k2x, xb, error)) goto exit;
if (basis_function(ncoord, 1, ref, yorder, k1y, k2y, yb, error)) goto exit;
/* Accumulate the output vector */
for (i = 0; i < ncoord; ++i) {
zfit[i] = 0.0;
}
if (xterms != xterms_none) {
xincr = xorder;
ybp = yb;
for (j = 0; j < yorder; ++j) {
for (i = 0; i < ncoord; ++i) {
accum[i] = 0.0;
}
xbp = xb;
            for (k = 0; k < xincr; ++k) {
                for (i = 0; i < ncoord; ++i) {
                    accum[i] += xbp[i] * coeff[cp+k];
                }
                xbp += ncoord;
            }
            for (i = 0; i < ncoord; ++i) {
                zfit[i] += accum[i] * ybp[i];
            }
            cp += xincr;
            ybp += ncoord;
            if (xterms == xterms_half) {
                if ((j + xorder + 1) > maxorder) {
                    xincr -= 1;
                }
            }
        }
} else { /* xterms == surface_xterms_none */
xbp = xb;
for (k = 0; k < xorder; ++k) {
for (i = 0; i < ncoord; ++i) {
zfit[i] += xbp[i] * coeff[k];
}
xbp += ncoord;
}
ybp = yb + ncoord;
for (k = 0; k < yorder - 1; ++k) {
for (i = 0; i < ncoord; ++i) {
zfit[i] += ybp[i] * coeff[xorder+k];
}
ybp += ncoord;
}
}
status = 0;
exit:
free(xb);
free(yb);
free(accum);
return status;
}
int
eval_poly(
const int xorder,
const int yorder,
const double* const coeff,
const size_t ncoord,
const coord_t* const ref,
const xterms_e xterms,
const double k1x,
const double k2x,
const double k1y,
const double k2y,
/* Output */
double* const zfit,
stimage_error_t* const error) {
return eval_poly_generic(
xorder, yorder, coeff, ncoord, ref, xterms, k1x, k2x, k1y, k2y,
&basis_poly, zfit, error);
}
int
eval_chebyshev(
const int xorder,
const int yorder,
const double* const coeff,
const size_t ncoord,
const coord_t* const ref,
const xterms_e xterms,
const double k1x,
const double k2x,
const double k1y,
const double k2y,
/* Output */
double* const zfit,
stimage_error_t* const error) {
return eval_poly_generic(
xorder, yorder, coeff, ncoord, ref, xterms, k1x, k2x, k1y, k2y,
&basis_chebyshev, zfit, error);
}
int
eval_legendre(
const int xorder,
const int yorder,
const double* const coeff,
const size_t ncoord,
const coord_t* const ref,
const xterms_e xterms,
const double k1x,
const double k2x,
const double k1y,
const double k2y,
/* Output */
double* const zfit,
stimage_error_t* const error) {
return eval_poly_generic(
xorder, yorder, coeff, ncoord, ref, xterms, k1x, k2x, k1y, k2y,
&basis_legendre, zfit, error);
}
|
/* { dg-do compile } */
/* { dg-options "-O2 -fdump-tree-optimized" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
static const char f[3] = "?";
int foo()
{
int i = 0;
return f[i] != '?';
}
/* { dg-final { scan-tree-dump "return 0;" "optimized" } } */
/* { dg-final { cleanup-tree-dump "optimized" } } */
|
/*
* This header is generated by classdump-dyld 1.0
* on Sunday, June 7, 2020 at 11:15:04 AM Mountain Standard Time
* Operating System: Version 13.4.5 (Build 17L562)
* Image Source: /System/Library/PrivateFrameworks/GeoServices.framework/GeoServices
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
*/
@protocol GEOSearchAttributionServerProxy;
@interface GEOSearchAttributionManifestManager : NSObject {
id<GEOSearchAttributionServerProxy> _serverProxy;
}
@property (nonatomic,readonly) id<GEOSearchAttributionServerProxy> serverProxy; //@synthesize serverProxy=_serverProxy - In the implementation block
+(id)sharedManager;
+(void)useProxy:(Class)arg1 ;
+(void)useRemoteProxy;
+(void)useLocalProxy;
+(void)setUseLocalProxy:(BOOL)arg1 ;
-(id)init;
-(void)loadAttributionInfoForIdentifiers:(id)arg1 completionHandler:(/*^block*/id)arg2 ;
-(id<GEOSearchAttributionServerProxy>)serverProxy;
@end
|
"use strict";
var KTLayoutHeaderMobile = function() {
// Private properties
var _element;
var _object;
// Get height
var _getHeight = function() {
var height;
height = KTUtil.actualHeight(_element);
return height;
}
// Public methods
return {
init: function(id) {
_element = KTUtil.getById(id);
},
isFixed: function() {
return KTUtil.hasClass(KTUtil.getBody(), 'header-mobile-fixed')
},
getElement: function() {
return _element;
},
getHeader: function() {
return _object;
},
getHeight: function() {
return _getHeight();
}
};
}();
// Webpack support
if (typeof module !== 'undefined') {
module.exports = KTLayoutHeaderMobile;
}
|
#ifndef NAMEDCOLORS_H
#define NAMEDCOLORS_H
#include "vector.h"
namespace NamedColors
{
#define regcolor3f(name, r, g, b) \
MAYBE_UNUSED_ATTR Q_DECL_CONSTEXPR Vector3f COLOR_##name (r, g, b); \
MAYBE_UNUSED_ATTR Q_DECL_CONSTEXPR Vector4f COLOR4_##name = extendV3_V4(COLOR_##name, 1.0f); \
MAYBE_UNUSED_ATTR const QColor QCOLOR_##name (V3ARGS(COLOR_##name));
#define regcolor3_255(name, r, g, b) regcolor3f(name, TF(r), TF(g), TF(b))
//Vector3f COLOR_WHITE, Vector4f COLOR4_WHITE, QColor QCOLOR_WHITE
regcolor3f(WHITE, 1.0f, 1.0f, 1.0f)
regcolor3f(RED, 1.0f, 0.0f, 0.0f)
regcolor3f(GREEN, 0.0f, 1.0f, 0.0f)
regcolor3f(CYAN, 0.0f, 1.0f, 1.0f)
regcolor3f(BLUE, 0.0f, 0.0f, 1.0f)
regcolor3f(BLACK, 0.0f, 0.0f, 0.0f)
// Q_DECL_CONSTEXPR Vector3f COLOR_WHITE (1.0f, 1.0f, 1.0f);
// Q_DECL_CONSTEXPR Vector3f COLOR_RED (1.0f, 0.0f, 0.0f);
// Q_DECL_CONSTEXPR Vector3f COLOR_GREEN (0.0f, 1.0f, 0.0f);
// Q_DECL_CONSTEXPR Vector3f COLOR_CYAN (0.0f, 1.0f, 1.0f);
// Q_DECL_CONSTEXPR Vector3f COLOR_BLUE (0.0f, 0.0f, 1.0f);
// Q_DECL_CONSTEXPR Vector3f COLOR_BLACK (0.0f, 0.0f, 0.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_WHITE = extendV3_V4(COLOR_WHITE, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_RED = extendV3_V4(COLOR_RED, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_GREEN = extendV3_V4(COLOR_GREEN, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_CYAN = extendV3_V4(COLOR_CYAN, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_BLUE = extendV3_V4(COLOR_BLUE, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_BLACK = extendV3_V4(COLOR_BLACK, 1.0f);
// const QColor QCOLOR_WHITE (255, 255, 255);
// const QColor QCOLOR_BLACK (0, 0, 0);
// const QColor QCOLOR_RED (255, 0, 255);
// const QColor QCOLOR_GREEN (0, 255, 0);
// const QColor QCOLOR_CYAN (0, 255, 255);
// const QColor QCOLOR_BLUE (0, 0, 255);
//flat UI US from https://flatuicolors.com/palette/us
regcolor3_255(GRAY0, 223, 230, 233)
regcolor3_255(GRAY1, 178, 190, 195)
regcolor3_255(GRAY2, 99, 110, 114)
regcolor3_255(GRAY3, 45, 52, 54)
regcolor3_255(GRAY4, 5, 12, 14)
// Q_DECL_CONSTEXPR Vector3f COLOR_GRAY0 (TF(223), TF(230), TF(233));
// Q_DECL_CONSTEXPR Vector3f COLOR_GRAY1 (TF(178), TF(190), TF(195));
// Q_DECL_CONSTEXPR Vector3f COLOR_GRAY2 (TF(99), TF(110), TF(114));
// Q_DECL_CONSTEXPR Vector3f COLOR_GRAY3 (TF(45), TF(52), TF(54));
// Q_DECL_CONSTEXPR Vector3f COLOR_GRAY4 (TF(5), TF(12), TF(14));
// Q_DECL_CONSTEXPR Vector4f COLOR4_GRAY0 = extendV3_V4(COLOR_GRAY0, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_GRAY1 = extendV3_V4(COLOR_GRAY1, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_GRAY2 = extendV3_V4(COLOR_GRAY2, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_GRAY3 = extendV3_V4(COLOR_GRAY3, 1.0f);
// Q_DECL_CONSTEXPR Vector4f COLOR4_GRAY4 = extendV3_V4(COLOR_GRAY4, 1.0f);
// const QColor QCOLORGRAY0 (V3ARGS(COLOR_GRAY0));
// const QColor QCOLORGRAY1 (V3ARGS(COLOR_GRAY1));
// const QColor QCOLORGRAY2 (V3ARGS(COLOR_GRAY2));
// const QColor QCOLORGRAY3 (V3ARGS(COLOR_GRAY3));
// const QColor QCOLORGRAY4 (V3ARGS(COLOR_GRAY4));
#undef regcolor3f
#undef regcolor3_255
}
#endif // NAMEDCOLORS_H
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.features.image_feature."""
import numpy as np
import tensorflow as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core import features as features_lib
class LabeledImageFeatureTest(testing.FeatureExpectationsTestCase):
def test_images(self):
rng = np.random.default_rng()
img = rng.integers(256, size=(28, 28, 1), dtype=np.uint8)
self.assertFeature(
feature=features_lib.LabeledImage(
labels=['background', 'car', 'truck'],
shape=(28, 28, 1),
),
shape=(28, 28, 1),
dtype=tf.uint8,
tests=[
# Numpy array
testing.FeatureExpectationItem(
value=img,
expected=img,
),
],
test_attributes=dict(
num_classes=3,
names=['background', 'car', 'truck'],
_use_colormap=True,
),
)
|
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import abc
import operator as operator_module
from typing import (Iterable, Union, Optional, Tuple, Any, Iterator, Type,
Sequence, Callable, Hashable, Mapping, TypeVar)
import collections.abc
import numbers
import itertools
import functools
import time as time_module
from marley import utils
def sleep_until(condition: Callable[[], Any], total_seconds: float = 10, *,
step_seconds: float = 0.5, initial_seconds: float = 0,
reset_condition: Optional[Callable[[], Any]] = None,
exception: Union[Exception, Type[Exception]] = TimeoutError) -> None:
if initial_seconds:
time_module.sleep(initial_seconds)
if condition():
return
div, mod = divmod(total_seconds - initial_seconds, step_seconds)
n_steps = int(div + int(bool(mod)))
while True:
for _ in range(n_steps):
time_module.sleep(step_seconds)
if condition():
return
elif reset_condition is not None and reset_condition():
break # to `while` loop, restarting the wait
else:
raise exception
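# Minimal illustrative sketch of how sleep_until might be driven: poll an
# Event that a timer sets after about one second, checking every 0.2 seconds
# and giving up with TimeoutError after 5 seconds.
if __name__ == '__main__':
    import threading
    flag = threading.Event()
    threading.Timer(1.0, flag.set).start()
    sleep_until(flag.is_set, total_seconds=5, step_seconds=0.2)
    print('condition became true')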
|
const resolve = require('path').resolve;
const DOC_TABLE_OF_CONTENTS = require('../docs/table-of-contents.json');
module.exports = {
plugins: [
{
resolve: `gatsby-theme-ocular`,
options: {
logLevel: 1, // Adjusts amount of debug information from ocular-gatsby
// Folders
DIR_NAME: __dirname,
ROOT_FOLDER: `${__dirname}/../`,
DOCS: DOC_TABLE_OF_CONTENTS,
DOC_FOLDERS: [
`${__dirname}/../docs/`
],
SOURCE: [`${__dirname}/static`],
PROJECT_TYPE: 'github',
PROJECT_NAME: 'react-map-gl',
PROJECT_ORG: 'uber',
PROJECT_ORG_LOGO: 'images/uber-logo.png',
PROJECT_URL: 'https://github.com/visgl/',
PROJECT_DESC: 'React wrapper for Mapbox GL JS',
PATH_PREFIX: '/react-map-gl/',
GA_TRACKING: null,
// For showing star counts and contributors.
// Should be like btoa('YourUsername:YourKey') and should be readonly.
GITHUB_KEY: null,
HOME_PATH: '/',
PROJECTS: [
{
name: 'deck.gl',
url: 'https://deck.gl'
},
{
name: 'luma.gl',
url: 'https://luma.gl'
},
{
name: 'loaders.gl',
url: 'https://loaders.gl'
},
{
name: 'nebula.gl',
url: 'https://nebula.gl/'
}
],
LINK_TO_GET_STARTED: '/docs/get-started/get-started',
ADDITIONAL_LINKS: [{name: 'Blog', href: 'http://medium.com/vis-gl', index: 1}],
INDEX_PAGE_URL: resolve(__dirname, './src/home.js'),
EXAMPLES: [
{
title: 'Dynamic Styling',
image: 'images/example-layers.jpg',
componentUrl: resolve(__dirname, '../examples/layers/src/app.js'),
path: 'examples/layers'
},
{
title: 'Markers & Popups',
image: 'images/example-controls.jpg',
componentUrl: resolve(__dirname, '../examples/controls/src/app.js'),
path: 'examples/controls'
},
{
title: 'Custom Cursor',
image: 'images/example-custom-cursor.jpg',
componentUrl: resolve(__dirname, '../examples/custom-cursor/src/app.js'),
path: 'examples/custom-cursor'
},
{
title: 'Draggable Marker',
image: 'images/example-draggable-markers.jpg',
componentUrl: resolve(__dirname, '../examples/draggable-markers/src/app.js'),
path: 'examples/draggable-markers'
},
{
title: 'GeoJSON',
image: 'images/example-geojson.jpg',
componentUrl: resolve(__dirname, '../examples/geojson/src/app.js'),
path: 'examples/geojson'
},
{
title: 'GeoJSON Animation',
image: 'images/example-geojson-animation.jpg',
componentUrl: resolve(__dirname, '../examples/geojson-animation/src/app.js'),
path: 'examples/geojson-animation'
},
{
title: 'Clusters',
image: 'images/example-clusters.jpg',
componentUrl: resolve(__dirname, '../examples/clusters/src/app.js'),
path: 'examples/clusters'
},
{
title: 'Locate User',
image: 'images/example-locate-user.jpg',
componentUrl: resolve(__dirname, '../examples/locate-user/src/app.js'),
path: 'examples/locate-user'
},
{
title: 'Limit Map Interaction',
image: 'images/example-interaction.jpg',
componentUrl: resolve(__dirname, '../examples/interaction/src/app.js'),
path: 'examples/interaction'
},
{
title: 'Camera Transition',
image: 'images/example-viewport-animation.jpg',
componentUrl: resolve(__dirname, '../examples/viewport-animation/src/app.js'),
path: 'examples/viewport-animation'
},
{
title: 'Highlight By Filter',
image: 'images/example-filter.jpg',
componentUrl: resolve(__dirname, '../examples/filter/src/app.js'),
path: 'examples/filter'
},
{
title: 'Zoom To Bounds',
image: 'images/example-zoom-to-bounds.jpg',
componentUrl: resolve(__dirname, '../examples/zoom-to-bounds/src/app.js'),
path: 'examples/zoom-to-bounds'
},
{
title: 'Heatmap',
image: 'images/example-heatmap.jpg',
componentUrl: resolve(__dirname, '../examples/heatmap/src/app.js'),
path: 'examples/heatmap'
},
{
title: 'DrawPolygon',
image: 'images/example-draw-polygon.jpg',
componentUrl: resolve(__dirname, '../examples/draw-polygon/src/app.js'),
path: 'examples/draw-polygon'
}
],
STYLESHEETS: [
'https://api.tiles.mapbox.com/mapbox-gl-js/v1.6.0/mapbox-gl.css',
'https://api.mapbox.com/mapbox-gl-js/plugins/mapbox-gl-draw/v1.0.9/mapbox-gl-draw.css',
'/style.css'
]
}
},
{resolve: 'gatsby-plugin-no-sourcemaps'},
{
resolve: 'gatsby-plugin-env-variables',
options: {
whitelist: ['MapboxAccessToken']
}
}
]
};
|
const {
lengthOfLongestSubstring
} = require('../src/3.longest-substring-without-repeating-characters')
describe('longest substring', () => {
test.each([
['abba', 2],
['abcabcbb', 3],
['bbbbb', 1],
['pwwkewr', 4],
['pwwkew', 3]
])('lengthOfLongestSubstring(%s) should output (%i)', (input, expected) => {
expect(lengthOfLongestSubstring(input)).toBe(expected)
})
})
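// Illustrative sliding-window sketch of an implementation that satisfies the
// cases above; the name lengthOfLongestSubstringSketch is hypothetical and is
// not the module under test in ../src.
function lengthOfLongestSubstringSketch(s) {
  const lastSeen = new Map()
  let start = 0
  let longest = 0
  for (let end = 0; end < s.length; end++) {
    const ch = s[end]
    // shrink the window past the previous occurrence if it lies inside the window
    if (lastSeen.has(ch) && lastSeen.get(ch) >= start) {
      start = lastSeen.get(ch) + 1
    }
    lastSeen.set(ch, end)
    longest = Math.max(longest, end - start + 1)
  }
  return longest
}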
|
/*
* Notifier.h
*
* Created on: Apr 21, 2021
* Author: ubuntu
*/
#ifndef PSSC_NOTIFIER_H_
#define PSSC_NOTIFIER_H_
#include <mutex>
#include <condition_variable>
namespace util {
class Notifier
{
std::mutex mtx;
std::condition_variable cv;
public:
template<typename Rep, typename Period>
std::cv_status wait_for(const std::chrono::duration<Rep, Period>& time)
{
std::unique_lock<std::mutex> lck(mtx);
return cv.wait_for(lck, time);
}
void wait()
{
std::unique_lock<std::mutex> lck(mtx);
cv.wait(lck);
}
void notify_one()
{
cv.notify_one();
}
void notify_all()
{
cv.notify_all();
}
};
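// Illustrative usage sketch (assumed, not part of the original header):
//
//   util::Notifier notifier;
//   // waiting side (blocks up to 5 seconds):
//   if (notifier.wait_for(std::chrono::seconds(5)) == std::cv_status::timeout) {
//       // handle timeout
//   }
//   // signalling side, typically another thread:
//   notifier.notify_all();
//
// wait()/wait_for() take no predicate, so callers should re-check their own
// condition after waking to cope with spurious or early notifications.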
}
#endif /* PSSC_NOTIFIER_H_ */
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
def test_fit_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
validation_data=({'input_a': input_a_np,
'input_b': input_b_np
},
{
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
metrics = ['acc', 'mae']
model.compile(optimizer, loss, metrics=metrics)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {'dense': 'mse', 'dropout': 'mae'}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['acc', 'mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {'dense': output_d_np,
'dropout': output_e_np},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
def test_invalid_loss_or_metrics(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
with self.assertRaises(TypeError):
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=set(0))
with self.assertRaises(ValueError):
model.compile(loss=None,
optimizer='rms')
def test_model_methods_with_eager_tensors_multi_io(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
metrics = ['mae']
model.compile(
optimizer,
loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=None)
input_a = keras.backend.zeros(shape=(10, 3))
input_b = keras.backend.zeros(shape=(10, 3))
target_d = keras.backend.zeros(shape=(10, 4))
target_e = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
# Test: no shuffle.
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
# Test: validation data.
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
# Test: mix np and tensors.
input_b = np.zeros(shape=(10, 3)).astype('float32')
target_e = np.zeros(shape=(10, 4)).astype('float32')
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0)
model.fit([input_a, input_b], [target_d, target_e],
epochs=1, batch_size=2, verbose=0,
validation_data=([input_a, input_b], [target_d, target_e]))
model.fit(
[input_a, input_b], [target_d, target_e],
epochs=1,
batch_size=5,
verbose=0,
shuffle=False)
model.train_on_batch([input_a, input_b], [target_d, target_e])
model.predict([input_a, input_b], batch_size=5)
model.evaluate([input_a, input_b], [target_d, target_e],
batch_size=2, verbose=0)
model.test_on_batch([input_a, input_b], [target_d, target_e])
def test_model_methods_with_eager_tensors_single_io(self):
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
validation_data=(inputs, targets))
model.evaluate(inputs, targets, batch_size=2, verbose=0)
model.predict(inputs, batch_size=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
class LossWeightingTest(test.TestCase):
def test_class_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train, sample_weight))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score, ref_score)
def test_sample_weights(self):
num_classes = 5
batch_size = 5
weighted_class = 3
train_samples = 300
test_samples = 300
input_dim = 5
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dense(num_classes))
model.add(keras.layers.Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(43)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 4.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 4.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=2,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
def test_temporal_sample_weights(self):
num_classes = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(_, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 2.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode='temporal')
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
loss='binary_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001),
sample_weight_mode=[])
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
class CorrectnessTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_loss_correctness(self):
# Test that training loss is the same in eager and graph
# (by comparing it to a reference value in a deterministic case)
model = keras.Sequential()
model.add(keras.layers.Dense(3,
activation='relu',
input_dim=4,
kernel_initializer='ones'))
model.add(keras.layers.Dense(2,
activation='softmax',
kernel_initializer='ones'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=RMSPropOptimizer(learning_rate=0.001))
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
history = model.fit(x, y, epochs=1, batch_size=10)
self.assertEqual(
np.around(history.history['loss'][-1], decimals=4), 0.6173)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
const packageConfig = {
presets: [
[
'@babel/preset-env',
{
targets: {
browsers: ['>0.2%', 'not dead', 'not op_mini all'],
},
exclude: ['transform-async-to-generator', 'transform-regenerator'],
loose: true,
},
],
'@babel/preset-react',
],
plugins: [
['@babel/plugin-proposal-class-properties', { loose: true }],
['@babel/plugin-proposal-object-rest-spread', { loose: true }],
'@babel/plugin-transform-object-assign',
'@babel/plugin-transform-runtime',
'babel-plugin-transform-react-constant-elements',
['babel-plugin-react-remove-properties', { properties: ['data-testid'] }],
[
'babel-plugin-transform-react-remove-prop-types',
{
mode: 'unsafe-wrap',
},
],
[
'babel-plugin-styled-components',
{
pure: true,
},
],
],
ignore: [/@babel[\\|/]runtime/],
};
const docsConfig = {
presets: [
[
'babel-preset-gatsby',
{
targets: {
browsers: ['>0.25%', 'not dead'],
},
},
],
],
plugins: ['@babel/plugin-proposal-optional-chaining'],
};
module.exports = {
env: {
cjs: packageConfig,
esm: packageConfig,
es: packageConfig,
umd: packageConfig,
'docs-development': docsConfig,
'docs-production': {
...docsConfig,
plugins: [
...docsConfig.plugins,
'babel-plugin-transform-react-constant-elements',
[
'babel-plugin-react-remove-properties',
{ properties: ['data-testid'] },
],
['babel-plugin-transform-react-remove-prop-types', { mode: 'remove' }],
],
},
},
};
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_1.models.auth_access_access_item_file_group import AuthAccessAccessItemFileGroup # noqa: F401,E501
class MappingIdentitiesTarget(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'on_disk': 'bool',
'target': 'AuthAccessAccessItemFileGroup',
'type': 'str'
}
attribute_map = {
'on_disk': 'on_disk',
'target': 'target',
'type': 'type'
}
def __init__(self, on_disk=None, target=None, type=None): # noqa: E501
"""MappingIdentitiesTarget - a model defined in Swagger""" # noqa: E501
self._on_disk = None
self._target = None
self._type = None
self.discriminator = None
if on_disk is not None:
self.on_disk = on_disk
self.target = target
if type is not None:
self.type = type
@property
def on_disk(self):
"""Gets the on_disk of this MappingIdentitiesTarget. # noqa: E501
Identity is preferred on-disk. # noqa: E501
:return: The on_disk of this MappingIdentitiesTarget. # noqa: E501
:rtype: bool
"""
return self._on_disk
@on_disk.setter
def on_disk(self, on_disk):
"""Sets the on_disk of this MappingIdentitiesTarget.
Identity is preferred on-disk. # noqa: E501
:param on_disk: The on_disk of this MappingIdentitiesTarget. # noqa: E501
:type: bool
"""
self._on_disk = on_disk
@property
def target(self):
"""Gets the target of this MappingIdentitiesTarget. # noqa: E501
Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501
:return: The target of this MappingIdentitiesTarget. # noqa: E501
:rtype: AuthAccessAccessItemFileGroup
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this MappingIdentitiesTarget.
Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501
:param target: The target of this MappingIdentitiesTarget. # noqa: E501
:type: AuthAccessAccessItemFileGroup
"""
if target is None:
raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
self._target = target
@property
def type(self):
"""Gets the type of this MappingIdentitiesTarget. # noqa: E501
Origin of identity mapping. # noqa: E501
:return: The type of this MappingIdentitiesTarget. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MappingIdentitiesTarget.
Origin of identity mapping. # noqa: E501
:param type: The type of this MappingIdentitiesTarget. # noqa: E501
:type: str
"""
allowed_values = ["auto", "external", "manual"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MappingIdentitiesTarget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import numpy as np
import dezero
import dezero.functions as F
from dezero import cuda
from dezero.core import Parameter
from dezero.utils import pair
# =============================================================================
# Layer (base class)
# =============================================================================
class Layer:
def __init__(self):
self._params = set()
def __setattr__(self, name, value):
if isinstance(value, (Parameter, Layer)):
self._params.add(name)
super().__setattr__(name, value)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def params(self):
for name in self._params:
obj = self.__dict__[name]
if isinstance(obj, Layer):
yield from obj.params()
else:
yield obj
def cleargrads(self):
for param in self.params():
param.cleargrad()
def to_cpu(self):
for param in self.params():
param.to_cpu()
def to_gpu(self):
for param in self.params():
param.to_gpu()
def _flatten_params(self, params_dict, parent_key=""):
for name in self._params:
obj = self.__dict__[name]
key = parent_key + '/' + name if parent_key else name
if isinstance(obj, Layer):
obj._flatten_params(params_dict, key)
else:
params_dict[key] = obj
def save_weights(self, path):
self.to_cpu()
params_dict = {}
self._flatten_params(params_dict)
array_dict = {key: param.data for key, param in params_dict.items()
if param is not None}
np.savez_compressed(path, **array_dict)
def load_weights(self, path):
npz = np.load(path)
params_dict = {}
self._flatten_params(params_dict)
for key, param in params_dict.items():
param.data = npz[key]
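# Usage sketch: parameters are flattened to 'parent/child' keys, so weights saved from
# one Layer instance can be restored into another with the same structure, e.g.
#   model.save_weights('model.npz')   # moves params to CPU, then np.savez_compressed
#   model.load_weights('model.npz')   # assigns the stored arrays back by flattened key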
# =============================================================================
# Linear / Conv2d
# =============================================================================
class Linear_simple(Layer):
def __init__(self, in_size, out_size, nobias=False, dtype=np.float32):
super().__init__()
I, O = in_size, out_size
W_data = np.random.randn(I, O).astype(dtype) * np.sqrt(1 / I)
self.W = Parameter(W_data, name='W')
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(O, dtype=dtype), name='b')
def __call__(self, x):
y = F.linear(x, self.W, self.b)
return y
class Linear(Layer):
def __init__(self, in_size, out_size=None, nobias=False, dtype=np.float32):
super().__init__()
if out_size is None:
in_size, out_size = None, in_size
self.in_size = in_size
self.out_size = out_size
self.dtype = dtype
self.W = Parameter(None, name='W')
if self.in_size is not None:
self._init_W()
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(out_size, dtype=dtype), name='b')
def _init_W(self, xp=np):
I, O = self.in_size, self.out_size
W_data = xp.random.randn(I, O).astype(self.dtype) * np.sqrt(1 / I)
self.W.data = W_data
def __call__(self, x):
if self.W.data is None:
self.in_size = x.shape[1]
xp = cuda.get_array_module(x)
self._init_W(xp)
y = F.linear(x, self.W, self.b)
return y
class Conv2d(Layer):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
pad=0, nobias=False, dtype=np.float32):
"""Two-dimensional convolutional layer.
Args:
in_channels (int or None): Number of channels of input arrays. If
`None`, parameter initialization will be deferred until the first
forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
kernel_size (int or (int, int)): Size of filters.
stride (int or (int, int)): Stride of filter applications.
pad (int or (int, int)): Spatial padding width for input arrays.
nobias (bool): If `True`, then this function does not use the bias.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.pad = pad
self.dtype = dtype
self.W = Parameter(None, name='W')
if in_channels is not None:
self._init_W()
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(out_channels, dtype=dtype), name='b')
def _init_W(self, xp=np):
C, OC = self.in_channels, self.out_channels
KH, KW = pair(self.kernel_size)
        W_data = xp.random.randn(OC, C, KH, KW).astype(self.dtype) * np.sqrt(
            1 / (C * KH * KW))
self.W.data = W_data
def __call__(self, x):
if self.W.data is None:
self.in_channels = x.shape[1]
xp = cuda.get_array_module(x)
self._init_W(xp)
y = F.conv2d(x, self.W, self.b, self.stride, self.pad)
return y
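# Usage sketch: with `in_channels=None` the kernel is created lazily on the first
# forward pass, inferring the channel count from the input, e.g.
#   conv = Conv2d(None, 16, kernel_size=3, stride=1, pad=1)
#   y = conv(x)   # W is initialized here from x.shape[1]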
# =============================================================================
# RNN / LSTM
# =============================================================================
class RNN(Layer):
def __init__(self, in_size, hidden_size=None):
"""An Elman RNN with tanh.
Args:
in_size (int): The number of features in the input. If unspecified
or `None`, parameter initialization will be deferred until the
first `__call__(x)` at which time the size will be determined.
hidden_size (int): The number of features in the hidden state.
"""
super().__init__()
if hidden_size is None:
in_size, hidden_size = None, in_size
self.x2h = Linear(in_size, hidden_size)
        self.h2h = Linear(hidden_size, hidden_size, nobias=True)
self.h = None
def reset_state(self):
self.h = None
def __call__(self, x):
if self.h is None:
h_new = F.tanh(self.x2h(x))
else:
h_new = F.tanh(self.x2h(x) + self.h2h(self.h))
self.h = h_new
return h_new
class LSTM(Layer):
def __init__(self, in_size, hidden_size=None):
super().__init__()
if hidden_size is None:
in_size, hidden_size = None, in_size
I, H = in_size, hidden_size
self.x2f = Linear(I, H)
self.x2i = Linear(I, H)
self.x2o = Linear(I, H)
self.x2u = Linear(I, H)
self.h2f = Linear(H, H, nobias=True)
self.h2i = Linear(H, H, nobias=True)
self.h2o = Linear(H, H, nobias=True)
self.h2u = Linear(H, H, nobias=True)
self.reset_state()
def reset_state(self):
self.h = None
self.c = None
def __call__(self, x):
if self.h is None:
f = F.sigmoid(self.x2f(x))
i = F.sigmoid(self.x2i(x))
o = F.sigmoid(self.x2o(x))
u = F.tanh(self.x2u(x))
else:
f = F.sigmoid(self.x2f(x) + self.h2f(self.h))
i = F.sigmoid(self.x2i(x) + self.h2i(self.h))
o = F.sigmoid(self.x2o(x) + self.h2o(self.h))
u = F.tanh(self.x2u(x) + self.h2u(self.h))
if self.c is None:
c = (i * u)
else:
c = (f * self.c) + (i * u)
h = o * F.tanh(c)
self.h, self.c = h, c
return h
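# The cell above implements the standard LSTM update, exactly as coded in __call__:
#   f = sigmoid(x2f(x) + h2f(h_prev))    i = sigmoid(x2i(x) + h2i(h_prev))
#   o = sigmoid(x2o(x) + h2o(h_prev))    u = tanh(x2u(x) + h2u(h_prev))
#   c = f * c_prev + i * u               h = o * tanh(c)
# On the first step (h is None) the recurrent h2* terms are simply omitted.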
# =============================================================================
# EmbedID / BatchNorm
# =============================================================================
class EmbedID(Layer):
def __init__(self, in_size, out_size):
super().__init__()
self.W = Parameter(np.random.randn(in_size, out_size), name='W')
def __call__(self, x):
y = self.W[x]
return y
class BatchNorm(Layer):
def __init__(self):
super().__init__()
# `.avg_mean` and `.avg_var` are `Parameter` objects, so they will be
# saved to a file (using `save_weights()`).
# But they don't need grads, so they're just used as `ndarray`.
self.avg_mean = Parameter(None, name='avg_mean')
self.avg_var = Parameter(None, name='avg_var')
self.gamma = Parameter(None, name='gamma')
self.beta = Parameter(None, name='beta')
def _init_params(self, x):
xp = cuda.get_array_module(x)
D = x.shape[1]
if self.avg_mean.data is None:
self.avg_mean.data = xp.zeros(D, dtype=x.dtype)
if self.avg_var.data is None:
self.avg_var.data = xp.ones(D, dtype=x.dtype)
if self.gamma.data is None:
self.gamma.data = xp.ones(D, dtype=x.dtype)
if self.beta.data is None:
self.beta.data = xp.zeros(D, dtype=x.dtype)
def __call__(self, x):
if self.avg_mean.data is None:
self._init_params(x)
return F.batch_nrom(x, self.gamma, self.beta, self.avg_mean.data,
self.avg_var.data)
|
/*!
loadCSS: load a CSS file asynchronously.
[c]2015 @scottjehl, Filament Group, Inc.
Licensed MIT
*/
(function(w){
"use strict";
/* exported loadCSS */
var loadCSS = function( href, before, media ){
// Arguments explained:
// `href` [REQUIRED] is the URL for your CSS file.
// `before` [OPTIONAL] is the element the script should use as a reference for injecting our stylesheet <link> before
// By default, loadCSS attempts to inject the link after the last stylesheet or script in the DOM. However, you might desire a more specific location in your document.
// `media` [OPTIONAL] is the media type or query of the stylesheet. By default it will be 'all'
var doc = w.document;
var ss = doc.createElement( "link" );
var ref;
if( before ){
ref = before;
}
else {
var refs = ( doc.body || doc.getElementsByTagName( "head" )[ 0 ] ).childNodes;
ref = refs[ refs.length - 1];
}
var sheets = doc.styleSheets;
ss.rel = "stylesheet";
ss.href = href;
// temporarily set media to something inapplicable to ensure it'll fetch without blocking render
ss.media = "only x";
// Inject link
// Note: the ternary preserves the existing behavior of "before" argument, but we could choose to change the argument to "after" in a later release and standardize on ref.nextSibling for all refs
// Note: `insertBefore` is used instead of `appendChild`, for safety re: http://www.paulirish.com/2011/surefire-dom-element-insertion/
ref.parentNode.insertBefore( ss, ( before ? ref : ref.nextSibling ) );
	// A method (exposed on the return object for external use) that mimics onload by polling document.styleSheets until it includes the new sheet.
var onloadcssdefined = function( cb ){
var resolvedHref = ss.href;
var i = sheets.length;
while( i-- ){
if( sheets[ i ].href === resolvedHref ){
return cb();
}
}
setTimeout(function() {
onloadcssdefined( cb );
});
};
// once loaded, set link's media back to `all` so that the stylesheet applies once it loads
ss.onloadcssdefined = onloadcssdefined;
onloadcssdefined(function() {
ss.media = media || "all";
});
return ss;
};
// commonjs
if( typeof module !== "undefined" ){
module.exports = loadCSS;
}
else {
w.loadCSS = loadCSS;
}
}( typeof global !== "undefined" ? global : this ));
|
define(["exports", "./lib/default-template-processor.js", "./lib/template-result.js", "./lib/directive.js", "./lib/dom.js", "./lib/part.js", "./lib/parts.js", "./lib/render.js", "./lib/template-factory.js", "./lib/template-instance.js", "./lib/template.js"], function (_exports, _defaultTemplateProcessor, _templateResult, _directive, _dom, _part, _parts, _render, _templateFactory, _templateInstance, _template) {
"use strict";
Object.defineProperty(_exports, "__esModule", {
value: true
});
Object.defineProperty(_exports, "DefaultTemplateProcessor", {
enumerable: true,
get: function get() {
return _defaultTemplateProcessor.DefaultTemplateProcessor;
}
});
Object.defineProperty(_exports, "defaultTemplateProcessor", {
enumerable: true,
get: function get() {
return _defaultTemplateProcessor.defaultTemplateProcessor;
}
});
Object.defineProperty(_exports, "SVGTemplateResult", {
enumerable: true,
get: function get() {
return _templateResult.SVGTemplateResult;
}
});
Object.defineProperty(_exports, "TemplateResult", {
enumerable: true,
get: function get() {
return _templateResult.TemplateResult;
}
});
Object.defineProperty(_exports, "directive", {
enumerable: true,
get: function get() {
return _directive.directive;
}
});
Object.defineProperty(_exports, "isDirective", {
enumerable: true,
get: function get() {
return _directive.isDirective;
}
});
Object.defineProperty(_exports, "removeNodes", {
enumerable: true,
get: function get() {
return _dom.removeNodes;
}
});
Object.defineProperty(_exports, "reparentNodes", {
enumerable: true,
get: function get() {
return _dom.reparentNodes;
}
});
Object.defineProperty(_exports, "noChange", {
enumerable: true,
get: function get() {
return _part.noChange;
}
});
Object.defineProperty(_exports, "nothing", {
enumerable: true,
get: function get() {
return _part.nothing;
}
});
Object.defineProperty(_exports, "AttributeCommitter", {
enumerable: true,
get: function get() {
return _parts.AttributeCommitter;
}
});
Object.defineProperty(_exports, "AttributePart", {
enumerable: true,
get: function get() {
return _parts.AttributePart;
}
});
Object.defineProperty(_exports, "BooleanAttributePart", {
enumerable: true,
get: function get() {
return _parts.BooleanAttributePart;
}
});
Object.defineProperty(_exports, "EventPart", {
enumerable: true,
get: function get() {
return _parts.EventPart;
}
});
Object.defineProperty(_exports, "isIterable", {
enumerable: true,
get: function get() {
return _parts.isIterable;
}
});
Object.defineProperty(_exports, "isPrimitive", {
enumerable: true,
get: function get() {
return _parts.isPrimitive;
}
});
Object.defineProperty(_exports, "NodePart", {
enumerable: true,
get: function get() {
return _parts.NodePart;
}
});
Object.defineProperty(_exports, "PropertyCommitter", {
enumerable: true,
get: function get() {
return _parts.PropertyCommitter;
}
});
Object.defineProperty(_exports, "PropertyPart", {
enumerable: true,
get: function get() {
return _parts.PropertyPart;
}
});
Object.defineProperty(_exports, "parts", {
enumerable: true,
get: function get() {
return _render.parts;
}
});
Object.defineProperty(_exports, "render", {
enumerable: true,
get: function get() {
return _render.render;
}
});
Object.defineProperty(_exports, "templateCaches", {
enumerable: true,
get: function get() {
return _templateFactory.templateCaches;
}
});
Object.defineProperty(_exports, "templateFactory", {
enumerable: true,
get: function get() {
return _templateFactory.templateFactory;
}
});
Object.defineProperty(_exports, "TemplateInstance", {
enumerable: true,
get: function get() {
return _templateInstance.TemplateInstance;
}
});
Object.defineProperty(_exports, "createMarker", {
enumerable: true,
get: function get() {
return _template.createMarker;
}
});
Object.defineProperty(_exports, "isTemplatePartActive", {
enumerable: true,
get: function get() {
return _template.isTemplatePartActive;
}
});
Object.defineProperty(_exports, "Template", {
enumerable: true,
get: function get() {
return _template.Template;
}
});
_exports.svg = _exports.html = void 0;
/**
* @license
* Copyright (c) 2017 The Polymer Project Authors. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* The complete set of authors may be found at
* http://polymer.github.io/AUTHORS.txt
* The complete set of contributors may be found at
* http://polymer.github.io/CONTRIBUTORS.txt
* Code distributed by Google as part of the polymer project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
*/
/**
*
* Main lit-html module.
*
* Main exports:
*
* - [[html]]
* - [[svg]]
* - [[render]]
*
* @module lit-html
* @preferred
*/
/**
* Do not remove this comment; it keeps typedoc from misplacing the module
* docs.
*/
// TODO(justinfagnani): remove line when we get NodePart moving methods
// IMPORTANT: do not change the property name or the assignment expression.
// This line will be used in regexes to search for lit-html usage.
// TODO(justinfagnani): inject version number at build time
if (typeof window !== 'undefined') {
(window['litHtmlVersions'] || (window['litHtmlVersions'] = [])).push('1.2.1');
}
/**
* Interprets a template literal as an HTML template that can efficiently
* render to and update a container.
*/
var html = function html(strings) {
for (var _len = arguments.length, values = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
values[_key - 1] = arguments[_key];
}
return new _templateResult.TemplateResult(strings, values, 'html', _defaultTemplateProcessor.defaultTemplateProcessor);
};
/**
* Interprets a template literal as an SVG template that can efficiently
* render to and update a container.
*/
_exports.html = html;
var svg = function svg(strings) {
for (var _len2 = arguments.length, values = new Array(_len2 > 1 ? _len2 - 1 : 0), _key2 = 1; _key2 < _len2; _key2++) {
values[_key2 - 1] = arguments[_key2];
}
return new _templateResult.SVGTemplateResult(strings, values, 'svg', _defaultTemplateProcessor.defaultTemplateProcessor);
};
_exports.svg = svg;
});
|
# ------------------------------------------------------------------------
# SeqFormer
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py
"""
from pathlib import Path
import torch
import torch.utils.data
from pycocotools import mask as coco_mask
from .torchvision_datasets import CocoDetection as TvCocoDetection
from util.misc import get_local_rank, get_local_size
import datasets.transforms as T
import random
class CocoDetection(TvCocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):
super(CocoDetection, self).__init__(img_folder, ann_file,
cache_mode=cache_mode, local_rank=local_rank, local_size=local_size)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
instance_check = False
while not instance_check:
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = {'image_id': image_id, 'annotations': target}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
            if len(target['labels']) == 0:  # no instances left after transforms; resample another index
idx = random.randint(0,self.__len__()-1)
else:
instance_check=True
return img, target
def convert_coco_poly_to_mask(segmentations, height, width):
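    # Rasterizes COCO polygon segmentations: each entry is converted to RLE via
    # pycocotools, decoded, and reduced over its parts into a single H x W mask;
    # the results are stacked into an [N, H, W] tensor ([0, H, W] if empty).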
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
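        # COCO boxes are [x, y, w, h]; convert to [x0, y0, x1, y1] and clip to the image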
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
def make_coco_transforms(image_set):
normalize = T.Compose([
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768]
# scales = [296, 328, 360, 392]
if image_set == 'train':
return T.Compose([
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose([
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'val':
return T.Compose([
T.RandomResize([800], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
def build(image_set, args):
root = Path(args.coco_path)
assert root.exists(), f'provided COCO path {root} does not exist'
mode = 'instances'
dataset_type = args.dataset_type
if args.dataset_file == 'coco':
PATHS = {
"train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
"val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
}
img_folder, ann_file = PATHS[image_set]
dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks,
cache_mode=args.cache_mode, local_rank=get_local_rank(), local_size=get_local_size())
return dataset
|
# noqa: E501
# suppress TF info logs (level 1 hides INFO; level 2 would also hide warnings, level 3 errors)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
# dynamically allocate GPU memory
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
assert tf.config.experimental.get_memory_growth(physical_devices[0])
except ValueError:
print('Invalid device or cannot modify virtual devices once initialized.')
pass
except IndexError:
print('No GPU found')
pass
import pylib.pc as pc
from pylib.pc import layers
import pylib.io as io
import numpy as np
import tensorflow_graphics
import time
import h5py
# for graph mode debugging
# tf.config.run_functions_eagerly(True)
# np.random.seed(42)
# tf.random.set_seed(42)
quick_test = False
# -- loading data ---
from data_paths import data_dir, hdf5_tmp_dir
num_classes = 40 # modelnet 10 or 40
points_per_file = 10000 # number of points loaded per model
samples_per_model = 1024 # number of input points per file
batch_size = 16
category_names = []
with open(data_dir + f'modelnet{num_classes}_shape_names.txt') as inFile:
for line in inFile:
category_names.append(line.replace('\n', ''))
train_set = []
train_labels = []
with open(data_dir + f'modelnet{num_classes}_train.txt') as inFile:
for line in inFile:
line = line.replace('\n', '')
category = line[:-5]
train_set.append(data_dir + category + '/' + line + '.txt')
if category not in category_names:
raise ValueError('Unknown category ' + category)
train_labels.append(category_names.index(category))
test_set = []
test_labels = []
with open(data_dir + f'modelnet{num_classes}_test.txt') as inFile:
for line in inFile:
line = line.replace('\n', '')
category = line[:-5]
test_set.append(data_dir + category + '/' + line + '.txt')
if category not in category_names:
raise ValueError('Unknown category ' + category)
test_labels.append(category_names.index(category))
num_classes = len(category_names)
if not os.path.exists(hdf5_tmp_dir):
os.mkdir(hdf5_tmp_dir)
if os.path.exists(hdf5_tmp_dir + "/train_data.hdf5"):
h5File = h5py.File(hdf5_tmp_dir + "/train_data.hdf5", "r")
train_data_points = h5File["train_data"][()]
h5File.close()
else:
train_data_points = np.empty([len(train_set), points_per_file, 3])
print(f'### loading modelnet{num_classes} train ###')
for i, filename in enumerate(train_set):
points, _ = \
io.load_points_from_file_to_numpy(filename,
max_num_points=points_per_file)
points = points
train_data_points[i] = points
if i % 500 == 0:
print(f'{i}/{len(train_set)}')
if quick_test and i > 100:
break
h5File = h5py.File(hdf5_tmp_dir + "/train_data.hdf5", "w")
h5File.create_dataset("train_data", data=train_data_points)
h5File.close()
if os.path.exists(hdf5_tmp_dir + "/test_data.hdf5"):
h5File = h5py.File(hdf5_tmp_dir + "/test_data.hdf5", "r")
test_data_points = h5File["test_data"][()]
h5File.close()
else:
test_data_points = np.empty([len(test_set), points_per_file, 3])
print(f'### loading modelnet{num_classes} test ###')
for i, filename in enumerate(test_set):
points, _ = \
io.load_points_from_file_to_numpy(filename,
max_num_points=points_per_file)
points = points
test_data_points[i] = points
if i % 500 == 0:
print(f'{i}/{len(test_set)}')
if quick_test and i > 100:
break
h5File = h5py.File(hdf5_tmp_dir + "/test_data.hdf5", "w")
h5File.create_dataset("test_data", data=test_data_points)
h5File.close()
#-----------------------------------------------
def identity_layer(x, *args, **kwargs):
''' Layer which returns the input features unchanged.
'''
return x
class conv_block(tf.Module):
''' A small ResNet block
Args:
num_features_in: An `int`, the number of input features.
num_features_out: An `int`, the number of output features.
layer_type: A `string`, the type of convolution used,
can be 'MCConv', 'KPConv', 'PointConv'.
strided: A `bool`, indicates if the spatial resolution changes.
If `True` uses a MaxPool layer to adjust the spatial dimension.
'''
def __init__(self,
num_features_in,
num_features_out,
layer_type,
strided=False):
self.res_layers = []
self.skip_layers = []
self.BN_layers = []
self.activation_layers = []
# -- residual layers --
residual_feature_size = num_features_out // 4
self.BN_layers.append(tf.keras.layers.BatchNormalization(momentum=0.9))
self.activation_layers.append(tf.keras.layers.LeakyReLU())
self.res_layers.append(layers.Conv1x1(
num_features_in=num_features_in,
num_features_out=residual_feature_size))
self.BN_layers.append(tf.keras.layers.BatchNormalization(momentum=0.9))
self.activation_layers.append(tf.keras.layers.LeakyReLU())
if layer_type == 'MCConv':
self.res_layers.append(layers.MCConv(
num_features_in=residual_feature_size,
num_features_out=residual_feature_size,
num_dims=3,
num_mlps=1,
mlp_size=[16]))
elif layer_type == 'PointConv':
self.res_layers.append(layers.PointConv(
num_features_in=residual_feature_size,
num_features_out=residual_feature_size,
num_dims=3,
size_hidden=32))
elif layer_type == 'KPConv':
self.res_layers.append(layers.KPConv(
num_features_in=residual_feature_size,
num_features_out=residual_feature_size,
num_dims=3,
num_kernel_points=15))
else:
raise ValueError("Unknown layer type!")
self.BN_layers.append(tf.keras.layers.BatchNormalization(momentum=0.9))
self.activation_layers.append(tf.keras.layers.LeakyReLU())
self.res_layers.append(layers.Conv1x1(
num_features_in=residual_feature_size,
num_features_out=num_features_out))
# -- skip layers --
if strided:
self.skip_layers.append(layers.MaxPooling())
else:
self.skip_layers.append(identity_layer)
if num_features_in != num_features_out:
self.skip_layers.append(
tf.keras.layers.BatchNormalization(momentum=0.9))
self.skip_layers.append(tf.keras.layers.LeakyReLU())
self.skip_layers.append(layers.Conv1x1(
num_features_in=num_features_in,
num_features_out=num_features_out))
def __call__(self,
features,
point_cloud_in,
point_cloud_out,
conv_radius,
pool_radius=None,
training=False):
'''
Args:
features: The input features.
point_cloud_in: A `PointCloud` instance, on which the input features are
defined.
point_cloud_out: A `PointCloud` instance, on which the output features
are defined.
conv_radius: The radius used by the convolutional layer.
pool_radius: The radius of the pooling layer, only used if strided.
training: A `bool`, passed to batch norm layers.
Returns:
Computed features.
'''
# -- residual branch --
# BN + lReLU
res = self.BN_layers[0](features, training=training)
res = self.activation_layers[0](res)
# conv1x1, downsampling in feature dimension
res = self.res_layers[0](res, point_cloud_in)
# BN + lReLU
res = self.BN_layers[1](res, training=training)
res = self.activation_layers[1](res)
# spatial convolution
res = self.res_layers[1](res, point_cloud_in, point_cloud_out, conv_radius)
# BN + lReLU
res = self.BN_layers[2](res, training=training)
res = self.activation_layers[2](res)
# conv 1x1, upsampling in feature dimension
res = self.res_layers[2](res, point_cloud_out)
# -- skip connection --
# spatial maxpooling
skip = self.skip_layers[0](features, point_cloud_in, point_cloud_out,
pool_radius)
if len(self.skip_layers) > 1:
# BN + lReLU
skip = self.skip_layers[1](skip, training=training)
skip = self.skip_layers[2](skip)
# conv 1x1, upsampling in feature dimension
skip = self.skip_layers[3](skip, point_cloud_out)
# --- Add ---
return res + skip
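# The block above follows a pre-activation bottleneck pattern:
#   residual: BN-LReLU-Conv1x1 (down to out/4) -> BN-LReLU-spatial conv -> BN-LReLU-Conv1x1 (up to out)
#   skip:     MaxPooling if strided, plus BN-LReLU-Conv1x1 when the feature size changes
# and the two branches are summed.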
class mymodel(tf.Module):
''' Model architecture.
Args:
    feature_sizes: A `list` of `ints`, the feature dimensions. Shape `[L+3]`.
    pool_radii: A `list` of `floats`, the radii used for spatial pooling
of the point clouds. Shape `[L]`.
conv_radii: A `list` of `floats`, the radii used by the convolution
layers. Shape `[L]`.
layer_type: A `string`, the type of convolution used,
can be 'MCConv', 'KPConv', 'PointConv'.
    sampling_method: A `string`, the method used to sample the point clouds,
      can be 'poisson disk' or 'cell average'.
'''
def __init__(self,
feature_sizes,
pool_radii,
conv_radii,
layer_type='MCConv',
sampling_method='cell average',
dropout_rate=0.0):
super().__init__(name=None)
self.sampling_method = sampling_method
self.num_levels = len(pool_radii)
self.pool_radii = pool_radii.reshape(-1, 1)
self.conv_radii = conv_radii
self.init_conv = []
self.strided_conv_blocks = []
self.conv_blocks = []
self.batch_layers = []
self.dense_layers = []
self.activations = []
self.dropouts = []
# -- encoder network
for i in range(self.num_levels):
if i == 0:
if layer_type == 'MCConv':
self.init_conv.append(layers.MCConv(
num_features_in=1,
num_features_out=feature_sizes[i],
num_dims=3,
num_mlps=1,
mlp_size=[16]))
elif layer_type == 'PointConv':
self.init_conv.append(layers.PointConv(
num_features_in=1,
num_features_out=feature_sizes[i],
num_dims=3,
size_hidden=32))
elif layer_type == 'KPConv':
self.init_conv.append(layers.KPConv(
num_features_in=1,
num_features_out=feature_sizes[i],
num_dims=3,
num_kernel_points=15))
else:
raise ValueError("Unknown layer type!")
else:
self.strided_conv_blocks.append(conv_block(feature_sizes[i - 1],
feature_sizes[i],
layer_type,
strided=True))
self.conv_blocks.append(conv_block(feature_sizes[i],
feature_sizes[i],
layer_type,
strided=False))
self.global_pooling = layers.GlobalAveragePooling()
# -- classification head ---
self.batch_layers.append(tf.keras.layers.BatchNormalization(momentum=0.9))
self.activations.append(tf.keras.layers.LeakyReLU())
self.dropouts.append(tf.keras.layers.Dropout(dropout_rate))
self.dense_layers.append(tf.keras.layers.Dense(feature_sizes[-2]))
self.batch_layers.append(tf.keras.layers.BatchNormalization(momentum=0.9))
self.activations.append(tf.keras.layers.LeakyReLU())
self.dropouts.append(tf.keras.layers.Dropout(dropout_rate))
self.dense_layers.append(tf.keras.layers.Dense(feature_sizes[-1]))
@tf.function(
input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.float32),
tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),
tf.TensorSpec(shape=[None], dtype=tf.int32),
tf.TensorSpec(shape=None, dtype=tf.bool)]
)
def __call__(self,
points,
features,
sizes,
training):
''' Evaluates network.
Args:
points: The point coordinates.
features: Input features.
sizes: sizes of the point clouds
training: A `bool`, passed to the batch norm layers.
Returns:
The logits per class.
'''
# spatial downsampling of the point cloud
point_cloud = pc.PointCloud(points, sizes=sizes, batch_size=batch_size)
point_hierarchy = pc.PointHierarchy(point_cloud,
self.pool_radii,
self.sampling_method)
# encoder network
for i in range(self.num_levels):
if i == 0:
num_pts_in = tf.shape(point_hierarchy[i + 1]._points)[0]
features = self.init_conv[0](
features[0:num_pts_in, :],
point_hierarchy[i + 1],
point_hierarchy[i + 1],
self.conv_radii[i])
else:
features = self.strided_conv_blocks[i - 1](
features,
point_hierarchy[i],
point_hierarchy[i + 1],
self.pool_radii[i],
self.pool_radii[i],
training=training)
features = self.conv_blocks[i](features,
point_hierarchy[i + 1],
point_hierarchy[i + 1],
self.conv_radii[i],
training=training)
features = self.global_pooling(features, point_hierarchy[-1])
# classification head
features = self.batch_layers[-2](features, training)
features = self.activations[-2](features)
features = self.dropouts[-2](features, training=training)
features = self.dense_layers[-2](features)
features = self.batch_layers[-1](features, training)
features = self.dropouts[-1](features, training=training)
features = self.activations[-1](features)
return self.dense_layers[-1](features)
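# Usage sketch (see the bottom of this script): the model is called as
#   logits = model(points, features, sizes, training)
# where `points` is [batch, samples, 3], `features` is a constant ones tensor from the
# data generator and `sizes` holds the number of points per cloud in the batch.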
#-----------------------------------------------
class modelnet_data_generator(tf.keras.utils.Sequence):
''' Small generator of batched data.
'''
def __init__(self,
points,
labels,
batch_size,
augment):
self.points = points
self.labels = np.array(labels, dtype=int)
self.batch_size = batch_size
self.epoch_size = len(self.points)
    self.sizes = np.ones([batch_size], dtype=np.int32) * samples_per_model
self.augment = augment
# shuffle data before training
self.on_epoch_end()
def __len__(self):
# number of batches per epoch
return(int(np.floor(self.epoch_size / self.batch_size)))
def __call__(self):
''' Loads batch and increases batch index.
'''
data = self.__getitem__(self.index)
self.index += 1
return data
def __getitem__(self, index, samples_per_model=samples_per_model):
''' Loads data of current batch and samples random subset of the points.
'''
# constant input feature
features = tf.ones([self.batch_size, samples_per_model, 1])
# sample points
self_indices = \
self.order[index * self.batch_size:(index + 1) * self.batch_size]
sampled_points = np.empty([self.batch_size, samples_per_model, 3])
out_labels = np.empty([self.batch_size])
for batch in range(self.batch_size):
sampled_points[batch] = \
self.points[self_indices[batch]][0:samples_per_model]
out_labels[batch] = self.labels[self_indices[batch]]
if self.augment:
        # Data augmentation: anisotropic scaling with per-axis factors in [0.9, 1.1).
cur_scaling = np.random.uniform(size=(1, 3)) * 0.2 + 0.9
sampled_points[batch] = sampled_points[batch] * cur_scaling
return sampled_points, features, out_labels
def on_epoch_end(self):
''' Shuffles data and resets batch index.
'''
self.order = np.random.permutation(np.arange(0, len(self.points)))
self.index = 0
#-----------------------------------------------
num_epochs = 400
if quick_test:
num_epochs = 2
dropout_rate = 0.5
# initialize data generators
gen_train = modelnet_data_generator(
train_data_points, train_labels, batch_size, augment=True)
gen_test = modelnet_data_generator(
test_data_points, test_labels, batch_size, augment=False)
# loss function and optimizer
lr_decay = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.001,
decay_steps=20 * len(gen_train), # every 20th epoch
decay_rate=0.7,
staircase=True)
#optimizer = tf.keras.optimizers.Adam(learning_rate=lr_decay)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr_decay)
loss_function = tf.keras.losses.SparseCategoricalCrossentropy()
# --- Training Loop---
def training(model,
num_epochs,
epoch_print=1):
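  # Runs `num_epochs` of training over gen_train plus a full validation pass over
  # gen_test each epoch, tracking mean loss and sparse categorical accuracy
  # (`epoch_print` is currently unused).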
train_loss_results = []
train_accuracy_results = []
test_loss_results = []
test_accuracy_results = []
for epoch in range(num_epochs):
time_epoch_start = time.time()
# --- Training ---
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
print()
print('Epoch {:03d} Start (LR: {:.6f})'.format(
epoch, lr_decay(epoch * len(gen_train))))
print()
iterBatch = 0
for points, features, labels in gen_train:
with tf.GradientTape() as tape:
logits = model(points, features, gen_train.sizes, training=True)
pred = tf.nn.softmax(logits, axis=-1)
loss = loss_function(y_true=labels, y_pred=pred)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
epoch_loss_avg.update_state(loss)
epoch_accuracy.update_state(labels, pred)
if iterBatch % 10 == 0:
print("\r {:03d} / {:03d} Loss: {:.3f}, Accuracy: {:.3%} ".format(
iterBatch, len(gen_train),
epoch_loss_avg.result(),
epoch_accuracy.result()), end="")
iterBatch += 1
print()
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
# --- Validation ---
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
for points, features, labels in gen_test:
logits = model(points, features, gen_test.sizes, training=False)
pred = tf.nn.softmax(logits, axis=-1)
loss = loss_function(y_true=labels, y_pred=pred)
epoch_loss_avg.update_state(loss)
epoch_accuracy.update_state(labels, pred)
test_loss_results.append(epoch_loss_avg.result())
test_accuracy_results.append(epoch_accuracy.result())
time_epoch_end = time.time()
# End epoch
print('Epoch {:03d} Time: {:.3f}s'.format(
epoch,
time_epoch_end - time_epoch_start))
print('Training: Loss: {:.3f}, Accuracy: {:.3%}'.format(
train_loss_results[-1],
train_accuracy_results[-1]))
print('Validation: Loss: {:.3f}, Accuracy: {:.3%}'.format(
test_loss_results[-1],
test_accuracy_results[-1]))
# ----------------------------
# feature_sizes = [1, 128, 256, 512, 128, num_classes]
# pool_radii = np.array([0.1, 0.2, 0.4])
feature_sizes = [128, 256, 512, 1024, 2048, 1024, num_classes]
pool_radii = np.array([0.02, 0.04, 0.08, 0.16, 0.32])
conv_radii = pool_radii * 2.0
model_MC = mymodel(feature_sizes, pool_radii, conv_radii,
layer_type='MCConv', dropout_rate=dropout_rate)
training(model_MC, num_epochs)
"""
model_KP = mymodel(feature_sizes, pool_radii, conv_radii,
layer_type='KPConv', dropout_rate=dropout_rate)
training(model_KP, num_epochs)
model_PC = mymodel(feature_sizes, pool_radii, conv_radii,
layer_type='PointConv', dropout_rate=dropout_rate)
training(model_PC, num_epochs)
"""
|
"""Utility and example functions for calculating statistics."""
import numpy
from toolkit.typing import Samples
def mean(samples: Samples) -> float:
"""Calculate sample mean from a dataset."""
# For a sequence of floats numpy.sum will return a float.
return numpy.sum(samples) / len(samples) # type: ignore
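if __name__ == "__main__":
    # Minimal usage sketch; assumes Samples accepts a plain sequence of floats.
    print(mean([1.0, 2.0, 3.0]))  # -> 2.0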
|
# Copyright (c) 2018-2019 Manfred Moitzi
# License: MIT License
from ezdxf.lldxf.types import DXFVertex
def test_init():
v = DXFVertex(10, (1, 2, 3))
assert v.value == (1., 2., 3.)
def test_clone():
v = DXFVertex(10, (1, 2, 3))
v2 = v.clone()
assert v2.code == v.code
assert v2.value == v.value
def test_dxf_tags():
v = DXFVertex(10, (1, 2, 3))
tags = tuple(v.dxftags())
assert tags[0] == (10, 1.)
assert tags[1] == (20, 2.)
assert tags[2] == (30, 3.)
def test_dxf_string():
v = DXFVertex(10, (1, 2, 3))
assert v.dxfstr() == " 10\n1.0\n 20\n2.0\n 30\n3.0\n"
def test_xdata_string():
v = DXFVertex(1011, (1, 2, 3))
assert v.dxfstr() == "1011\n1.0\n1021\n2.0\n1031\n3.0\n"
|
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: python %s <image directory> <output file>' %
sys.argv[0])
sys.exit(1)
image_dir = sys.argv[1]
output_file = sys.argv[2]
try:
        output_fd = open(output_file, 'wb')
except Exception as e:
print(e)
sys.exit(1)
try:
image_files = sorted(os.listdir(image_dir))
except OSError as e:
print(e)
sys.exit(1)
num_images = len(image_files)
if num_images == 0:
print('No images found.')
sys.exit(1)
print('Output file: %s' % output_file)
for (i, image_file) in enumerate(image_files):
sys.stdout.write('\r')
        sys.stdout.write('Processing image %u of %u' % (i + 1, num_images))
sys.stdout.flush()
filename = os.path.join(image_dir, image_file)
im = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
gray_image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
for row in range(gray_image.shape[0]):
            tmp = np.packbits(gray_image[row, :])
            output_fd.write(tmp.tobytes())
output_fd.close()
sys.stdout.write('\n')
|
/*
* (C) Copyright 2008
* Sergei Poselenov, Emcraft Systems, sposelenov@emcraft.com.
*
* Copyright 2004 Freescale Semiconductor.
* (C) Copyright 2002,2003, Motorola Inc.
* Xianghua Xiao, (X.Xiao@motorola.com)
*
* (C) Copyright 2002 Scott McNutt <smcnutt@artesyncp.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <pci.h>
#include <asm/processor.h>
#include <asm/immap_85xx.h>
#include <ioports.h>
#include <flash.h>
#include <libfdt.h>
#include <fdt_support.h>
#include <asm/io.h>
#include <i2c.h>
#include <mb862xx.h>
#include <video_fb.h>
#include "upm_table.h"
DECLARE_GLOBAL_DATA_PTR;
extern flash_info_t flash_info[]; /* FLASH chips info */
extern GraphicDevice mb862xx;
void local_bus_init (void);
ulong flash_get_size (ulong base, int banknum);
int checkboard (void)
{
volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
char buf[64];
int f;
int i = getenv_f("serial#", buf, sizeof(buf));
#ifdef CONFIG_PCI
char *src;
#endif
puts("Board: Socrates");
if (i > 0) {
puts(", serial# ");
puts(buf);
}
putc('\n');
#ifdef CONFIG_PCI
/* Check the PCI_clk sel bit */
if (in_be32(&gur->porpllsr) & (1<<15)) {
src = "SYSCLK";
f = CONFIG_SYS_CLK_FREQ;
} else {
src = "PCI_CLK";
f = CONFIG_PCI_CLK_FREQ;
}
printf ("PCI1: 32 bit, %d MHz (%s)\n", f/1000000, src);
#else
printf ("PCI1: disabled\n");
#endif
/*
* Initialize local bus.
*/
local_bus_init ();
return 0;
}
int misc_init_r (void)
{
/*
* Adjust flash start and offset to detected values
*/
gd->bd->bi_flashstart = 0 - gd->bd->bi_flashsize;
gd->bd->bi_flashoffset = 0;
/*
* Check if boot FLASH isn't max size
*/
if (gd->bd->bi_flashsize < (0 - CONFIG_SYS_FLASH0)) {
set_lbc_or(0, gd->bd->bi_flashstart |
(CONFIG_SYS_OR0_PRELIM & 0x00007fff));
set_lbc_br(0, gd->bd->bi_flashstart |
(CONFIG_SYS_BR0_PRELIM & 0x00007fff));
/*
* Re-check to get correct base address
*/
flash_get_size(gd->bd->bi_flashstart, CONFIG_SYS_MAX_FLASH_BANKS - 1);
}
/*
* Check if only one FLASH bank is available
*/
if (gd->bd->bi_flashsize != CONFIG_SYS_MAX_FLASH_BANKS * (0 - CONFIG_SYS_FLASH0)) {
set_lbc_or(1, 0);
set_lbc_br(1, 0);
/*
* Re-do flash protection upon new addresses
*/
flash_protect (FLAG_PROTECT_CLEAR,
gd->bd->bi_flashstart, 0xffffffff,
&flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);
/* Monitor protection ON by default */
flash_protect (FLAG_PROTECT_SET,
CONFIG_SYS_MONITOR_BASE, CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
&flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);
/* Environment protection ON by default */
flash_protect (FLAG_PROTECT_SET,
CONFIG_ENV_ADDR,
CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1,
&flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);
/* Redundant environment protection ON by default */
flash_protect (FLAG_PROTECT_SET,
CONFIG_ENV_ADDR_REDUND,
CONFIG_ENV_ADDR_REDUND + CONFIG_ENV_SECT_SIZE - 1,
&flash_info[CONFIG_SYS_MAX_FLASH_BANKS - 1]);
}
return 0;
}
/*
* Initialize Local Bus
*/
void local_bus_init (void)
{
volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
volatile ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
sys_info_t sysinfo;
uint clkdiv;
uint lbc_mhz;
uint lcrr = CONFIG_SYS_LBC_LCRR;
get_sys_info (&sysinfo);
clkdiv = lbc->lcrr & LCRR_CLKDIV;
lbc_mhz = sysinfo.freqSystemBus / 1000000 / clkdiv;
/* Disable PLL bypass for Local Bus Clock >= 66 MHz */
if (lbc_mhz >= 66)
lcrr &= ~LCRR_DBYP; /* DLL Enabled */
else
lcrr |= LCRR_DBYP; /* DLL Bypass */
out_be32 (&lbc->lcrr, lcrr);
asm ("sync;isync;msync");
out_be32 (&lbc->ltesr, 0xffffffff); /* Clear LBC error interrupts */
out_be32 (&lbc->lteir, 0xffffffff); /* Enable LBC error interrupts */
out_be32 (&ecm->eedr, 0xffffffff); /* Clear ecm errors */
out_be32 (&ecm->eeer, 0xffffffff); /* Enable ecm errors */
/* Init UPMA for FPGA access */
out_be32 (&lbc->mamr, 0x44440); /* Use a customer-supplied value */
upmconfig (UPMA, (uint *)UPMTableA, sizeof(UPMTableA)/sizeof(int));
/* Init UPMB for Lime controller access */
out_be32 (&lbc->mbmr, 0x444440); /* Use a customer-supplied value */
upmconfig (UPMB, (uint *)UPMTableB, sizeof(UPMTableB)/sizeof(int));
}
#if defined(CONFIG_PCI)
/*
* Initialize PCI Devices, report devices found.
*/
#ifndef CONFIG_PCI_PNP
static struct pci_config_table pci_mpc85xxads_config_table[] = {
{PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_IDSEL_NUMBER, PCI_ANY_ID,
pci_cfgfunc_config_device, {PCI_ENET0_IOADDR,
PCI_ENET0_MEMADDR,
PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER}},
{}
};
#endif
static struct pci_controller hose = {
#ifndef CONFIG_PCI_PNP
config_table:pci_mpc85xxads_config_table,
#endif
};
#endif /* CONFIG_PCI */
void pci_init_board (void)
{
#ifdef CONFIG_PCI
pci_mpc85xx_init (&hose);
#endif /* CONFIG_PCI */
}
#ifdef CONFIG_BOARD_EARLY_INIT_R
int board_early_init_r (void)
{
volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
/* set and reset the GPIO pin 2 which will reset the W83782G chip */
out_8((unsigned char*)&gur->gpoutdr, 0x3F );
out_be32((unsigned int*)&gur->gpiocr, 0x200 ); /* enable GPOut */
udelay(200);
out_8( (unsigned char*)&gur->gpoutdr, 0x1F );
return (0);
}
#endif /* CONFIG_BOARD_EARLY_INIT_R */
#if defined(CONFIG_OF_LIBFDT) && defined(CONFIG_OF_BOARD_SETUP)
void
ft_board_setup(void *blob, bd_t *bd)
{
u32 val[12];
int rc, i = 0;
ft_cpu_setup(blob, bd);
/* Fixup NOR FLASH mapping */
val[i++] = 0; /* chip select number */
val[i++] = 0; /* always 0 */
val[i++] = gd->bd->bi_flashstart;
val[i++] = gd->bd->bi_flashsize;
if (mb862xx.frameAdrs == CONFIG_SYS_LIME_BASE) {
/* Fixup LIME mapping */
val[i++] = 2; /* chip select number */
val[i++] = 0; /* always 0 */
val[i++] = CONFIG_SYS_LIME_BASE;
val[i++] = CONFIG_SYS_LIME_SIZE;
}
/* Fixup FPGA mapping */
val[i++] = 3; /* chip select number */
val[i++] = 0; /* always 0 */
val[i++] = CONFIG_SYS_FPGA_BASE;
val[i++] = CONFIG_SYS_FPGA_SIZE;
rc = fdt_find_and_setprop(blob, "/localbus", "ranges",
val, i * sizeof(u32), 1);
if (rc)
printf("Unable to update localbus ranges, err=%s\n",
fdt_strerror(rc));
}
#endif /* defined(CONFIG_OF_LIBFDT) && defined(CONFIG_OF_BOARD_SETUP) */
#define DEFAULT_BRIGHTNESS 25
#define BACKLIGHT_ENABLE (1 << 31)
static const gdc_regs init_regs [] =
{
{0x0100, 0x00010f00},
{0x0020, 0x801901df},
{0x0024, 0x00000000},
{0x0028, 0x00000000},
{0x002c, 0x00000000},
{0x0110, 0x00000000},
{0x0114, 0x00000000},
{0x0118, 0x01df0320},
{0x0004, 0x041f0000},
{0x0008, 0x031f031f},
{0x000c, 0x017f0349},
{0x0010, 0x020c0000},
{0x0014, 0x01df01e9},
{0x0018, 0x00000000},
{0x001c, 0x01e00320},
{0x0100, 0x80010f00},
{0x0, 0x0}
};
const gdc_regs *board_get_regs (void)
{
return init_regs;
}
int lime_probe(void)
{
uint cfg_br2;
uint cfg_or2;
int type;
cfg_br2 = get_lbc_br(2);
cfg_or2 = get_lbc_or(2);
/* Configure GPCM for CS2 */
set_lbc_br(2, 0);
set_lbc_or(2, 0xfc000410);
set_lbc_br(2, (CONFIG_SYS_LIME_BASE) | 0x00001901);
/* Get controller type */
type = mb862xx_probe(CONFIG_SYS_LIME_BASE);
/* Restore previous CS2 configuration */
set_lbc_br(2, 0);
set_lbc_or(2, cfg_or2);
set_lbc_br(2, cfg_br2);
return (type == MB862XX_TYPE_LIME) ? 1 : 0;
}
/* Returns Lime base address */
unsigned int board_video_init (void)
{
if (!lime_probe())
return 0;
mb862xx.winSizeX = 800;
mb862xx.winSizeY = 480;
mb862xx.gdfIndex = GDF_15BIT_555RGB;
mb862xx.gdfBytesPP = 2;
return CONFIG_SYS_LIME_BASE;
}
#define W83782D_REG_CFG 0x40
#define W83782D_REG_BANK_SEL 0x4e
#define W83782D_REG_ADCCLK 0x4b
#define W83782D_REG_BEEP_CTRL 0x4d
#define W83782D_REG_BEEP_CTRL2 0x57
#define W83782D_REG_PWMOUT1 0x5b
#define W83782D_REG_VBAT 0x5d
static int w83782d_hwmon_init(void)
{
u8 buf;
if (i2c_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG, 1, &buf, 1))
return -1;
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG, 0x80);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BANK_SEL, 0);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_ADCCLK, 0x40);
buf = i2c_reg_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL,
buf | 0x80);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_BEEP_CTRL2, 0);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_PWMOUT1, 0x47);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_VBAT, 0x01);
buf = i2c_reg_read(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG);
i2c_reg_write(CONFIG_SYS_I2C_W83782G_ADDR, W83782D_REG_CFG,
(buf & 0xf4) | 0x01);
return 0;
}
static void board_backlight_brightness(int br)
{
u32 reg;
u8 buf;
u8 old_buf;
/* Select bank 0 */
if (i2c_read(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &old_buf, 1))
goto err;
else
buf = old_buf & 0xf8;
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &buf, 1))
goto err;
if (br > 0) {
/* PWMOUT1 duty cycle ctrl */
buf = 255 / (100 / br);
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x5b, 1, &buf, 1))
goto err;
/* LEDs on */
reg = in_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c));
		if (!(reg & BACKLIGHT_ENABLE))
out_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c),
reg | BACKLIGHT_ENABLE);
} else {
buf = 0;
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x5b, 1, &buf, 1))
goto err;
/* LEDs off */
reg = in_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c));
reg &= ~BACKLIGHT_ENABLE;
out_be32((void *)(CONFIG_SYS_FPGA_BASE + 0x0c), reg);
}
/* Restore previous bank setting */
if (i2c_write(CONFIG_SYS_I2C_W83782G_ADDR, 0x4e, 1, &old_buf, 1))
goto err;
return;
err:
printf("W83782G I2C access failed\n");
}
void board_backlight_switch (int flag)
{
char * param;
int rc;
if (w83782d_hwmon_init())
printf ("hwmon IC init failed\n");
if (flag) {
param = getenv("brightness");
rc = param ? simple_strtol(param, NULL, 10) : -1;
if (rc < 0)
rc = DEFAULT_BRIGHTNESS;
} else {
rc = 0;
}
board_backlight_brightness(rc);
}
#if defined(CONFIG_CONSOLE_EXTRA_INFO)
/*
* Return text to be printed besides the logo.
*/
void video_get_info_str (int line_number, char *info)
{
if (line_number == 1) {
strcpy (info, " Board: Socrates");
} else {
info [0] = '\0';
}
}
#endif
|
import React, { useRef, createRef } from 'react';
import { create } from 'react-test-renderer';
import { renderHook } from '@testing-library/react-hooks';
import ReactDOM from 'react-dom';
import { act } from 'react-dom/test-utils';
import Gauge from '../../src/gauge';
import ChartLoading from '../../src/util/createLoading';
import { ErrorBoundary } from '../../src/base';
const refs = renderHook(() => useRef());
describe('Gauge render', () => {
let container;
beforeEach(() => {
container = document.createElement('div');
document.body.appendChild(container);
});
afterEach(() => {
document.body.removeChild(container);
container = null;
});
it('classname * loading * style', () => {
const props = {
style: {
height: '80%',
},
className: 'container',
loading: true,
};
const testRenderer = create(<Gauge {...props} />);
const testInstance = testRenderer.root;
const renderTree = testRenderer.toTree();
expect(renderTree.rendered[0].nodeType).toBe('component');
expect(renderTree.rendered[1].props.className).toBe('container');
expect(renderTree.rendered[1].props.style).toEqual({
height: '80%',
});
expect(renderTree.rendered[1].nodeType).toBe('host');
expect(renderTree.rendered[1].type).toBe('div');
expect(testInstance.findAllByType(ChartLoading).length).toBe(1);
});
it('classname * loading * style with default', () => {
const props = {};
const testRenderer = create(<Gauge {...props} />);
const testInstance = testRenderer.root;
const renderTree = testRenderer.toTree();
expect(renderTree.rendered.nodeType).toBe('host');
expect(renderTree.rendered.type).toBe('div');
expect(renderTree.rendered.props.className).toBeUndefined();
expect(testInstance.findAllByType(ChartLoading).length).toBe(0);
expect(renderTree.rendered.props.style).toEqual({
height: 'inherit',
});
});
it('error template', () => {
const props = {
loading: true,
      // loadingTemplate is deliberately an invalid element here, used only to trigger the error boundary
loadingTemplate: {
triggleError: true,
},
errorTemplate: () => <span id="error">custom error</span>,
};
const chartProps = {
data: [],
xField: 'date',
yField: 'scales',
autoFit: false,
width: '200',
height: '160',
};
const testRenderer = create(<Gauge {...props} {...chartProps} />);
const testInstance = testRenderer.root;
expect(testInstance.findByType(ErrorBoundary).children[0].children).toEqual(['custom error']);
});
it('chart render * chartRef with callback', () => {
let chartRef = undefined;
const props = {
className: 'container',
chartRef: (ref) => {
chartRef = ref;
},
};
const chartProps = {
percent: 0.75,
autoFit: false,
width: 200,
height: 160,
};
act(() => {
ReactDOM.render(<Gauge {...props} {...chartProps} />, container);
});
expect(chartRef).not.toBeUndefined();
const canvas = container.querySelector('canvas');
expect(canvas.width).toBe(200);
expect(canvas.height).toBe(160);
expect(chartRef.chart.views[0].getData()).toEqual([{ percent: 0.75 }]);
});
it('chartRef with createRef', () => {
const chartRef = createRef();
const props = {
className: 'container',
chartRef,
};
const chartProps = {
percent: 0.75,
autoFit: false,
width: 200,
height: 160,
};
act(() => {
ReactDOM.render(<Gauge {...props} {...chartProps} />, container);
});
expect(chartRef.current.chart.views[0].getData()).toEqual([{ percent: 0.75 }]);
});
it('chartRef with useRef', () => {
const props = {
className: 'container',
};
const chartProps = {
percent: 0.75,
autoFit: false,
width: 200,
height: 160,
};
act(() => {
ReactDOM.render(<Gauge {...props} {...chartProps} ref={refs} />, container);
});
expect(refs.current.getChart().chart.views[0].getData()).toEqual([{ percent: 0.75 }]);
});
});
|
(function () {
class EventHub {
eventList = new Map();
emit(name, value) {
if (!this.eventList.has(name)) {
return;
}
this.eventList.get(name).forEach((v) => v(value));
}
on(name, callback) {
let callbackList = [];
if (this.eventList.has(name)) {
callbackList = this.eventList.get(name);
}
callbackList.push(callback);
this.eventList.set(name, callbackList);
}
off(name, callback) {
if (!this.eventList.has(name)) {
return;
}
const list = this.eventList.get(name);
const nextValue = list.filter((v) => v !== callback);
if (nextValue.length === 0) {
this.eventList.delete(name);
} else {
this.eventList.set(name, nextValue);
}
}
once(name, callback) {
const callbackWrap = (value) => {
callback(value);
this.off(name, callbackWrap);
};
this.on(name, callbackWrap);
}
}
let pathList = {};
let moduleCaches = new Map();
let moduleIsLoading = new Map();
const eventHub = new EventHub();
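// Minimal AMD-style flow: require(deps, cb) resolves every dependency, either from
// the module cache or by injecting a <script> tag, and then calls cb with the loaded
// modules. define() stores whatever the factory of the currently executing script
// returns in the cache, keyed by that script's data-name attribute, and notifies
// waiting callers through the event hub.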
function require(deps, callback) {
loadModule(deps, function (modules) {
callback(...modules);
});
}
function define(deps, callback) {
const parentName = document.currentScript.dataset.name;
if (typeof deps === "undefined") {
throw new TypeError("Need a function or dependency list");
}
if (Array.isArray(deps) && typeof callback === "function") {
return loadModule(deps, onLoadCallback);
}
if (typeof callback === "undefined" && typeof deps === "function") {
callback = deps;
deps = undefined;
onLoadCallback();
}
function onLoadCallback(module) {
if (Array.isArray(module) && module.length > 0) {
moduleCaches.set(parentName, callback(...module));
} else {
moduleCaches.set(parentName, callback());
}
eventHub.emit(parentName);
}
}
function load(name, path) {
moduleIsLoading.set(name, true);
const script = document.createElement("script");
script.dataset.name = name;
script.src = path;
script.onload = () => moduleIsLoading.set(name, false);
document.body.appendChild(script);
}
function loadModule(deps, callback) {
const modules = [];
let doneNums = 0;
deps.forEach(forDepsLoadCallback);
onLoadDoneCallback();
function forDepsLoadCallback(moduleName, index) {
const cacheModule = getCacheModule(moduleName);
if (cacheModule !== false) {
modules[index] = cacheModule;
doneNums++;
return;
}
eventHub.once(moduleName, () => {
modules[index] = moduleCaches.get(moduleName);
doneNums++;
onLoadDoneCallback();
});
const modulePath = getModuleFullPath(moduleName);
if (!modulePath) {
throw new Error(`load module error ${moduleName}`);
}
if (!moduleIsLoading.has(moduleName)) {
load(moduleName, modulePath);
}
}
function onLoadDoneCallback() {
if (doneNums >= deps.length) {
callback(modules);
}
}
}
function getCacheModule(name) {
if (moduleCaches.has(name)) {
return moduleCaches.get(name);
}
return false;
}
function getModuleFullPath(name) {
return pathList[name];
}
require.config = function (config) {
if (!config || typeof config.paths !== "object" || config.paths === null) {
throw new TypeError("need to be a paths object!");
}
const baseUrl = config.baseUrl || "";
for (const [key, value] of Object.entries(config.paths)) {
if (typeof value !== "string") {
throw new TypeError("need to be a module string path!");
}
pathList[key] = baseUrl + value;
}
};
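// Usage sketch (hypothetical module names and paths): register locations first,
// then require modules by name.
//   require.config({ baseUrl: "/js/", paths: { math: "modules/math.js" } });
//   require(["math"], function (math) { /* math is whatever math.js defined */ });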
function loadMainFile() {
if (document.currentScript) {
const mainModulePath = document.currentScript.dataset.main;
load(getFileName(mainModulePath), mainModulePath);
function getFileName(s) {
s = s.split("/");
return s[s.length - 1].match(/(\w+)/g)[0];
}
}
}
window.require = require;
window.define = define;
loadMainFile();
})();
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for downloading apt packages and tar them in a .tar file."""
load(
"//skylib:path.bzl",
"runfile",
)
def _generate_add_additional_repo_commands(ctx, additional_repos):
return """printf "{repos}" >> /etc/apt/sources.list.d/{name}_repos.list""".format(
name = ctx.attr.name,
repos = "\n".join(additional_repos.to_list()),
)
def _generate_download_commands(ctx, packages, additional_repos):
return """#!/usr/bin/env bash
set -ex
{add_additional_repo_commands}
# Remove /var/lib/apt/lists/* in the base image. apt-get update -y command will create them.
rm -rf /var/lib/apt/lists/*
# Fetch Index
apt-get update -y
# Make partial dir
mkdir -p /tmp/install/./partial
# Install command
apt-get install --no-install-recommends -y -q -o Dir::Cache="/tmp/install" -o Dir::Cache::archives="." {packages} --download-only
items=$(ls /tmp/install/*.deb)
if [ "$items" = "" ]; then
echo "Did not find the .deb files for debian packages {packages} in /tmp/install. Did apt-get actually succeed?" && false
fi
# Generate csv listing the name & versions of the debian packages.
# Example contents of a metadata CSV with debian packages gcc 8.1 & clang 9.1:
# Name,Version
# gcc,8.1
# clang,9.1
echo "Generating metadata CSV file {installables}_metadata.csv"
echo Name,Version > {installables}_metadata.csv
dpkg_deb_path=$(which dpkg-deb)
for item in $items; do
echo "Adding information about $item to metadata CSV"
pkg_name=$($dpkg_deb_path -f $item Package)
if [ "$pkg_name" = "" ]; then
echo "Failed to get name of the package for $item" && false
fi
pkg_version=$($dpkg_deb_path -f $item Version)
if [ "$pkg_version" = "" ]; then
echo "Failed to get the version of the package for $item" && false
fi
echo "Package $pkg_name, Version $pkg_version"
echo -n "$pkg_name," >> {installables}_metadata.csv
echo $pkg_version >> {installables}_metadata.csv
done;
# Tar command to only include all the *.deb files and ignore other directories placed in the cache dir.
tar -cpf {installables}_packages.tar --mtime='1970-01-01' --directory /tmp/install/. `cd /tmp/install/. && ls *.deb`""".format(
installables = ctx.attr.name,
packages = " ".join(packages.to_list()),
add_additional_repo_commands = _generate_add_additional_repo_commands(ctx, additional_repos),
)
def _impl(ctx, image_tar = None, packages = None, additional_repos = None, output_executable = None, output_tar = None, output_script = None, output_metadata = None):
"""Implementation for the download_pkgs rule.
Args:
ctx: The bazel rule context
image_tar: File, overrides ctx.file.image_tar
packages: str List, overrides ctx.attr.packages
additional_repos: str List, overrides ctx.attr.additional_repos
output_executable: File, overrides ctx.outputs.executable
output_tar: File, overrides ctx.outputs.pkg_tar
output_script: File, overrides ctx.outputs.build_script
output_metadata: File, overrides ctx.outputs.metadata_csv
"""
image_tar = image_tar or ctx.file.image_tar
packages = depset(packages or ctx.attr.packages)
additional_repos = depset(additional_repos or ctx.attr.additional_repos)
output_executable = output_executable or ctx.outputs.executable
output_tar = output_tar or ctx.outputs.pkg_tar
output_script = output_script or ctx.outputs.build_script
output_metadata = output_metadata or ctx.outputs.metadata_csv
if not packages:
fail("attribute 'packages' given to download_pkgs rule by {} was empty.".format(ctx.label))
toolchain_info = ctx.toolchains["@io_bazel_rules_docker//toolchains/docker:toolchain_type"].info
# Generate a shell script to execute the apt_get inside this docker image.
# We use full paths here.
ctx.actions.expand_template(
template = ctx.file._run_download_tpl,
output = output_script,
substitutions = {
"%{docker_flags}": " ".join(toolchain_info.docker_flags),
"%{docker_tool_path}": toolchain_info.tool_path,
"%{download_commands}": _generate_download_commands(ctx, packages, additional_repos),
"%{image_id_extractor_path}": ctx.executable._extract_image_id.path,
"%{image_tar}": image_tar.path,
"%{installables}": ctx.attr.name,
"%{output_metadata}": output_metadata.path,
"%{output}": output_tar.path,
},
is_executable = True,
)
ctx.actions.run(
outputs = [output_tar, output_metadata],
executable = output_script,
inputs = [image_tar],
tools = [ctx.executable._extract_image_id],
use_default_shell_env = True,
)
# Generate a very similar one as output executable, but with short paths
# This is because the paths for running within bazel build are different.
ctx.actions.expand_template(
template = ctx.file._run_download_tpl,
output = output_executable,
substitutions = {
"%{docker_flags}": " ".join(toolchain_info.docker_flags),
"%{docker_tool_path}": toolchain_info.tool_path,
"%{download_commands}": _generate_download_commands(ctx, packages, additional_repos),
"%{image_id_extractor_path}": "${RUNFILES}/%s" % runfile(ctx, ctx.executable._extract_image_id),
"%{image_tar}": image_tar.short_path,
"%{installables}": ctx.attr.name,
"%{output_metadata}": output_metadata.short_path,
"%{output}": output_tar.short_path,
},
is_executable = True,
)
return [
DefaultInfo(
executable = output_executable,
files = depset([output_executable]),
runfiles = ctx.runfiles(
files = [
image_tar,
output_script,
output_metadata,
ctx.executable._extract_image_id,
],
transitive_files = ctx.attr._extract_image_id[DefaultInfo].default_runfiles.files,
),
),
]
_attrs = {
"additional_repos": attr.string_list(
doc = ("list of additional debian package repos to use, in " +
"sources.list format"),
),
"image_tar": attr.label(
doc = "The image tar for the container used to download packages.",
allow_single_file = True,
mandatory = True,
),
"packages": attr.string_list(
doc = "list of packages to download. e.g. ['curl', 'netbase']",
mandatory = True,
),
"_extract_image_id": attr.label(
default = Label("//contrib:extract_image_id"),
cfg = "host",
executable = True,
allow_files = True,
),
"_run_download_tpl": attr.label(
default = Label("//docker/package_managers:run_download.sh.tpl"),
allow_single_file = True,
),
}
_outputs = {
"build_script": "%{name}.sh",
"metadata_csv": "%{name}_metadata.csv",
"pkg_tar": "%{name}.tar",
}
# Export download_pkgs rule for other bazel rules to depend on.
download = struct(
attrs = _attrs,
outputs = _outputs,
implementation = _impl,
)
download_pkgs = rule(
attrs = _attrs,
doc = ("This rule creates a script to download packages " +
"within a container. The script bundles all the " +
"packages in a tarball."),
executable = True,
outputs = _outputs,
toolchains = ["@io_bazel_rules_docker//toolchains/docker:toolchain_type"],
implementation = _impl,
)
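# Usage sketch in a BUILD file (hypothetical target and image labels; the load
# path is assumed to be this file's location):
#
#   load("//docker/package_managers:download_pkgs.bzl", "download_pkgs")
#
#   download_pkgs(
#       name = "example_pkgs",
#       image_tar = "@ubuntu_base//image:image.tar",
#       packages = ["curl", "netbase"],
#   )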
|
/* eslint-disable no-unused-expressions, no-magic-numbers */
define([
'app/config',
'app/map/ReferenceLayerToggle',
'dojo/dom-construct',
'dojo/topic',
'dojo/_base/window',
'esri/layers/ArcGISDynamicMapServiceLayer',
'sinon',
'sinon-chai',
'tests/helpers/topics'
], function (
config,
WidgetUnderTest,
domConstruct,
topic,
win,
ArcGISDynamicMapServiceLayer,
sinon,
sinonChai,
topicsHelper
) {
const bdd = intern.getInterface('bdd');
const chai = intern.getPlugin('chai');
const expect = chai.expect;
chai.use(topicsHelper.plugin);
chai.use(sinonChai);
var url = '/arcgis/rest/services/Wildlife/Data/MapServer';
var index = 3;
var destroy = function (widget) {
widget.destroyRecursive();
widget = null;
};
var topics = config.topics.appMapReferenceLayerToggle;
bdd.describe('app/map/ReferenceLayerToggle', function () {
var widget;
bdd.beforeEach(function () {
topicsHelper.beforeEach();
topicsHelper.listen(topics.addLayer);
topicsHelper.listen(topics.toggleLayer);
widget = new WidgetUnderTest({
layerName: 'blah',
mapServiceUrl: url,
layerIndex: index,
layerClass: ArcGISDynamicMapServiceLayer
}, domConstruct.create('div', null, win.body()));
});
bdd.afterEach(function () {
topicsHelper.afterEach();
if (widget) {
destroy(widget);
}
});
bdd.describe('Sanity', function () {
bdd.it('should create a ReferenceLayerToggle', function () {
expect(widget).to.be.instanceOf(WidgetUnderTest);
});
});
bdd.describe('postCreate', () => {
bdd.it('checks box and calls on change when topic fires', () => {
sinon.stub(widget, 'onCheckboxChange');
widget.checkbox.checked = false;
widget.mapServiceUrl = config.urls.DEQEnviro;
widget.layerIndex = config.layerIndices.streams;
topic.publish(config.topics.appSearch.onStreamSelect);
expect(widget.checkbox.checked).to.be.true;
expect(widget.onCheckboxChange).to.have.been.called;
});
});
bdd.describe('onCheckboxChange', function () {
bdd.it('publishes the appropriate toggleLayer topic', function () {
widget.checkbox.checked = true;
widget.onCheckboxChange();
expect(topics.addLayer).to.have.been.publishedWith(url, ArcGISDynamicMapServiceLayer, index);
expect(topics.toggleLayer).to.have.been.publishedWith(url, index, true);
});
});
});
});
|
from logging import Logger
import os
import sys
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict
from .train import train
from chemprop.args import TrainArgs
from chemprop.constants import MODEL_FILE_NAME
from chemprop.data import get_class_sizes, get_data, MoleculeDataLoader, split_data, StandardScaler, validate_dataset_type
from chemprop.models import MoleculeModel
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint, save_smiles_splits
def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
"""
Loads data, trains a Chemprop model, and returns test scores for the model checkpoint with the highest validation score.
:param args: A :class:`~chemprop.args.TrainArgs` object containing arguments for
loading data and training the Chemprop model.
:param logger: A logger to record output.
:return: A list of model scores for each task.
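Example (a minimal sketch; assumes ``args`` came from the usual
:class:`~chemprop.args.TrainArgs` command-line parsing with at least
``data_path``, ``dataset_type`` and ``save_dir`` set)::

    test_scores = run_training(args)  # one score per task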
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Print command line
debug('Command line')
debug(f'python {" ".join(sys.argv)}')
# Print args
debug('Args')
debug(args)
# Save args
args.save(os.path.join(args.save_dir, 'args.json'))
# Set pytorch seed for random initial weights
torch.manual_seed(args.pytorch_seed)
# Get data
debug('Loading data')
data = get_data(path=args.data_path, args=args, logger=logger)
validate_dataset_type(data, dataset_type=args.dataset_type)
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
if args.separate_test_path:
test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
if args.separate_val_path:
val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
if args.separate_val_path and args.separate_test_path:
train_data = data
elif args.separate_val_path:
train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
elif args.separate_test_path:
train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
else:
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.dataset_type == 'classification':
class_sizes = get_class_sizes(data)
debug('Class sizes')
for i, task_class_sizes in enumerate(class_sizes):
debug(f'{args.task_names[i]} '
f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
if args.save_smiles_splits:
save_smiles_splits(
train_data=train_data,
val_data=val_data,
test_data=test_data,
data_path=args.data_path,
save_dir=args.save_dir,
smiles_column=args.smiles_column
)
if args.features_scaling:
features_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(features_scaler)
test_data.normalize_features(features_scaler)
else:
features_scaler = None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting the mean and dividing by the standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = get_loss_func(args)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
if args.dataset_type == 'multiclass':
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
else:
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Automatically determine whether to cache
if len(data) <= args.cache_cutoff:
cache = True
num_workers = 0
else:
cache = False
num_workers = args.num_workers
# Create data loaders
train_data_loader = MoleculeDataLoader(
dataset=train_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache,
class_balance=args.class_balance,
shuffle=True,
seed=args.seed
)
val_data_loader = MoleculeDataLoader(
dataset=val_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
test_data_loader = MoleculeDataLoader(
dataset=test_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
if args.class_balance:
debug(f'With class_balance, effective train size = {train_data_loader.iter_size:,}')
# Train ensemble of models
for model_idx in range(args.ensemble_size):
# Tensorboard writer
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
# tensorboardX renamed the SummaryWriter argument from logdir to log_dir; fall back for older versions
try:
writer = SummaryWriter(log_dir=save_dir)
except TypeError:
writer = SummaryWriter(logdir=save_dir)
# Load/build model
if args.checkpoint_paths is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
model = load_checkpoint(args.checkpoint_paths[model_idx], logger=logger)
else:
debug(f'Building model {model_idx}')
model = MoleculeModel(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.to(args.device)
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler, features_scaler, args)
# Optimizers
optimizer = build_optimizer(model, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in trange(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data_loader=train_data_loader,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger,
writer=writer
)
if isinstance(scheduler, ExponentialLR):
scheduler.step()
val_scores = evaluate(
model=model,
data_loader=val_data_loader,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), model, scaler, features_scaler, args)
# Evaluate on test set using model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, MODEL_FILE_NAME), device=args.device, logger=logger)
test_preds = predict(
model=model,
data_loader=test_data_loader,
scaler=scaler
)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
writer.close()
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# Average ensemble score
avg_ensemble_test_score = np.nanmean(ensemble_scores)
info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
# Individual ensemble scores
if args.show_individual_scores:
for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
return ensemble_scores
|
/*
* Copyright 2020 Adobe. All rights reserved.
* This file is licensed to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
/* eslint-env mocha */
/* eslint-disable mocha/no-mocha-arrows */
"use strict";
const assert = require("assert");
const Metrics = require("../lib/metrics");
describe("metrics", () => {
describe("timestamps", () => {
it("timestamp", () => {
const timestamp = Metrics.timestamp();
assert.ok(typeof timestamp === "number");
});
it("start-empty", () => {
const start = Metrics.start();
assert.ok(typeof start.start === "number");
});
it("start-object", () => {
const metrics = {};
const start = Metrics.start(metrics);
assert.ok(typeof start.start === "number");
assert.strictEqual(metrics, start);
});
it("end", () => {
const start = Metrics.start();
const end = Metrics.end(start);
assert.ok(typeof end.start === "number");
assert.ok(typeof end.end === "number");
assert.strictEqual(end.end - end.start, end.duration);
assert.strictEqual(start, end);
});
});
describe("summary", () => {
it("numbers-array", () => {
const summary = Metrics.summary([1, 2, 3, 4, 5, 6, 7, 8]);
// limit stdev to 3 digits after the dot for comparison
summary.stdev = Math.round(summary.stdev * 1000) / 1000;
assert.deepStrictEqual(summary, {
max: 8,
mean: 4.5,
median: 4.5,
min: 1,
q1: 2.75,
q3: 6.25,
stdev: 2.449
});
});
it("numbers-set", () => {
const summary = Metrics.summary(new Set([1, 2, 3, 4, 5, 6, 7, 8]));
// limit stdev to 3 digits after the dot for comparison
summary.stdev = Math.round(summary.stdev * 1000) / 1000;
assert.deepStrictEqual(summary, {
max: 8,
mean: 4.5,
median: 4.5,
min: 1,
q1: 2.75,
q3: 6.25,
stdev: 2.449
});
});
it("objects-array", () => {
const summary = Metrics.summary([{
counter: 1,
constant: 5
}, {
counter: 2,
constant: 5
}, {
counter: 3,
constant: 5
}, {
counter: 4,
constant: 5
}, {
counter: 5,
constant: 5
}, {
counter: 6,
constant: 5
}, {
counter: 7,
constant: 5
}, {
counter: 8,
constant: 5
}]);
// limit stdev to 3 digits after the dot for comparison
summary.counter.stdev = Math.round(summary.counter.stdev * 1000) / 1000;
assert.deepStrictEqual(summary, {
counter: {
max: 8,
mean: 4.5,
median: 4.5,
min: 1,
q1: 2.75,
q3: 6.25,
stdev: 2.449
},
constant: {
max: 5,
mean: 5,
median: 5,
min: 5,
q1: 5,
q3: 5,
stdev: 0
}
});
});
it('object array of objects', () => {
const summary = Metrics.summary([
{
mem: {
usage: 1,
percentage: 3 },
cpu: {
usage: 2
}
}]);
assert.deepStrictEqual(summary, {
mem_usage: { min: 1, max: 1, mean: 1, stdev: NaN, median: 1, q1: 1, q3: 1 },
mem_percentage: { min: 3, max: 3, mean: 3, stdev: NaN, median: 3, q1: 3, q3: 3 },
cpu_usage: { min: 2, max: 2, mean: 2, stdev: NaN, median: 2, q1: 2, q3: 2 }
});
}
);
it('object array of objects, ignores the undefined attributes', () => {
const summary = Metrics.summary([
{
counter: 2,
constant: 5,
extra: undefined
}, {
counter: 5,
constant: 5,
extra: 1
},{
counter: 2,
constant: 5,
extra: 1
}
]);
// limit stdev to 3 digits after the dot for comparison
summary.counter.stdev = Math.round(summary.counter.stdev * 1000) / 1000;
console.log(summary);
assert.deepStrictEqual(summary, {
counter: { min: 2, max: 5, mean: 3, stdev: 1.732, median: 2, q1: 2, q3: 3.5 },
constant: { min: 5, max: 5, mean: 5, stdev: 0, median: 5, q1: 5, q3: 5 },
extra: { min: 1, max: 1, mean: 1, stdev: 0, median: 1, q1: 1, q3: 1 }
});
});
it('object array of objects, ignores the unmatched attributes', () => {
const summary = Metrics.summary([
{
counter: 2,
constant: 5
}, {
counter: 5,
constant: 5,
extra: 1
},{
counter: 2,
constant: 5,
extra: 1
}
]);
// limit stdev to 3 digits after the dot for comparison
summary.counter.stdev = Math.round(summary.counter.stdev * 1000) / 1000;
console.log(summary);
assert.deepStrictEqual(summary, {
counter: { min: 2, max: 5, mean: 3, stdev: 1.732, median: 2, q1: 2, q3: 3.5 },
constant: { min: 5, max: 5, mean: 5, stdev: 0, median: 5, q1: 5, q3: 5 }
});
});
it('object-array contains periods', () => {
const summary = Metrics.summary([
{
"mem.usage": 1,
"mem.percentage": 3,
"cpu.usage": 2
}]);
// limit stdev to 3 digits after the dot for comparison
assert.deepStrictEqual(summary, {
mem_usage: { min: 1, max: 1, mean: 1, stdev: NaN, median: 1, q1: 1, q3: 1 },
mem_percentage: { min: 3, max: 3, mean: 3, stdev: NaN, median: 3, q1: 3, q3: 3 },
cpu_usage: { min: 2, max: 2, mean: 2, stdev: NaN, median: 2, q1: 2, q3: 2 }
});
});
});
});
|
import os
import re
import sys
import xml.etree.ElementTree as ET
import argparse
class FileProcessor(object):
@staticmethod
def get_title():
return 'AIML Utilities'
def create_base_args_parser(self):
parser = argparse.ArgumentParser(description=self.get_title())
parser.add_argument('-if', '--inputfile', action="store", dest="inputfile", help="Input file")
parser.add_argument('-of', '--outputfile', action="store", dest="outputfile", help="Output file")
parser.add_argument('-id', '--inputdir', action="store", dest="inputdir", help="Input dir")
parser.add_argument('-od', '--outputdir', action="store", dest="outputdir", help="Output dir")
parser.add_argument('-verbose', action="store_true", dest="verbose", default=False, help="Verbose output")
parser.add_argument('-dummy', action="store_true", dest="dummy", default=False, help="Don't modify output file(s)")
return parser
def process(self, args):
if args.inputfile and args.outputfile:
return self.process_single_file(args)
elif args.inputdir and args.outputdir:
return self.process_folders(args)
return False
def _process_file(self, inputfile, outputfile, args):
print("Converting [%s] -> [%s]"%(inputfile, outputfile))
def process_single_file(self, args):
print("Processing single file")
self._process_file(args.inputfile, args.outputfile, args)
return True
def process_folders(self, args):
print("Processing folders")
files = self._gather_files(args)
for file in sorted(files):
self._process_file(file[0], file[1], args)
return True
def _gather_files(self, args):
files = []
self._list_files(args.inputdir, args.outputdir, files)
return files
def _list_files(self, existing_base, new_base, files):
for item in os.listdir(existing_base):
if item[0] != ".":
existing_full_path = os.path.join(existing_base, item)
new_full_path = os.path.join(new_base, item)
if os.path.isdir(existing_full_path):
new_full_path = os.path.join(new_base, item)
try:
os.makedirs(new_full_path)
except OSError as e:
pass
self._list_files(existing_full_path, new_full_path, files)
else:
files.append((existing_full_path, new_full_path))
if __name__ == '__main__':
processor = FileProcessor()
parser = processor.create_base_args_parser()
args = parser.parse_args()
if processor.process(args) is False:
parser.print_help()
print("\n")
|
/*
* @Author: djvolz
* @Date: 2016-11-15 00:05:13
* @Last Modified by: djvolz
* @Last Modified time: 2016-11-15 00:05:25
*/
module.exports = {
"chase": "Watch the light run across the room!"
};
|
from config import Config
class Translation(object):
START = str(Config.START) + "\n\nMade with ❤ From @CoderzHEX"
RULES = Config.RULES
LOGIN = """Only for admins, for receiving feedback"""
ABOUT = """**MY DETAILS:**
```🤖My Name:``` [Feedback Bot](https://t.me/Feedback_Nsbot)
```📝 Language:``` [Python 3](https://www.python.org/)
```🧰 Framework:``` [Pyrogram](https://github.com/pyrogram/pyrogram)
```👨🏻💻 Developer:``` [CoderzHEX](https://t.me/CoderzHEX)
```📢 Channel:``` [Click here](https://t.me/CODERZHEX)
```👥 Group:``` [Support](https://t.me/CoderzSupport)
"""
|
import pytesseract
from PIL import Image
import argparse
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
help="type of preprocessing to be done")
args = vars(ap.parse_args())
# load the example image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# check to see if we should apply thresholding to preprocess the
# image
if args["preprocess"] == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif args["preprocess"] == "blur":
gray = cv2.medianBlur(gray, 3)
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open(filename))
os.remove(filename)
print('car plate number>>')
print(text)
# show the output images
cv2.imshow("Image", image)
cv2.imshow("Output", gray)
cv2.waitKey(0)
"""
Python-tesseract is an optical character recognition (OCR) tool for python.
That is, it will recognize and “read” the text embedded in images.
Tesseract finds templates in pixels, letters, words and sentences. It uses a two-step approach called adaptive recognition: a first pass for character recognition,
and a second pass that fills in any letters it was not confident about, using letters that fit the word or sentence context.
"""
|
import { render } from 'react-dom';
import PropTypes from 'prop-types';
import React from "react"
import Utils from "utils"
import {makeUrl, A} from "routing"
var createReactClass = require('create-react-class');
var OrderByBox = createReactClass({
displayName: "OrderByBox",
propTypes: {
//the available sort orders
sortOrders: PropTypes.arrayOf(PropTypes.shape({
title: PropTypes.string.isRequired,
key: PropTypes.string.isRequired,
defaultDirection: PropTypes.oneOf(["desc", "asc"])
}).isRequired).isRequired,
//the currently selected filter criterion
sort: PropTypes.string,
//the currently selected sort direction
direction: PropTypes.oneOf(["desc", "asc"]),
dropdown: PropTypes.bool,
baseUrl: PropTypes.string.isRequired,
params: PropTypes.any.isRequired
},
getDefaultProps: function (){
return {
dropdown: true,
right: true
}
},
render: function() {
var sort = this.props.sort
var direction = this.props.direction
var sortOrders = this.props.sortOrders
var boxItems = sortOrders.map(function(sortOrder){
var defaultSortDirection = sortOrder.defaultDirection || 'desc'
var reverseSortDirection = (direction == 'desc') ? 'asc' : 'desc'
var newSortDirection = (sort == sortOrder.key) ? reverseSortDirection : defaultSortDirection
var filterHref = makeUrl(this.props.baseUrl, {sort: sortOrder.key, direction: newSortDirection, offset: 0}, this.props.params)
var filterListItem
if (sort != sortOrder.key) {
//currently not selected
filterListItem = <li key={sortOrder.key}><A className="link" href={filterHref}>{Utils.capitalizeFirstChar(sortOrder.title)}</A></li>
} else {
var sortButtons
//currently selected
if (direction == 'desc' && sort) {
sortButtons = <div className="controls pull-right">
<A className="button pull-left" href={filterHref}>
<span className="fa fa-caret-up left text-center"></span>
</A>
<div className="button-selected pull-right">
<span className="fa fa-caret-down left text-center"></span>
</div>
</div>
} else {
sortButtons = <div className="controls pull-right">
<div className="button-selected pull-left" >
<span className="fa fa-caret-up text-center"></span>
</div>
<A className="button pull-right" href={filterHref}>
<span className="fa fa-caret-down text-center"></span>
</A>
</div>
}
filterListItem = <li key={sortOrder.key} className='clickable selected clearfix'>
<A className="pull-left filter-title" href={filterHref}>{Utils.capitalizeFirstChar(sortOrder.title)}</A>
{sortButtons}
</li>
}
return filterListItem
}.bind(this))
if (this.props.dropdown) {
var dropDownMenuRight = this.props.right ? "dropdown-menu-right" : ""
return <li className="dropdown">
<A className="dropdown-toggle" data-toggle="dropdown"><span className="fa fa-sort-amount-asc" /></A>
<ul className={"dropdown-menu list " + dropDownMenuRight}>
{boxItems}
</ul>
</li>
} else {
return <div className="box">
<div className="head">
<h3>Order By</h3>
</div>
<div className="body">
<ul className="list">
{boxItems}
</ul>
</div>
</div>
}
}
})
export default OrderByBox
|
"""
4 - Jan - 2018 / H. F. Stevance / fstevance1@sheffield.ac.uk
This is the main module of FUSS. It contains general utility functions, a couple of interactive routines and
also defines a new class: PolData, to deal with specpol data.
All this should make dealing with and analysing specpol data easier.
Functions:
----------
get_spctr(): Gets flux data from text file.
get_pol(): Gets pol data from text file.
dopcor(): Doppler Correction.
dopcor_file(): Doppler correction from data from a file output into a new file
ylim_def(): Used to define y limits for plots. Used within FUSS.
rot_data(): To rotate 2D data.
norm_ellipse(): Creates random data where the x and y coordinates are described by 2 different normal distributions.
Interactive routines:
---------------------
ep_date(): Taking a date as reference point, finds epoch from date or date from epoch.
vel(): Finds expansion velocity of element from observed and rest wavelength.
Class PolData():
----------------
Attributes:
Defined by __init__
- name: name
- wlp = wavelength bins of polarisation data
- p = p
- pr = Delta p
- q = q
- qr = Delta q
- u = u
- ur = Delta u
- a = Polarisation Angle P.A
- ar = Delta P.A
- wlf = wavelength bins of flux spectrum
- f = Flux
- fr = Delta F
Defined by find_isp() or add_isp()
- qisp, qispr, uisp, uispr, aisp, aispr: Stokes parameters and P.A of ISP
Defined by rmv_isp()
- p0, p0r, q0, ... , a0r : Original polarisation data before ISP correction
- Updates p, pr, q, ..., ar with ISP corrected values.
Methods:
- add_flux_data()
- flu_n_pol()
- find_isp()
- add_isp()
- rmv_isp()
- qu_plt()
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import math as m
import matplotlib.gridspec as gridspec
from scipy.odr import ODR, Model, Data, RealData, odr, Output
import os
import datetime as dt
from FUSS import isp as isp
import sys
import pandas as pd
if sys.version_info.major < 3:
range = xrange
input = raw_input
# ################## FUNCTIONS ###################### FUNCTIONS #################### FUNCTIONS ################# #
def get_spctr(filename, wlmin=0, wlmax=100000, err=False, scale=True, skiprows = 0 ):
"""
Imports spectrum.
Notes
-----
Required file format: wl(Angstrom) flux *flux_error* (*optional*)
Parameters
----------
filename : string
Name of the ASCII file where the spectrum is.
wlmin : int, optional
Lower wavelength cutoff. Default = 0.
wlmax : int, optional
Upper wavelength cutoff. Default = 100000.
err : bool, optional
If there is an error column, set to True. Default is False.
scale : bool, optional
Default is True. Normalises the spectrum (and error) by dividing by the median flux.
skiprows : int, optional
Default is 0, number of rows to skip
Returns
-------
Tuple of 1D Arrays
=> Wavelength, Flux, *flux_error* (optional)
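Examples
--------
Minimal usage sketch (hypothetical file name):
>> wl, f = get_spctr('sn_flux.txt', wlmin=4000, wlmax=9500)
>> wl, f, fr = get_spctr('sn_flux.txt', err=True, scale=False)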
"""
if err is False:
flux = np.loadtxt(filename, unpack=True, usecols=(0, 1), skiprows=skiprows)
cond = (flux[0] > wlmin) & (flux[0] < wlmax)
wl = flux[0][cond]
f = flux[1][cond]
if scale is True:
s = 1 / np.median(f) # normalising the spectrum
f = f * s
return wl, f
else:
flux = np.loadtxt(filename, unpack=True, usecols=(0, 1, 2))
cond = (flux[0] > wlmin) & (flux[0] < wlmax)
wl = flux[0][cond]
f = flux[1][cond]
r = flux[2][cond]
if scale is True:
s = 1 / np.median(f)
f = f * s
r = r * s
return wl, f, r
def get_pol(filename, wlmin=0, wlmax=100000, skiprows = 0):
"""
Imports values from polarisation files (given by the old specpol routine in datred (pre Dec 2017)).
Notes
-----
Required File format: 9 columns.
First column must be wavelength in Angstrom.
The other 8 columns are for stokes parameters, degree of pol and P.A, and associated errors:
=> wl p p_err q q_err u u_err angle angle_err
Parameters
----------
filename : string
Name of the ASCII file.
wlmin : int, optional
Lower wavelength cutoff. Default = 0.
wlmax : int, optional
Upper wavelength cutoff. Default = 100000.
Returns
-------
Tuple of 1D Arrays
One 1 D array per parameter (so first must be wavelength, order of the rest depends on input file).
=> 9 arrays total.
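Examples
--------
Minimal usage sketch (hypothetical file name):
>> wl, p, pr, q, qr, u, ur, a, ar = get_pol('sn_pol.txt', wlmin=4500, wlmax=9000)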
"""
pol0 = np.loadtxt(filename, unpack=True, usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8), skiprows=skiprows)
pol = []
cond = (pol0[0] > wlmin) & (pol0[0] < wlmax) # pol0[0] should contain the wavelength bins
for val in pol0:
# Applies the limits determined by wlmin, wlmax
valn = val[cond]
pol.append(valn)
return pol[0], pol[1], pol[2], pol[3], pol[4], pol[5], pol[6], pol[7], pol[8]
def dopcor(val, z):
"""
Doppler Correction.
Parameters
----------
val : array
Array containing the data. val[0] MUST BE THE WAVELENGTH. NEED AT LEAST 2 COLUMNS!!
z : float
Redshift
Returns
--------
Array containing the data with the wavelength column doppler corrected.
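Examples
--------
Worked example with hypothetical numbers: for z = 0.01 a bin at 5000 A becomes
5000 - 5000 * 0.01 = 4950 A; only the wavelength row (val[0]) is changed.
>> data_dc = dopcor(np.array([wl, p]), 0.01)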
"""
values = np.array(val) # need this in case val is not an array but a list
wl0 = values[0]
wln = np.array([])
for wl in wl0:
wl_dopcor = (wl) - (wl * z)
wln = np.append(wln, wl_dopcor)
values[0] = wln
return values
def dopcor_file(filename, z, dataframe=True, sep='\t'):
"""
Doppler Correction of data from a file (filename), into another file (output)
Parameters
----------
filename : str
Name of the file where the data to be doppler corrected is
z : float
Redshift
dataframe : bool, optional
Default is True. If True the file is read with pandas and written to 'dc_' + filename; if False the file is copied and edited line by line.
sep : str, optional
Column separator used when dataframe is True. Default is a tab.
"""
if dataframe is False:
output = 'dc_' + filename
os.system('cp -i ' + filename + ' ' + output)
f = open(output, 'r+')
dopcor = []
for line in f:
columns = line.split()
wl = float(columns[0])
wl_dopcor = (wl) - (wl * z)
dopcor.append(wl_dopcor)
f.close()
f0 = open(filename, 'r')
f = open(output, 'w')
i = 0
for line in f0:
columns = line.split()
n_line = line.replace(columns[0], str(dopcor[i]))
f.write(n_line)
i = i + 1
print(output + ' created')
elif dataframe is True:
data = pd.read_csv(filename, sep = sep)
#data['wl'] -= data['wl']*z
data.iloc[:,0] = data.iloc[:,0].values - data.iloc[:,0].values*z
data.to_csv('dc_'+filename, sep = '\t', index=False)
print('dc_'+filename + ' created')
def ylim_def(wl, f, wlmin=4500, wlmax=9500):
"""
(Yes I need this in PolData.) Finds appropriate y limits for a spectrum. Looks at values within a given range (default: 4500-9500A), where
we don't expect the discrepancies of a few orders of magnitude sometimes seen at the extremities of the spectrum, then
finds the max and min values and defines ymax and ymin from them.
"""
fmax = -100000
fmin = 1000
for i in range(len(wl)):
if wl[i] < wlmax and wl[i] > wlmin:
if f[i] < fmin:
fmin = f[i]
#print(fmin)
elif f[i] > fmax:
fmax = f[i]
#print(fmax)
# These tweaks to make the y limit okay were determined through testing. May not always
# be appropriate and might need fixing later.
if fmin > 0 and fmin < 1:
ymin = fmin - 1.2 * fmin
elif fmin > 0 and fmin > 1:
ymin = fmin - fmin / 5
elif fmin < 0 and fmin > -1:
ymin = fmin + 1.2 * fmin
elif fmin < 0 and fmin < -1:
ymin = fmin + fmin / 5
if fmax > 0 and fmax < 1:
ymax = fmax + 1.2 * fmax
elif fmax > 0 and fmax > 1:
ymax = fmax + fmax / 5
elif fmax < 0 and fmax > -1:
ymax = fmax - 1.2 * fmax
elif fmax < 0 and fmax < -1:
ymax = fmax - fmax / 10
return ymin, ymax
def rot_data(q, u, theta):
"""
Used to rotate Stokes parameters (or any 2D data set) by angle theta.
Parameters
----------
q : 1D np.array
u : 1D np.array
theta : float
Returns
-------
Two 1D np.arrays: q rotated, u rotated
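Notes
-----
theta is expected in radians; each (q, u) pair is multiplied by the standard
2D rotation matrix [[cos(theta), -sin(theta)], [sin(theta), cos(theta)]].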
"""
rot_matrix = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
q_rot = np.array([])
u_rot = np.array([])
# Applying rotation to all bins and storing result in q_rot and u_rot
for i in range(len(u)):
coor = np.array([[q[i]],
[u[i]]])
new_coor_i = np.dot(rot_matrix, coor)
q_rot = np.append(q_rot, new_coor_i[0])
u_rot = np.append(u_rot, new_coor_i[1])
return q_rot, u_rot
def norm_ellipse(xc, yc, a, b, theta, n):
"""
Creates ellipsoidal data set normally distributed around (xc,yc).
Parameters
----------
xc : float
X coordinate of ellipse center
yc : float
Y coordinate of ellipse center
a : float
major axis
b : float
minor axis
theta : float
Angle of the ellipse (in radians).
n : int
Number of points
Returns
-------
Two 1D np.arrays containing the x and y coordinates (respectively) of the data created.
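Examples
--------
Usage sketch (arbitrary values): 500 points centred on (0, 0) with widths 1.0 and 0.5,
rotated by 45 degrees (pi/4 radians):
>> x, y = norm_ellipse(0, 0, 1.0, 0.5, np.pi / 4, 500)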
"""
i = 0
x = np.array([])
y = np.array([])
# This creates the data within the ellipse. The x and y coordinates are drawn from normal distributions,
# so we get normally distributed points in 2D, but it also means the ellipse's major and minor axes
# are aligned with the x and y axes. The data set can therefore be rotated afterwards if theta is non-zero.
while i <= n:
x = np.append(x, np.random.normal(xc, a))
y = np.append(y, np.random.normal(yc, b))
i += 1
if theta != 0:
x, y = rot_data(x, y, theta) # Applying rotation
return x, y
def ep_date():
"""
Interactive Routine. Finds epoch from date or date from epoch given a maximum date.
"""
# ####### Functions used by ep_date ########## #
def date_input():
yr = input("Year: ")
month = input("Month: ")
day = input("Day: ")
date = dt.date(int(yr), int(month), int(day))
return date
def date_from_epoch():
ep = dt.timedelta(float(input("\n What epoch (in days) would you like to know the date for: ")))
print('\nDate at epoch ' + str(ep) + ' days: ')
print(vmax + ep)
return vmax + ep
def ep_from_dates():
print("\nDate of epoch you want in days")
date_ep = date_input()
ep = date_ep - vmax
print('\nEpoch:')
print(ep)
return ep
# ############### MAIN ##################### #
print("\nDate at V-band max")
vmax = date_input()
print("\n What do you want to do? \n (1) Get epoch in days. Inputs: Date of epoch" \
"\n (2) Get date for an epoch in days. Inputs: Epoch in days (can be negative)" \
"\n (3) Update the V-band max date" \
"\n (4) Exit")
to_do = input("#> ")
while to_do != '4':
if to_do == '1':
ep_from_dates()
if to_do == '2':
date_from_epoch()
if to_do == '3':
print("\nDate at V-band max")
vmax = date_input()
if to_do != '1' and to_do != '2' and to_do != '3' and to_do != '4':
print("Must choose option 1, 2, 3 or 4")
to_do = input("#> ")
return "Good Bye"
def vel():
"""
Interactive routine. Finds the velocity for a given observed wavelength and rest wavelength.
"""
cont = 'y'
while cont == 'y' or cont == '':
l_obs = float(input('What is the observed wavelength: '))
l_emit = float(input('What is the rest wavelength: '))
c = 299792.458 # Speed of light in km/s
v = ((l_obs - l_emit) / l_emit) * c
print(v)
cont = input('Continue?(y/n): ')
# ################################################################################# #
# ############## CLASSE ############## POLDATA ########### CLASSE ############### #
# ################################################################################# #
class PolData(object):
"""
Each instance contains one spectropolarimetric data set.
Note
-----
The attributes wlp, p, pr, q, qr, u, ur, a and ar are 1D arrays and must have the
same length.
The attributes wlf, f and fr must have the same length, but it can differ from the
length of wlp, p, etc...
When the ISP is removed, the attributes p0, p0r, q0, etc... store the original values
of p, pr, q, etc..., and the latter are updated to have the ISP corrected values of polarisation.
Parameters
----------
poldata : str or tuple
The polarisation data can be imported from a text file containing only the data, where
the column order is: wavelength p p_err q q_err u u_err a a_err.
Alternatively a tuple of arrays containing the data can be provided. Make sure the order
of the arrays in the tuple corresponds to wavelength p p_err q q_err u u_err a a_err.
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlmin : int, optional
Minimum wavelength cutoff
wlmax : int, optional
Maximum wavelength cutoff
Attributes
----------
name : str
A short handle to make your data object recognisable (e.g. 'ep1', '14ad')
wlp : array
1D array containing the wavelength bins of the polarisation data.
p : array
1D array containing the degree of polarisation in each bin.
pr : array
1D array containing the error on p in each bin.
q : array
1D array containing Stokes q in each bin.
qr : array
1D array containing the error on q in each bin.
u : array
1D array containing Stokes u in each bin.
ur : array
1D array containing the error on u in each bin.
a : array
1D array containing the polarisation angle in each bin.
ar : array
1D array containing the error on the polarisation in each bin.
wlf : array, optional
1D array containing wavelength bins of the flux spectrum.
f : array, optional
1D array containing the flux in each bin.
fr : array, optional
1D array containing the error on the flux in each bin.
qisp : float, optional
Stokes q of the ISP.
qispr : float, optional
Error on q ISP.
uisp : float, optional
Stokes u of the ISP
uispr : float, optional
Error on u ISP
gradq : tuple, optional
Gradient of Stokes q ISP and error on the gradient.
constq : tuple, optional
Intercept of Stokes q ISP and error on the intercept.
gradu : tuple, optional
Gradient of Stokes u ISP and error on the gradient.
constu : tuple, optional
Intercept of Stokes u ISP and error on the intercept.
p0 : array
1D array containing the degree of polarisation in each bin BEFORE ISP REMOVAL.
p0r : array
1D array containing the error on p in each bin BEFORE ISP REMOVAL.
q0 : array
1D array containing Stokes q in each bin BEFORE ISP REMOVAL.
q0r : array
1D array containing the error on q in each bin BEFORE ISP REMOVAL.
u0 : array
1D array containing Stokes u in each bin BEFORE ISP REMOVAL.
u0r : array
1D array containing the error on u in each bin BEFORE ISP REMOVAL.
a0 : array
1D array containing the polarisation angle in each bin BEFORE ISP REMOVAL.
a0r : array
1D array containing the error on the polarisation in each bin BEFORE ISP REMOVAL.
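Examples
--------
Minimal usage sketch (hypothetical file names):
>> ep1 = PolData('ep1_pol.txt', name='ep1', wlmin=4000, wlmax=9500)
>> ep1.add_flux_data('ep1_flux.txt')
>> ep1.find_isp(7800, 8400)
>> ep1.rmv_isp()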
"""
def __init__(self, poldata, name=' ', wlmin=0, wlmax=1000000):
if type(poldata) is str:
try:
# This is for the old way of creating pol data files from datred (space separated, no header)
pol0 = get_pol(poldata, wlmin=wlmin, wlmax=wlmax)
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
except ValueError:
# This is for the new pol data files from datred (pandas DataFrame written to a tab separated file with column names)
poldf = pd.read_csv(poldata, sep='\t')
mask = (poldf.iloc[:,0].values > wlmin) & (poldf.iloc[:,0].values < wlmax)
self.wlp, self.p, self.pr = poldf.iloc[:,0].values[mask], poldf.iloc[:,1].values[mask], poldf.iloc[:,2].values[mask]
self.q, self.qr = poldf.iloc[:,3].values[mask], poldf.iloc[:,4].values[mask]
self.u, self.ur = poldf.iloc[:,5].values[mask], poldf.iloc[:,6].values[mask]
self.a, self.ar = poldf.iloc[:,7].values[mask], poldf.iloc[:,8].values[mask]
else:
pol0 = poldata
self.wlp, self.p, self.pr= pol0[0], pol0[1], pol0[2]
self.q , self.qr, self.u, self.ur, self.a, self.ar = pol0[3], pol0[4], pol0[5], pol0[6], pol0[7], pol0[8]
self.name = name
self.wlf = None
self.f = None
self.fr = None
self.qisp = None
self.qispr = None
self.uisp = None
self.uispr = None
self.pisp = None
self.pispr = None
self.aisp = None
self.aispr = None
self.gradq = None
self.constq = None
self.gradu = None
self.constu = None
self.q0 = None
self.u0 = None
self.q0r = None
self.u0r = None
self.p0 = None
self.p0r = None
self.a0 = None
self.a0r = None
print(" ==== PolData - instance: " + self.name + " ====")
print("Polarisation data initialised. If you want to add Stokes I use add_flux_data(). " \
"To find ISP use find_isp(). \n")
def add_flux_data(self, filename, wlmin=0, wlmax=1000000, err=False, scale=False, skiprows = 0):
"""
Adds flux spectrum data attributes to the PolData.
Parameters
----------
filename : str
File containing the flux data. File format: wl, f, fr (no comas)
wlmin : int
Minimum wavelength cut off
wlmax :
Maximum wavelength cut off
err : bool
If false, only imports wavelength and flux, not the error on the flux. Default = False.
skiprows : int, optional
Default is 0, number of rows to skip
"""
try:
flux = get_spctr(filename, wlmin=wlmin, wlmax=wlmax, scale=scale, skiprows = skiprows)
self.wlf = flux[0]
self.f = flux[1]
if err is True:
self.fr = flux[2]
print(" ==== PolData - instance: " + self.name + " ====")
print("Flux spectrum added.")
except ValueError as error:
print("ValueError: "+str(error) + "\n /!\ This function uses np.loadtxt, if there are rows of text at the top of your file that need to be skipped add the argument skiprows = [number of rows to skip]")
def flu_n_pol(self, save=False):
"""
Creates plot of p, q, u, theta, and flux.
Note
----
/!\ The x-axis is SHARED, so the wavelength ranges of the polarisation attributes and the flux
attributes should be the same.
Parameters
----------
save : bool
Whether to save the plot or not. Saved as [self.name]_fnp.png
"""
fnp = plt.figure(figsize=(10, 10))
grid = gridspec.GridSpec(5, 1, hspace=0)
p_plot = plt.subplot(grid[0])
q_plot = plt.subplot(grid[1])
u_plot = plt.subplot(grid[2])
a_plot = plt.subplot(grid[3])
f_plot = plt.subplot(grid[4])
p_plot.errorbar(self.wlp, self.p, yerr=self.pr, color='purple', capsize=0, ecolor='grey')
q_plot.errorbar(self.wlp, self.q, yerr=self.qr, color='r', alpha=0.8, capsize=0, ecolor='grey')
u_plot.errorbar(self.wlp, self.u, yerr=self.ur, color='blue', alpha=0.8, capsize=0, ecolor='grey')
a_plot.errorbar(self.wlp, self.a, yerr=self.ar, color='orange', alpha=0.8, capsize=0, ecolor='grey')
try:
f_plot.errorbar(self.wlf, self.f, yerr=self.fr, color='k', alpha=0.5, lw=1.5, capsize=0, ecolor='grey')
except:
print('Flux attributes not defined')
p_plot.set_ylim(ylim_def(self.wlp, self.p, wlmin=4700))
p_plot.set_ylabel('p (%)')
p_plot.set_title(self.name, fontsize=16)
q_plot.set_ylim(ylim_def(self.wlp, self.q, wlmin=4700))
q_plot.set_ylabel('q (%)')
u_plot.set_ylim(ylim_def(self.wlp, self.u, wlmin=4700))
u_plot.set_ylabel('u (%)')
a_plot.set_ylim(ylim_def(self.wlp, self.a, wlmin=4700))
a_plot.set_ylabel('P.A (deg)')
try:
f_plot.set_ylim(ylim_def(self.wlf, self.f))
f_plot.set_ylabel('Flux')
f_plot.set_xlabel('Wavelength (Ang)', fontsize=14)
except:
print('Flux attributes not defined')
p_plot.xaxis.set_visible(False)
q_plot.xaxis.set_visible(False)
u_plot.xaxis.set_visible(False)
a_plot.xaxis.set_visible(False)
if save is True:
fnp.savefig(self.name + '_fnp.png')
plt.show()
return
def find_isp(self, wlmin, wlmax):
"""
Estimates ISP
Notes
-----
Simply an error-weighted average of q and u over a given wavelength range, which should correspond to a line
blanketing region.
Parameters
----------
wlmin : int
Start of wavelength range.
wlmax : int
End of wavelength range.
"""
ls = [self.q, self.qr, self.u, self.ur]
cond = (self.wlp > wlmin) & (self.wlp < wlmax)
crop = []
for val in ls:
valn = val[cond]
crop.append(valn)
# Values of p, q, u, a and their error for ISP
self.qisp = np.average(crop[0], weights=1 / (crop[1] ** 2))
self.qispr = np.std(crop[0])
self.uisp = np.average(crop[2], weights=1 / (crop[3] ** 2))
self.uispr = np.std(crop[2])
self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
if self.pisp > self.pispr:
self.pisp = self.pisp - (self.pispr**2)/self.pisp
self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
self.aispr = (self.aispr * 180.0) / m.pi  # convert the error to degrees, consistent with add_isp()
if self.aisp < 0:
self.aisp = 180 + self.aisp # Making sure P.A range is 0-180 deg
print(" ==== PolData - instance: " + self.name + " ====")
print("ISP found: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr) \
+ "\n usip = " + str(self.uisp) + " +/- " + str(self.uispr) \
+ "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr) \
+ "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr))
return self.qisp, self.qispr, self.uisp, self.uispr
def add_isp(self, constisp_params = None, linearisp_params = None):
"""
Adds parameters of isp to the data.
Parameters
----------
constisp_params : list
If the isp is constant give the stokes parameters of the isp here in a list:
[qisp, qisp error, uisp , uisp error]
linearisp_params : list
List of lists: [[grad_q, grad_q error], [intercept_q, intercept_q error],
[grad_u, grad_u error], [intercept_u, intercept_u error], [cov_q, cov_u]],
where cov_q and cov_u are the covariances of the q and u line parameters (used by rmv_isp).
For qisp = grad_q * lambda + intercept_q (and similar equation for u), where lambda is in Angstrom.
Examples
--------
If the ISP is constant across your wavelength range, put its values an associated errors in constisp_params:
>> PolDataObj.add_isp(constisp_params=[0.14, 0.04, 0.08, 0.03])
If the isp changes linearly with wavelength, give the parameters for the lines of q and u ISP here
(five elements: the two q line parameters, the two u line parameters and the covariances):
>> PolDataObj.add_isp(linearisp_params=[[grad_q, grad_q_err], [intercept_q, intercept_q_err],
[grad_u, grad_u_err], [intercept_u, intercept_u_err], [cov_q, cov_u]])
"""
if linearisp_params is None:
self.qisp, self.qispr, self.uisp, self.uispr = constisp_params
# Values of p, q, u, a and their error for ISP
self.pisp = np.sqrt(self.qisp ** 2 + self.uisp ** 2)
self.pispr = (1 / self.pisp) * np.sqrt((self.qisp * self.qispr) ** 2 + (self.uisp * self.uispr) ** 2)
self.aisp = (0.5 * m.atan2(self.uisp, self.qisp)) * 180.0 / m.pi
self.aispr = 0.5 * np.sqrt(((self.uispr / self.uisp) ** 2 + (self.qispr / self.qisp) ** 2) * (
1 / (1 + (self.uisp / self.qisp) ** 2)) ** 2)
self.aispr = (self.aispr * 180.0) / m.pi
if self.aisp < 0:
self.aisp = 180 + self.aisp # Making sure P.A range is 0-180 deg
print(" ==== PolData - instance: " + self.name + " ====")
print("ISP Added: \n qisp = " + str(self.qisp) + " +/- " + str(self.qispr) \
+ "\n usip = " + str(self.uisp) + " +/- " + str(self.uispr) \
+ "\n pisp = " + str(self.pisp) + " +/- " + str(self.pispr) \
+ "\n P.A isp = " + str(self.aisp) + " +/- " + str(self.aispr) + "\n")
self.gradq = None # this will be used as a condition for the method of isp removal in rmv_isp
elif constisp_params is None:
self.gradq, self.constq, self.gradu, self.constu, self.cov = linearisp_params
self.qisp = None # this will be used as a condition for the method of isp removal in rmv_isp
return
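# Hedged usage sketches (values are illustrative only; `pol` is a populated instance as above):
#   pol.add_isp(constisp_params=[0.14, 0.04, 0.08, 0.03])          # constant ISP
#   pol.add_isp(linearisp_params=[[3.5e-4, 3e-5], [2.45, 0.19],    # linear ISP: q line,
#                                 [1.2e-4, 2e-5], [0.60, 0.10],    # u line,
#                                 [1e-9, 1e-9]])                   # and [cov_q, cov_u]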
def rmv_isp(self, bayesian_pcorr=False, p0_step=0.01):
# TODO: I need 2 tests for this. Maybe will need 14ad data for the constant case and 11hs for the linear case
"""
Removes ISP and updates q, qr, u, ur, p, pr, a and ar.
Notes
-----
Stores the original non ISP corrected degree of polarisation, Stokes parameters, polarisation angle,
and associated errors in p0, p0r, q0, q0r, u0, u0r, a0, and a0r, and updates p, pr, q, qr, u, ur, a and ar.
"""
# Storing original values of Stokes parameters and their errors in newly defined
# attributes.
self.q0 = self.q
self.u0 = self.u
self.q0r = self.qr
self.u0r = self.ur
# Storing original degree of polarisation and its error in new attributes and updating p and pr
self.p0 = self.p
self.p0r = self.pr
# Same as before but for the P.A
self.a0 = self.a
self.a0r = self.ar
if self.qisp is None:
new_stokes, __ = isp.linear_isp(self.wlp, self.gradq, self.constq,
self.gradu, self.constu,
self.cov[0], self.cov[1], #respectively covariance of q parameters and u parameters
self.q, self.qr,
self.u, self.ur,
bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
elif self.gradq is None:
new_stokes = isp.const_isp(self.wlp, self.qisp, self.qispr,
self.uisp, self.uispr,
self.q, self.qr,
self.u, self.ur,
bayesian_pcorr=bayesian_pcorr, p0_step=p0_step)
self.p = new_stokes[1]
self.pr = new_stokes[2]
self.q = new_stokes[3] # new_stokes[0] is wavelength bins
self.qr = new_stokes[4]
self.u = new_stokes[5]
self.ur = new_stokes[6]
self.a = new_stokes[7]
self.ar = new_stokes[8]
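# Hedged usage sketch: once add_isp() (or find_isp()) has set the ISP attributes,
# rmv_isp() corrects p, q, u, a (and their errors) in place and keeps the originals
# in the *0 attributes:
#   pol.rmv_isp(bayesian_pcorr=False)
#   corrected_p, original_p = pol.p, pol.p0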
def qu_plt(self, subplot_loc=111, wlmin=0, wlmax=100000,
qlim=[-3.0, 3.0], ulim=[-3.0, 3.0], textloc=[-2.7, -2.7], cisp='k', fs=16,
ls=14, isp=False, wlrest=None, colorbar=True, colorbar_labelsize=14, size_clbar=0.05, line_color=None,
marker='.', lambda_xshift=1.7, fit=True,
qlab_vis=True, ulab_vis=True,
qticks_vis=True, uticks_vis=True, cmap='jet'):
# TODO: anyway to use *args here? how does that even work?
"""
Plots the QU plane corresponding to the imported data.
Parameters
----------
subplot_loc : int or matplotlib.gridspec.GridSpec, optional
Location of the subplot. Can be a 3-digit integer or a gridspec location if a grid was created using gridspec.
Default = 111.
wlmin : int, optional
Min wavelength cut off. Default 0.
wlmax : int, optional
Max wavelength cut off. Default 100000.
qlim : tuple, optional
[min q, max q]. Default = [-3.0, 3.0]
ulim : tuple, optional
[min u, max u]. Default = [-3.0, 3.0]
textloc : tuple, optional
Location of name of qu-plot. Default = [-2.7, -2.7]
cisp : string, optional
Color of ISP marker. Default = 'k'
fs : int, optional
Font size. Applies to text on plot and axis labels, not graduations on the axes. Default = 16
ls : int, optional
Label size. Size of the tick numbers on axes. Default = 14.
isp : bool, optional
Whether to plot ISP. Default False.
wlrest : int, optional
If plotting qu plot of a line, rest wavelength of that line. Otherwise leave default value: None.
colorbar : bool, optional
Default is True. If False the colorbar is not plotted.
colorbar_labelsize : int, optional
Label size of the color bar ticks. Default 14.
size_clbar : float, optional
Modifies the size of the colour bar; note that it also changes the space left for the main plot. Default = 0.05.
line_color : string, optional
Set to use a single solid colour for the lines between the markers. Default is None, which cycles
the line colours through the colormap so each segment matches the points it connects.
marker : string, optional
Type of marker to be used. Default is '.'
lambda_xshift : float, optional
Position of the colourbar label, defined as qmax + shift. This is the shift value. Default is 1.7.
fit : bool, optional
If False the dominant axis will not be plotted. Its parameters will still be calculated and returned.
Default is True.
qlab_vis : bool, optional
If False, the q label is not plotted. Default is True.
ulab_vis : bool, optional
If False, the u label is not plotted. Default is True.
qticks_vis : bool, optional
If False, all q tick labels are invisible. Default is True.
uticks_vis : bool, optional
If False, all u tick labels are invisible. Default is True.
cmap : str, optional
A valid matplotlib colormap. Default = jet
Returns
-------
matplotlib.axes._subplots.AxesSubplot
The axes the qu plane is plotted on, so that other artists (e.g. a line or an ellipse) can be plotted on top.
"""
# ################### FITTING THE DATA WITH DOM AXIS ########################### #
func = lambda beta,x: beta[0] + beta[1] * x # Expression of the line that we want to fit to the data
data = RealData(self.q, self.u, self.qr, self.ur)
model = Model(func)
odr = ODR(data, model, [0, 0])
# Given the levels of pol in SNE, I don't expect to ever have to plot a q-u plot with limits [-10,10]
# The following are just q values from -10 to 10 that will be used to plot the line fit
q_n = np.arange(-10, 10, 0.1)
qu = plt.subplot(subplot_loc, aspect='equal')
odr.set_job(fit_type=0) # fit_type = 0 => explicit ODR.
output = odr.run()
print(" ==== QUplot - instance: " + self.name + " ====")
print("Dom. Axis = a*x + b")
print("a = " + str(output.beta[1]) + " +/- " + str(output.sd_beta[1]))
print("b = " + str(output.beta[0]) + " +/- " + str(output.sd_beta[0]) + "\n")
u_n = func(output.beta, q_n) # Based on fit, get the u values for each q
if fit is True:
qu.plot(q_n, u_n, 'k--', linewidth=2, zorder=1000)
# the zorder is high to sit on top of the scatter created below
print(wlmin, wlmax)
cond = (self.wlp > wlmin) & (self.wlp < wlmax)
wl_crop = self.wlp[cond]
q_crop = self.q[cond]
qr_crop = self.qr[cond]
u_crop = self.u[cond]
ur_crop = self.ur[cond]
# #################### CREATING THE PLOT ########################
plt.set_cmap(cmap)
if wlrest is None:
# Defining the min and max wavelength, which are going to be the beginning and end of the colour map
wlmin = min(wl_crop)
wlmax = max(wl_crop)
sc = qu.scatter(q_crop, u_crop, s=100,
vmin=wlmin, vmax=wlmax,
c=wl_crop, marker=marker,
zorder=600, lw=0)
else:
vel = np.array([])
c = 299792.0
for i in range(len(wl_crop)):
v = c * ((wl_crop[i] - wlrest) / wlrest)
vel = np.append(vel, v)
# Defining the min and max VELOCITIES, which are going to be the beginning and end of the colour map
velmin = min(vel)
velmax = max(vel)
print(velmin, velmax)
sc = qu.scatter(q_crop, u_crop, s=100,
vmin=velmin, vmax=velmax,
c=vel, marker=marker,
zorder=600, lw=0)
# ################## Plotting Points ###############################
# vmin and vmax are the start and end of the colour map. c = wl because we're defining the colourmap using the
# wavelengths wl. zorder doesn't have to be 600, it just needs to be below that of the fitting line we did above
# and greater than the zorder of the error bars, because otherwise it doesn't look nice.
clbar = plt.colorbar(sc, fraction=size_clbar) # Plotting to colour map. Need to do that to get a rainbow.
clbar.ax.tick_params(labelsize=colorbar_labelsize)
if colorbar is False:
clbar.remove() # Removing Colormap from plot (but still exists so we can plot rainbows)
elif colorbar is True:
if wlrest is None:
qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, r'$\lambda (\AA)$', fontsize=fs)
else:
qu.text(qlim[1] + lambda_xshift, (ulim[1] + ulim[0]) / 2, 'Velocity (km/s)', rotation='vertical',
fontsize=fs)
a, b, c = qu.errorbar(q_crop, u_crop, xerr=qr_crop, yerr=ur_crop, marker='.', capsize=0,
zorder=500, linestyle='None', alpha=0.4) # Plotting error bars
# Convert my wavelengths into the colour map plotted earlier applying the colourbar to "c",
# that is, the errorbars, there are 2 components (c[0] and c[1]) because I have error bars in both x and y.
if wlrest is None:
clmap = clbar.to_rgba(wl_crop)
else:
clmap = clbar.to_rgba(vel)
c[0].set_color(clmap)
c[1].set_color(clmap)
# The following loop cycles through our colormap. Without this the lines we are about to create to connect
# the points of the scatter plot will not have colours corresponding to the points they are linking.
qu.set_prop_cycle(plt.cycler('color', clmap))
for i in range(len(wl_crop) - 1):
qu.plot(q_crop[i:i + 2], u_crop[i:i + 2], c=line_color,
alpha=1) # Here we create line for each pair of points
# Note that it's "i+2" so that the last point included is i+1, because the slice goes up to, but excludes, i+2.
# To mark ISP with errorbars
if isp is True:
plt.errorbar(self.qisp, self.uisp, xerr=self.qispr, yerr=self.uispr, fmt='o', color=cisp, elinewidth=2.5,
capthick=2.5, zorder=5000)
plt.axvline(0, color='k', linestyle='-.')
plt.axhline(0, color='k', linestyle='-.')
qu.tick_params(axis='both', which='major', labelsize=ls)
# Now fiddling with the ticks: if tick labels are visible, set every other one to invisible to give the axes
# some space. If tick labels are set to be invisible, make them all invisible.
xticks = qu.xaxis.get_major_ticks()
yticks = qu.yaxis.get_major_ticks()
''' Didn't work to resize my tick labels :(
for xtick in xticks:
xtick.label1.set_fontsize(ticklabelsize)
for ytick in yticks:
ytick.label1.set_fontsize(ticklabelsize)
'''
if qticks_vis is False:
for i in range(0, len(xticks)):
xticks[i].label1.set_visible(False)
else:
for i in range(0, len(xticks), 2):
xticks[i].label1.set_visible(False)
if uticks_vis is False:
for i in range(0, len(yticks)):
yticks[i].label1.set_visible(False)
else:
for i in range(0, len(yticks), 2):
yticks[i].label1.set_visible(False)
if qlab_vis is True:
qu.set_xlabel('q (%)', fontsize=fs)
if ulab_vis is True:
qu.set_ylabel('u (%)', labelpad=-1, fontsize=fs)
qu.text(textloc[0], textloc[1], self.name, fontsize=fs)
qu.set_xlim(qlim) # Setting some limits.
qu.set_ylim(ulim)
return qu
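# Hedged usage sketch (wavelength range and extra styling are illustrative only):
#   ax = pol.qu_plt(subplot_loc=111, wlmin=4400, wlmax=9000, isp=True)
#   ax.plot([-2, 2], [-2, 2], 'r:')  # anything else can be drawn on the returned axes
#   plt.show()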
|
/* NUGET: BEGIN LICENSE TEXT
*
* Microsoft grants you the right to use these script files for the sole
* purpose of either: (i) interacting through your browser with the Microsoft
* website or online service, subject to the applicable licensing or use
* terms; or (ii) using the files as included with a Microsoft product subject
* to that product's license terms. Microsoft reserves all other rights to the
* files not expressly granted by Microsoft, whether by implication, estoppel
* or otherwise. Insofar as a script file is dual licensed under GPL,
* Microsoft neither took the code under GPL nor distributes it thereunder but
* under the terms set out in this paragraph. All notices and licenses
* below are for informational purposes only.
*
* NUGET: END LICENSE TEXT */
/*
* This file has been commented to support Visual Studio Intellisense.
* You should not use this file at runtime inside the browser--it is only
 * intended to be used for design-time IntelliSense. Please use the
* standard jQuery library for all production use.
*
* Comment version: 1.11.1
*/
/*
* Note: While Microsoft is not the author of this file, Microsoft is
* offering you a license subject to the terms of the Microsoft Software
* License Terms for Microsoft ASP.NET Model View Controller 3.
* Microsoft reserves all other rights. The notices below are provided
* for informational purposes only and are not the license terms under
* which Microsoft distributed this file.
*
* jQuery Validation Plugin - v1.11.1 - 2/4/2013
* https://github.com/jzaefferer/jquery-validation
* Copyright (c) 2013 Jörn Zaefferer; Licensed MIT
*
*/
(function ($) {
$.extend($.fn, {
// http://docs.jquery.com/Plugins/Validation/validate
validate: function (options) {
/// <summary>
/// Validates the selected form. This method sets up event handlers for submit, focus,
/// keyup, blur and click to trigger validation of the entire form or individual
/// elements. Each one can be disabled, see the onxxx options (onsubmit, onfocusout,
/// onkeyup, onclick). focusInvalid focuses elements when submitting an invalid form.
/// </summary>
/// <param name="options" type="Object">
/// A set of key/value pairs that configure the validate. All options are optional.
/// </param>
// if nothing is selected, return nothing; can't chain anyway
if (!this.length) {
options && options.debug && window.console && console.warn("nothing selected, can't validate, returning nothing");
return;
}
// check if a validator for this form was already created
var validator = $.data(this[0], 'validator');
if (validator) {
return validator;
}
validator = new $.validator(options, this[0]);
$.data(this[0], 'validator', validator);
if (validator.settings.onsubmit) {
// allow suppressing validation by adding a cancel class to the submit button
this.find("input, button").filter(".cancel").click(function () {
validator.cancelSubmit = true;
});
// when a submitHandler is used, capture the submitting button
if (validator.settings.submitHandler) {
this.find("input, button").filter(":submit").click(function () {
validator.submitButton = this;
});
}
// validate the form on submit
this.submit(function (event) {
if (validator.settings.debug)
// prevent form submit to be able to see console output
event.preventDefault();
function handle() {
if (validator.settings.submitHandler) {
if (validator.submitButton) {
// insert a hidden input as a replacement for the missing submit button
var hidden = $("<input type='hidden'/>").attr("name", validator.submitButton.name).val(validator.submitButton.value).appendTo(validator.currentForm);
}
validator.settings.submitHandler.call(validator, validator.currentForm);
if (validator.submitButton) {
// and clean up afterwards; thanks to no-block-scope, hidden can be referenced
hidden.remove();
}
return false;
}
return true;
}
// prevent submit for invalid forms or custom submit handlers
if (validator.cancelSubmit) {
validator.cancelSubmit = false;
return handle();
}
if (validator.form()) {
if (validator.pendingRequest) {
validator.formSubmitted = true;
return false;
}
return handle();
} else {
validator.focusInvalid();
return false;
}
});
}
return validator;
},
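// Hedged usage sketch (the selector, field names and rules below are illustrative only):
// $("#commentForm").validate({
//     rules: { name: "required", email: { required: true, email: true } },
//     submitHandler: function (form) { form.submit(); }
// });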
// http://docs.jquery.com/Plugins/Validation/valid
valid: function () {
/// <summary>
/// Checks if the selected form is valid or if all selected elements are valid.
/// validate() needs to be called on the form before checking it using this method.
/// </summary>
/// <returns type="Boolean" />
if ($(this[0]).is('form')) {
return this.validate().form();
} else {
var valid = true;
var validator = $(this[0].form).validate();
this.each(function () {
valid &= validator.element(this);
});
return valid;
}
},
// attributes: space-separated list of attributes to retrieve and remove
removeAttrs: function (attributes) {
/// <summary>
/// Remove the specified attributes from the first matched element and return them.
/// </summary>
/// <param name="attributes" type="String">
/// A space-separated list of attribute names to remove.
/// </param>
var result = {},
$element = this;
$.each(attributes.split(/\s/), function (index, value) {
result[value] = $element.attr(value);
$element.removeAttr(value);
});
return result;
},
// http://docs.jquery.com/Plugins/Validation/rules
rules: function (command, argument) {
/// <summary>
/// Return the validations rules for the first selected element.
/// </summary>
/// <param name="command" type="String">
/// Can be either "add" or "remove".
/// </param>
/// <param name="argument" type="">
/// A list of rules to add or remove.
/// </param>
var element = this[0];
if (command) {
var settings = $.data(element.form, 'validator').settings;
var staticRules = settings.rules;
var existingRules = $.validator.staticRules(element);
switch (command) {
case "add":
$.extend(existingRules, $.validator.normalizeRule(argument));
staticRules[element.name] = existingRules;
if (argument.messages)
settings.messages[element.name] = $.extend(settings.messages[element.name], argument.messages);
break;
case "remove":
if (!argument) {
delete staticRules[element.name];
return existingRules;
}
var filtered = {};
$.each(argument.split(/\s/), function (index, method) {
filtered[method] = existingRules[method];
delete existingRules[method];
});
return filtered;
}
}
var data = $.validator.normalizeRules(
$.extend(
{},
$.validator.metadataRules(element),
$.validator.classRules(element),
$.validator.attributeRules(element),
$.validator.staticRules(element)
), element);
// make sure required is at front
if (data.required) {
var param = data.required;
delete data.required;
data = $.extend({ required: param }, data);
}
return data;
}
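// Hedged usage sketch (assumes an input named "email" inside an already-validated form):
// $("#commentForm input[name='email']").rules("add", {
//     minlength: 3,
//     messages: { minlength: "Too short" }
// });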
});
// Custom selectors
$.extend($.expr[":"], {
// http://docs.jquery.com/Plugins/Validation/blank
blank: function (a) { return !$.trim("" + a.value); },
// http://docs.jquery.com/Plugins/Validation/filled
filled: function (a) { return !!$.trim("" + a.value); },
// http://docs.jquery.com/Plugins/Validation/unchecked
unchecked: function (a) { return !a.checked; }
});
// constructor for validator
$.validator = function (options, form) {
this.settings = $.extend(true, {}, $.validator.defaults, options);
this.currentForm = form;
this.init();
};
$.validator.format = function (source, params) {
/// <summary>
/// Replaces {n} placeholders with arguments.
/// One or more arguments can be passed, in addition to the string template itself, to insert
/// into the string.
/// </summary>
/// <param name="source" type="String">
/// The string to format.
/// </param>
/// <param name="params" type="String">
/// The first argument to insert, or an array of Strings to insert
/// </param>
/// <returns type="String" />
if (arguments.length == 1)
return function () {
var args = $.makeArray(arguments);
args.unshift(source);
return $.validator.format.apply(this, args);
};
if (arguments.length > 2 && params.constructor != Array) {
params = $.makeArray(arguments).slice(1);
}
if (params.constructor != Array) {
params = [params];
}
$.each(params, function (i, n) {
source = source.replace(new RegExp("\\{" + i + "\\}", "g"), n);
});
return source;
};
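// Hedged usage sketch of the {n} substitution described above:
// $.validator.format("Please enter a value between {0} and {1}.", [3, 7]);
// var tmpl = $.validator.format("Please enter at least {0} characters."); // single argument: returns a partial
// tmpl(5); // -> "Please enter at least 5 characters."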
$.extend($.validator, {
defaults: {
messages: {},
groups: {},
rules: {},
errorClass: "error",
validClass: "valid",
errorElement: "label",
focusInvalid: true,
errorContainer: $([]),
errorLabelContainer: $([]),
onsubmit: true,
ignore: [],
ignoreTitle: false,
onfocusin: function (element) {
this.lastActive = element;
// hide error label and remove error class on focus if enabled
if (this.settings.focusCleanup && !this.blockFocusCleanup) {
this.settings.unhighlight && this.settings.unhighlight.call(this, element, this.settings.errorClass, this.settings.validClass);
this.addWrapper(this.errorsFor(element)).hide();
}
},
onfocusout: function (element) {
if (!this.checkable(element) && (element.name in this.submitted || !this.optional(element))) {
this.element(element);
}
},
onkeyup: function (element) {
if (element.name in this.submitted || element == this.lastElement) {
this.element(element);
}
},
onclick: function (element) {
// click on selects, radiobuttons and checkboxes
if (element.name in this.submitted)
this.element(element);
// or option elements, check parent select in that case
else if (element.parentNode.name in this.submitted)
this.element(element.parentNode);
},
highlight: function (element, errorClass, validClass) {
$(element).addClass(errorClass).removeClass(validClass);
},
unhighlight: function (element, errorClass, validClass) {
$(element).removeClass(errorClass).addClass(validClass);
}
},
// http://docs.jquery.com/Plugins/Validation/Validator/setDefaults
setDefaults: function (settings) {
/// <summary>
/// Modify default settings for validation.
/// Accepts everything that Plugins/Validation/validate accepts.
/// </summary>
/// <param name="settings" type="Options">
/// Options to set as default.
/// </param>
$.extend($.validator.defaults, settings);
},
messages: {
required: "This field is required.",
remote: "Please fix this field.",
email: "Please enter a valid email address.",
url: "Please enter a valid URL.",
date: "Please enter a valid date.",
dateISO: "Please enter a valid date (ISO).",
number: "Please enter a valid number.",
digits: "Please enter only digits.",
creditcard: "Please enter a valid credit card number.",
equalTo: "Please enter the same value again.",
accept: "Please enter a value with a valid extension.",
maxlength: $.validator.format("Please enter no more than {0} characters."),
minlength: $.validator.format("Please enter at least {0} characters."),
rangelength: $.validator.format("Please enter a value between {0} and {1} characters long."),
range: $.validator.format("Please enter a value between {0} and {1}."),
max: $.validator.format("Please enter a value less than or equal to {0}."),
min: $.validator.format("Please enter a value greater than or equal to {0}.")
},
autoCreateRanges: false,
prototype: {
init: function () {
this.labelContainer = $(this.settings.errorLabelContainer);
this.errorContext = this.labelContainer.length && this.labelContainer || $(this.currentForm);
this.containers = $(this.settings.errorContainer).add(this.settings.errorLabelContainer);
this.submitted = {};
this.valueCache = {};
this.pendingRequest = 0;
this.pending = {};
this.invalid = {};
this.reset();
var groups = (this.groups = {});
$.each(this.settings.groups, function (key, value) {
$.each(value.split(/\s/), function (index, name) {
groups[name] = key;
});
});
var rules = this.settings.rules;
$.each(rules, function (key, value) {
rules[key] = $.validator.normalizeRule(value);
});
function delegate(event) {
var validator = $.data(this[0].form, "validator"),
eventType = "on" + event.type.replace(/^validate/, "");
validator.settings[eventType] && validator.settings[eventType].call(validator, this[0]);
}
$(this.currentForm)
.validateDelegate(":text, :password, :file, select, textarea", "focusin focusout keyup", delegate)
.validateDelegate(":radio, :checkbox, select, option", "click", delegate);
if (this.settings.invalidHandler)
$(this.currentForm).bind("invalid-form.validate", this.settings.invalidHandler);
},
// http://docs.jquery.com/Plugins/Validation/Validator/form
form: function () {
/// <summary>
/// Validates the form, returns true if it is valid, false otherwise.
/// This behaves as a normal submit event, but returns the result.
/// </summary>
/// <returns type="Boolean" />
this.checkForm();
$.extend(this.submitted, this.errorMap);
this.invalid = $.extend({}, this.errorMap);
if (!this.valid())
$(this.currentForm).triggerHandler("invalid-form", [this]);
this.showErrors();
return this.valid();
},
checkForm: function () {
this.prepareForm();
for (var i = 0, elements = (this.currentElements = this.elements()); elements[i]; i++) {
this.check(elements[i]);
}
return this.valid();
},
// http://docs.jquery.com/Plugins/Validation/Validator/element
element: function (element) {
/// <summary>
/// Validates a single element, returns true if it is valid, false otherwise.
/// This behaves as validation on blur or keyup, but returns the result.
/// </summary>
/// <param name="element" type="Selector">
/// An element to validate, must be inside the validated form.
/// </param>
/// <returns type="Boolean" />
element = this.clean(element);
this.lastElement = element;
this.prepareElement(element);
this.currentElements = $(element);
var result = this.check(element);
if (result) {
delete this.invalid[element.name];
} else {
this.invalid[element.name] = true;
}
if (!this.numberOfInvalids()) {
// Hide error containers on last error
this.toHide = this.toHide.add(this.containers);
}
this.showErrors();
return result;
},
// http://docs.jquery.com/Plugins/Validation/Validator/showErrors
showErrors: function (errors) {
/// <summary>
/// Show the specified messages.
/// Keys have to refer to the names of elements, values are displayed for those elements, using the configured error placement.
/// </summary>
/// <param name="errors" type="Object">
/// One or more key/value pairs of input names and messages.
/// </param>
if (errors) {
// add items to error list and map
$.extend(this.errorMap, errors);
this.errorList = [];
for (var name in errors) {
this.errorList.push({
message: errors[name],
element: this.findByName(name)[0]
});
}
// remove items from success list
this.successList = $.grep(this.successList, function (element) {
return !(element.name in errors);
});
}
this.settings.showErrors
? this.settings.showErrors.call(this, this.errorMap, this.errorList)
: this.defaultShowErrors();
},
// http://docs.jquery.com/Plugins/Validation/Validator/resetForm
resetForm: function () {
/// <summary>
/// Resets the controlled form.
/// Resets input fields to their original value (requires form plugin), removes classes
/// indicating invalid elements and hides error messages.
/// </summary>
if ($.fn.resetForm)
$(this.currentForm).resetForm();
this.submitted = {};
this.prepareForm();
this.hideErrors();
this.elements().removeClass(this.settings.errorClass);
},
numberOfInvalids: function () {
/// <summary>
/// Returns the number of invalid fields.
/// This depends on the internal validator state. It covers all fields only after
/// validating the complete form (on submit or via $("form").valid()). After validating
/// a single element, only that element is counted. Most useful in combination with the
/// invalidHandler-option.
/// </summary>
/// <returns type="Number" />
return this.objectLength(this.invalid);
},
objectLength: function (obj) {
var count = 0;
for (var i in obj)
count++;
return count;
},
hideErrors: function () {
this.addWrapper(this.toHide).hide();
},
valid: function () {
return this.size() == 0;
},
size: function () {
return this.errorList.length;
},
focusInvalid: function () {
if (this.settings.focusInvalid) {
try {
$(this.findLastActive() || this.errorList.length && this.errorList[0].element || [])
.filter(":visible")
.focus()
// manually trigger focusin event; without it, focusin handler isn't called, findLastActive won't have anything to find
.trigger("focusin");
} catch (e) {
// ignore IE throwing errors when focusing hidden elements
}
}
},
findLastActive: function () {
var lastActive = this.lastActive;
return lastActive && $.grep(this.errorList, function (n) {
return n.element.name == lastActive.name;
}).length == 1 && lastActive;
},
elements: function () {
var validator = this,
rulesCache = {};
// select all valid inputs inside the form (no submit or reset buttons)
// workaround $Query([]).add until http://dev.jquery.com/ticket/2114 is solved
return $([]).add(this.currentForm.elements)
.filter(":input")
.not(":submit, :reset, :image, [disabled]")
.not(this.settings.ignore)
.filter(function () {
!this.name && validator.settings.debug && window.console && console.error("%o has no name assigned", this);
// select only the first element for each name, and only those with rules specified
if (this.name in rulesCache || !validator.objectLength($(this).rules()))
return false;
rulesCache[this.name] = true;
return true;
});
},
clean: function (selector) {
return $(selector)[0];
},
errors: function () {
return $(this.settings.errorElement + "." + this.settings.errorClass, this.errorContext);
},
reset: function () {
this.successList = [];
this.errorList = [];
this.errorMap = {};
this.toShow = $([]);
this.toHide = $([]);
this.currentElements = $([]);
},
prepareForm: function () {
this.reset();
this.toHide = this.errors().add(this.containers);
},
prepareElement: function (element) {
this.reset();
this.toHide = this.errorsFor(element);
},
check: function (element) {
element = this.clean(element);
// if radio/checkbox, validate first element in group instead
if (this.checkable(element)) {
element = this.findByName(element.name).not(this.settings.ignore)[0];
}
var rules = $(element).rules();
var dependencyMismatch = false;
for (var method in rules) {
var rule = { method: method, parameters: rules[method] };
try {
var result = $.validator.methods[method].call(this, element.value.replace(/\r/g, ""), element, rule.parameters);
// if a method indicates that the field is optional and therefore valid,
// don't mark it as valid when there are no other rules
if (result == "dependency-mismatch") {
dependencyMismatch = true;
continue;
}
dependencyMismatch = false;
if (result == "pending") {
this.toHide = this.toHide.not(this.errorsFor(element));
return;
}
if (!result) {
this.formatAndAdd(element, rule);
return false;
}
} catch (e) {
this.settings.debug && window.console && console.log("exception occured when checking element " + element.id
+ ", check the '" + rule.method + "' method", e);
throw e;
}
}
if (dependencyMismatch)
return;
if (this.objectLength(rules))
this.successList.push(element);
return true;
},
// return the custom message for the given element and validation method
// specified in the element's "messages" metadata
customMetaMessage: function (element, method) {
if (!$.metadata)
return;
var meta = this.settings.meta
? $(element).metadata()[this.settings.meta]
: $(element).metadata();
return meta && meta.messages && meta.messages[method];
},
// return the custom message for the given element name and validation method
customMessage: function (name, method) {
var m = this.settings.messages[name];
return m && (m.constructor == String
? m
: m[method]);
},
// return the first defined argument, allowing empty strings
findDefined: function () {
for (var i = 0; i < arguments.length; i++) {
if (arguments[i] !== undefined)
return arguments[i];
}
return undefined;
},
defaultMessage: function (element, method) {
return this.findDefined(
this.customMessage(element.name, method),
this.customMetaMessage(element, method),
// title is never undefined, so handle empty string as undefined
!this.settings.ignoreTitle && element.title || undefined,
$.validator.messages[method],
"<strong>Warning: No message defined for " + element.name + "</strong>"
);
},
formatAndAdd: function (element, rule) {
var message = this.defaultMessage(element, rule.method),
theregex = /\$?\{(\d+)\}/g;
if (typeof message == "function") {
message = message.call(this, rule.parameters, element);
} else if (theregex.test(message)) {
message = jQuery.format(message.replace(theregex, '{$1}'), rule.parameters);
}
this.errorList.push({
message: message,
element: element
});
this.errorMap[element.name] = message;
this.submitted[element.name] = message;
},
addWrapper: function (toToggle) {
if (this.settings.wrapper)
toToggle = toToggle.add(toToggle.parent(this.settings.wrapper));
return toToggle;
},
defaultShowErrors: function () {
for (var i = 0; this.errorList[i]; i++) {
var error = this.errorList[i];
this.settings.highlight && this.settings.highlight.call(this, error.element, this.settings.errorClass, this.settings.validClass);
this.showLabel(error.element, error.message);
}
if (this.errorList.length) {
this.toShow = this.toShow.add(this.containers);
}
if (this.settings.success) {
for (var i = 0; this.successList[i]; i++) {
this.showLabel(this.successList[i]);
}
}
if (this.settings.unhighlight) {
for (var i = 0, elements = this.validElements(); elements[i]; i++) {
this.settings.unhighlight.call(this, elements[i], this.settings.errorClass, this.settings.validClass);
}
}
this.toHide = this.toHide.not(this.toShow);
this.hideErrors();
this.addWrapper(this.toShow).show();
},
validElements: function () {
return this.currentElements.not(this.invalidElements());
},
invalidElements: function () {
return $(this.errorList).map(function () {
return this.element;
});
},
showLabel: function (element, message) {
var label = this.errorsFor(element);
if (label.length) {
// refresh error/success class
label.removeClass().addClass(this.settings.errorClass);
// check if we have a generated label, replace the message then
label.attr("generated") && label.html(message);
} else {
// create label
label = $("<" + this.settings.errorElement + "/>")
.attr({ "for": this.idOrName(element), generated: true })
.addClass(this.settings.errorClass)
.html(message || "");
if (this.settings.wrapper) {
// make sure the element is visible, even in IE
// actually showing the wrapped element is handled elsewhere
label = label.hide().show().wrap("<" + this.settings.wrapper + "/>").parent();
}
if (!this.labelContainer.append(label).length)
this.settings.errorPlacement
? this.settings.errorPlacement(label, $(element))
: label.insertAfter(element);
}
if (!message && this.settings.success) {
label.text("");
typeof this.settings.success == "string"
? label.addClass(this.settings.success)
: this.settings.success(label);
}
this.toShow = this.toShow.add(label);
},
errorsFor: function (element) {
var name = this.idOrName(element);
return this.errors().filter(function () {
return $(this).attr('for') == name;
});
},
idOrName: function (element) {
return this.groups[element.name] || (this.checkable(element) ? element.name : element.id || element.name);
},
checkable: function (element) {
return /radio|checkbox/i.test(element.type);
},
findByName: function (name) {
// select by name and filter by form for performance over form.find("[name=...]")
var form = this.currentForm;
return $(document.getElementsByName(name)).map(function (index, element) {
return element.form == form && element.name == name && element || null;
});
},
getLength: function (value, element) {
switch (element.nodeName.toLowerCase()) {
case 'select':
return $("option:selected", element).length;
case 'input':
if (this.checkable(element))
return this.findByName(element.name).filter(':checked').length;
}
return value.length;
},
depend: function (param, element) {
return this.dependTypes[typeof param]
? this.dependTypes[typeof param](param, element)
: true;
},
dependTypes: {
"boolean": function (param, element) {
return param;
},
"string": function (param, element) {
return !!$(param, element.form).length;
},
"function": function (param, element) {
return param(element);
}
},
optional: function (element) {
return !$.validator.methods.required.call(this, $.trim(element.value), element) && "dependency-mismatch";
},
startRequest: function (element) {
if (!this.pending[element.name]) {
this.pendingRequest++;
this.pending[element.name] = true;
}
},
stopRequest: function (element, valid) {
this.pendingRequest--;
// sometimes synchronization fails, make sure pendingRequest is never < 0
if (this.pendingRequest < 0)
this.pendingRequest = 0;
delete this.pending[element.name];
if (valid && this.pendingRequest == 0 && this.formSubmitted && this.form()) {
$(this.currentForm).submit();
this.formSubmitted = false;
} else if (!valid && this.pendingRequest == 0 && this.formSubmitted) {
$(this.currentForm).triggerHandler("invalid-form", [this]);
this.formSubmitted = false;
}
},
previousValue: function (element) {
return $.data(element, "previousValue") || $.data(element, "previousValue", {
old: null,
valid: true,
message: this.defaultMessage(element, "remote")
});
}
},
classRuleSettings: {
required: { required: true },
email: { email: true },
url: { url: true },
date: { date: true },
dateISO: { dateISO: true },
dateDE: { dateDE: true },
number: { number: true },
numberDE: { numberDE: true },
digits: { digits: true },
creditcard: { creditcard: true }
},
addClassRules: function (className, rules) {
/// <summary>
/// Add a compound class method - useful to refactor common combinations of rules into a single
/// class.
/// </summary>
/// <param name="name" type="String">
/// The name of the class rule to add
/// </param>
/// <param name="rules" type="Options">
/// The compound rules
/// </param>
className.constructor == String ?
this.classRuleSettings[className] = rules :
$.extend(this.classRuleSettings, className);
},
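// Hedged usage sketch: a compound class rule, so class="fullname" on an input implies
// both rules below (the "fullname" class name is illustrative only):
// $.validator.addClassRules("fullname", { required: true, minlength: 2 });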
classRules: function (element) {
var rules = {};
var classes = $(element).attr('class');
classes && $.each(classes.split(' '), function () {
if (this in $.validator.classRuleSettings) {
$.extend(rules, $.validator.classRuleSettings[this]);
}
});
return rules;
},
attributeRules: function (element) {
var rules = {};
var $element = $(element);
for (var method in $.validator.methods) {
var value = $element.attr(method);
if (value) {
rules[method] = value;
}
}
// maxlength may be returned as -1, 2147483647 (IE) and 524288 (safari) for text inputs
if (rules.maxlength && /-1|2147483647|524288/.test(rules.maxlength)) {
delete rules.maxlength;
}
return rules;
},
metadataRules: function (element) {
if (!$.metadata) return {};
var meta = $.data(element.form, 'validator').settings.meta;
return meta ?
$(element).metadata()[meta] :
$(element).metadata();
},
staticRules: function (element) {
var rules = {};
var validator = $.data(element.form, 'validator');
if (validator.settings.rules) {
rules = $.validator.normalizeRule(validator.settings.rules[element.name]) || {};
}
return rules;
},
normalizeRules: function (rules, element) {
// handle dependency check
$.each(rules, function (prop, val) {
// ignore rule when param is explicitly false, eg. required:false
if (val === false) {
delete rules[prop];
return;
}
if (val.param || val.depends) {
var keepRule = true;
switch (typeof val.depends) {
case "string":
keepRule = !!$(val.depends, element.form).length;
break;
case "function":
keepRule = val.depends.call(element, element);
break;
}
if (keepRule) {
rules[prop] = val.param !== undefined ? val.param : true;
} else {
delete rules[prop];
}
}
});
// evaluate parameters
$.each(rules, function (rule, parameter) {
rules[rule] = $.isFunction(parameter) ? parameter(element) : parameter;
});
// clean number parameters
$.each(['minlength', 'maxlength', 'min', 'max'], function () {
if (rules[this]) {
rules[this] = Number(rules[this]);
}
});
$.each(['rangelength', 'range'], function () {
if (rules[this]) {
rules[this] = [Number(rules[this][0]), Number(rules[this][1])];
}
});
if ($.validator.autoCreateRanges) {
// auto-create ranges
if (rules.min && rules.max) {
rules.range = [rules.min, rules.max];
delete rules.min;
delete rules.max;
}
if (rules.minlength && rules.maxlength) {
rules.rangelength = [rules.minlength, rules.maxlength];
delete rules.minlength;
delete rules.maxlength;
}
}
// To support custom messages in metadata ignore rule methods titled "messages"
if (rules.messages) {
delete rules.messages;
}
return rules;
},
// Converts a simple string to a {string: true} rule, e.g., "required" to {required:true}
normalizeRule: function (data) {
if (typeof data == "string") {
var transformed = {};
$.each(data.split(/\s/), function () {
transformed[this] = true;
});
data = transformed;
}
return data;
},
// http://docs.jquery.com/Plugins/Validation/Validator/addMethod
addMethod: function (name, method, message) {
/// <summary>
/// Add a custom validation method. It must consist of a name (must be a legal javascript
/// identifier), a javascript based function and a default string message.
/// </summary>
/// <param name="name" type="String">
/// The name of the method, used to identify and referencing it, must be a valid javascript
/// identifier
/// </param>
/// <param name="method" type="Function">
/// The actual method implementation, returning true if an element is valid
/// </param>
/// <param name="message" type="String" optional="true">
/// (Optional) The default message to display for this method. Can be a function created by
/// jQuery.validator.format(value). When undefined, an already existing message is used
/// (handy for localization), otherwise the field-specific messages have to be defined.
/// </param>
$.validator.methods[name] = method;
$.validator.messages[name] = message != undefined ? message : $.validator.messages[name];
if (method.length < 3) {
$.validator.addClassRules(name, $.validator.normalizeRule(name));
}
},
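// Hedged usage sketch (the "lettersOnly" name and regex are illustrative only):
// $.validator.addMethod("lettersOnly", function (value, element) {
//     return this.optional(element) || /^[a-z]+$/i.test(value);
// }, "Please enter letters only.");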
methods: {
// http://docs.jquery.com/Plugins/Validation/Methods/required
required: function (value, element, param) {
// check if dependency is met
if (!this.depend(param, element))
return "dependency-mismatch";
switch (element.nodeName.toLowerCase()) {
case 'select':
// could be an array for select-multiple or a string, both are fine this way
var val = $(element).val();
return val && val.length > 0;
case 'input':
if (this.checkable(element))
return this.getLength(value, element) > 0;
default:
return $.trim(value).length > 0;
}
},
// http://docs.jquery.com/Plugins/Validation/Methods/remote
remote: function (value, element, param) {
if (this.optional(element))
return "dependency-mismatch";
var previous = this.previousValue(element);
if (!this.settings.messages[element.name])
this.settings.messages[element.name] = {};
previous.originalMessage = this.settings.messages[element.name].remote;
this.settings.messages[element.name].remote = previous.message;
param = typeof param == "string" && { url: param } || param;
if (this.pending[element.name]) {
return "pending";
}
if (previous.old === value) {
return previous.valid;
}
previous.old = value;
var validator = this;
this.startRequest(element);
var data = {};
data[element.name] = value;
$.ajax($.extend(true, {
url: param,
mode: "abort",
port: "validate" + element.name,
dataType: "json",
data: data,
success: function (response) {
validator.settings.messages[element.name].remote = previous.originalMessage;
var valid = response === true;
if (valid) {
var submitted = validator.formSubmitted;
validator.prepareElement(element);
validator.formSubmitted = submitted;
validator.successList.push(element);
validator.showErrors();
} else {
var errors = {};
var message = response || validator.defaultMessage(element, "remote");
errors[element.name] = previous.message = $.isFunction(message) ? message(value) : message;
validator.showErrors(errors);
}
previous.valid = valid;
validator.stopRequest(element, valid);
}
}, param));
return "pending";
},
// http://docs.jquery.com/Plugins/Validation/Methods/minlength
minlength: function (value, element, param) {
return this.optional(element) || this.getLength($.trim(value), element) >= param;
},
// http://docs.jquery.com/Plugins/Validation/Methods/maxlength
maxlength: function (value, element, param) {
return this.optional(element) || this.getLength($.trim(value), element) <= param;
},
// http://docs.jquery.com/Plugins/Validation/Methods/rangelength
rangelength: function (value, element, param) {
var length = this.getLength($.trim(value), element);
return this.optional(element) || (length >= param[0] && length <= param[1]);
},
// http://docs.jquery.com/Plugins/Validation/Methods/min
min: function (value, element, param) {
return this.optional(element) || value >= param;
},
// http://docs.jquery.com/Plugins/Validation/Methods/max
max: function (value, element, param) {
return this.optional(element) || value <= param;
},
// http://docs.jquery.com/Plugins/Validation/Methods/range
range: function (value, element, param) {
return this.optional(element) || (value >= param[0] && value <= param[1]);
},
// http://docs.jquery.com/Plugins/Validation/Methods/email
email: function (value, element) {
// contributed by Scott Gonzalez: http://projects.scottsplayground.com/email_address_validation/
return this.optional(element) || /^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?$/i.test(value);
},
// http://docs.jquery.com/Plugins/Validation/Methods/url
url: function (value, element) {
// contributed by Scott Gonzalez: http://projects.scottsplayground.com/iri/
return this.optional(element) || /^(https?|ftp):\/\/(((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:)*@)?(((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5]))|((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?)(:\d*)?)(\/((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)*)*)?)?(\?((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|[\uE000-\uF8FF]|\/|\?)*)?(\#((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|\/|\?)*)?$/i.test(value);
},
// http://docs.jquery.com/Plugins/Validation/Methods/date
date: function (value, element) {
return this.optional(element) || !/Invalid|NaN/.test(new Date(value));
},
// http://docs.jquery.com/Plugins/Validation/Methods/dateISO
dateISO: function (value, element) {
return this.optional(element) || /^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(value);
},
// http://docs.jquery.com/Plugins/Validation/Methods/number
number: function (value, element) {
return this.optional(element) || /^-?(?:\d+|\d{1,3}(?:,\d{3})+)(?:\.\d+)?$/.test(value);
},
// http://docs.jquery.com/Plugins/Validation/Methods/digits
digits: function (value, element) {
return this.optional(element) || /^\d+$/.test(value);
},
// http://docs.jquery.com/Plugins/Validation/Methods/creditcard
// based on http://en.wikipedia.org/wiki/Luhn
creditcard: function (value, element) {
if (this.optional(element))
return "dependency-mismatch";
// accept only digits and dashes
if (/[^0-9-]+/.test(value))
return false;
var nCheck = 0,
nDigit = 0,
bEven = false;
value = value.replace(/\D/g, "");
for (var n = value.length - 1; n >= 0; n--) {
var cDigit = value.charAt(n);
var nDigit = parseInt(cDigit, 10);
if (bEven) {
if ((nDigit *= 2) > 9)
nDigit -= 9;
}
nCheck += nDigit;
bEven = !bEven;
}
return (nCheck % 10) == 0;
},
// http://docs.jquery.com/Plugins/Validation/Methods/accept
accept: function (value, element, param) {
param = typeof param == "string" ? param.replace(/,/g, '|') : "png|jpe?g|gif";
return this.optional(element) || value.match(new RegExp(".(" + param + ")$", "i"));
},
// http://docs.jquery.com/Plugins/Validation/Methods/equalTo
equalTo: function (value, element, param) {
// bind to the blur event of the target in order to revalidate whenever the target field is updated
// TODO find a way to bind the event just once, avoiding the unbind-rebind overhead
var target = $(param).unbind(".validate-equalTo").bind("blur.validate-equalTo", function () {
$(element).valid();
});
return value == target.val();
}
}
});
// deprecated, use $.validator.format instead
$.format = $.validator.format;
})(jQuery);
// ajax mode: abort
// usage: $.ajax({ mode: "abort"[, port: "uniqueport"]});
// if mode:"abort" is used, the previous request on that port (port can be undefined) is aborted via XMLHttpRequest.abort()
; (function ($) {
var pendingRequests = {};
// Use a prefilter if available (1.5+)
if ($.ajaxPrefilter) {
$.ajaxPrefilter(function (settings, _, xhr) {
var port = settings.port;
if (settings.mode == "abort") {
if (pendingRequests[port]) {
pendingRequests[port].abort();
} pendingRequests[port] = xhr;
}
});
} else {
// Proxy ajax
var ajax = $.ajax;
$.ajax = function (settings) {
var mode = ("mode" in settings ? settings : $.ajaxSettings).mode,
port = ("port" in settings ? settings : $.ajaxSettings).port;
if (mode == "abort") {
if (pendingRequests[port]) {
pendingRequests[port].abort();
}
return (pendingRequests[port] = ajax.apply(this, arguments));
}
return ajax.apply(this, arguments);
};
}
})(jQuery);
// provides cross-browser focusin and focusout events
// IE has native support, in other browsers, use event capturing (neither bubbles)
// provides delegate(type: String, delegate: Selector, handler: Callback) plugin for easier event delegation
// handler is only called when $(event.target).is(delegate), in the scope of the jquery-object for event.target
; (function ($) {
// only implement if not provided by jQuery core (since 1.4)
// TODO verify if jQuery 1.4's implementation is compatible with older jQuery special-event APIs
if (!jQuery.event.special.focusin && !jQuery.event.special.focusout && document.addEventListener) {
$.each({
focus: 'focusin',
blur: 'focusout'
}, function (original, fix) {
$.event.special[fix] = {
setup: function () {
this.addEventListener(original, handler, true);
},
teardown: function () {
this.removeEventListener(original, handler, true);
},
handler: function (e) {
arguments[0] = $.event.fix(e);
arguments[0].type = fix;
return $.event.handle.apply(this, arguments);
}
};
function handler(e) {
e = $.event.fix(e);
e.type = fix;
return $.event.handle.call(this, e);
}
});
};
$.extend($.fn, {
validateDelegate: function (delegate, type, handler) {
return this.bind(type, function (event) {
var target = $(event.target);
if (target.is(delegate)) {
return handler.apply(target, arguments);
}
});
}
});
})(jQuery);
|