file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
icon-dropdown-menu.tsx | // Copyright (c) ppy Pty Ltd <contact@ppy.sh>. Licensed under the GNU Affero General Public License v3.0.
// See the LICENCE file in the repository root for full licence text.
import PopupMenu from 'components/popup-menu';
import * as React from 'react';
import { classWithModifiers } from 'utils/css';
import { SlateContext } from './slate-context';
export interface MenuItem {
icon: JSX.Element;
id: string;
label: string;
}
interface Props {
disabled?: boolean;
menuOptions: MenuItem[];
onSelect: (id: string) => void;
selected: string;
}
export default class IconDropdownMenu extends React.Component<Props> { | declare context: React.ContextType<typeof SlateContext>;
render(): React.ReactNode {
return (
<PopupMenu customRender={this.renderButton} direction='right'>
{() => (
<div className='simple-menu simple-menu--popup-menu-compact'>
{this.props.menuOptions.map(this.renderMenuItem)}
</div>
)}
</PopupMenu>
);
}
renderButton = (children: React.ReactNode, ref: React.RefObject<HTMLDivElement>, toggle: (event: React.MouseEvent<HTMLElement>) => void) => {
const selected: MenuItem = this.props.menuOptions.find((option) => option.id === this.props.selected) ?? this.props.menuOptions[0];
const bn = 'icon-dropdown-menu';
const mods = [];
if (this.props.disabled) {
toggle = () => { /* do nothing */ };
mods.push('disabled');
}
return (
<div
ref={ref}
className={classWithModifiers(bn, mods)}
// workaround for slatejs 'Cannot resolve a Slate point from DOM point' nonsense
contentEditable={false}
onClick={toggle}
>
{selected.icon}
{children}
</div>
);
};
renderMenuItem = (menuItem: MenuItem) => (
<button
key={menuItem.id}
className={classWithModifiers('simple-menu__item', { active: menuItem.id === this.props.selected })}
data-id={menuItem.id}
onClick={this.select}
>
<div className={classWithModifiers('simple-menu__item-icon', 'icon-dropdown-menu')}>
{menuItem.icon}
</div>
<div className='simple-menu__label'>
{menuItem.label}
</div>
</button>
);
select = (event: React.MouseEvent<HTMLElement>) => {
event.preventDefault();
const target = event.currentTarget;
if (!target) {
return;
}
this.props.onSelect(target.dataset.id ?? '');
};
} | static contextType = SlateContext; | random_line_split |
icon-dropdown-menu.tsx | // Copyright (c) ppy Pty Ltd <contact@ppy.sh>. Licensed under the GNU Affero General Public License v3.0.
// See the LICENCE file in the repository root for full licence text.
import PopupMenu from 'components/popup-menu';
import * as React from 'react';
import { classWithModifiers } from 'utils/css';
import { SlateContext } from './slate-context';
export interface MenuItem {
icon: JSX.Element;
id: string;
label: string;
}
interface Props {
disabled?: boolean;
menuOptions: MenuItem[];
onSelect: (id: string) => void;
selected: string;
}
export default class IconDropdownMenu extends React.Component<Props> {
static contextType = SlateContext;
declare context: React.ContextType<typeof SlateContext>;
render(): React.ReactNode {
return (
<PopupMenu customRender={this.renderButton} direction='right'>
{() => (
<div className='simple-menu simple-menu--popup-menu-compact'>
{this.props.menuOptions.map(this.renderMenuItem)}
</div>
)}
</PopupMenu>
);
}
renderButton = (children: React.ReactNode, ref: React.RefObject<HTMLDivElement>, toggle: (event: React.MouseEvent<HTMLElement>) => void) => {
const selected: MenuItem = this.props.menuOptions.find((option) => option.id === this.props.selected) ?? this.props.menuOptions[0];
const bn = 'icon-dropdown-menu';
const mods = [];
if (this.props.disabled) {
toggle = () => { /* do nothing */ };
mods.push('disabled');
}
return (
<div
ref={ref}
className={classWithModifiers(bn, mods)}
// workaround for slatejs 'Cannot resolve a Slate point from DOM point' nonsense
contentEditable={false}
onClick={toggle}
>
{selected.icon}
{children}
</div>
);
};
renderMenuItem = (menuItem: MenuItem) => (
<button
key={menuItem.id}
className={classWithModifiers('simple-menu__item', { active: menuItem.id === this.props.selected })}
data-id={menuItem.id}
onClick={this.select}
>
<div className={classWithModifiers('simple-menu__item-icon', 'icon-dropdown-menu')}>
{menuItem.icon}
</div>
<div className='simple-menu__label'>
{menuItem.label}
</div>
</button>
);
select = (event: React.MouseEvent<HTMLElement>) => {
event.preventDefault();
const target = event.currentTarget;
if (!target) |
this.props.onSelect(target.dataset.id ?? '');
};
}
| {
return;
} | conditional_block |
configureReducer.js | import app from './app/reducer';
import auth from './auth/reducer';
import config from './config/reducer';
import device from './device/reducer';
import intl from './intl/reducer';
import todos from './todos/reducer';
import ui from './ui/reducer';
import users from './users/reducer';
import messages from './messages/reducer';
import events from './events/reducer';
import bio from './bio/reducer';
import { SIGN_OUT } from './auth/actions';
import { combineReducers } from 'redux';
import { fieldsReducer as fields } from './lib/redux-fields';
import { firebaseReducer as firebase } from './lib/redux-firebase';
import { routerReducer as routing } from 'react-router-redux';
import { updateStateOnStorageLoad } from './configureStorage';
const resetStateOnSignOut = (reducer, initialState) => (state, action) => {
// Reset app state on sign out, stackoverflow.com/q/35622588/233902.
if (action.type === SIGN_OUT) {
// Preserve state without sensitive data.
state = {
app: state.app,
config: initialState.config,
device: initialState.device,
intl: initialState.intl,
routing: state.routing // Routing state has to be reused.
};
}
return reducer(state, action);
};
export default function | (initialState, platformReducers) {
let reducer = combineReducers({
...platformReducers,
app,
auth,
config,
device,
fields,
firebase,
intl,
routing,
todos,
ui,
users,
messages,
events,
bio
});
// The power of higher-order reducers, http://slides.com/omnidan/hor
reducer = resetStateOnSignOut(reducer, initialState);
reducer = updateStateOnStorageLoad(reducer);
return reducer;
}
| configureReducer | identifier_name |
configureReducer.js | import app from './app/reducer';
import auth from './auth/reducer';
import config from './config/reducer';
import device from './device/reducer';
import intl from './intl/reducer';
import todos from './todos/reducer';
import ui from './ui/reducer';
import users from './users/reducer';
import messages from './messages/reducer';
import events from './events/reducer';
import bio from './bio/reducer';
import { SIGN_OUT } from './auth/actions';
import { combineReducers } from 'redux';
import { fieldsReducer as fields } from './lib/redux-fields';
import { firebaseReducer as firebase } from './lib/redux-firebase';
import { routerReducer as routing } from 'react-router-redux';
import { updateStateOnStorageLoad } from './configureStorage';
const resetStateOnSignOut = (reducer, initialState) => (state, action) => {
// Reset app state on sign out, stackoverflow.com/q/35622588/233902.
if (action.type === SIGN_OUT) {
// Preserve state without sensitive data.
state = {
app: state.app,
config: initialState.config,
device: initialState.device,
intl: initialState.intl,
routing: state.routing // Routing state has to be reused.
};
}
return reducer(state, action);
};
export default function configureReducer(initialState, platformReducers) | {
let reducer = combineReducers({
...platformReducers,
app,
auth,
config,
device,
fields,
firebase,
intl,
routing,
todos,
ui,
users,
messages,
events,
bio
});
// The power of higher-order reducers, http://slides.com/omnidan/hor
reducer = resetStateOnSignOut(reducer, initialState);
reducer = updateStateOnStorageLoad(reducer);
return reducer;
} | identifier_body | |
configureReducer.js | import app from './app/reducer';
import auth from './auth/reducer';
import config from './config/reducer';
import device from './device/reducer';
import intl from './intl/reducer';
import todos from './todos/reducer';
import ui from './ui/reducer';
import users from './users/reducer';
import messages from './messages/reducer';
import events from './events/reducer';
import bio from './bio/reducer';
import { SIGN_OUT } from './auth/actions';
import { combineReducers } from 'redux';
import { fieldsReducer as fields } from './lib/redux-fields';
import { firebaseReducer as firebase } from './lib/redux-firebase';
import { routerReducer as routing } from 'react-router-redux';
import { updateStateOnStorageLoad } from './configureStorage';
const resetStateOnSignOut = (reducer, initialState) => (state, action) => {
// Reset app state on sign out, stackoverflow.com/q/35622588/233902.
if (action.type === SIGN_OUT) |
return reducer(state, action);
};
export default function configureReducer(initialState, platformReducers) {
let reducer = combineReducers({
...platformReducers,
app,
auth,
config,
device,
fields,
firebase,
intl,
routing,
todos,
ui,
users,
messages,
events,
bio
});
// The power of higher-order reducers, http://slides.com/omnidan/hor
reducer = resetStateOnSignOut(reducer, initialState);
reducer = updateStateOnStorageLoad(reducer);
return reducer;
}
| {
// Preserve state without sensitive data.
state = {
app: state.app,
config: initialState.config,
device: initialState.device,
intl: initialState.intl,
routing: state.routing // Routing state has to be reused.
};
} | conditional_block |
configureReducer.js | import app from './app/reducer';
import auth from './auth/reducer';
import config from './config/reducer';
import device from './device/reducer';
import intl from './intl/reducer';
import todos from './todos/reducer';
import ui from './ui/reducer';
import users from './users/reducer';
import messages from './messages/reducer';
import events from './events/reducer';
import bio from './bio/reducer';
import { SIGN_OUT } from './auth/actions';
import { combineReducers } from 'redux';
import { fieldsReducer as fields } from './lib/redux-fields';
import { firebaseReducer as firebase } from './lib/redux-firebase';
import { routerReducer as routing } from 'react-router-redux';
import { updateStateOnStorageLoad } from './configureStorage';
const resetStateOnSignOut = (reducer, initialState) => (state, action) => {
// Reset app state on sign out, stackoverflow.com/q/35622588/233902.
if (action.type === SIGN_OUT) {
// Preserve state without sensitive data.
state = {
app: state.app,
config: initialState.config,
device: initialState.device,
intl: initialState.intl,
routing: state.routing // Routing state has to be reused.
};
} | ...platformReducers,
app,
auth,
config,
device,
fields,
firebase,
intl,
routing,
todos,
ui,
users,
messages,
events,
bio
});
// The power of higher-order reducers, http://slides.com/omnidan/hor
reducer = resetStateOnSignOut(reducer, initialState);
reducer = updateStateOnStorageLoad(reducer);
return reducer;
} | return reducer(state, action);
};
export default function configureReducer(initialState, platformReducers) {
let reducer = combineReducers({ | random_line_split |
en-MH.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function plural(n: number): number |
export default [
'en-MH',
[['a', 'p'], ['AM', 'PM'], u],
[['AM', 'PM'], u, u],
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'], ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'],
['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
],
u,
[
['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D'],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
[
'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December'
]
],
u,
[['B', 'A'], ['BC', 'AD'], ['Before Christ', 'Anno Domini']],
0,
[6, 0],
['M/d/yy', 'MMM d, y', 'MMMM d, y', 'EEEE, MMMM d, y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1}, {0}', u, '{1} \'at\' {0}', u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'USD',
'$',
'US Dollar',
{},
'ltr',
plural
];
| {
let i = Math.floor(Math.abs(n)), v = n.toString().replace(/^[^.]*\.?/, '').length;
if (i === 1 && v === 0) return 1;
return 5;
} | identifier_body |
en-MH.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function plural(n: number): number {
let i = Math.floor(Math.abs(n)), v = n.toString().replace(/^[^.]*\.?/, '').length;
if (i === 1 && v === 0) return 1;
return 5;
}
export default [
'en-MH',
[['a', 'p'], ['AM', 'PM'], u],
[['AM', 'PM'], u, u],
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'], ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'],
['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
],
u,
[
['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D'],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], | 'October', 'November', 'December'
]
],
u,
[['B', 'A'], ['BC', 'AD'], ['Before Christ', 'Anno Domini']],
0,
[6, 0],
['M/d/yy', 'MMM d, y', 'MMMM d, y', 'EEEE, MMMM d, y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1}, {0}', u, '{1} \'at\' {0}', u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'USD',
'$',
'US Dollar',
{},
'ltr',
plural
]; | [
'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', | random_line_split |
en-MH.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// THIS CODE IS GENERATED - DO NOT MODIFY
// See angular/tools/gulp-tasks/cldr/extract.js
const u = undefined;
function | (n: number): number {
let i = Math.floor(Math.abs(n)), v = n.toString().replace(/^[^.]*\.?/, '').length;
if (i === 1 && v === 0) return 1;
return 5;
}
export default [
'en-MH',
[['a', 'p'], ['AM', 'PM'], u],
[['AM', 'PM'], u, u],
[
['S', 'M', 'T', 'W', 'T', 'F', 'S'], ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'],
['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'],
['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
],
u,
[
['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D'],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
[
'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December'
]
],
u,
[['B', 'A'], ['BC', 'AD'], ['Before Christ', 'Anno Domini']],
0,
[6, 0],
['M/d/yy', 'MMM d, y', 'MMMM d, y', 'EEEE, MMMM d, y'],
['h:mm a', 'h:mm:ss a', 'h:mm:ss a z', 'h:mm:ss a zzzz'],
['{1}, {0}', u, '{1} \'at\' {0}', u],
['.', ',', ';', '%', '+', '-', 'E', '×', '‰', '∞', 'NaN', ':'],
['#,##0.###', '#,##0%', '¤#,##0.00', '#E0'],
'USD',
'$',
'US Dollar',
{},
'ltr',
plural
];
| plural | identifier_name |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
| if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| 0
},
| conditional_block |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ra | ay_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| y_intersects_rect(r | identifier_name |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
| /*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/
| let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
}
let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
| identifier_body |
geometry.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use euclid::{Rect, Point3D};
/*
A naive port of "An Efficient and Robust Ray–Box Intersection Algorithm"
from https://www.cs.utah.edu/~awilliam/box/box.pdf
This should be cleaned up and extracted into more useful types!
*/
// Assumes rect is in the z=0 plane!
pub fn ray_intersects_rect(ray_origin: Point3D<f32>,
ray_end: Point3D<f32>,
rect: Rect<f32>) -> bool {
let mut dir = ray_end - ray_origin;
let len = ((dir.x*dir.x) + (dir.y*dir.y) + (dir.z*dir.z)).sqrt();
dir.x = dir.x / len;
dir.y = dir.y / len;
dir.z = dir.z / len;
let inv_direction = Point3D::new(1.0/dir.x, 1.0/dir.y, 1.0/dir.z);
let sign = [
if inv_direction.x < 0.0 {
1
} else {
0
},
if inv_direction.y < 0.0 {
1
} else {
0
},
if inv_direction.z < 0.0 {
1
} else {
0
},
];
let parameters = [
Point3D::new(rect.origin.x, rect.origin.y, 0.0),
Point3D::new(rect.origin.x + rect.size.width,
rect.origin.y + rect.size.height,
0.0),
];
let mut tmin = (parameters[sign[0]].x - ray_origin.x) * inv_direction.x;
let mut tmax = (parameters[1-sign[0]].x - ray_origin.x) * inv_direction.x;
let tymin = (parameters[sign[1]].y - ray_origin.y) * inv_direction.y;
let tymax = (parameters[1-sign[1]].y - ray_origin.y) * inv_direction.y;
if (tmin > tymax) || (tymin > tmax) {
return false;
}
if tymin > tmin {
tmin = tymin;
}
if tymax < tmax {
tmax = tymax;
}
let tzmin = (parameters[sign[2]].z - ray_origin.z) * inv_direction.z;
let tzmax = (parameters[1-sign[2]].z - ray_origin.z) * inv_direction.z;
if (tmin > tzmax) || (tzmin > tmax) {
return false;
}
// Don't care about where on the ray it hits...
true
/*
if tzmin > tmin {
tmin = tzmin;
}
if tzmax < tmax {
tmax = tzmax;
} | let t0 = 0.0;
let t1 = len;
(tmin < t1) && (tmax > t0)
*/
}
/*
pub fn circle_contains_rect(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let dx = (circle_center.x - rect.origin.x).max(rect.origin.x + rect.size.width - circle_center.x);
let dy = (circle_center.y - rect.origin.y).max(rect.origin.y + rect.size.height - circle_center.y);
radius * radius >= dx * dx + dy * dy
}
pub fn rect_intersects_circle(circle_center: &Point2D<f32>,
radius: f32,
rect: &Rect<f32>) -> bool {
let circle_distance_x = (circle_center.x - (rect.origin.x + rect.size.width * 0.5)).abs();
let circle_distance_y = (circle_center.y - (rect.origin.y + rect.size.height * 0.5)).abs();
if circle_distance_x > rect.size.width * 0.5 + radius {
return false
}
if circle_distance_y > rect.size.height * 0.5 + radius {
return false
}
if circle_distance_x <= rect.size.width * 0.5 {
return true;
}
if circle_distance_y <= rect.size.height * 0.5 {
return true;
}
let corner_distance_sq = (circle_distance_x - rect.size.width * 0.5) * (circle_distance_x - rect.size.width * 0.5) +
(circle_distance_y - rect.size.height * 0.5) * (circle_distance_y - rect.size.height * 0.5);
corner_distance_sq <= radius * radius
}
*/ | random_line_split | |
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && !attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_impl_item(cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public && !is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
|| !cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool { | hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) && !ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target, ..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) => !matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
}
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
} | match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind { | random_line_split |
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && !attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn | (cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public && !is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
|| !cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool {
match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind {
hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) && !ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target, ..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) => !matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
}
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
}
| check_impl_item | identifier_name |
must_use.rs | use rustc_ast::ast::Attribute;
use rustc_errors::Applicability;
use rustc_hir::def_id::{DefIdSet, LocalDefId};
use rustc_hir::{self as hir, def::Res, intravisit, QPath};
use rustc_lint::{LateContext, LintContext};
use rustc_middle::{
hir::map::Map,
lint::in_external_macro,
ty::{self, Ty},
};
use rustc_span::{sym, Span};
use clippy_utils::attrs::is_proc_macro;
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_then};
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::is_must_use_ty;
use clippy_utils::{match_def_path, must_use_attr, return_ty, trait_ref_of_method};
use super::{DOUBLE_MUST_USE, MUST_USE_CANDIDATE, MUST_USE_UNIT};
pub(super) fn check_item(cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let hir::ItemKind::Fn(ref sig, ref _generics, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && !attrs.iter().any(|a| a.has_name(sym::no_mangle)) {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this function could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_impl_item(cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) {
if let hir::ImplItemKind::Fn(ref sig, ref body_id) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if is_public && !is_proc_macro(cx.sess(), attrs) && trait_ref_of_method(cx, item.hir_id()).is_none() {
check_must_use_candidate(
cx,
sig.decl,
cx.tcx.hir().body(*body_id),
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
pub(super) fn check_trait_item(cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
if let hir::TraitItemKind::Fn(ref sig, ref eid) = item.kind {
let is_public = cx.access_levels.is_exported(item.def_id);
let fn_header_span = item.span.with_hi(sig.decl.output.span().hi());
let attrs = cx.tcx.hir().attrs(item.hir_id());
let attr = must_use_attr(attrs);
if let Some(attr) = attr {
check_needless_must_use(cx, sig.decl, item.hir_id(), item.span, fn_header_span, attr);
} else if let hir::TraitFn::Provided(eid) = *eid {
let body = cx.tcx.hir().body(eid);
if attr.is_none() && is_public && !is_proc_macro(cx.sess(), attrs) {
check_must_use_candidate(
cx,
sig.decl,
body,
item.span,
item.def_id,
item.span.with_hi(sig.decl.output.span().hi()),
"this method could have a `#[must_use]` attribute",
);
}
}
}
}
fn check_needless_must_use(
cx: &LateContext<'_>,
decl: &hir::FnDecl<'_>,
item_id: hir::HirId,
item_span: Span,
fn_header_span: Span,
attr: &Attribute,
) {
if in_external_macro(cx.sess(), item_span) {
return;
}
if returns_unit(decl) {
span_lint_and_then(
cx,
MUST_USE_UNIT,
fn_header_span,
"this unit-returning function has a `#[must_use]` attribute",
|diag| {
diag.span_suggestion(
attr.span,
"remove the attribute",
"".into(),
Applicability::MachineApplicable,
);
},
);
} else if attr.value_str().is_none() && is_must_use_ty(cx, return_ty(cx, item_id)) {
span_lint_and_help(
cx,
DOUBLE_MUST_USE,
fn_header_span,
"this function has an empty `#[must_use]` attribute, but returns a type already marked as `#[must_use]`",
None,
"either add some descriptive text or remove the attribute",
);
}
}
fn check_must_use_candidate<'tcx>(
cx: &LateContext<'tcx>,
decl: &'tcx hir::FnDecl<'_>,
body: &'tcx hir::Body<'_>,
item_span: Span,
item_id: LocalDefId,
fn_span: Span,
msg: &str,
) {
if has_mutable_arg(cx, body)
|| mutates_static(cx, body)
|| in_external_macro(cx.sess(), item_span)
|| returns_unit(decl)
|| !cx.access_levels.is_exported(item_id)
|| is_must_use_ty(cx, return_ty(cx, cx.tcx.hir().local_def_id_to_hir_id(item_id)))
{
return;
}
span_lint_and_then(cx, MUST_USE_CANDIDATE, fn_span, msg, |diag| {
if let Some(snippet) = snippet_opt(cx, fn_span) {
diag.span_suggestion(
fn_span,
"add the attribute",
format!("#[must_use] {}", snippet),
Applicability::MachineApplicable,
);
}
});
}
fn returns_unit(decl: &hir::FnDecl<'_>) -> bool {
match decl.output {
hir::FnRetTy::DefaultReturn(_) => true,
hir::FnRetTy::Return(ty) => match ty.kind {
hir::TyKind::Tup(tys) => tys.is_empty(),
hir::TyKind::Never => true,
_ => false,
},
}
}
fn has_mutable_arg(cx: &LateContext<'_>, body: &hir::Body<'_>) -> bool {
let mut tys = DefIdSet::default();
body.params.iter().any(|param| is_mutable_pat(cx, param.pat, &mut tys))
}
fn is_mutable_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, tys: &mut DefIdSet) -> bool {
if let hir::PatKind::Wild = pat.kind {
return false; // ignore `_` patterns
}
if cx.tcx.has_typeck_results(pat.hir_id.owner.to_def_id()) {
is_mutable_ty(cx, cx.tcx.typeck(pat.hir_id.owner).pat_ty(pat), pat.span, tys)
} else {
false
}
}
static KNOWN_WRAPPER_TYS: &[&[&str]] = &[&["alloc", "rc", "Rc"], &["std", "sync", "Arc"]];
fn is_mutable_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, span: Span, tys: &mut DefIdSet) -> bool {
match *ty.kind() {
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
tys.insert(adt.did) && !ty.is_freeze(cx.tcx.at(span), cx.param_env)
|| KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys)),
ty::Array(ty, _) | ty::Slice(ty) => is_mutable_ty(cx, ty, span, tys),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) | ty::Ref(_, ty, mutbl) => {
mutbl == hir::Mutability::Mut || is_mutable_ty(cx, ty, span, tys)
},
// calling something constitutes a side effect, so return true on all callables
// also never calls need not be used, so return true for them, too
_ => true,
}
}
struct StaticMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
mutates_static: bool,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for StaticMutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
use hir::ExprKind::{AddrOf, Assign, AssignOp, Call, MethodCall};
if self.mutates_static {
return;
}
match expr.kind {
Call(_, args) | MethodCall(_, _, args, _) => {
let mut tys = DefIdSet::default();
for arg in args {
if self.cx.tcx.has_typeck_results(arg.hir_id.owner.to_def_id())
&& is_mutable_ty(
self.cx,
self.cx.tcx.typeck(arg.hir_id.owner).expr_ty(arg),
arg.span,
&mut tys,
)
&& is_mutated_static(arg)
{
self.mutates_static = true;
return;
}
tys.clear();
}
},
Assign(target, ..) | AssignOp(_, target, _) | AddrOf(_, hir::Mutability::Mut, target) => {
self.mutates_static |= is_mutated_static(target);
},
_ => {},
}
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
fn is_mutated_static(e: &hir::Expr<'_>) -> bool |
fn mutates_static<'tcx>(cx: &LateContext<'tcx>, body: &'tcx hir::Body<'_>) -> bool {
let mut v = StaticMutVisitor {
cx,
mutates_static: false,
};
intravisit::walk_expr(&mut v, &body.value);
v.mutates_static
}
| {
use hir::ExprKind::{Field, Index, Path};
match e.kind {
Path(QPath::Resolved(_, path)) => !matches!(path.res, Res::Local(_)),
Path(_) => true,
Field(inner, _) | Index(inner, _) => is_mutated_static(inner),
_ => false,
}
} | identifier_body |
dst-bad-coerce1.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| }
struct Foo;
trait Bar {}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int, ..3]> = &f1;
let f3: &Fat<[uint]> = f2;
//~^ ERROR mismatched types: expected `&Fat<[uint]>`, found `&Fat<[int, ..3]>`
// With a trait.
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
let f3: &Fat<Bar> = f2;
//~^ ERROR the trait `Bar` is not implemented for the type `Foo`
} | // Attempt to change the type as well as unsizing.
struct Fat<Sized? T> {
ptr: T | random_line_split |
dst-bad-coerce1.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Attempt to change the type as well as unsizing.
struct Fat<Sized? T> {
ptr: T
}
struct | ;
trait Bar {}
pub fn main() {
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int, ..3]> = &f1;
let f3: &Fat<[uint]> = f2;
//~^ ERROR mismatched types: expected `&Fat<[uint]>`, found `&Fat<[int, ..3]>`
// With a trait.
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
let f3: &Fat<Bar> = f2;
//~^ ERROR the trait `Bar` is not implemented for the type `Foo`
}
| Foo | identifier_name |
rust-picotcp-dhcpd.rs | #![feature(globs)]
extern crate libc;
extern crate picotcp;
use picotcp::pico_ip4;
use picotcp::pico_ip6;
use picotcp::pico_dhcp_server::*;
fn main() {
/* Initialize stack */
let pico = picotcp::stack::new();
let my_ip_addr = pico_ip4::new("192.168.2.150");
let my_netmask = pico_ip4::new("255.255.255.0");
let my_ip6_addr = pico_ip6 { addr:[0xaa, 0xaa, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] }; // Constructor is still WIP...
let my_6_netmask = pico_ip6 { addr:[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0] };
let dhcp_start = pico_ip4::new("192.168.2.1");
let dhcp_end = pico_ip4::new("192.168.2.10");
let pico_dev_eth = picotcp::tap_create("tap0");
picotcp::ipv4_link_add(pico_dev_eth, my_ip_addr.clone(), my_netmask.clone());
picotcp::ipv6_link_add(pico_dev_eth, my_ip6_addr.clone(), my_6_netmask.clone());
println!("tap0: ip addr is {}", my_ip_addr.clone());
println!("tap0: ip6 addr is {}", my_ip6_addr);
let mut settings : pico_dhcp_server_setting = pico_dhcp_server_setting {
pool_start: dhcp_start.clone(),
pool_next: dhcp_start,
pool_end: dhcp_end,
lease_time: 0,
dev: pico_dev_eth,
s: 0, |
dhcp_server_initiate(&mut settings);
pico.stack_loop();
} | server_ip: my_ip_addr,
netmask: my_netmask,
flags: 0
}; | random_line_split |
rust-picotcp-dhcpd.rs | #![feature(globs)]
extern crate libc;
extern crate picotcp;
use picotcp::pico_ip4;
use picotcp::pico_ip6;
use picotcp::pico_dhcp_server::*;
fn | () {
/* Initialize stack */
let pico = picotcp::stack::new();
let my_ip_addr = pico_ip4::new("192.168.2.150");
let my_netmask = pico_ip4::new("255.255.255.0");
let my_ip6_addr = pico_ip6 { addr:[0xaa, 0xaa, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] }; // Constructor is still WIP...
let my_6_netmask = pico_ip6 { addr:[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0] };
let dhcp_start = pico_ip4::new("192.168.2.1");
let dhcp_end = pico_ip4::new("192.168.2.10");
let pico_dev_eth = picotcp::tap_create("tap0");
picotcp::ipv4_link_add(pico_dev_eth, my_ip_addr.clone(), my_netmask.clone());
picotcp::ipv6_link_add(pico_dev_eth, my_ip6_addr.clone(), my_6_netmask.clone());
println!("tap0: ip addr is {}", my_ip_addr.clone());
println!("tap0: ip6 addr is {}", my_ip6_addr);
let mut settings : pico_dhcp_server_setting = pico_dhcp_server_setting {
pool_start: dhcp_start.clone(),
pool_next: dhcp_start,
pool_end: dhcp_end,
lease_time: 0,
dev: pico_dev_eth,
s: 0,
server_ip: my_ip_addr,
netmask: my_netmask,
flags: 0
};
dhcp_server_initiate(&mut settings);
pico.stack_loop();
}
| main | identifier_name |
rust-picotcp-dhcpd.rs | #![feature(globs)]
extern crate libc;
extern crate picotcp;
use picotcp::pico_ip4;
use picotcp::pico_ip6;
use picotcp::pico_dhcp_server::*;
fn main() | {
/* Initialize stack */
let pico = picotcp::stack::new();
let my_ip_addr = pico_ip4::new("192.168.2.150");
let my_netmask = pico_ip4::new("255.255.255.0");
let my_ip6_addr = pico_ip6 { addr:[0xaa, 0xaa, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] }; // Constructor is still WIP...
let my_6_netmask = pico_ip6 { addr:[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0] };
let dhcp_start = pico_ip4::new("192.168.2.1");
let dhcp_end = pico_ip4::new("192.168.2.10");
let pico_dev_eth = picotcp::tap_create("tap0");
picotcp::ipv4_link_add(pico_dev_eth, my_ip_addr.clone(), my_netmask.clone());
picotcp::ipv6_link_add(pico_dev_eth, my_ip6_addr.clone(), my_6_netmask.clone());
println!("tap0: ip addr is {}", my_ip_addr.clone());
println!("tap0: ip6 addr is {}", my_ip6_addr);
let mut settings : pico_dhcp_server_setting = pico_dhcp_server_setting {
pool_start: dhcp_start.clone(),
pool_next: dhcp_start,
pool_end: dhcp_end,
lease_time: 0,
dev: pico_dev_eth,
s: 0,
server_ip: my_ip_addr,
netmask: my_netmask,
flags: 0
};
dhcp_server_initiate(&mut settings);
pico.stack_loop();
} | identifier_body | |
configuration.test.js | 'use strict';
const chai = require('chai'),
expect = chai.expect,
config = require('../config/config'),
Support = require('./support'),
dialect = Support.getTestDialect(),
Sequelize = Support.Sequelize,
fs = require('fs'),
path = require('path');
if (dialect === 'sqlite') {
var sqlite3 = require('sqlite3'); // eslint-disable-line
}
describe(Support.getTestDialectTeaser('Configuration'), () => {
describe('Connections problems should fail with a nice message', () => {
it('when we don\'t have the correct server details', () => {
const seq = new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { storage: '/path/to/no/where/land', logging: false, host: '0.0.0.1', port: config[dialect].port, dialect });
if (dialect === 'sqlite') {
// SQLite doesn't have a breakdown of error codes, so we are unable to discern between the different types of errors.
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionError, 'SQLITE_CANTOPEN: unable to open database file');
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith([Sequelize.HostNotReachableError, Sequelize.InvalidConnectionError]);
});
it('when we don\'t have the correct login information', () => {
if (dialect === 'mssql') {
// NOTE: Travis seems to be having trouble with this test against the
// AWS instance. Works perfectly fine on a local setup. | if (dialect === 'sqlite') {
// SQLite doesn't require authentication and `select 1 as hello` is a valid query, so this should be fulfilled not rejected for it.
return expect(seq.query('select 1 as hello')).to.eventually.be.fulfilled;
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionRefusedError, 'connect ECONNREFUSED');
});
it('when we don\'t have a valid dialect.', () => {
expect(() => {
new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { host: '0.0.0.1', port: config[dialect].port, dialect: 'some-fancy-dialect' });
}).to.throw(Error, 'The dialect some-fancy-dialect is not supported. Supported dialects: mssql, mariadb, mysql, postgres, and sqlite.');
});
});
describe('Instantiation with arguments', () => {
if (dialect === 'sqlite') {
it('should respect READONLY / READWRITE connection modes', () => {
const p = path.join(__dirname, '../tmp', 'foo.sqlite');
const createTableFoo = 'CREATE TABLE foo (faz TEXT);';
const createTableBar = 'CREATE TABLE bar (baz TEXT);';
const testAccess = Sequelize.Promise.method(() => {
return Sequelize.Promise.promisify(fs.access)(p, fs.R_OK | fs.W_OK);
});
return Sequelize.Promise.promisify(fs.unlink)(p)
.catch(err => {
expect(err.code).to.equal('ENOENT');
})
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
expect(sequelizeReadOnly.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READONLY);
expect(sequelizeReadWrite.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READWRITE);
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file'),
sequelizeReadWrite.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file')
);
})
.then(() => {
// By default, sqlite creates a connection that's READWRITE | CREATE
const sequelize = new Sequelize('sqlite://foo', {
storage: p
});
return sequelize.query(createTableFoo);
})
.then(testAccess)
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableBar)
.should.be.rejectedWith(Error, 'SQLITE_READONLY: attempt to write a readonly database'),
sequelizeReadWrite.query(createTableBar)
);
})
.finally(() => {
return Sequelize.Promise.promisify(fs.unlink)(p);
});
});
}
});
}); | expect(true).to.be.true;
return;
}
const seq = new Sequelize(config[dialect].database, config[dialect].username, 'fakepass123', { logging: false, host: config[dialect].host, port: 1, dialect }); | random_line_split |
configuration.test.js | 'use strict';
const chai = require('chai'),
expect = chai.expect,
config = require('../config/config'),
Support = require('./support'),
dialect = Support.getTestDialect(),
Sequelize = Support.Sequelize,
fs = require('fs'),
path = require('path');
if (dialect === 'sqlite') |
describe(Support.getTestDialectTeaser('Configuration'), () => {
describe('Connections problems should fail with a nice message', () => {
it('when we don\'t have the correct server details', () => {
const seq = new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { storage: '/path/to/no/where/land', logging: false, host: '0.0.0.1', port: config[dialect].port, dialect });
if (dialect === 'sqlite') {
// SQLite doesn't have a breakdown of error codes, so we are unable to discern between the different types of errors.
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionError, 'SQLITE_CANTOPEN: unable to open database file');
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith([Sequelize.HostNotReachableError, Sequelize.InvalidConnectionError]);
});
it('when we don\'t have the correct login information', () => {
if (dialect === 'mssql') {
// NOTE: Travis seems to be having trouble with this test against the
// AWS instance. Works perfectly fine on a local setup.
expect(true).to.be.true;
return;
}
const seq = new Sequelize(config[dialect].database, config[dialect].username, 'fakepass123', { logging: false, host: config[dialect].host, port: 1, dialect });
if (dialect === 'sqlite') {
// SQLite doesn't require authentication and `select 1 as hello` is a valid query, so this should be fulfilled not rejected for it.
return expect(seq.query('select 1 as hello')).to.eventually.be.fulfilled;
}
return expect(seq.query('select 1 as hello')).to.eventually.be.rejectedWith(Sequelize.ConnectionRefusedError, 'connect ECONNREFUSED');
});
it('when we don\'t have a valid dialect.', () => {
expect(() => {
new Sequelize(config[dialect].database, config[dialect].username, config[dialect].password, { host: '0.0.0.1', port: config[dialect].port, dialect: 'some-fancy-dialect' });
}).to.throw(Error, 'The dialect some-fancy-dialect is not supported. Supported dialects: mssql, mariadb, mysql, postgres, and sqlite.');
});
});
describe('Instantiation with arguments', () => {
if (dialect === 'sqlite') {
it('should respect READONLY / READWRITE connection modes', () => {
const p = path.join(__dirname, '../tmp', 'foo.sqlite');
const createTableFoo = 'CREATE TABLE foo (faz TEXT);';
const createTableBar = 'CREATE TABLE bar (baz TEXT);';
const testAccess = Sequelize.Promise.method(() => {
return Sequelize.Promise.promisify(fs.access)(p, fs.R_OK | fs.W_OK);
});
return Sequelize.Promise.promisify(fs.unlink)(p)
.catch(err => {
expect(err.code).to.equal('ENOENT');
})
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
expect(sequelizeReadOnly.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READONLY);
expect(sequelizeReadWrite.config.dialectOptions.mode).to.equal(sqlite3.OPEN_READWRITE);
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file'),
sequelizeReadWrite.query(createTableFoo)
.should.be.rejectedWith(Error, 'SQLITE_CANTOPEN: unable to open database file')
);
})
.then(() => {
// By default, sqlite creates a connection that's READWRITE | CREATE
const sequelize = new Sequelize('sqlite://foo', {
storage: p
});
return sequelize.query(createTableFoo);
})
.then(testAccess)
.then(() => {
const sequelizeReadOnly = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READONLY
}
});
const sequelizeReadWrite = new Sequelize('sqlite://foo', {
storage: p,
dialectOptions: {
mode: sqlite3.OPEN_READWRITE
}
});
return Sequelize.Promise.join(
sequelizeReadOnly.query(createTableBar)
.should.be.rejectedWith(Error, 'SQLITE_READONLY: attempt to write a readonly database'),
sequelizeReadWrite.query(createTableBar)
);
})
.finally(() => {
return Sequelize.Promise.promisify(fs.unlink)(p);
});
});
}
});
});
| {
var sqlite3 = require('sqlite3'); // eslint-disable-line
} | conditional_block |
meson_post_install.py | #!/usr/bin/env python3
import os
import pathlib
import sysconfig
import compileall
import subprocess
| destdir = os.environ.get('DESTDIR', '')
if not destdir:
print('Compiling gsettings schemas...')
subprocess.call(['glib-compile-schemas', str(datadir / 'glib-2.0' / 'schemas')])
print('Updating icon cache...')
subprocess.call(['gtk-update-icon-cache', '-qtf', str(datadir / 'icons' / 'hicolor')])
print('Updating desktop database...')
subprocess.call(['update-desktop-database', '-q', str(datadir / 'applications')])
print('Compiling python bytecode...')
moduledir = sysconfig.get_path('purelib', vars={'base': str(prefix)})
compileall.compile_dir(destdir + os.path.join(moduledir, 'eidisi'), optimize=2) | prefix = pathlib.Path(os.environ.get('MESON_INSTALL_PREFIX', '/usr/local'))
datadir = prefix / 'share' | random_line_split |
meson_post_install.py | #!/usr/bin/env python3
import os
import pathlib
import sysconfig
import compileall
import subprocess
prefix = pathlib.Path(os.environ.get('MESON_INSTALL_PREFIX', '/usr/local'))
datadir = prefix / 'share'
destdir = os.environ.get('DESTDIR', '')
if not destdir:
|
print('Compiling python bytecode...')
moduledir = sysconfig.get_path('purelib', vars={'base': str(prefix)})
compileall.compile_dir(destdir + os.path.join(moduledir, 'eidisi'), optimize=2)
| print('Compiling gsettings schemas...')
subprocess.call(['glib-compile-schemas', str(datadir / 'glib-2.0' / 'schemas')])
print('Updating icon cache...')
subprocess.call(['gtk-update-icon-cache', '-qtf', str(datadir / 'icons' / 'hicolor')])
print('Updating desktop database...')
subprocess.call(['update-desktop-database', '-q', str(datadir / 'applications')]) | conditional_block |
test_cloud_memorystore_system.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Memorystore operators"""
import os
from urllib.parse import urlparse
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL)
GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_MEMORYSTORE)
class CloudMemorystoreSystemTest(GoogleSystemTest):
""" | @provide_gcp_context(GCP_MEMORYSTORE)
def setUp(self):
super().setUp()
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_redis(self):
self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_memcached(self):
self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown() | System tests for Google Cloud Memorystore operators
It use a real service.
"""
| random_line_split |
test_cloud_memorystore_system.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Memorystore operators"""
import os
from urllib.parse import urlparse
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL)
GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_MEMORYSTORE)
class CloudMemorystoreSystemTest(GoogleSystemTest):
"""
System tests for Google Cloud Memorystore operators
It use a real service.
"""
@provide_gcp_context(GCP_MEMORYSTORE)
def setUp(self):
super().setUp()
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_redis(self):
self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def | (self):
self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
| test_run_example_dag_memorystore_memcached | identifier_name |
test_cloud_memorystore_system.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Memorystore operators"""
import os
from urllib.parse import urlparse
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL)
GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_MEMORYSTORE)
class CloudMemorystoreSystemTest(GoogleSystemTest):
| """
System tests for Google Cloud Memorystore operators
It use a real service.
"""
@provide_gcp_context(GCP_MEMORYSTORE)
def setUp(self):
super().setUp()
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_redis(self):
self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def test_run_example_dag_memorystore_memcached(self):
self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_MEMORYSTORE)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown() | identifier_body | |
huge-largest-array.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
use std::mem::size_of;
#[cfg(target_pointer_width = "32")]
pub fn main() {
assert_eq!(size_of::<[u8; (1 << 31) - 1]>(), (1 << 31) - 1);
}
#[cfg(target_pointer_width = "64")]
pub fn main() | {
assert_eq!(size_of::<[u8; (1 << 47) - 1]>(), (1 << 47) - 1);
} | identifier_body | |
huge-largest-array.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
use std::mem::size_of;
#[cfg(target_pointer_width = "32")]
pub fn main() {
assert_eq!(size_of::<[u8; (1 << 31) - 1]>(), (1 << 31) - 1);
}
#[cfg(target_pointer_width = "64")]
pub fn | () {
assert_eq!(size_of::<[u8; (1 << 47) - 1]>(), (1 << 47) - 1);
}
| main | identifier_name |
huge-largest-array.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed | // except according to those terms.
// run-pass
use std::mem::size_of;
#[cfg(target_pointer_width = "32")]
pub fn main() {
assert_eq!(size_of::<[u8; (1 << 31) - 1]>(), (1 << 31) - 1);
}
#[cfg(target_pointer_width = "64")]
pub fn main() {
assert_eq!(size_of::<[u8; (1 << 47) - 1]>(), (1 << 47) - 1);
} | random_line_split | |
subselect.py | import re
from unittest import TestCase
def mark_quoted_strings(sql):
"""Mark all quoted strings in the SOQL by '@' and get them as params,
with respect to all escaped backslashes and quotes.
"""
pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
bs_pattern = re.compile(r"\\([\\'])")
out_pattern = re.compile("^[-!()*+,.:<=>\w\s]*$")
start = 0
out = []
params = []
for match in pm_pattern.finditer(sql):
out.append(sql[start:match.start()])
assert out_pattern.match(sql[start:match.start()])
params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
start = match.end()
out.append(sql[start:])
assert out_pattern.match(sql[start:])
return '@'.join(out), params
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
assert len(parts) == len(params) + 1
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
def find_closing_parenthesis(sql, startpos):
"""Find the pair of opening and closing parentheses.
Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
"""
pattern = re.compile(r'[()]')
level = 0
opening = 0
for match in pattern.finditer(sql, startpos):
par = match.group()
if par == '(':
if level == 0:
opening = match.start()
level += 1
if par == ')':
|
def transform_except_subselect(sql, func):
"""Call a func on every part of SOQL query except nested (SELECT ...)"""
start = 0
out = []
while sql.find('(SELECT', start) > -1:
pos = sql.find('(SELECT', start)
out.append(func(sql[start:pos]))
start, pos = find_closing_parenthesis(sql, pos)
out.append(sql[start:pos])
start = pos
out.append(func(sql[start:len(sql)]))
return ''.join(out)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()',13), (13,15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()',1)
def test_subselect(self):
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
func = lambda sql: '*transfomed*'
expected = "*transfomed*(SELECT x FROM y)*transfomed*(SELECT p FROM q WHERE r = %s)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
def test_nested_subselect(self):
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
func = lambda x: '*transfomed*'
expected = "*transfomed*(SELECT x, (SELECT p FROM q) FROM y)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d")
| assert level > 0
level -= 1
if level == 0:
closing = match.end()
return opening, closing | conditional_block |
subselect.py | import re
from unittest import TestCase
def | (sql):
"""Mark all quoted strings in the SOQL by '@' and get them as params,
with respect to all escaped backslashes and quotes.
"""
pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
bs_pattern = re.compile(r"\\([\\'])")
out_pattern = re.compile("^[-!()*+,.:<=>\w\s]*$")
start = 0
out = []
params = []
for match in pm_pattern.finditer(sql):
out.append(sql[start:match.start()])
assert out_pattern.match(sql[start:match.start()])
params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
start = match.end()
out.append(sql[start:])
assert out_pattern.match(sql[start:])
return '@'.join(out), params
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
assert len(parts) == len(params) + 1
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
def find_closing_parenthesis(sql, startpos):
"""Find the pair of opening and closing parentheses.
Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
"""
pattern = re.compile(r'[()]')
level = 0
opening = 0
for match in pattern.finditer(sql, startpos):
par = match.group()
if par == '(':
if level == 0:
opening = match.start()
level += 1
if par == ')':
assert level > 0
level -= 1
if level == 0:
closing = match.end()
return opening, closing
def transform_except_subselect(sql, func):
"""Call a func on every part of SOQL query except nested (SELECT ...)"""
start = 0
out = []
while sql.find('(SELECT', start) > -1:
pos = sql.find('(SELECT', start)
out.append(func(sql[start:pos]))
start, pos = find_closing_parenthesis(sql, pos)
out.append(sql[start:pos])
start = pos
out.append(func(sql[start:len(sql)]))
return ''.join(out)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()',13), (13,15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()',1)
def test_subselect(self):
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
func = lambda sql: '*transfomed*'
expected = "*transfomed*(SELECT x FROM y)*transfomed*(SELECT p FROM q WHERE r = %s)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
def test_nested_subselect(self):
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
func = lambda x: '*transfomed*'
expected = "*transfomed*(SELECT x, (SELECT p FROM q) FROM y)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d")
| mark_quoted_strings | identifier_name |
subselect.py | import re
from unittest import TestCase
def mark_quoted_strings(sql):
"""Mark all quoted strings in the SOQL by '@' and get them as params,
with respect to all escaped backslashes and quotes.
"""
pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
bs_pattern = re.compile(r"\\([\\'])")
out_pattern = re.compile("^[-!()*+,.:<=>\w\s]*$")
start = 0
out = []
params = []
for match in pm_pattern.finditer(sql):
out.append(sql[start:match.start()])
assert out_pattern.match(sql[start:match.start()])
params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
start = match.end()
out.append(sql[start:])
assert out_pattern.match(sql[start:])
return '@'.join(out), params
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
assert len(parts) == len(params) + 1
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
| Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
"""
pattern = re.compile(r'[()]')
level = 0
opening = 0
for match in pattern.finditer(sql, startpos):
par = match.group()
if par == '(':
if level == 0:
opening = match.start()
level += 1
if par == ')':
assert level > 0
level -= 1
if level == 0:
closing = match.end()
return opening, closing
def transform_except_subselect(sql, func):
"""Call a func on every part of SOQL query except nested (SELECT ...)"""
start = 0
out = []
while sql.find('(SELECT', start) > -1:
pos = sql.find('(SELECT', start)
out.append(func(sql[start:pos]))
start, pos = find_closing_parenthesis(sql, pos)
out.append(sql[start:pos])
start = pos
out.append(func(sql[start:len(sql)]))
return ''.join(out)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()',13), (13,15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()',1)
def test_subselect(self):
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
func = lambda sql: '*transfomed*'
expected = "*transfomed*(SELECT x FROM y)*transfomed*(SELECT p FROM q WHERE r = %s)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
def test_nested_subselect(self):
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
func = lambda x: '*transfomed*'
expected = "*transfomed*(SELECT x, (SELECT p FROM q) FROM y)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d") | def find_closing_parenthesis(sql, startpos):
"""Find the pair of opening and closing parentheses.
| random_line_split |
subselect.py | import re
from unittest import TestCase
def mark_quoted_strings(sql):
"""Mark all quoted strings in the SOQL by '@' and get them as params,
with respect to all escaped backslashes and quotes.
"""
pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
bs_pattern = re.compile(r"\\([\\'])")
out_pattern = re.compile("^[-!()*+,.:<=>\w\s]*$")
start = 0
out = []
params = []
for match in pm_pattern.finditer(sql):
out.append(sql[start:match.start()])
assert out_pattern.match(sql[start:match.start()])
params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
start = match.end()
out.append(sql[start:])
assert out_pattern.match(sql[start:])
return '@'.join(out), params
def subst_quoted_strings(sql, params):
"""Reverse operation to mark_quoted_strings - substitutes '@' by params.
"""
parts = sql.split('@')
assert len(parts) == len(params) + 1
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
def find_closing_parenthesis(sql, startpos):
"""Find the pair of opening and closing parentheses.
Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
"""
pattern = re.compile(r'[()]')
level = 0
opening = 0
for match in pattern.finditer(sql, startpos):
par = match.group()
if par == '(':
if level == 0:
opening = match.start()
level += 1
if par == ')':
assert level > 0
level -= 1
if level == 0:
closing = match.end()
return opening, closing
def transform_except_subselect(sql, func):
"""Call a func on every part of SOQL query except nested (SELECT ...)"""
start = 0
out = []
while sql.find('(SELECT', start) > -1:
pos = sql.find('(SELECT', start)
out.append(func(sql[start:pos]))
start, pos = find_closing_parenthesis(sql, pos)
out.append(sql[start:pos])
start = pos
out.append(func(sql[start:len(sql)]))
return ''.join(out)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()',13), (13,15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()',1)
def test_subselect(self):
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
func = lambda sql: '*transfomed*'
expected = "*transfomed*(SELECT x FROM y)*transfomed*(SELECT p FROM q WHERE r = %s)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected)
def test_nested_subselect(self):
|
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d")
| sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
func = lambda x: '*transfomed*'
expected = "*transfomed*(SELECT x, (SELECT p FROM q) FROM y)*transfomed*"
self.assertEqual(transform_except_subselect(sql, func), expected) | identifier_body |
timeline.js | function(head, req) {
var ddoc = this,
path = require("vendor/couchapp/lib/path").init(req);
send('{"dateTimeFormat": "iso8601", "events": [');
var sep = "";
while(row = getRow()) {
var doc = row.doc;
var coordinates = null;
if (doc.Latitude && doc.Longitude) {
coordinates = [parseFloat(doc.Longitude), parseFloat(doc.Latitude)];
var date_parts = doc.Date.split(" ");
var summary = '';
if (doc.Summary && doc.Summary.length <= 350) {
summary = doc.Summary;
} else |
var event = {
start: date_parts[0] + "T" + date_parts[1],
title: doc.Type,
description: summary,
link: path.show("report", doc._id),
coordinates: coordinates,
id: doc._id,
long_title: doc.Title,
affiliation: doc.Affiliation,
category: doc.Category
}
send(sep + toJSON(event));
sep = ",";
}
}
return(']}');
}
| {
summary = (doc.Summary.replace(/<(.|\n)*?>/g, '').substring(0,350) + '...');
} | conditional_block |
timeline.js | function(head, req) {
var ddoc = this,
path = require("vendor/couchapp/lib/path").init(req);
send('{"dateTimeFormat": "iso8601", "events": [');
var sep = "";
while(row = getRow()) {
var doc = row.doc; |
if (doc.Latitude && doc.Longitude) {
coordinates = [parseFloat(doc.Longitude), parseFloat(doc.Latitude)];
var date_parts = doc.Date.split(" ");
var summary = '';
if (doc.Summary && doc.Summary.length <= 350) {
summary = doc.Summary;
} else {
summary = (doc.Summary.replace(/<(.|\n)*?>/g, '').substring(0,350) + '...');
}
var event = {
start: date_parts[0] + "T" + date_parts[1],
title: doc.Type,
description: summary,
link: path.show("report", doc._id),
coordinates: coordinates,
id: doc._id,
long_title: doc.Title,
affiliation: doc.Affiliation,
category: doc.Category
}
send(sep + toJSON(event));
sep = ",";
}
}
return(']}');
} |
var coordinates = null; | random_line_split |
delete.py | from logbook import Logger
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from .. import db
from datetime import timedelta, datetime
log = Logger(__name__)
def | ():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_query_count(conn)
db.del_inactive_queries(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_INACTIVE_QUERIES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_INACTIVE_QUERIES_LIMIT']
)
after = db.get_query_count(conn)
log.info('delete inactive queries, from {} to {}, deleted {}', before, after, before - after)
return before - after
def del_old_changes():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_change_count(conn)
db.del_old_changes(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_OLD_CHANGES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_OLD_CHANGES_LIMIT']
)
after = db.get_change_count(conn)
log.info('delete old changes, from {} to {}, deleted {}', before, after, before - after)
return before - after
| del_inactive_queries | identifier_name |
delete.py | from logbook import Logger
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from .. import db
from datetime import timedelta, datetime
log = Logger(__name__)
def del_inactive_queries():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_query_count(conn)
db.del_inactive_queries( | conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_INACTIVE_QUERIES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_INACTIVE_QUERIES_LIMIT']
)
after = db.get_query_count(conn)
log.info('delete inactive queries, from {} to {}, deleted {}', before, after, before - after)
return before - after
def del_old_changes():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_change_count(conn)
db.del_old_changes(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_OLD_CHANGES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_OLD_CHANGES_LIMIT']
)
after = db.get_change_count(conn)
log.info('delete old changes, from {} to {}, deleted {}', before, after, before - after)
return before - after | random_line_split | |
delete.py | from logbook import Logger
from ..core.local import get_current_conf
from ..core.connection import autoccontext
from .. import db
from datetime import timedelta, datetime
log = Logger(__name__)
def del_inactive_queries():
conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_query_count(conn)
db.del_inactive_queries(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_INACTIVE_QUERIES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_INACTIVE_QUERIES_LIMIT']
)
after = db.get_query_count(conn)
log.info('delete inactive queries, from {} to {}, deleted {}', before, after, before - after)
return before - after
def del_old_changes():
| conf = get_current_conf()
with autoccontext(commit=True) as conn:
before = db.get_change_count(conn)
db.del_old_changes(
conn,
before=datetime.utcnow() - timedelta(days=conf['TORABOT_DELETE_OLD_CHANGES_BEFORE_DAYS']),
limit=conf['TORABOT_DELETE_OLD_CHANGES_LIMIT']
)
after = db.get_change_count(conn)
log.info('delete old changes, from {} to {}, deleted {}', before, after, before - after)
return before - after | identifier_body | |
jenkins_discovery.py | #!/usr/bin/env python
# Jenkins server UDP based discovery
# Based on original work by Gordon McGregor gordon.mcgregor@verilab.com
#
# Author Aske Olsson aske.olsson@switch-gears.dk
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.application.internet import MulticastServer
from twisted.internet import task
import xml.etree.ElementTree as ET
MULTICAST_ADDR = "239.77.124.213"
UDP_PORT = 33848
DELAY = 60
class JenkinsDiscovery(DatagramProtocol):
def __init__(self):
self.instances = {}
self.ping_str = 'Hello Jenkins, Where are you'
def startProtocol(self):
# print 'Host discovery: listening'
self.transport.joinGroup(MULTICAST_ADDR)
def refreshList(self):
# print 'Refreshing list...'
self.instances = {}
self.ping()
def ping(self):
self.transport.write(self.ping_str, (MULTICAST_ADDR, UDP_PORT))
| # print datagram
try:
xml = str.lower(datagram)
root = ET.fromstring(xml)
# Check if we received a datagram from another Jenkins/Hudson instance
if root.tag == 'hudson' or root.tag == 'jenkins':
for url in root.findall('url'):
# print "Jenkins url:", url.text
if not url.text in self.instances:
self.instances[url.text] = address[0]
# print "Jenkins IP:", address[0]
print "Found instances:"
for k,v in self.instances.iteritems():
print "%s Running @ %s" %(k,v)
except:
# Twisted and xml parser seems to through some Unhandled error
pass
if __name__ == '__main__':
discovery = JenkinsDiscovery()
reactor.listenMulticast(UDP_PORT, discovery)
refresh = task.LoopingCall(discovery.refreshList)
refresh.start(DELAY)
reactor.run() | def datagramReceived(self, datagram, address): | random_line_split |
jenkins_discovery.py | #!/usr/bin/env python
# Jenkins server UDP based discovery
# Based on original work by Gordon McGregor gordon.mcgregor@verilab.com
#
# Author Aske Olsson aske.olsson@switch-gears.dk
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.application.internet import MulticastServer
from twisted.internet import task
import xml.etree.ElementTree as ET
MULTICAST_ADDR = "239.77.124.213"
UDP_PORT = 33848
DELAY = 60
class JenkinsDiscovery(DatagramProtocol):
def __init__(self):
self.instances = {}
self.ping_str = 'Hello Jenkins, Where are you'
def startProtocol(self):
# print 'Host discovery: listening'
self.transport.joinGroup(MULTICAST_ADDR)
def refreshList(self):
# print 'Refreshing list...'
self.instances = {}
self.ping()
def ping(self):
self.transport.write(self.ping_str, (MULTICAST_ADDR, UDP_PORT))
def datagramReceived(self, datagram, address):
# print datagram
try:
xml = str.lower(datagram)
root = ET.fromstring(xml)
# Check if we received a datagram from another Jenkins/Hudson instance
if root.tag == 'hudson' or root.tag == 'jenkins':
for url in root.findall('url'):
# print "Jenkins url:", url.text
if not url.text in self.instances:
self.instances[url.text] = address[0]
# print "Jenkins IP:", address[0]
print "Found instances:"
for k,v in self.instances.iteritems():
print "%s Running @ %s" %(k,v)
except:
# Twisted and xml parser seems to through some Unhandled error
pass
if __name__ == '__main__':
| discovery = JenkinsDiscovery()
reactor.listenMulticast(UDP_PORT, discovery)
refresh = task.LoopingCall(discovery.refreshList)
refresh.start(DELAY)
reactor.run() | conditional_block | |
jenkins_discovery.py | #!/usr/bin/env python
# Jenkins server UDP based discovery
# Based on original work by Gordon McGregor gordon.mcgregor@verilab.com
#
# Author Aske Olsson aske.olsson@switch-gears.dk
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.application.internet import MulticastServer
from twisted.internet import task
import xml.etree.ElementTree as ET
MULTICAST_ADDR = "239.77.124.213"
UDP_PORT = 33848
DELAY = 60
class JenkinsDiscovery(DatagramProtocol):
def __init__(self):
self.instances = {}
self.ping_str = 'Hello Jenkins, Where are you'
def startProtocol(self):
# print 'Host discovery: listening'
|
def refreshList(self):
# print 'Refreshing list...'
self.instances = {}
self.ping()
def ping(self):
self.transport.write(self.ping_str, (MULTICAST_ADDR, UDP_PORT))
def datagramReceived(self, datagram, address):
# print datagram
try:
xml = str.lower(datagram)
root = ET.fromstring(xml)
# Check if we received a datagram from another Jenkins/Hudson instance
if root.tag == 'hudson' or root.tag == 'jenkins':
for url in root.findall('url'):
# print "Jenkins url:", url.text
if not url.text in self.instances:
self.instances[url.text] = address[0]
# print "Jenkins IP:", address[0]
print "Found instances:"
for k,v in self.instances.iteritems():
print "%s Running @ %s" %(k,v)
except:
# Twisted and xml parser seems to through some Unhandled error
pass
if __name__ == '__main__':
discovery = JenkinsDiscovery()
reactor.listenMulticast(UDP_PORT, discovery)
refresh = task.LoopingCall(discovery.refreshList)
refresh.start(DELAY)
reactor.run()
| self.transport.joinGroup(MULTICAST_ADDR) | identifier_body |
jenkins_discovery.py | #!/usr/bin/env python
# Jenkins server UDP based discovery
# Based on original work by Gordon McGregor gordon.mcgregor@verilab.com
#
# Author Aske Olsson aske.olsson@switch-gears.dk
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.application.internet import MulticastServer
from twisted.internet import task
import xml.etree.ElementTree as ET
MULTICAST_ADDR = "239.77.124.213"
UDP_PORT = 33848
DELAY = 60
class JenkinsDiscovery(DatagramProtocol):
def | (self):
self.instances = {}
self.ping_str = 'Hello Jenkins, Where are you'
def startProtocol(self):
# print 'Host discovery: listening'
self.transport.joinGroup(MULTICAST_ADDR)
def refreshList(self):
# print 'Refreshing list...'
self.instances = {}
self.ping()
def ping(self):
self.transport.write(self.ping_str, (MULTICAST_ADDR, UDP_PORT))
def datagramReceived(self, datagram, address):
# print datagram
try:
xml = str.lower(datagram)
root = ET.fromstring(xml)
# Check if we received a datagram from another Jenkins/Hudson instance
if root.tag == 'hudson' or root.tag == 'jenkins':
for url in root.findall('url'):
# print "Jenkins url:", url.text
if not url.text in self.instances:
self.instances[url.text] = address[0]
# print "Jenkins IP:", address[0]
print "Found instances:"
for k,v in self.instances.iteritems():
print "%s Running @ %s" %(k,v)
except:
# Twisted and xml parser seems to through some Unhandled error
pass
if __name__ == '__main__':
discovery = JenkinsDiscovery()
reactor.listenMulticast(UDP_PORT, discovery)
refresh = task.LoopingCall(discovery.refreshList)
refresh.start(DELAY)
reactor.run()
| __init__ | identifier_name |
brightcovePlayer.py | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
|
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl];
| env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env | identifier_body |
brightcovePlayer.py | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com") |
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl]; | envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response | random_line_split |
brightcovePlayer.py | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def | (const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl];
| play | identifier_name |
brightcovePlayer.py | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
streamUrl = rtmpdata['FLVFullLengthURL'];
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
|
streamName = streamName + rtmpdata['displayName']
return [streamName, streamUrl];
| streamUrl = item['defaultURL'] | conditional_block |
logger_setup.py | '''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\
(pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
|
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
) | file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler) | conditional_block |
logger_setup.py | '''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
|
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
) | ''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\
(pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict | identifier_body |
logger_setup.py | '''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
''' Add custom fields to each record. ''' | (pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
) | now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\ | random_line_split |
logger_setup.py | '''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level
app.logger.setLevel(app.config['LOG_LEVEL'])
# Remove the stdout handler
app.logger.removeHandler(app.logger.handlers[0])
TZ = pytz.timezone(app.config['TIMEZONE'])
def | (_, level, event_dict):
''' Add custom fields to each record. '''
now = dt.datetime.now()
#event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['timestamp'] = TZ.localize(now, True).astimezone\
(pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
event_dict['level'] = level
if request:
try:
#event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
event_dict['ip_address'] = request.headers.get('X-Forwarded-For', request.remote_addr)
#event_dict['ip_address'] = request.header.get('X-Real-IP')
except:
event_dict['ip_address'] = 'unknown'
return event_dict
# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
maxBytes=app.config['LOG_MAXBYTES'],
backupCount=app.config['LOG_BACKUPS'],
mode='a',
encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
# Wrap the application logger with structlog to format the output
logger = wrap_logger(
app.logger,
processors=[
add_fields,
JSONRenderer(indent=None)
]
) | add_fields | identifier_name |
beamsearch_runner.py | from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int, |
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class BeamSearchRunner(BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank))
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
return None
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)] | all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None: | random_line_split |
beamsearch_runner.py | from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int,
all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None:
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class BeamSearchRunner(BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank))
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
|
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
| return None | identifier_body |
beamsearch_runner.py | from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int,
all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None:
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class BeamSearchRunner(BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
|
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
return None
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
| raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank)) | conditional_block |
beamsearch_runner.py | from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int,
all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None:
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class | (BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank))
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
return None
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
This means there is max_rank output series where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
| BeamSearchRunner | identifier_name |
pgconf.py | #!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
|
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it
into our dictionary.
'''
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename)
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.
incfilename = value.strip("'")
if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def int(self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
"""
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.')
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].index(unit)
u = (1, 1000, 60*1000)[u]
try:
m = u
t = re.split('(ms|s|min|h|d)', self.value)
if len(t) > 1:
i = ['ms', 's', 'min', 'h', 'd'].index(t[1])
m = (1, 1000, 60*1000, 3600*1000, 24*3600*1000)[i]
return int(t[0], 0) * m / u
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer with '
'optional suffix ms, s, min, h, or d '
'(%s is default).' % unit)
def ConfigurationError(self, msg):
msg = '(%s = %s) %s' % (self.name, self.value, msg)
return ConfigurationError(msg, self.filename, self.linenumber)
class ConfigurationError(EnvironmentError):
def __init__(self, msg, filename='', linenumber=0):
self.msg = msg
self.filename = filename
self.linenumber = linenumber
if linenumber:
msg = '%s line %d: %s' % (filename, linenumber, msg)
elif filename:
msg = '%s: %s' % (filename, msg)
EnvironmentError.__init__(self, msg)
def __str__(self):
return self.message
#-------------------------------- private --------------------------------
_setpat = re.compile(r"\s*(\w+)\s*(=\s*)?" # name [=]
'('
r"[eE]?('((\\.)?[^\\']*)*')+|" # single-quoted string or
r"[^\s#']*" # token ending at whitespace or comment
')')
_escapepat = re.compile(r"''|" # pair of single quotes, or
r"\\(" # backslash followed by
r"[0-7][0-7]?[0-7]?|" # nnn (1 to 3 octal digits) or
r"x[0-9A-Fa-f][0-9A-Fa-f]?|" # xHH (1 or 2 hex digits) or
r".)") # one char
def _escapefun(matchobj):
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c
| if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename)) | conditional_block |
pgconf.py | #!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename))
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it
into our dictionary.
'''
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename)
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.
incfilename = value.strip("'")
if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def | (self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
"""
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.')
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].index(unit)
u = (1, 1000, 60*1000)[u]
try:
m = u
t = re.split('(ms|s|min|h|d)', self.value)
if len(t) > 1:
i = ['ms', 's', 'min', 'h', 'd'].index(t[1])
m = (1, 1000, 60*1000, 3600*1000, 24*3600*1000)[i]
return int(t[0], 0) * m / u
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer with '
'optional suffix ms, s, min, h, or d '
'(%s is default).' % unit)
def ConfigurationError(self, msg):
msg = '(%s = %s) %s' % (self.name, self.value, msg)
return ConfigurationError(msg, self.filename, self.linenumber)
class ConfigurationError(EnvironmentError):
def __init__(self, msg, filename='', linenumber=0):
self.msg = msg
self.filename = filename
self.linenumber = linenumber
if linenumber:
msg = '%s line %d: %s' % (filename, linenumber, msg)
elif filename:
msg = '%s: %s' % (filename, msg)
EnvironmentError.__init__(self, msg)
def __str__(self):
return self.message
#-------------------------------- private --------------------------------
_setpat = re.compile(r"\s*(\w+)\s*(=\s*)?" # name [=]
'('
r"[eE]?('((\\.)?[^\\']*)*')+|" # single-quoted string or
r"[^\s#']*" # token ending at whitespace or comment
')')
_escapepat = re.compile(r"''|" # pair of single quotes, or
r"\\(" # backslash followed by
r"[0-7][0-7]?[0-7]?|" # nnn (1 to 3 octal digits) or
r"x[0-9A-Fa-f][0-9A-Fa-f]?|" # xHH (1 or 2 hex digits) or
r".)") # one char
def _escapefun(matchobj):
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c
| int | identifier_name |
pgconf.py | #!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename))
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it
into our dictionary.
'''
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename)
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.
incfilename = value.strip("'")
if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def int(self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
|
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].index(unit)
u = (1, 1000, 60*1000)[u]
try:
m = u
t = re.split('(ms|s|min|h|d)', self.value)
if len(t) > 1:
i = ['ms', 's', 'min', 'h', 'd'].index(t[1])
m = (1, 1000, 60*1000, 3600*1000, 24*3600*1000)[i]
return int(t[0], 0) * m / u
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer with '
'optional suffix ms, s, min, h, or d '
'(%s is default).' % unit)
def ConfigurationError(self, msg):
msg = '(%s = %s) %s' % (self.name, self.value, msg)
return ConfigurationError(msg, self.filename, self.linenumber)
class ConfigurationError(EnvironmentError):
def __init__(self, msg, filename='', linenumber=0):
self.msg = msg
self.filename = filename
self.linenumber = linenumber
if linenumber:
msg = '%s line %d: %s' % (filename, linenumber, msg)
elif filename:
msg = '%s: %s' % (filename, msg)
EnvironmentError.__init__(self, msg)
def __str__(self):
return self.message
#-------------------------------- private --------------------------------
_setpat = re.compile(r"\s*(\w+)\s*(=\s*)?" # name [=]
'('
r"[eE]?('((\\.)?[^\\']*)*')+|" # single-quoted string or
r"[^\s#']*" # token ending at whitespace or comment
')')
_escapepat = re.compile(r"''|" # pair of single quotes, or
r"\\(" # backslash followed by
r"[0-7][0-7]?[0-7]?|" # nnn (1 to 3 octal digits) or
r"x[0-9A-Fa-f][0-9A-Fa-f]?|" # xHH (1 or 2 hex digits) or
r".)") # one char
def _escapefun(matchobj):
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c
| """
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.') | identifier_body |
pgconf.py | #!/usr/bin/env python
# $Id: $
"""
postgresql.conf configuration file reader
Module contents:
readfile() - Read postgresql.conf file
class gucdict - Container for postgresql.conf settings
class setting - Holds one setting
class ConfigurationError - a subclass of EnvironmentError
Example:
import lib.pgconf as pgconf
d = pgconf.readfile()
port = d.int('port', 5432)
pe = d.bool('password_encryption', False)
sb = d.kB('shared_buffers')
at = d.time('authentication_timeout', 'ms', 2500)
"""
import os
import os.path
import re
# Max recursion level for postgresql.conf include directives.
# The max value is 10 in the postgres code, so it's the same here.
MAX_RECURSION_LEVEL=10
def readfile(filename='postgresql.conf', defaultpath=None):
"""
Read postgresql.conf file and put the settings into a dictionary.
Returns the dictionary: a newly created pgconf.gucdict object.
If filename does not specify an absolute path, it is treated as relative
to defaultpath, or to the current working directory.
"""
if not os.path.isabs(filename):
if defaultpath is None:
defaultpath = os.getcwd()
filename = os.path.normpath(os.path.join(defaultpath, filename))
fp = open(filename)
try:
dictionary = gucdict()
dictionary.populate(fp, filename)
return dictionary
except Exception:
raise
finally:
fp.close()
class gucdict(dict):
"""
A container for settings from a postgresql.conf file.
Behaves as an ordinary dictionary, with a few added methods.
The keys of the dictionary are GUC names in lower case, and the
values are instances of the pgconf.setting class.
The populate() method loads the dictionary with settings from a file.
The str(), bool(), int(), float(), kB(), and time() methods return a
value from the dictionary, converted to internal form.
"""
def populate(self, lines, filename='', recurLevel=0):
'''
Given a postgresql.conf input file (or a list of strings, or some
iterable object yielding lines), look for lines of the form
name[=][value][#comment]
For each one found, construct a pgconf.setting object and put it |
linenumber = 0
for line in lines:
linenumber += 1
m = _setpat.match(line)
if m:
name, value, pos = m.group(1), m.group(3), m.start(3)
if name == 'include':
try:
# Remove the ' from the filename and then convert to abspath if needed.
incfilename = value.strip("'")
if not incfilename.startswith('/') and filename != '':
incfilename = '%s/%s' % (filename[0:filename.rfind('/')], incfilename)
fp = open(incfilename)
self.populate(fp, incfilename, recurLevel+1)
fp.close()
except IOError:
raise Exception('File %s included from %s:%d does not exist' % (incfilename, filename, linenumber))
else:
self[name.lower()] = setting(name, value, filename, linenumber, pos)
def str(self, name, default=None):
"""
Return string setting, or default if absent.
"""
v = self.get(name)
if v:
return v.str()
else:
return default
def bool(self, name, default=None):
"""
Return Boolean setting, or default if absent.
"""
v = self.get(name)
if v:
return v.bool()
else:
return default
def int(self, name, default=None):
"""
Return integer setting, or default if absent.
"""
v = self.get(name)
if v:
return v.int()
else:
return default
def float(self, name, default=None):
"""
Return floating-point setting, or default if absent.
"""
v = self.get(name)
if v:
return v.float()
else:
return default
def kB(self, name, default=None):
"""
Return memory setting in units of 1024 bytes, or default if absent.
"""
v = self.get(name)
if v:
return v.kB()
else:
return default
def time(self, name, unit='s', default=None):
"""
Return time setting, or default if absent.
Specify desired unit as 'ms', 's', or 'min'.
"""
v = self.get(name)
if v:
return v.time(unit)
else:
return default
class setting(object):
"""
Holds a GUC setting from a postgresql.conf file.
The str(), bool(), int(), float(), kB(), and time() methods return the
value converted to the requested internal form. pgconf.ConfigurationError
is raised if the conversion fails, i.e. the value does not conform to the
expected syntax.
"""
def __init__(self, name, value, filename='', linenumber=0, pos=0):
self.name = name
self.value = value
self.filename = filename
self.linenumber = linenumber
self.pos = pos # starting offset of value within the input line
def __repr__(self):
return repr(self.value)
def str(self):
"""
Return the value as a string.
"""
v = self.value
if v and v.endswith("'"):
# Single-quoted string. Remove the opening and closing quotes.
# Replace each escape sequence with the character it stands for.
i = v.index("'") + 1
v = _escapepat.sub(_escapefun, v[i:-1])
return v
def bool(self):
"""
Interpret the value as a Boolean. Returns True or False.
"""
s = self.value
if s:
s = s.lower()
n = len(s)
if (s == '1' or
s == 'on' or
s == 'true'[:n] or
s == 'yes'[:n]):
return True
if (s == '0' or
s == 'off'[:n] or
s == 'false'[:n] or
s == 'no'[:n]):
return False
raise self.ConfigurationError('Boolean value should be one of: 1, 0, '
'on, off, true, false, yes, no.')
def int(self):
"""
Interpret the value as an integer. Returns an int or long.
"""
try:
return int(self.value, 0)
except ValueError:
raise self.ConfigurationError('Value should be integer.')
def float(self):
"""
Interpret the value as floating point. Returns a float.
"""
try:
return float(self.value)
except ValueError:
raise self.ConfigurationError('Value should be floating point.')
def kB(self):
"""
Interpret the value as an amount of memory. Returns an int or long,
in units of 1024 bytes.
"""
try:
m = 1
t = re.split('(kB|MB|GB)', self.value)
if len(t) > 1:
i = ['kB', 'MB', 'GB'].index(t[1])
m = (1, 1024, 1024*1024)[i]
try:
return int(t[0], 0) * m
except ValueError:
pass
return int(float(t[0]) * m)
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer or float '
'with optional suffix kB, MB, or GB '
'(kB is default).')
def time(self, unit='s'):
"""
Interpret the value as a time. Returns an int or long.
Specify desired unit as 'ms', 's', or 'min'.
"""
u = ['ms', 's', 'min'].index(unit)
u = (1, 1000, 60*1000)[u]
try:
m = u
t = re.split('(ms|s|min|h|d)', self.value)
if len(t) > 1:
i = ['ms', 's', 'min', 'h', 'd'].index(t[1])
m = (1, 1000, 60*1000, 3600*1000, 24*3600*1000)[i]
return int(t[0], 0) * m / u
except (ValueError, IndexError):
raise self.ConfigurationError('Value should be integer with '
'optional suffix ms, s, min, h, or d '
'(%s is default).' % unit)
def ConfigurationError(self, msg):
msg = '(%s = %s) %s' % (self.name, self.value, msg)
return ConfigurationError(msg, self.filename, self.linenumber)
class ConfigurationError(EnvironmentError):
def __init__(self, msg, filename='', linenumber=0):
self.msg = msg
self.filename = filename
self.linenumber = linenumber
if linenumber:
msg = '%s line %d: %s' % (filename, linenumber, msg)
elif filename:
msg = '%s: %s' % (filename, msg)
EnvironmentError.__init__(self, msg)
def __str__(self):
return self.message
#-------------------------------- private --------------------------------
_setpat = re.compile(r"\s*(\w+)\s*(=\s*)?" # name [=]
'('
r"[eE]?('((\\.)?[^\\']*)*')+|" # single-quoted string or
r"[^\s#']*" # token ending at whitespace or comment
')')
_escapepat = re.compile(r"''|" # pair of single quotes, or
r"\\(" # backslash followed by
r"[0-7][0-7]?[0-7]?|" # nnn (1 to 3 octal digits) or
r"x[0-9A-Fa-f][0-9A-Fa-f]?|" # xHH (1 or 2 hex digits) or
r".)") # one char
def _escapefun(matchobj):
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c | into our dictionary.
'''
if recurLevel == MAX_RECURSION_LEVEL:
raise Exception('could not open configuration file "%s": maximum nesting depth exceeded' % filename) | random_line_split |
sagas.ts | import { put, takeLatest, all, call } from 'redux-saga/effects'
import request from 'ROOT_SOURCE/utils/request'
import { Action4All } from 'ROOT_SOURCE/utils/types'
import { CURRENT_PAGE, PAGE_SIZE, TOTAL, RESPONSE_DESTRUST_KEY,
RESPONSE_LIST_DESTRUST_KEY } from 'ROOT_SOURCE/base/BaseConfig'
import {
LIST__UPDATE_FORM_DATA,
LIST__UPDATE_TABLE_DATA,
LIST__SUBMIT_FORM_ASYNC,
} from './actions'
function* submitFormAsync(action: Action4All) {
// 初始化antd-table-pagination
let _formData = Object.assign(
{[PAGE_SIZE]: 10, [CURRENT_PAGE]: 1},
action.payload,
)
// 请求server数据
let result = yield call(request.post, '/asset/getAsset', _formData)
if (!result) { return; }
// 解构server结果
let resultBody = result[RESPONSE_DESTRUST_KEY]
if (!resultBody) { return; }
/ | a
yield put({ type: LIST__UPDATE_FORM_DATA, payload: {
..._formData,
[TOTAL]: resultBody[TOTAL],
} })
// 更新tableData
yield put({ type: LIST__UPDATE_TABLE_DATA, payload: {
dataSource: resultBody[RESPONSE_LIST_DESTRUST_KEY],
} })
}
export default function* asyncSagas() {
yield all([
yield takeLatest(LIST__SUBMIT_FORM_ASYNC, submitFormAsync),
])
}
| / 更新formDat | conditional_block |
sagas.ts | import { put, takeLatest, all, call } from 'redux-saga/effects'
import request from 'ROOT_SOURCE/utils/request'
import { Action4All } from 'ROOT_SOURCE/utils/types'
import { CURRENT_PAGE, PAGE_SIZE, TOTAL, RESPONSE_DESTRUST_KEY,
RESPONSE_LIST_DESTRUST_KEY } from 'ROOT_SOURCE/base/BaseConfig'
import {
LIST__UPDATE_FORM_DATA,
LIST__UPDATE_TABLE_DATA,
LIST__SUBMIT_FORM_ASYNC,
} from './actions'
| {[PAGE_SIZE]: 10, [CURRENT_PAGE]: 1},
action.payload,
)
// 请求server数据
let result = yield call(request.post, '/asset/getAsset', _formData)
if (!result) { return; }
// 解构server结果
let resultBody = result[RESPONSE_DESTRUST_KEY]
if (!resultBody) { return; }
// 更新formData
yield put({ type: LIST__UPDATE_FORM_DATA, payload: {
..._formData,
[TOTAL]: resultBody[TOTAL],
} })
// 更新tableData
yield put({ type: LIST__UPDATE_TABLE_DATA, payload: {
dataSource: resultBody[RESPONSE_LIST_DESTRUST_KEY],
} })
}
export default function* asyncSagas() {
yield all([
yield takeLatest(LIST__SUBMIT_FORM_ASYNC, submitFormAsync),
])
} |
function* submitFormAsync(action: Action4All) {
// 初始化antd-table-pagination
let _formData = Object.assign( | random_line_split |
product-list.ts | import { Component } from '@angular/core';
import { NavController, Keyboard } from 'ionic-angular';
import { OvhRequestService } from '../../../services/ovh-request/ovh-request.service';
import { categoryEnum } from '../../../config/constants';
import { Subscription } from 'rxjs/Subscription';
import { AnalyticsService } from '../../../services/analytics/analytics.service';
@Component({
templateUrl: 'build/pages/products/product-list/product-list.html',
providers: []
})
export class ProductListPage {
loading: boolean = true;
search: string;
products: Array<string> = [];
productsFiltered: Array<string> = [];
category: any = categoryEnum.DOMAIN;
categoryKeys: Array<string> = Object.keys(categoryEnum).filter((category) => category !== 'PROJECT');
categoryEnum: any = categoryEnum;
subscribtion: Subscription;
constructor(public ovhRequest: OvhRequestService, public navController: NavController, public keyboard: Keyboard,
public analytics: AnalyticsService) {
this.getProducts(this.category);
this.analytics.trackView('product-list');
}
getProducts(category: any) {
if (this.subscribtion != null) {
this.subscribtion.unsubscribe();
}
this.loading = true;
this.search = '';
this.subscribtion = this.ovhRequest.get(category.url) | .subscribe(
(products) => {
this.products = products;
this.sortProducts();
},
null,
() => this.loading = false
);
}
sortProducts() {
if (!this.search) {
this.productsFiltered = this.products;
} else {
this.productsFiltered = this.products.filter((product) => product.toLowerCase().indexOf(this.search.toLowerCase()) !== -1);
}
}
moreInfos(serviceName: string) {
this.navController.push(this.category.page, { serviceName });
}
closeKeyboard(): void {
this.keyboard.close();
}
} | random_line_split | |
product-list.ts | import { Component } from '@angular/core';
import { NavController, Keyboard } from 'ionic-angular';
import { OvhRequestService } from '../../../services/ovh-request/ovh-request.service';
import { categoryEnum } from '../../../config/constants';
import { Subscription } from 'rxjs/Subscription';
import { AnalyticsService } from '../../../services/analytics/analytics.service';
@Component({
templateUrl: 'build/pages/products/product-list/product-list.html',
providers: []
})
export class | {
loading: boolean = true;
search: string;
products: Array<string> = [];
productsFiltered: Array<string> = [];
category: any = categoryEnum.DOMAIN;
categoryKeys: Array<string> = Object.keys(categoryEnum).filter((category) => category !== 'PROJECT');
categoryEnum: any = categoryEnum;
subscribtion: Subscription;
constructor(public ovhRequest: OvhRequestService, public navController: NavController, public keyboard: Keyboard,
public analytics: AnalyticsService) {
this.getProducts(this.category);
this.analytics.trackView('product-list');
}
getProducts(category: any) {
if (this.subscribtion != null) {
this.subscribtion.unsubscribe();
}
this.loading = true;
this.search = '';
this.subscribtion = this.ovhRequest.get(category.url)
.subscribe(
(products) => {
this.products = products;
this.sortProducts();
},
null,
() => this.loading = false
);
}
sortProducts() {
if (!this.search) {
this.productsFiltered = this.products;
} else {
this.productsFiltered = this.products.filter((product) => product.toLowerCase().indexOf(this.search.toLowerCase()) !== -1);
}
}
moreInfos(serviceName: string) {
this.navController.push(this.category.page, { serviceName });
}
closeKeyboard(): void {
this.keyboard.close();
}
}
| ProductListPage | identifier_name |
product-list.ts | import { Component } from '@angular/core';
import { NavController, Keyboard } from 'ionic-angular';
import { OvhRequestService } from '../../../services/ovh-request/ovh-request.service';
import { categoryEnum } from '../../../config/constants';
import { Subscription } from 'rxjs/Subscription';
import { AnalyticsService } from '../../../services/analytics/analytics.service';
@Component({
templateUrl: 'build/pages/products/product-list/product-list.html',
providers: []
})
export class ProductListPage {
loading: boolean = true;
search: string;
products: Array<string> = [];
productsFiltered: Array<string> = [];
category: any = categoryEnum.DOMAIN;
categoryKeys: Array<string> = Object.keys(categoryEnum).filter((category) => category !== 'PROJECT');
categoryEnum: any = categoryEnum;
subscribtion: Subscription;
constructor(public ovhRequest: OvhRequestService, public navController: NavController, public keyboard: Keyboard,
public analytics: AnalyticsService) {
this.getProducts(this.category);
this.analytics.trackView('product-list');
}
getProducts(category: any) {
if (this.subscribtion != null) {
this.subscribtion.unsubscribe();
}
this.loading = true;
this.search = '';
this.subscribtion = this.ovhRequest.get(category.url)
.subscribe(
(products) => {
this.products = products;
this.sortProducts();
},
null,
() => this.loading = false
);
}
sortProducts() {
if (!this.search) {
this.productsFiltered = this.products;
} else {
this.productsFiltered = this.products.filter((product) => product.toLowerCase().indexOf(this.search.toLowerCase()) !== -1);
}
}
moreInfos(serviceName: string) {
this.navController.push(this.category.page, { serviceName });
}
closeKeyboard(): void |
}
| {
this.keyboard.close();
} | identifier_body |
product-list.ts | import { Component } from '@angular/core';
import { NavController, Keyboard } from 'ionic-angular';
import { OvhRequestService } from '../../../services/ovh-request/ovh-request.service';
import { categoryEnum } from '../../../config/constants';
import { Subscription } from 'rxjs/Subscription';
import { AnalyticsService } from '../../../services/analytics/analytics.service';
@Component({
templateUrl: 'build/pages/products/product-list/product-list.html',
providers: []
})
export class ProductListPage {
loading: boolean = true;
search: string;
products: Array<string> = [];
productsFiltered: Array<string> = [];
category: any = categoryEnum.DOMAIN;
categoryKeys: Array<string> = Object.keys(categoryEnum).filter((category) => category !== 'PROJECT');
categoryEnum: any = categoryEnum;
subscribtion: Subscription;
constructor(public ovhRequest: OvhRequestService, public navController: NavController, public keyboard: Keyboard,
public analytics: AnalyticsService) {
this.getProducts(this.category);
this.analytics.trackView('product-list');
}
getProducts(category: any) {
if (this.subscribtion != null) {
this.subscribtion.unsubscribe();
}
this.loading = true;
this.search = '';
this.subscribtion = this.ovhRequest.get(category.url)
.subscribe(
(products) => {
this.products = products;
this.sortProducts();
},
null,
() => this.loading = false
);
}
sortProducts() {
if (!this.search) | else {
this.productsFiltered = this.products.filter((product) => product.toLowerCase().indexOf(this.search.toLowerCase()) !== -1);
}
}
moreInfos(serviceName: string) {
this.navController.push(this.category.page, { serviceName });
}
closeKeyboard(): void {
this.keyboard.close();
}
}
| {
this.productsFiltered = this.products;
} | conditional_block |
fileUploadHandler.js | import { Meteor } from 'meteor/meteor';
import { Accounts } from 'meteor/accounts-base';
import { Tracker } from 'meteor/tracker';
import { UploadFS } from 'meteor/jalik:ufs';
import { FileUploadBase } from '../../lib/FileUploadBase';
import { Uploads, Avatars } from '../../../models';
new UploadFS.Store({
collection: Uploads.model,
name: 'Uploads',
});
new UploadFS.Store({
collection: Avatars.model,
name: 'Avatars',
});
export const fileUploadHandler = (directive, meta, file) => {
const store = UploadFS.getStore(directive); | if (store) {
return new FileUploadBase(store, meta, file);
}
console.error('Invalid file store', directive);
};
Tracker.autorun(function() {
if (Meteor.userId()) {
document.cookie = `rc_uid=${ escape(Meteor.userId()) }; path=/`;
document.cookie = `rc_token=${ escape(Accounts._storedLoginToken()) }; path=/`;
}
}); | random_line_split | |
fileUploadHandler.js | import { Meteor } from 'meteor/meteor';
import { Accounts } from 'meteor/accounts-base';
import { Tracker } from 'meteor/tracker';
import { UploadFS } from 'meteor/jalik:ufs';
import { FileUploadBase } from '../../lib/FileUploadBase';
import { Uploads, Avatars } from '../../../models';
new UploadFS.Store({
collection: Uploads.model,
name: 'Uploads',
});
new UploadFS.Store({
collection: Avatars.model,
name: 'Avatars',
});
export const fileUploadHandler = (directive, meta, file) => {
const store = UploadFS.getStore(directive);
if (store) |
console.error('Invalid file store', directive);
};
Tracker.autorun(function() {
if (Meteor.userId()) {
document.cookie = `rc_uid=${ escape(Meteor.userId()) }; path=/`;
document.cookie = `rc_token=${ escape(Accounts._storedLoginToken()) }; path=/`;
}
});
| {
return new FileUploadBase(store, meta, file);
} | conditional_block |
mailbox.py | # *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import os
import io
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
# TODO LIST
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
INIT_FLAGS = (MessageFlags.RECENT_FLAG, MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
"""
Wrap a mailbox in a class that can be hashed according to the mailbox name.
This means that dicts or sets will use this new equality rule, so we won't
collect multiple instances of the same mailbox in collections like the
MessageCollection set where we keep track of listeners.
"""
class HashableMailbox(object):
def __init__(self, mbox):
self.mbox = mbox
# See #8083, pixelated adaptor introduces conflicts in the usage
self.mailbox_name = self.mbox.mbox_name + 'IMAP'
def __hash__(self):
return hash(self.mailbox_name)
def __eq__(self, other):
return self.mailbox_name == other.mbox.mbox_name + 'IMAP'
def notify_new(self):
self.mbox.notify_new()
return HashableMailbox(mailbox)
class IMAPMailbox(object):
"""
A Soledad-backed IMAP mailbox.
Implements the high-level method needed for the Mailbox interfaces.
The low-level database methods are contained in the generic
MessageCollection class. We receive an instance of it and it is made
accessible in the `collection` attribute.
"""
implements(
imap4.IMailbox,
imap4.IMailboxInfo,
imap4.ISearchableMailbox,
# XXX I think we do not need to implement CloseableMailbox, do we?
# We could remove ourselves from the collectionListener, although I
# think it simply will be garbage collected.
# imap4.ICloseableMailbox
imap4.IMessageCopier)
init_flags = INIT_FLAGS
CMD_MSG = "MESSAGES"
CMD_RECENT = "RECENT"
CMD_UIDNEXT = "UIDNEXT"
CMD_UIDVALIDITY = "UIDVALIDITY"
CMD_UNSEEN = "UNSEEN"
log = Logger()
# TODO we should turn this into a datastructure with limited capacity
_listeners = defaultdict(set)
def __init__(self, collection, rw=1):
"""
:param collection: instance of MessageCollection
:type collection: MessageCollection
:param rw: read-and-write flag for this mailbox
:type rw: int
"""
self.rw = rw
self._uidvalidity = None
self.collection = collection
self.collection.addListener(make_collection_listener(self))
@property
def mbox_name(self):
return self.collection.mbox_name
@property
def listeners(self):
"""
Returns listeners for this mbox.
The server itself is a listener to the mailbox.
so we can notify it (and should!) after changes in flags
and number of messages.
:rtype: set
"""
return self._listeners[self.mbox_name]
def get_imap_message(self, message):
d = defer.Deferred()
IMAPMessage(message, store=self.collection.store, d=d)
return d
# FIXME this grows too crazily when many instances are fired, like
# during imaptest stress testing. Should have a queue of limited size
# instead.
def addListener(self, listener):
"""
Add a listener to the listeners queue.
The server adds itself as a listener when there is a SELECT,
so it can send EXIST commands.
:param listener: listener to add
:type listener: an object that implements IMailboxListener
"""
listeners = self.listeners
self.log.debug('Adding mailbox listener: %s. Total: %s' % (
listener, len(listeners)))
listeners.add(listener)
def removeListener(self, listener):
"""
Remove a listener from the listeners queue.
:param listener: listener to remove
:type listener: an object that implements IMailboxListener
"""
self.listeners.remove(listener)
def getFlags(self):
"""
Returns the flags defined for this mailbox.
:returns: tuple of flags for this mailbox
:rtype: tuple of str
"""
flags = self.collection.mbox_wrapper.flags
if not flags:
flags = self.init_flags
flags_str = map(str, flags)
return flags_str
def setFlags(self, flags):
"""
Sets flags for this mailbox.
:param flags: a tuple with the flags
:type flags: tuple of str
"""
# XXX this is setting (overriding) old flags.
# Better pass a mode flag
leap_assert(isinstance(flags, tuple),
"flags expected to be a tuple")
return self.collection.set_mbox_attr("flags", flags)
def getUIDValidity(self):
"""
Return the unique validity identifier for this mailbox.
:return: unique validity identifier
:rtype: int
"""
return self.collection.get_mbox_attr("created")
def getUID(self, message_number):
"""
Return the UID of a message in the mailbox
.. note:: this implementation does not make much sense RIGHT NOW,
but in the future will be useful to get absolute UIDs from
message sequence numbers.
:param message: the message sequence number.
:type message: int
:rtype: int
:return: the UID of the message.
"""
# TODO support relative sequences. The (imap) message should
# receive a sequence number attribute: a deferred is not expected
return message_number
def getUIDNext(self):
"""
Return the likely UID for the next message added to this
mailbox. Currently it returns the higher UID incremented by
one.
:return: deferred with int
:rtype: Deferred
"""
d = self.collection.get_uid_next()
return d
def getMessageCount(self):
"""
Returns the total count of messages in this mailbox.
:return: deferred with int
:rtype: Deferred
"""
return self.collection.count()
def getUnseenCount(self):
"""
Returns the number of messages with the 'Unseen' flag.
:return: count of messages flagged `unseen`
:rtype: int
"""
return self.collection.count_unseen()
def getRecentCount(self):
"""
Returns the number of messages with the 'Recent' flag.
:return: count of messages flagged `recent`
:rtype: int
"""
return self.collection.count_recent()
def isWriteable(self):
"""
Get the read/write status of the mailbox.
:return: 1 if mailbox is read-writeable, 0 otherwise.
:rtype: int
"""
# XXX We don't need to store it in the mbox doc, do we?
# return int(self.collection.get_mbox_attr('rw'))
return self.rw
def getHierarchicalDelimiter(self):
"""
Returns the character used to delimite hierarchies in mailboxes.
:rtype: str
"""
return '/'
def requestStatus(self, names):
"""
Handles a status request by gathering the output of the different
status commands.
:param names: a list of strings containing the status commands
:type names: iter
"""
r = {}
maybe = defer.maybeDeferred
if self.CMD_MSG in names:
r[self.CMD_MSG] = maybe(self.getMessageCount)
if self.CMD_RECENT in names:
r[self.CMD_RECENT] = maybe(self.getRecentCount)
if self.CMD_UIDNEXT in names:
r[self.CMD_UIDNEXT] = maybe(self.getUIDNext)
if self.CMD_UIDVALIDITY in names:
r[self.CMD_UIDVALIDITY] = maybe(self.getUIDValidity)
if self.CMD_UNSEEN in names:
r[self.CMD_UNSEEN] = maybe(self.getUnseenCount)
def as_a_dict(values):
return dict(zip(r.keys(), values))
d = defer.gatherResults(r.values())
d.addCallback(as_a_dict)
return d
def addMessage(self, message, flags, date=None):
"""
Adds a message to this mailbox.
:param message: the raw message
:type message: str
:param flags: flag list
:type flags: list of str
:param date: timestamp
:type date: str, or None
:return: a deferred that will be triggered with the UID of the added
message.
"""
# TODO should raise ReadOnlyMailbox if not rw.
# TODO have a look at the cases for internal date in the rfc
# XXX we could treat the message as an IMessage from here
# TODO -- fast appends should be definitely solved by Blobs.
# A better solution will probably involve implementing MULTIAPPEND
# extension or patching imap server to support pipelining.
if isinstance(message,
(cStringIO.OutputType, StringIO.StringIO, io.BytesIO)):
message = message.getvalue()
leap_assert_type(message, basestring)
if flags is None:
flags = tuple()
else:
flags = tuple(str(flag) for flag in flags)
if date is None:
date = formatdate(time.time())
d = self.collection.add_msg(message, flags, date=date)
d.addCallback(lambda message: message.get_uid())
d.addErrback(
lambda failure: self.log.failure('Error while adding msg'))
return d
def notify_new(self, *args):
"""
Notify of new messages to all the listeners.
This will be called indirectly by the underlying collection, that will
notify this IMAPMailbox whenever there are changes in the number of
messages in the collection, since we have added ourselves to the
collection listeners.
:param args: ignored.
"""
def cbNotifyNew(result):
exists, recent = result
for listener in self.listeners:
listener.newMessages(exists, recent)
d = self._get_notify_count()
d.addCallback(cbNotifyNew)
d.addCallback(self.collection.cb_signal_unread_to_ui)
d.addErrback(lambda failure: self.log.failure('Error while notify'))
def _get_notify_count(self):
"""
Get message count and recent count for this mailbox.
:return: a deferred that will fire with a tuple, with number of
messages and number of recent messages.
:rtype: Deferred
"""
# XXX this is way too expensive in cases like multiple APPENDS.
# We should have a way of keep a cache or do a self-increment for that
# kind of calls.
d_exists = defer.maybeDeferred(self.getMessageCount)
d_recent = defer.maybeDeferred(self.getRecentCount)
d_list = [d_exists, d_recent]
def log_num_msg(result):
exists, recent = tuple(result)
self.log.debug(
'NOTIFY (%r): there are %s messages, %s recent' % (
self.mbox_name, exists, recent))
return result
d = defer.gatherResults(d_list)
d.addCallback(log_num_msg)
return d
# commands, do not rename methods
def destroy(self):
"""
Called before this mailbox is permanently deleted.
Should cleanup resources, and set the \\Noselect flag
on the mailbox.
"""
# XXX this will overwrite all the existing flags
# should better simply addFlag
self.setFlags((MessageFlags.NOSELECT_FLAG,))
def remove_mbox(_):
uuid = self.collection.mbox_uuid
d = self.collection.mbox_wrapper.delete(self.collection.store)
d.addCallback(
lambda _: self.collection.mbox_indexer.delete_table(uuid))
return d
d = self.deleteAllDocs()
d.addCallback(remove_mbox)
return d
def expunge(self):
"""
Remove all messages flagged \\Deleted
"""
if not self.isWriteable():
raise imap4.ReadOnlyMailbox
return self.collection.delete_all_flagged()
def _get_message_fun(self, uid):
"""
Return the proper method to get a message for this mailbox, depending
on the passed uid flag.
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:rtype: callable
"""
get_message_fun = [
self.collection.get_message_by_sequence_number,
self.collection.get_message_by_uid][uid]
return get_message_fun
def _get_messages_range(self, messages_asked, uid=True):
def get_range(messages_asked):
return self._filter_msg_seq(messages_asked)
d = self._bound_seq(messages_asked, uid)
if uid:
d.addCallback(get_range)
d.addErrback(
lambda f: self.log.failure('Error getting msg range'))
return d
def _bound_seq(self, messages_asked, uid):
"""
Put an upper bound to a messages sequence if this is open.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:return: a Deferred that will fire with a MessageSet
"""
def set_last_uid(last_uid):
messages_asked.last = last_uid
return messages_asked
def set_last_seq(all_uid):
messages_asked.last = len(all_uid)
return messages_asked
if not messages_asked.last:
try:
iter(messages_asked)
except TypeError:
# looks like we cannot iterate
if uid:
d = self.collection.get_last_uid()
d.addCallback(set_last_uid)
else:
|
return d
return defer.succeed(messages_asked)
def _filter_msg_seq(self, messages_asked):
"""
Filter a message sequence returning only the ones that do exist in the
collection.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:rtype: set
"""
# TODO we could pass the asked sequence to the indexer
# all_uid_iter, and bound the sql query instead.
def filter_by_asked(all_msg_uid):
set_asked = set(messages_asked)
set_exist = set(all_msg_uid)
return set_asked.intersection(set_exist)
d = self.collection.all_uid_iter()
d.addCallback(filter_by_asked)
return d
def fetch(self, messages_asked, uid):
"""
Retrieve one or more messages in this mailbox.
from rfc 3501: The data items to be fetched can be either a single atom
or a parenthesized list.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If true, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: bool
:rtype: deferred with a generator that yields...
"""
get_msg_fun = self._get_message_fun(uid)
getimapmsg = self.get_imap_message
def get_imap_messages_for_range(msg_range):
def _get_imap_msg(messages):
d_imapmsg = []
# just in case we got bad data in here
for msg in filter(None, messages):
d_imapmsg.append(getimapmsg(msg))
return defer.gatherResults(d_imapmsg, consumeErrors=True)
def _zip_msgid(imap_messages):
zipped = zip(
list(msg_range), imap_messages)
return (item for item in zipped)
# XXX not called??
def _unset_recent(sequence):
reactor.callLater(0, self.unset_recent_flags, sequence)
return sequence
d_msg = []
for msgid in msg_range:
# XXX We want cdocs because we "probably" are asked for the
# body. We should be smarter at do_FETCH and pass a parameter
# to this method in order not to prefetch cdocs if they're not
# going to be used.
d_msg.append(get_msg_fun(msgid, get_cdocs=True))
d = defer.gatherResults(d_msg, consumeErrors=True)
d.addCallback(_get_imap_msg)
d.addCallback(_zip_msgid)
d.addErrback(
lambda failure: self.log.error(
'Error getting msg for range'))
return d
d = self._get_messages_range(messages_asked, uid)
d.addCallback(get_imap_messages_for_range)
d.addErrback(
lambda failure: self.log.failure('Error on fetch'))
return d
def fetch_flags(self, messages_asked, uid):
"""
A fast method to fetch all flags, tricking just the
needed subset of the MIME interface that's needed to satisfy
a generic FLAGS query.
Given how LEAP Mail is supposed to work without local cache,
this query is going to be quite common, and also we expect
it to be in the form 1:* at the beginning of a session, so
it's not bad to fetch all the FLAGS docs at once.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If 1, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: int
:return: A tuple of two-tuples of message sequence numbers and
flagsPart, which is a only a partial implementation of
MessagePart.
:rtype: tuple
"""
# is_sequence = True if uid == 0 else False
# XXX FIXME -----------------------------------------------------
# imap/tests, or muas like mutt, it will choke until we implement
# sequence numbers. This is an easy hack meanwhile.
is_sequence = False
# ---------------------------------------------------------------
if is_sequence:
raise NotImplementedError(
"FETCH FLAGS NOT IMPLEMENTED FOR MESSAGE SEQUENCE NUMBERS YET")
d = defer.Deferred()
reactor.callLater(0, self._do_fetch_flags, messages_asked, uid, d)
return d
def _do_fetch_flags(self, messages_asked, uid, d):
    """
    Resolve a FLAGS fetch: collect the flags for every message in the
    requested range and fire ``d`` with a generator of the results.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If 1, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: int
    :param d: deferred whose callback will be called with result.
    :type d: Deferred
    :rtype: A generator that yields two-tuples of message sequence numbers
            and flagsPart
    """
    # Minimal stand-in for imap4's MessagePart: only what a FLAGS
    # query needs (UID and flag list).
    class flagsPart(object):
        def __init__(self, uid, flags):
            self.uid = uid
            self.flags = flags

        def getUID(self):
            return self.uid

        def getFlags(self):
            return map(str, self.flags)

    def pack_flags(result):
        # result is a (uid, flags) two-tuple from the collection.
        _uid, _flags = result
        return _uid, flagsPart(_uid, _flags)

    def get_flags_for_seq(sequence):
        d_all_flags = []
        for msgid in sequence:
            # TODO implement sequence numbers here too
            d_flags_per_uid = self.collection.get_flags_by_uid(msgid)
            d_flags_per_uid.addCallback(pack_flags)
            d_all_flags.append(d_flags_per_uid)
        # NOTE(review): gatherResults without consumeErrors, and no
        # errback ever fires `d` -- if one lookup fails, `d` is never
        # called back. Confirm the caller tolerates a hung deferred.
        gotflags = defer.gatherResults(d_all_flags)
        gotflags.addCallback(get_uid_flag_generator)
        return gotflags

    def get_uid_flag_generator(result):
        # Hand the gathered results to the waiting deferred lazily.
        generator = (item for item in result)
        d.callback(generator)

    d_seq = self._get_messages_range(messages_asked, uid)
    d_seq.addCallback(get_flags_for_seq)
    return d_seq
@defer.inlineCallbacks
def fetch_headers(self, messages_asked, uid):
    """
    A fast method to fetch all headers, tricking just the
    needed subset of the MIME interface that's needed to satisfy
    a generic HEADERS query.

    Given how LEAP Mail is supposed to work without local cache,
    this query is going to be quite common, and also we expect
    it to be in the form 1:* at the beginning of a session, so
    **MAYBE** it's not too bad to fetch all the HEADERS docs at once.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If true, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: bool
    :return: A tuple of two-tuples of message sequence numbers and
             headersPart, which is a only a partial implementation of
             MessagePart.
    :rtype: tuple
    """
    # TODO implement sequences
    is_sequence = True if uid == 0 else False
    if is_sequence:
        raise NotImplementedError(
            "FETCH HEADERS NOT IMPLEMENTED FOR SEQUENCE NUMBER YET")

    # Minimal stand-in for imap4's MessagePart: only what a HEADERS
    # query needs (UID and a headers dict).
    class headersPart(object):
        def __init__(self, uid, headers):
            self.uid = uid
            self.headers = headers

        def getUID(self):
            return self.uid

        def getHeaders(self, _):
            # NOTE(review): str() on header values assumes they are
            # ascii-safe; non-ascii header values would raise here --
            # confirm what get_headers() returns.
            return dict(
                (str(key), str(value))
                for key, value in
                self.headers.items())

    messages_asked = yield self._bound_seq(messages_asked, uid)
    seq_messg = yield self._filter_msg_seq(messages_asked)
    result = []
    for msgid in seq_messg:
        # Messages are fetched sequentially; header docs are small.
        msg = yield self.collection.get_message_by_uid(msgid)
        headers = headersPart(msgid, msg.get_headers())
        result.append((msgid, headers))
    defer.returnValue(iter(result))
def store(self, messages_asked, flags, mode, uid):
    """
    Set, add or remove flags on one or more messages.

    :param messages_asked: The identifiers of the messages to set the
                           flags on
    :type messages_asked: A MessageSet object with the list of messages
                          requested
    :param flags: The flags to set, unset, or add.
    :type flags: sequence of str
    :param mode: If mode is -1, these flags should be removed from the
                 specified messages. If mode is 1, these flags should be
                 added to the specified messages. If mode is 0, all
                 existing flags should be cleared and these flags should
                 be added.
    :type mode: -1, 0, or 1
    :param uid: If true, the IDs specified in the query are UIDs;
                otherwise they are message sequence IDs.
    :type uid: bool
    :return: A deferred, that will be called with a dict mapping message
             sequence numbers to sequences of str representing the flags
             set on the message after this operation has been performed.
    :rtype: deferred
    :raise ReadOnlyMailbox: Raised if this mailbox is not open for
                            read-write.
    """
    if not self.isWriteable():
        self.log.info('Read only mailbox!')
        raise imap4.ReadOnlyMailbox

    # Schedule the actual flag update for the next reactor turn;
    # _do_store fires this observer with the resulting dict.
    observer = defer.Deferred()
    reactor.callLater(
        0, self._do_store, messages_asked, flags, mode, uid, observer)
    observer.addCallback(self.collection.cb_signal_unread_to_ui)
    observer.addErrback(lambda f: self.log.error('Error on store'))
    return observer
def _do_store(self, messages_asked, flags, mode, uid, observer):
    """
    Helper method, invoke set_flags method in the IMAPMessageCollection.

    See the documentation for the `store` method for the parameters.

    :param observer: a deferred that will be called with the dictionary
                     mapping UIDs to flags after the operation has been
                     done.
    :type observer: deferred
    """
    # TODO we should prevent client from setting Recent flag
    get_msg_fun = self._get_message_fun(uid)
    leap_assert(not isinstance(flags, basestring),
                "flags cannot be a string")
    flags = tuple(flags)

    def set_flags_for_seq(sequence):
        def return_result_dict(list_of_flags):
            # Pair each requested id with the flags it ended up with.
            result = dict(zip(list(sequence), list_of_flags))
            observer.callback(result)
            return result

        d_all_set = []
        for msgid in sequence:
            d = get_msg_fun(msgid)
            d.addCallback(lambda msg: self.collection.update_flags(
                msg, flags, mode))
            d_all_set.append(d)
        # NOTE(review): if any update fails, gatherResults errbacks and
        # `observer` is never fired -- confirm `store`'s errback is the
        # intended handler for that case.
        got_flags_setted = defer.gatherResults(d_all_set)
        got_flags_setted.addCallback(return_result_dict)
        return got_flags_setted

    d_seq = self._get_messages_range(messages_asked, uid)
    d_seq.addCallback(set_flags_for_seq)
    return d_seq
# ISearchableMailbox
def search(self, query, uid):
    """
    Search for messages that meet the given query criteria.

    Warning: this is half-baked, and it might give problems since
    it offers the SearchableInterface.
    We'll be implementing it asap.

    :param query: The search criteria
    :type query: list
    :param uid: If true, the IDs specified in the query are UIDs;
                otherwise they are message sequence IDs.
    :type uid: bool
    :return: A list of message sequence numbers or message UIDs which
             match the search criteria or a C{Deferred} whose callback
             will be invoked with such a list.
    :rtype: C{list} or C{Deferred}
    """
    # TODO see if we can raise w/o interrupting flow
    # :raise IllegalQueryError: Raised when query is not valid.
    # TODO hardcoding for now! -- we'll support generic queries later on
    # but doing a quickfix for avoiding duplicate saves in the draft
    # folder. # See issue #4209
    #
    # Only the HEADER Message-ID form is supported, e.g.:
    #   ['UNDELETED', 'HEADER', 'Message-ID', '<id>@dev.bitmask.net']
    # query[3] is only present when the list has at least four items,
    # so guard with > 3: the previous `len(query) > 2` check raised
    # IndexError on a three-element HEADER query.
    if len(query) > 3:
        if query[1] == 'HEADER' and query[2].lower() == "message-id":
            msgid = str(query[3]).strip()
            self.log.debug('Searching for %s' % (msgid,))
            d = self.collection.get_uid_from_msgid(str(msgid))
            d.addCallback(lambda result: [result])
            return d
    # nothing implemented for any other query
    self.log.warn('Cannot process query: %s' % (query,))
    return []
# IMessageCopier
def copy(self, message):
    """
    Copy the given message object into this mailbox.

    :param message: an IMessage implementor
    :type message: LeapMessage
    :return: a deferred that will be fired with the message
             uid when the copy succeed.
    :rtype: Deferred
    """
    # Delegate straight to the collection, targeting this mailbox uuid.
    return self.collection.copy_msg(
        message.message, self.collection.mbox_uuid)
# convenience fun
def deleteAllDocs(self):
    """
    Delete all docs in this mailbox.

    :return: a deferred that fires when the deletion is done.
    :rtype: Deferred
    """
    # FIXME not implemented
    # NOTE(review): this simply delegates; the FIXME presumably refers
    # to the collection-side implementation -- confirm.
    return self.collection.delete_all_docs()
def unset_recent_flags(self, uid_seq):
    """
    Unset Recent flag for a sequence of UIDs.

    :param uid_seq: the UIDs whose \\Recent flag should be cleared.
    :return: a deferred that fires when the flags have been updated.
    :rtype: Deferred
    """
    # FIXME not implemented
    return self.collection.unset_recent_flags(uid_seq)
def __repr__(self):
    """
    Representation string for this mailbox.
    """
    # NOTE(review): collection.count() appears to return a Deferred
    # elsewhere in this class (see getMessageCount), so the count part
    # of this repr likely renders as a Deferred object, not a number --
    # confirm and fix upstream if so.
    return u"<IMAPMailbox: mbox '%s' (%s)>" % (
        self.mbox_name, self.collection.count())
_INBOX_RE = re.compile(INBOX_NAME, re.IGNORECASE)


def normalize_mailbox(name):
    """
    Return a normalized representation of the mailbox ``name``.

    This method ensures that an eventual initial 'inbox' part of a
    mailbox name is made uppercase.

    :param name: the name of the mailbox
    :type name: unicode
    :rtype: unicode
    """
    # XXX maybe it would make sense to normalize common folders too:
    # trash, sent, drafts, etc...
    if not _INBOX_RE.match(name):
        return name
    # Rebuild the name with the canonical uppercase INBOX prefix.
    tail = name[len(INBOX_NAME):]
    return INBOX_NAME + tail
| d = self.collection.all_uid_iter()
d.addCallback(set_last_seq) | conditional_block |
mailbox.py | # *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import os
import io
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
# TODO LIST
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
INIT_FLAGS = (MessageFlags.RECENT_FLAG, MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
    """
    Wrap a mailbox in a class that can be hashed according to the mailbox
    name.

    This means that dicts or sets will use this new equality rule, so we
    won't collect multiple instances of the same mailbox in collections
    like the MessageCollection set where we keep track of listeners.
    """
    class HashableMailbox(object):

        def __init__(self, mbox):
            self.mbox = mbox
            # See #8083, pixelated adaptor introduces conflicts in the usage
            self.mailbox_name = self.mbox.mbox_name + 'IMAP'

        def __hash__(self):
            return hash(self.mailbox_name)

        def __eq__(self, other):
            # NOTE(review): assumes `other` is also a HashableMailbox
            # (it reads other.mbox); comparing against anything else
            # raises AttributeError. Also no __ne__ is defined, which
            # matters under Python 2 -- confirm both are acceptable.
            return self.mailbox_name == other.mbox.mbox_name + 'IMAP'

        def notify_new(self):
            # Forward new-message notifications to the wrapped mailbox.
            self.mbox.notify_new()

    return HashableMailbox(mailbox)
class | (object):
"""
A Soledad-backed IMAP mailbox.
Implements the high-level method needed for the Mailbox interfaces.
The low-level database methods are contained in the generic
MessageCollection class. We receive an instance of it and it is made
accessible in the `collection` attribute.
"""
implements(
imap4.IMailbox,
imap4.IMailboxInfo,
imap4.ISearchableMailbox,
# XXX I think we do not need to implement CloseableMailbox, do we?
# We could remove ourselves from the collectionListener, although I
# think it simply will be garbage collected.
# imap4.ICloseableMailbox
imap4.IMessageCopier)
init_flags = INIT_FLAGS
CMD_MSG = "MESSAGES"
CMD_RECENT = "RECENT"
CMD_UIDNEXT = "UIDNEXT"
CMD_UIDVALIDITY = "UIDVALIDITY"
CMD_UNSEEN = "UNSEEN"
log = Logger()
# TODO we should turn this into a datastructure with limited capacity
_listeners = defaultdict(set)
def __init__(self, collection, rw=1):
    """
    :param collection: instance of MessageCollection
    :type collection: MessageCollection
    :param rw: read-and-write flag for this mailbox
    :type rw: int
    """
    self.rw = rw
    # NOTE(review): _uidvalidity is initialized but never read in the
    # visible code -- possibly leftover state.
    self._uidvalidity = None
    self.collection = collection
    # Register ourselves so the collection notifies us of new mail.
    self.collection.addListener(make_collection_listener(self))
@property
def mbox_name(self):
    # Name of the underlying mailbox, as stored in the collection.
    return self.collection.mbox_name
@property
def listeners(self):
    """
    Returns listeners for this mbox.

    The server itself is a listener to the mailbox.
    so we can notify it (and should!) after changes in flags
    and number of messages.

    :rtype: set
    """
    # One shared set per mailbox name, across all mailbox instances
    # (class-level defaultdict).
    return self._listeners[self.mbox_name]
def get_imap_message(self, message):
    """
    Wrap a collection message into an IMAPMessage.

    :return: a deferred that fires with the IMAPMessage once it is
             ready (IMAPMessage itself fires the deferred).
    :rtype: Deferred
    """
    d = defer.Deferred()
    IMAPMessage(message, store=self.collection.store, d=d)
    return d
# FIXME this grows too crazily when many instances are fired, like
# during imaptest stress testing. Should have a queue of limited size
# instead.
def addListener(self, listener):
    """
    Add a listener to the listeners queue.

    The server adds itself as a listener when there is a SELECT,
    so it can send EXIST commands.

    :param listener: listener to add
    :type listener: an object that implements IMailboxListener
    """
    listeners = self.listeners
    self.log.debug('Adding mailbox listener: %s. Total: %s' % (
        listener, len(listeners)))
    listeners.add(listener)
def removeListener(self, listener):
    """
    Remove a listener from the listeners queue.

    Removing a listener that was never added (or was already removed)
    is a no-op.

    :param listener: listener to remove
    :type listener: an object that implements IMailboxListener
    """
    # discard() instead of remove(): unsubscribing an absent listener
    # should not raise KeyError.
    self.listeners.discard(listener)
def getFlags(self):
    """
    Return the flags defined for this mailbox, falling back to the
    default initial flags when none are stored.

    :returns: flags for this mailbox, as strings
    :rtype: list of str
    """
    stored = self.collection.mbox_wrapper.flags or self.init_flags
    return [str(flag) for flag in stored]
def setFlags(self, flags):
    """
    Sets flags for this mailbox.

    :param flags: a tuple with the flags
    :type flags: tuple of str
    :return: a deferred that fires when the attribute has been stored.
    """
    # XXX this is setting (overriding) old flags.
    # Better pass a mode flag
    leap_assert(isinstance(flags, tuple),
                "flags expected to be a tuple")
    return self.collection.set_mbox_attr("flags", flags)
def getUIDValidity(self):
    """
    Return the unique validity identifier for this mailbox.

    :return: unique validity identifier
    :rtype: int
    """
    # The mailbox creation timestamp doubles as the UIDVALIDITY value.
    return self.collection.get_mbox_attr("created")
def getUID(self, message_number):
    """
    Return the UID of a message in the mailbox

    .. note:: this implementation does not make much sense RIGHT NOW,
              but in the future will be useful to get absolute UIDs
              from message sequence numbers.

    :param message_number: the message sequence number.
    :type message_number: int
    :rtype: int
    :return: the UID of the message.
    """
    # TODO support relative sequences. The (imap) message should
    # receive a sequence number attribute: a deferred is not expected
    # Identity mapping for now: sequence number is treated as the UID.
    return message_number
def getUIDNext(self):
    """
    Return the likely UID for the next message added to this
    mailbox. Currently it returns the higher UID incremented by
    one.

    :return: deferred with int
    :rtype: Deferred
    """
    return self.collection.get_uid_next()
def getMessageCount(self):
    """
    Returns the total count of messages in this mailbox.

    :return: deferred with int
    :rtype: Deferred
    """
    # Delegates to the collection; the count comes back asynchronously.
    return self.collection.count()
def getUnseenCount(self):
    """
    Returns the number of messages with the 'Unseen' flag.

    :return: count of messages flagged `unseen`
    :rtype: int
    """
    # NOTE(review): likely returns a Deferred (see getMessageCount);
    # the documented int rtype may be inaccurate -- confirm.
    return self.collection.count_unseen()
def getRecentCount(self):
    """
    Returns the number of messages with the 'Recent' flag.

    :return: count of messages flagged `recent`
    :rtype: int
    """
    # NOTE(review): likely returns a Deferred (see getMessageCount);
    # the documented int rtype may be inaccurate -- confirm.
    return self.collection.count_recent()
def isWriteable(self):
    """
    Get the read/write status of the mailbox.

    :return: 1 if mailbox is read-writeable, 0 otherwise.
    :rtype: int
    """
    # XXX We don't need to store it in the mbox doc, do we?
    # return int(self.collection.get_mbox_attr('rw'))
    return self.rw
def getHierarchicalDelimiter(self):
    """
    Returns the character used to delimit hierarchies in mailboxes.

    :rtype: str
    """
    return '/'
def requestStatus(self, names):
    """
    Handles a status request by gathering the output of the different
    status commands.

    :param names: a list of strings containing the status commands
    :type names: iter
    :return: a deferred firing with a dict of command name -> value.
    :rtype: Deferred
    """
    maybe = defer.maybeDeferred
    available = (
        (self.CMD_MSG, self.getMessageCount),
        (self.CMD_RECENT, self.getRecentCount),
        (self.CMD_UIDNEXT, self.getUIDNext),
        (self.CMD_UIDVALIDITY, self.getUIDValidity),
        (self.CMD_UNSEEN, self.getUnseenCount),
    )
    # Keep only the commands that were actually asked for, preserving
    # the pairing between command name and its value.
    asked = [(cmd, fun) for cmd, fun in available if cmd in names]
    keys = [cmd for cmd, _ in asked]

    d = defer.gatherResults([maybe(fun) for _, fun in asked])
    d.addCallback(lambda values: dict(zip(keys, values)))
    return d
def addMessage(self, message, flags, date=None):
    """
    Adds a message to this mailbox.

    :param message: the raw message
    :type message: str
    :param flags: flag list
    :type flags: list of str
    :param date: timestamp
    :type date: str, or None
    :return: a deferred that will be triggered with the UID of the added
             message.
    """
    # TODO should raise ReadOnlyMailbox if not rw.
    # TODO have a look at the cases for internal date in the rfc
    # XXX we could treat the message as an IMessage from here
    # TODO -- fast appends should be definitely solved by Blobs.
    # A better solution will probably involve implementing MULTIAPPEND
    # extension or patching imap server to support pipelining.

    # Accept in-memory buffers as well as plain strings.
    if isinstance(message,
                  (cStringIO.OutputType, StringIO.StringIO, io.BytesIO)):
        message = message.getvalue()
    leap_assert_type(message, basestring)

    if flags is None:
        flags = tuple()
    else:
        flags = tuple(str(flag) for flag in flags)

    if date is None:
        # Default the internal date to "now", RFC 2822 formatted.
        date = formatdate(time.time())

    d = self.collection.add_msg(message, flags, date=date)
    d.addCallback(lambda message: message.get_uid())
    d.addErrback(
        lambda failure: self.log.failure('Error while adding msg'))
    return d
def notify_new(self, *args):
    """
    Notify of new messages to all the listeners.

    This will be called indirectly by the underlying collection, that
    will notify this IMAPMailbox whenever there are changes in the
    number of messages in the collection, since we have added ourselves
    to the collection listeners.

    :param args: ignored.
    """
    def cbNotifyNew(result):
        # result is the (exists, recent) tuple from _get_notify_count.
        exists, recent = result
        for listener in self.listeners:
            listener.newMessages(exists, recent)
    d = self._get_notify_count()
    d.addCallback(cbNotifyNew)
    d.addCallback(self.collection.cb_signal_unread_to_ui)
    d.addErrback(lambda failure: self.log.failure('Error while notify'))
def _get_notify_count(self):
    """
    Get message count and recent count for this mailbox.

    :return: a deferred that will fire with a tuple, with number of
             messages and number of recent messages.
    :rtype: Deferred
    """
    # XXX this is way too expensive in cases like multiple APPENDS.
    # We should have a way of keep a cache or do a self-increment for
    # that kind of calls.
    d_exists = defer.maybeDeferred(self.getMessageCount)
    d_recent = defer.maybeDeferred(self.getRecentCount)
    d_list = [d_exists, d_recent]

    def log_num_msg(result):
        # Pass-through logger: leaves the (exists, recent) pair intact.
        exists, recent = tuple(result)
        self.log.debug(
            'NOTIFY (%r): there are %s messages, %s recent' % (
                self.mbox_name, exists, recent))
        return result

    d = defer.gatherResults(d_list)
    d.addCallback(log_num_msg)
    return d
# commands, do not rename methods
def destroy(self):
    """
    Called before this mailbox is permanently deleted.

    Should cleanup resources, and set the \\Noselect flag
    on the mailbox.

    :return: a deferred that fires when the mailbox data is gone.
    :rtype: Deferred
    """
    # XXX this will overwrite all the existing flags
    # should better simply addFlag
    self.setFlags((MessageFlags.NOSELECT_FLAG,))

    def remove_mbox(_):
        # Delete the mailbox wrapper doc, then drop its uid index table.
        uuid = self.collection.mbox_uuid
        d = self.collection.mbox_wrapper.delete(self.collection.store)
        d.addCallback(
            lambda _: self.collection.mbox_indexer.delete_table(uuid))
        return d

    d = self.deleteAllDocs()
    d.addCallback(remove_mbox)
    return d
def expunge(self):
    """
    Remove all messages flagged \\Deleted

    :raise ReadOnlyMailbox: if the mailbox is not writable.
    """
    if self.isWriteable():
        return self.collection.delete_all_flagged()
    raise imap4.ReadOnlyMailbox
def _get_message_fun(self, uid):
    """
    Return the proper method to get a message for this mailbox,
    depending on the passed uid flag.

    :param uid: If true, the IDs specified in the query are UIDs;
                otherwise they are message sequence IDs.
    :type uid: bool
    :rtype: callable
    """
    if uid:
        return self.collection.get_message_by_uid
    return self.collection.get_message_by_sequence_number
def _get_messages_range(self, messages_asked, uid=True):
    """
    Resolve a MessageSet into the collection of ids that exist.

    :param messages_asked: IDs of the messages.
    :type messages_asked: MessageSet
    :param uid: whether the ids are UIDs (True) or sequence numbers.
    :return: a Deferred firing with the resolved ids.
    """
    def get_range(messages_asked):
        return self._filter_msg_seq(messages_asked)

    d = self._bound_seq(messages_asked, uid)
    # NOTE(review): the existence filter is only applied for UID
    # queries; sequence-number queries pass through unfiltered --
    # presumably pending sequence-number support. Confirm.
    if uid:
        d.addCallback(get_range)
    d.addErrback(
        lambda f: self.log.failure('Error getting msg range'))
    return d
def _bound_seq(self, messages_asked, uid):
    """
    Put an upper bound to a messages sequence if this is open.

    An open MessageSet (e.g. "1:*") has no `last` value and cannot be
    iterated; we close it with the mailbox's highest UID (or, for
    sequence numbers, the message count) first.

    :param messages_asked: IDs of the messages.
    :type messages_asked: MessageSet
    :return: a Deferred that will fire with a MessageSet
    """
    def set_last_uid(last_uid):
        messages_asked.last = last_uid
        return messages_asked

    def set_last_seq(all_uid):
        # For sequence numbers the last id equals the message count.
        messages_asked.last = len(all_uid)
        return messages_asked

    if not messages_asked.last:
        try:
            iter(messages_asked)
        except TypeError:
            # looks like we cannot iterate: the set is open-ended, so
            # ask the collection for an upper bound.
            if uid:
                d = self.collection.get_last_uid()
                d.addCallback(set_last_uid)
            else:
                d = self.collection.all_uid_iter()
                d.addCallback(set_last_seq)
            return d
    return defer.succeed(messages_asked)
def _filter_msg_seq(self, messages_asked):
    """
    Filter a message sequence, keeping only the ids that do exist in
    the collection.

    :param messages_asked: IDs of the messages.
    :type messages_asked: MessageSet
    :rtype: set
    """
    # TODO we could pass the asked sequence to the indexer
    # all_uid_iter, and bound the sql query instead.
    def keep_existing(all_msg_uid):
        return set(messages_asked) & set(all_msg_uid)

    d = self.collection.all_uid_iter()
    d.addCallback(keep_existing)
    return d
def fetch(self, messages_asked, uid):
    """
    Retrieve one or more messages in this mailbox.

    from rfc 3501: The data items to be fetched can be either a single
    atom or a parenthesized list.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If true, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: bool
    :rtype: deferred with a generator that yields (msgid, IMAPMessage)
            two-tuples.
    """
    get_msg_fun = self._get_message_fun(uid)
    getimapmsg = self.get_imap_message

    def get_imap_messages_for_range(msg_range):
        def _get_imap_msg(messages):
            d_imapmsg = []
            # just in case we got bad data in here
            for msg in filter(None, messages):
                d_imapmsg.append(getimapmsg(msg))
            return defer.gatherResults(d_imapmsg, consumeErrors=True)

        def _zip_msgid(imap_messages):
            # Pair each asked id with its wrapped message, lazily.
            zipped = zip(
                list(msg_range), imap_messages)
            return (item for item in zipped)

        # XXX not called??
        def _unset_recent(sequence):
            reactor.callLater(0, self.unset_recent_flags, sequence)
            return sequence

        d_msg = []
        for msgid in msg_range:
            # XXX We want cdocs because we "probably" are asked for the
            # body. We should be smarter at do_FETCH and pass a parameter
            # to this method in order not to prefetch cdocs if they're not
            # going to be used.
            d_msg.append(get_msg_fun(msgid, get_cdocs=True))
        d = defer.gatherResults(d_msg, consumeErrors=True)
        d.addCallback(_get_imap_msg)
        d.addCallback(_zip_msgid)
        d.addErrback(
            lambda failure: self.log.error(
                'Error getting msg for range'))
        return d

    d = self._get_messages_range(messages_asked, uid)
    d.addCallback(get_imap_messages_for_range)
    d.addErrback(
        lambda failure: self.log.failure('Error on fetch'))
    return d
def fetch_flags(self, messages_asked, uid):
    """
    Fetch only the flags for a set of messages, emulating just the
    subset of the MessagePart interface needed to answer a FLAGS query.

    Given how LEAP Mail works without local cache, a ``1:*`` FLAGS
    query at session start is common, so fetching all the FLAGS docs
    at once is acceptable.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If 1, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: int
    :return: A tuple of two-tuples of message sequence numbers and
             flagsPart, which is a only a partial implementation of
             MessagePart.
    :rtype: tuple
    """
    # XXX sequence-number support is still missing. The real check
    # would be ``uid == 0``, but muas like mutt and the imap tests
    # choke on it, so we pretend the request is always UID-based.
    is_sequence = False
    if is_sequence:
        raise NotImplementedError(
            "FETCH FLAGS NOT IMPLEMENTED FOR MESSAGE SEQUENCE NUMBERS YET")
    flags_d = defer.Deferred()
    reactor.callLater(0, self._do_fetch_flags, messages_asked, uid, flags_d)
    return flags_d
def _do_fetch_flags(self, messages_asked, uid, d):
    """
    Resolve a FLAGS fetch: collect the flags for every message in the
    requested range and fire ``d`` with a generator of the results.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If 1, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: int
    :param d: deferred whose callback will be called with result.
    :type d: Deferred
    :rtype: A generator that yields two-tuples of message sequence numbers
            and flagsPart
    """
    # Minimal stand-in for imap4's MessagePart: only what a FLAGS
    # query needs (UID and flag list).
    class flagsPart(object):
        def __init__(self, uid, flags):
            self.uid = uid
            self.flags = flags

        def getUID(self):
            return self.uid

        def getFlags(self):
            return map(str, self.flags)

    def pack_flags(result):
        # result is a (uid, flags) two-tuple from the collection.
        _uid, _flags = result
        return _uid, flagsPart(_uid, _flags)

    def get_flags_for_seq(sequence):
        d_all_flags = []
        for msgid in sequence:
            # TODO implement sequence numbers here too
            d_flags_per_uid = self.collection.get_flags_by_uid(msgid)
            d_flags_per_uid.addCallback(pack_flags)
            d_all_flags.append(d_flags_per_uid)
        # NOTE(review): gatherResults without consumeErrors, and no
        # errback ever fires `d` -- if one lookup fails, `d` is never
        # called back. Confirm the caller tolerates a hung deferred.
        gotflags = defer.gatherResults(d_all_flags)
        gotflags.addCallback(get_uid_flag_generator)
        return gotflags

    def get_uid_flag_generator(result):
        # Hand the gathered results to the waiting deferred lazily.
        generator = (item for item in result)
        d.callback(generator)

    d_seq = self._get_messages_range(messages_asked, uid)
    d_seq.addCallback(get_flags_for_seq)
    return d_seq
@defer.inlineCallbacks
def fetch_headers(self, messages_asked, uid):
    """
    A fast method to fetch all headers, tricking just the
    needed subset of the MIME interface that's needed to satisfy
    a generic HEADERS query.

    Given how LEAP Mail is supposed to work without local cache,
    this query is going to be quite common, and also we expect
    it to be in the form 1:* at the beginning of a session, so
    **MAYBE** it's not too bad to fetch all the HEADERS docs at once.

    :param messages_asked: IDs of the messages to retrieve information
                           about
    :type messages_asked: MessageSet
    :param uid: If true, the IDs are UIDs. They are message sequence IDs
                otherwise.
    :type uid: bool
    :return: A tuple of two-tuples of message sequence numbers and
             headersPart, which is a only a partial implementation of
             MessagePart.
    :rtype: tuple
    """
    # TODO implement sequences
    is_sequence = True if uid == 0 else False
    if is_sequence:
        raise NotImplementedError(
            "FETCH HEADERS NOT IMPLEMENTED FOR SEQUENCE NUMBER YET")

    # Minimal stand-in for imap4's MessagePart: only what a HEADERS
    # query needs (UID and a headers dict).
    class headersPart(object):
        def __init__(self, uid, headers):
            self.uid = uid
            self.headers = headers

        def getUID(self):
            return self.uid

        def getHeaders(self, _):
            # NOTE(review): str() on header values assumes they are
            # ascii-safe; non-ascii header values would raise here --
            # confirm what get_headers() returns.
            return dict(
                (str(key), str(value))
                for key, value in
                self.headers.items())

    messages_asked = yield self._bound_seq(messages_asked, uid)
    seq_messg = yield self._filter_msg_seq(messages_asked)
    result = []
    for msgid in seq_messg:
        # Messages are fetched sequentially; header docs are small.
        msg = yield self.collection.get_message_by_uid(msgid)
        headers = headersPart(msgid, msg.get_headers())
        result.append((msgid, headers))
    defer.returnValue(iter(result))
def store(self, messages_asked, flags, mode, uid):
    """
    Set, add or remove flags on one or more messages.

    :param messages_asked: The identifiers of the messages to set the
                           flags on
    :type messages_asked: A MessageSet object with the list of messages
                          requested
    :param flags: The flags to set, unset, or add.
    :type flags: sequence of str
    :param mode: If mode is -1, these flags should be removed from the
                 specified messages. If mode is 1, these flags should be
                 added to the specified messages. If mode is 0, all
                 existing flags should be cleared and these flags should
                 be added.
    :type mode: -1, 0, or 1
    :param uid: If true, the IDs specified in the query are UIDs;
                otherwise they are message sequence IDs.
    :type uid: bool
    :return: A deferred, that will be called with a dict mapping message
             sequence numbers to sequences of str representing the flags
             set on the message after this operation has been performed.
    :rtype: deferred
    :raise ReadOnlyMailbox: Raised if this mailbox is not open for
                            read-write.
    """
    if not self.isWriteable():
        self.log.info('Read only mailbox!')
        raise imap4.ReadOnlyMailbox

    # Schedule the actual flag update for the next reactor turn;
    # _do_store fires this observer with the resulting dict.
    observer = defer.Deferred()
    reactor.callLater(
        0, self._do_store, messages_asked, flags, mode, uid, observer)
    observer.addCallback(self.collection.cb_signal_unread_to_ui)
    observer.addErrback(lambda f: self.log.error('Error on store'))
    return observer
def _do_store(self, messages_asked, flags, mode, uid, observer):
    """
    Helper method, invoke set_flags method in the IMAPMessageCollection.

    See the documentation for the `store` method for the parameters.

    :param observer: a deferred that will be called with the dictionary
                     mapping UIDs to flags after the operation has been
                     done.
    :type observer: deferred
    """
    # TODO we should prevent client from setting Recent flag
    get_msg_fun = self._get_message_fun(uid)
    leap_assert(not isinstance(flags, basestring),
                "flags cannot be a string")
    flags = tuple(flags)

    def set_flags_for_seq(sequence):
        def return_result_dict(list_of_flags):
            # Pair each requested id with the flags it ended up with.
            result = dict(zip(list(sequence), list_of_flags))
            observer.callback(result)
            return result

        d_all_set = []
        for msgid in sequence:
            d = get_msg_fun(msgid)
            d.addCallback(lambda msg: self.collection.update_flags(
                msg, flags, mode))
            d_all_set.append(d)
        # NOTE(review): if any update fails, gatherResults errbacks and
        # `observer` is never fired -- confirm `store`'s errback is the
        # intended handler for that case.
        got_flags_setted = defer.gatherResults(d_all_set)
        got_flags_setted.addCallback(return_result_dict)
        return got_flags_setted

    d_seq = self._get_messages_range(messages_asked, uid)
    d_seq.addCallback(set_flags_for_seq)
    return d_seq
# ISearchableMailbox
def search(self, query, uid):
    """
    Search for messages that meet the given query criteria.

    Warning: this is half-baked, and it might give problems since
    it offers the SearchableInterface.
    We'll be implementing it asap.

    :param query: The search criteria
    :type query: list
    :param uid: If true, the IDs specified in the query are UIDs;
                otherwise they are message sequence IDs.
    :type uid: bool
    :return: A list of message sequence numbers or message UIDs which
             match the search criteria or a C{Deferred} whose callback
             will be invoked with such a list.
    :rtype: C{list} or C{Deferred}
    """
    # TODO see if we can raise w/o interrupting flow
    # :raise IllegalQueryError: Raised when query is not valid.
    # TODO hardcoding for now! -- we'll support generic queries later on
    # but doing a quickfix for avoiding duplicate saves in the draft
    # folder. # See issue #4209
    #
    # Only the HEADER Message-ID form is supported, e.g.:
    #   ['UNDELETED', 'HEADER', 'Message-ID', '<id>@dev.bitmask.net']
    # query[3] is only present when the list has at least four items,
    # so guard with > 3: the previous `len(query) > 2` check raised
    # IndexError on a three-element HEADER query.
    if len(query) > 3:
        if query[1] == 'HEADER' and query[2].lower() == "message-id":
            msgid = str(query[3]).strip()
            self.log.debug('Searching for %s' % (msgid,))
            d = self.collection.get_uid_from_msgid(str(msgid))
            d.addCallback(lambda result: [result])
            return d
    # nothing implemented for any other query
    self.log.warn('Cannot process query: %s' % (query,))
    return []
# IMessageCopier
def copy(self, message):
    """
    Copy the given message object into this mailbox.

    :param message: an IMessage implementor
    :type message: LeapMessage
    :return: a deferred that will be fired with the message
             uid when the copy succeed.
    :rtype: Deferred
    """
    # Delegate straight to the collection, targeting this mailbox uuid.
    return self.collection.copy_msg(
        message.message, self.collection.mbox_uuid)
# convenience fun
def deleteAllDocs(self):
    """
    Delete all docs in this mailbox.

    :return: a deferred that fires when the deletion is done.
    :rtype: Deferred
    """
    # FIXME not implemented
    # NOTE(review): this simply delegates; the FIXME presumably refers
    # to the collection-side implementation -- confirm.
    return self.collection.delete_all_docs()
def unset_recent_flags(self, uid_seq):
    """
    Unset Recent flag for a sequence of UIDs.

    :param uid_seq: the UIDs whose \\Recent flag should be cleared.
    :return: a deferred that fires when the flags have been updated.
    :rtype: Deferred
    """
    # FIXME not implemented
    return self.collection.unset_recent_flags(uid_seq)
def __repr__(self):
    """
    Representation string for this mailbox.
    """
    # NOTE(review): collection.count() appears to return a Deferred
    # elsewhere in this class (see getMessageCount), so the count part
    # of this repr likely renders as a Deferred object, not a number --
    # confirm and fix upstream if so.
    return u"<IMAPMailbox: mbox '%s' (%s)>" % (
        self.mbox_name, self.collection.count())
_INBOX_RE = re.compile(INBOX_NAME, re.IGNORECASE)


def normalize_mailbox(name):
    """
    Return a normalized representation of the mailbox ``name``.

    This method ensures that an eventual initial 'inbox' part of a
    mailbox name is made uppercase.

    :param name: the name of the mailbox
    :type name: unicode
    :rtype: unicode
    """
    # XXX maybe it would make sense to normalize common folders too:
    # trash, sent, drafts, etc...
    if not _INBOX_RE.match(name):
        return name
    # Rebuild the name with the canonical uppercase INBOX prefix.
    tail = name[len(INBOX_NAME):]
    return INBOX_NAME + tail
| IMAPMailbox | identifier_name |
mailbox.py | # *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import os
import io
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
# TODO LIST
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
INIT_FLAGS = (MessageFlags.RECENT_FLAG, MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
"""
Wrap a mailbox in a class that can be hashed according to the mailbox name.
This means that dicts or sets will use this new equality rule, so we won't
collect multiple instances of the same mailbox in collections like the
MessageCollection set where we keep track of listeners.
"""
class HashableMailbox(object):
def __init__(self, mbox):
self.mbox = mbox
# See #8083, pixelated adaptor introduces conflicts in the usage
self.mailbox_name = self.mbox.mbox_name + 'IMAP'
def __hash__(self):
return hash(self.mailbox_name)
def __eq__(self, other):
return self.mailbox_name == other.mbox.mbox_name + 'IMAP'
def notify_new(self):
self.mbox.notify_new()
return HashableMailbox(mailbox)
class IMAPMailbox(object):
"""
A Soledad-backed IMAP mailbox.
Implements the high-level method needed for the Mailbox interfaces.
The low-level database methods are contained in the generic
MessageCollection class. We receive an instance of it and it is made
accessible in the `collection` attribute.
"""
implements(
imap4.IMailbox,
imap4.IMailboxInfo,
imap4.ISearchableMailbox,
# XXX I think we do not need to implement CloseableMailbox, do we?
# We could remove ourselves from the collectionListener, although I
# think it simply will be garbage collected.
# imap4.ICloseableMailbox
imap4.IMessageCopier)
init_flags = INIT_FLAGS
CMD_MSG = "MESSAGES"
CMD_RECENT = "RECENT"
CMD_UIDNEXT = "UIDNEXT"
CMD_UIDVALIDITY = "UIDVALIDITY"
CMD_UNSEEN = "UNSEEN"
log = Logger()
# TODO we should turn this into a datastructure with limited capacity
_listeners = defaultdict(set)
def __init__(self, collection, rw=1):
"""
:param collection: instance of MessageCollection
:type collection: MessageCollection
:param rw: read-and-write flag for this mailbox
:type rw: int
"""
self.rw = rw
self._uidvalidity = None
self.collection = collection
self.collection.addListener(make_collection_listener(self))
@property
def mbox_name(self):
return self.collection.mbox_name
@property
def listeners(self):
"""
Returns listeners for this mbox.
The server itself is a listener to the mailbox.
so we can notify it (and should!) after changes in flags
and number of messages.
:rtype: set
"""
return self._listeners[self.mbox_name]
def get_imap_message(self, message):
d = defer.Deferred()
IMAPMessage(message, store=self.collection.store, d=d)
return d
# FIXME this grows too crazily when many instances are fired, like
# during imaptest stress testing. Should have a queue of limited size
# instead.
def addListener(self, listener):
"""
Add a listener to the listeners queue.
The server adds itself as a listener when there is a SELECT,
so it can send EXIST commands.
:param listener: listener to add
:type listener: an object that implements IMailboxListener
"""
listeners = self.listeners
self.log.debug('Adding mailbox listener: %s. Total: %s' % (
listener, len(listeners)))
listeners.add(listener)
def removeListener(self, listener):
"""
Remove a listener from the listeners queue.
:param listener: listener to remove
:type listener: an object that implements IMailboxListener
"""
self.listeners.remove(listener)
def getFlags(self):
"""
Returns the flags defined for this mailbox.
:returns: tuple of flags for this mailbox
:rtype: tuple of str
"""
flags = self.collection.mbox_wrapper.flags
if not flags:
flags = self.init_flags
flags_str = map(str, flags)
return flags_str
def setFlags(self, flags):
"""
Sets flags for this mailbox.
:param flags: a tuple with the flags
:type flags: tuple of str
"""
# XXX this is setting (overriding) old flags.
# Better pass a mode flag
leap_assert(isinstance(flags, tuple),
"flags expected to be a tuple")
return self.collection.set_mbox_attr("flags", flags)
def getUIDValidity(self):
"""
Return the unique validity identifier for this mailbox.
:return: unique validity identifier
:rtype: int
"""
return self.collection.get_mbox_attr("created")
def getUID(self, message_number):
"""
Return the UID of a message in the mailbox
.. note:: this implementation does not make much sense RIGHT NOW,
but in the future will be useful to get absolute UIDs from
message sequence numbers.
:param message: the message sequence number.
:type message: int
:rtype: int
:return: the UID of the message.
"""
# TODO support relative sequences. The (imap) message should
# receive a sequence number attribute: a deferred is not expected
return message_number
def getUIDNext(self):
"""
Return the likely UID for the next message added to this
mailbox. Currently it returns the higher UID incremented by
one.
:return: deferred with int
:rtype: Deferred
"""
d = self.collection.get_uid_next()
return d
def getMessageCount(self):
"""
Returns the total count of messages in this mailbox.
:return: deferred with int
:rtype: Deferred
""" | Returns the number of messages with the 'Unseen' flag.
:return: count of messages flagged `unseen`
:rtype: int
"""
return self.collection.count_unseen()
def getRecentCount(self):
"""
Returns the number of messages with the 'Recent' flag.
:return: count of messages flagged `recent`
:rtype: int
"""
return self.collection.count_recent()
def isWriteable(self):
"""
Get the read/write status of the mailbox.
:return: 1 if mailbox is read-writeable, 0 otherwise.
:rtype: int
"""
# XXX We don't need to store it in the mbox doc, do we?
# return int(self.collection.get_mbox_attr('rw'))
return self.rw
def getHierarchicalDelimiter(self):
"""
Returns the character used to delimite hierarchies in mailboxes.
:rtype: str
"""
return '/'
def requestStatus(self, names):
"""
Handles a status request by gathering the output of the different
status commands.
:param names: a list of strings containing the status commands
:type names: iter
"""
r = {}
maybe = defer.maybeDeferred
if self.CMD_MSG in names:
r[self.CMD_MSG] = maybe(self.getMessageCount)
if self.CMD_RECENT in names:
r[self.CMD_RECENT] = maybe(self.getRecentCount)
if self.CMD_UIDNEXT in names:
r[self.CMD_UIDNEXT] = maybe(self.getUIDNext)
if self.CMD_UIDVALIDITY in names:
r[self.CMD_UIDVALIDITY] = maybe(self.getUIDValidity)
if self.CMD_UNSEEN in names:
r[self.CMD_UNSEEN] = maybe(self.getUnseenCount)
def as_a_dict(values):
return dict(zip(r.keys(), values))
d = defer.gatherResults(r.values())
d.addCallback(as_a_dict)
return d
def addMessage(self, message, flags, date=None):
"""
Adds a message to this mailbox.
:param message: the raw message
:type message: str
:param flags: flag list
:type flags: list of str
:param date: timestamp
:type date: str, or None
:return: a deferred that will be triggered with the UID of the added
message.
"""
# TODO should raise ReadOnlyMailbox if not rw.
# TODO have a look at the cases for internal date in the rfc
# XXX we could treat the message as an IMessage from here
# TODO -- fast appends should be definitely solved by Blobs.
# A better solution will probably involve implementing MULTIAPPEND
# extension or patching imap server to support pipelining.
if isinstance(message,
(cStringIO.OutputType, StringIO.StringIO, io.BytesIO)):
message = message.getvalue()
leap_assert_type(message, basestring)
if flags is None:
flags = tuple()
else:
flags = tuple(str(flag) for flag in flags)
if date is None:
date = formatdate(time.time())
d = self.collection.add_msg(message, flags, date=date)
d.addCallback(lambda message: message.get_uid())
d.addErrback(
lambda failure: self.log.failure('Error while adding msg'))
return d
def notify_new(self, *args):
"""
Notify of new messages to all the listeners.
This will be called indirectly by the underlying collection, that will
notify this IMAPMailbox whenever there are changes in the number of
messages in the collection, since we have added ourselves to the
collection listeners.
:param args: ignored.
"""
def cbNotifyNew(result):
exists, recent = result
for listener in self.listeners:
listener.newMessages(exists, recent)
d = self._get_notify_count()
d.addCallback(cbNotifyNew)
d.addCallback(self.collection.cb_signal_unread_to_ui)
d.addErrback(lambda failure: self.log.failure('Error while notify'))
def _get_notify_count(self):
"""
Get message count and recent count for this mailbox.
:return: a deferred that will fire with a tuple, with number of
messages and number of recent messages.
:rtype: Deferred
"""
# XXX this is way too expensive in cases like multiple APPENDS.
# We should have a way of keep a cache or do a self-increment for that
# kind of calls.
d_exists = defer.maybeDeferred(self.getMessageCount)
d_recent = defer.maybeDeferred(self.getRecentCount)
d_list = [d_exists, d_recent]
def log_num_msg(result):
exists, recent = tuple(result)
self.log.debug(
'NOTIFY (%r): there are %s messages, %s recent' % (
self.mbox_name, exists, recent))
return result
d = defer.gatherResults(d_list)
d.addCallback(log_num_msg)
return d
# commands, do not rename methods
def destroy(self):
"""
Called before this mailbox is permanently deleted.
Should cleanup resources, and set the \\Noselect flag
on the mailbox.
"""
# XXX this will overwrite all the existing flags
# should better simply addFlag
self.setFlags((MessageFlags.NOSELECT_FLAG,))
def remove_mbox(_):
uuid = self.collection.mbox_uuid
d = self.collection.mbox_wrapper.delete(self.collection.store)
d.addCallback(
lambda _: self.collection.mbox_indexer.delete_table(uuid))
return d
d = self.deleteAllDocs()
d.addCallback(remove_mbox)
return d
def expunge(self):
"""
Remove all messages flagged \\Deleted
"""
if not self.isWriteable():
raise imap4.ReadOnlyMailbox
return self.collection.delete_all_flagged()
def _get_message_fun(self, uid):
"""
Return the proper method to get a message for this mailbox, depending
on the passed uid flag.
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:rtype: callable
"""
get_message_fun = [
self.collection.get_message_by_sequence_number,
self.collection.get_message_by_uid][uid]
return get_message_fun
def _get_messages_range(self, messages_asked, uid=True):
def get_range(messages_asked):
return self._filter_msg_seq(messages_asked)
d = self._bound_seq(messages_asked, uid)
if uid:
d.addCallback(get_range)
d.addErrback(
lambda f: self.log.failure('Error getting msg range'))
return d
def _bound_seq(self, messages_asked, uid):
"""
Put an upper bound to a messages sequence if this is open.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:return: a Deferred that will fire with a MessageSet
"""
def set_last_uid(last_uid):
messages_asked.last = last_uid
return messages_asked
def set_last_seq(all_uid):
messages_asked.last = len(all_uid)
return messages_asked
if not messages_asked.last:
try:
iter(messages_asked)
except TypeError:
# looks like we cannot iterate
if uid:
d = self.collection.get_last_uid()
d.addCallback(set_last_uid)
else:
d = self.collection.all_uid_iter()
d.addCallback(set_last_seq)
return d
return defer.succeed(messages_asked)
def _filter_msg_seq(self, messages_asked):
"""
Filter a message sequence returning only the ones that do exist in the
collection.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:rtype: set
"""
# TODO we could pass the asked sequence to the indexer
# all_uid_iter, and bound the sql query instead.
def filter_by_asked(all_msg_uid):
set_asked = set(messages_asked)
set_exist = set(all_msg_uid)
return set_asked.intersection(set_exist)
d = self.collection.all_uid_iter()
d.addCallback(filter_by_asked)
return d
def fetch(self, messages_asked, uid):
"""
Retrieve one or more messages in this mailbox.
from rfc 3501: The data items to be fetched can be either a single atom
or a parenthesized list.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If true, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: bool
:rtype: deferred with a generator that yields...
"""
get_msg_fun = self._get_message_fun(uid)
getimapmsg = self.get_imap_message
def get_imap_messages_for_range(msg_range):
def _get_imap_msg(messages):
d_imapmsg = []
# just in case we got bad data in here
for msg in filter(None, messages):
d_imapmsg.append(getimapmsg(msg))
return defer.gatherResults(d_imapmsg, consumeErrors=True)
def _zip_msgid(imap_messages):
zipped = zip(
list(msg_range), imap_messages)
return (item for item in zipped)
# XXX not called??
def _unset_recent(sequence):
reactor.callLater(0, self.unset_recent_flags, sequence)
return sequence
d_msg = []
for msgid in msg_range:
# XXX We want cdocs because we "probably" are asked for the
# body. We should be smarter at do_FETCH and pass a parameter
# to this method in order not to prefetch cdocs if they're not
# going to be used.
d_msg.append(get_msg_fun(msgid, get_cdocs=True))
d = defer.gatherResults(d_msg, consumeErrors=True)
d.addCallback(_get_imap_msg)
d.addCallback(_zip_msgid)
d.addErrback(
lambda failure: self.log.error(
'Error getting msg for range'))
return d
d = self._get_messages_range(messages_asked, uid)
d.addCallback(get_imap_messages_for_range)
d.addErrback(
lambda failure: self.log.failure('Error on fetch'))
return d
def fetch_flags(self, messages_asked, uid):
"""
A fast method to fetch all flags, tricking just the
needed subset of the MIME interface that's needed to satisfy
a generic FLAGS query.
Given how LEAP Mail is supposed to work without local cache,
this query is going to be quite common, and also we expect
it to be in the form 1:* at the beginning of a session, so
it's not bad to fetch all the FLAGS docs at once.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If 1, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: int
:return: A tuple of two-tuples of message sequence numbers and
flagsPart, which is a only a partial implementation of
MessagePart.
:rtype: tuple
"""
# is_sequence = True if uid == 0 else False
# XXX FIXME -----------------------------------------------------
# imap/tests, or muas like mutt, it will choke until we implement
# sequence numbers. This is an easy hack meanwhile.
is_sequence = False
# ---------------------------------------------------------------
if is_sequence:
raise NotImplementedError(
"FETCH FLAGS NOT IMPLEMENTED FOR MESSAGE SEQUENCE NUMBERS YET")
d = defer.Deferred()
reactor.callLater(0, self._do_fetch_flags, messages_asked, uid, d)
return d
def _do_fetch_flags(self, messages_asked, uid, d):
"""
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If 1, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: int
:param d: deferred whose callback will be called with result.
:type d: Deferred
:rtype: A generator that yields two-tuples of message sequence numbers
and flagsPart
"""
class flagsPart(object):
def __init__(self, uid, flags):
self.uid = uid
self.flags = flags
def getUID(self):
return self.uid
def getFlags(self):
return map(str, self.flags)
def pack_flags(result):
_uid, _flags = result
return _uid, flagsPart(_uid, _flags)
def get_flags_for_seq(sequence):
d_all_flags = []
for msgid in sequence:
# TODO implement sequence numbers here too
d_flags_per_uid = self.collection.get_flags_by_uid(msgid)
d_flags_per_uid.addCallback(pack_flags)
d_all_flags.append(d_flags_per_uid)
gotflags = defer.gatherResults(d_all_flags)
gotflags.addCallback(get_uid_flag_generator)
return gotflags
def get_uid_flag_generator(result):
generator = (item for item in result)
d.callback(generator)
d_seq = self._get_messages_range(messages_asked, uid)
d_seq.addCallback(get_flags_for_seq)
return d_seq
@defer.inlineCallbacks
def fetch_headers(self, messages_asked, uid):
"""
A fast method to fetch all headers, tricking just the
needed subset of the MIME interface that's needed to satisfy
a generic HEADERS query.
Given how LEAP Mail is supposed to work without local cache,
this query is going to be quite common, and also we expect
it to be in the form 1:* at the beginning of a session, so
**MAYBE** it's not too bad to fetch all the HEADERS docs at once.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If true, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: bool
:return: A tuple of two-tuples of message sequence numbers and
headersPart, which is a only a partial implementation of
MessagePart.
:rtype: tuple
"""
# TODO implement sequences
is_sequence = True if uid == 0 else False
if is_sequence:
raise NotImplementedError(
"FETCH HEADERS NOT IMPLEMENTED FOR SEQUENCE NUMBER YET")
class headersPart(object):
def __init__(self, uid, headers):
self.uid = uid
self.headers = headers
def getUID(self):
return self.uid
def getHeaders(self, _):
return dict(
(str(key), str(value))
for key, value in
self.headers.items())
messages_asked = yield self._bound_seq(messages_asked, uid)
seq_messg = yield self._filter_msg_seq(messages_asked)
result = []
for msgid in seq_messg:
msg = yield self.collection.get_message_by_uid(msgid)
headers = headersPart(msgid, msg.get_headers())
result.append((msgid, headers))
defer.returnValue(iter(result))
def store(self, messages_asked, flags, mode, uid):
"""
Sets the flags of one or more messages.
:param messages: The identifiers of the messages to set the flags
:type messages: A MessageSet object with the list of messages requested
:param flags: The flags to set, unset, or add.
:type flags: sequence of str
:param mode: If mode is -1, these flags should be removed from the
specified messages. If mode is 1, these flags should be
added to the specified messages. If mode is 0, all
existing flags should be cleared and these flags should be
added.
:type mode: -1, 0, or 1
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:return: A deferred, that will be called with a dict mapping message
sequence numbers to sequences of str representing the flags
set on the message after this operation has been performed.
:rtype: deferred
:raise ReadOnlyMailbox: Raised if this mailbox is not open for
read-write.
"""
if not self.isWriteable():
self.log.info('Read only mailbox!')
raise imap4.ReadOnlyMailbox
d = defer.Deferred()
reactor.callLater(0, self._do_store, messages_asked, flags,
mode, uid, d)
d.addCallback(self.collection.cb_signal_unread_to_ui)
d.addErrback(lambda f: self.log.error('Error on store'))
return d
def _do_store(self, messages_asked, flags, mode, uid, observer):
"""
Helper method, invoke set_flags method in the IMAPMessageCollection.
See the documentation for the `store` method for the parameters.
:param observer: a deferred that will be called with the dictionary
mapping UIDs to flags after the operation has been
done.
:type observer: deferred
"""
# TODO we should prevent client from setting Recent flag
get_msg_fun = self._get_message_fun(uid)
leap_assert(not isinstance(flags, basestring),
"flags cannot be a string")
flags = tuple(flags)
def set_flags_for_seq(sequence):
def return_result_dict(list_of_flags):
result = dict(zip(list(sequence), list_of_flags))
observer.callback(result)
return result
d_all_set = []
for msgid in sequence:
d = get_msg_fun(msgid)
d.addCallback(lambda msg: self.collection.update_flags(
msg, flags, mode))
d_all_set.append(d)
got_flags_setted = defer.gatherResults(d_all_set)
got_flags_setted.addCallback(return_result_dict)
return got_flags_setted
d_seq = self._get_messages_range(messages_asked, uid)
d_seq.addCallback(set_flags_for_seq)
return d_seq
# ISearchableMailbox
def search(self, query, uid):
"""
Search for messages that meet the given query criteria.
Warning: this is half-baked, and it might give problems since
it offers the SearchableInterface.
We'll be implementing it asap.
:param query: The search criteria
:type query: list
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:return: A list of message sequence numbers or message UIDs which
match the search criteria or a C{Deferred} whose callback
will be invoked with such a list.
:rtype: C{list} or C{Deferred}
"""
# TODO see if we can raise w/o interrupting flow
# :raise IllegalQueryError: Raised when query is not valid.
# example query:
# ['UNDELETED', 'HEADER', 'Message-ID',
# XXX fixme, does not exist
# '52D44F11.9060107@dev.bitmask.net']
# TODO hardcoding for now! -- we'll support generic queries later on
# but doing a quickfix for avoiding duplicate saves in the draft
# folder. # See issue #4209
if len(query) > 2:
if query[1] == 'HEADER' and query[2].lower() == "message-id":
msgid = str(query[3]).strip()
self.log.debug('Searching for %s' % (msgid,))
d = self.collection.get_uid_from_msgid(str(msgid))
d.addCallback(lambda result: [result])
return d
# nothing implemented for any other query
self.log.warn('Cannot process query: %s' % (query,))
return []
# IMessageCopier
def copy(self, message):
"""
Copy the given message object into this mailbox.
:param message: an IMessage implementor
:type message: LeapMessage
:return: a deferred that will be fired with the message
uid when the copy succeed.
:rtype: Deferred
"""
d = self.collection.copy_msg(
message.message, self.collection.mbox_uuid)
return d
# convenience fun
def deleteAllDocs(self):
"""
Delete all docs in this mailbox
"""
# FIXME not implemented
return self.collection.delete_all_docs()
def unset_recent_flags(self, uid_seq):
"""
Unset Recent flag for a sequence of UIDs.
"""
# FIXME not implemented
return self.collection.unset_recent_flags(uid_seq)
def __repr__(self):
"""
Representation string for this mailbox.
"""
return u"<IMAPMailbox: mbox '%s' (%s)>" % (
self.mbox_name, self.collection.count())
_INBOX_RE = re.compile(INBOX_NAME, re.IGNORECASE)
def normalize_mailbox(name):
"""
Return a normalized representation of the mailbox ``name``.
This method ensures that an eventual initial 'inbox' part of a
mailbox name is made uppercase.
:param name: the name of the mailbox
:type name: unicode
:rtype: unicode
"""
# XXX maybe it would make sense to normalize common folders too:
# trash, sent, drafts, etc...
if _INBOX_RE.match(name):
# ensure inital INBOX is uppercase
return INBOX_NAME + name[len(INBOX_NAME):]
return name | return self.collection.count()
def getUnseenCount(self):
""" | random_line_split |
mailbox.py | # *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import os
import io
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
# TODO LIST
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
INIT_FLAGS = (MessageFlags.RECENT_FLAG, MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
"""
Wrap a mailbox in a class that can be hashed according to the mailbox name.
This means that dicts or sets will use this new equality rule, so we won't
collect multiple instances of the same mailbox in collections like the
MessageCollection set where we keep track of listeners.
"""
class HashableMailbox(object):
def __init__(self, mbox):
self.mbox = mbox
# See #8083, pixelated adaptor introduces conflicts in the usage
self.mailbox_name = self.mbox.mbox_name + 'IMAP'
def __hash__(self):
return hash(self.mailbox_name)
def __eq__(self, other):
return self.mailbox_name == other.mbox.mbox_name + 'IMAP'
def notify_new(self):
self.mbox.notify_new()
return HashableMailbox(mailbox)
class IMAPMailbox(object):
"""
A Soledad-backed IMAP mailbox.
Implements the high-level method needed for the Mailbox interfaces.
The low-level database methods are contained in the generic
MessageCollection class. We receive an instance of it and it is made
accessible in the `collection` attribute.
"""
implements(
imap4.IMailbox,
imap4.IMailboxInfo,
imap4.ISearchableMailbox,
# XXX I think we do not need to implement CloseableMailbox, do we?
# We could remove ourselves from the collectionListener, although I
# think it simply will be garbage collected.
# imap4.ICloseableMailbox
imap4.IMessageCopier)
init_flags = INIT_FLAGS
CMD_MSG = "MESSAGES"
CMD_RECENT = "RECENT"
CMD_UIDNEXT = "UIDNEXT"
CMD_UIDVALIDITY = "UIDVALIDITY"
CMD_UNSEEN = "UNSEEN"
log = Logger()
# TODO we should turn this into a datastructure with limited capacity
_listeners = defaultdict(set)
def __init__(self, collection, rw=1):
"""
:param collection: instance of MessageCollection
:type collection: MessageCollection
:param rw: read-and-write flag for this mailbox
:type rw: int
"""
self.rw = rw
self._uidvalidity = None
self.collection = collection
self.collection.addListener(make_collection_listener(self))
@property
def mbox_name(self):
return self.collection.mbox_name
@property
def listeners(self):
"""
Returns listeners for this mbox.
The server itself is a listener to the mailbox.
so we can notify it (and should!) after changes in flags
and number of messages.
:rtype: set
"""
return self._listeners[self.mbox_name]
def get_imap_message(self, message):
d = defer.Deferred()
IMAPMessage(message, store=self.collection.store, d=d)
return d
# FIXME this grows too crazily when many instances are fired, like
# during imaptest stress testing. Should have a queue of limited size
# instead.
def addListener(self, listener):
"""
Add a listener to the listeners queue.
The server adds itself as a listener when there is a SELECT,
so it can send EXIST commands.
:param listener: listener to add
:type listener: an object that implements IMailboxListener
"""
listeners = self.listeners
self.log.debug('Adding mailbox listener: %s. Total: %s' % (
listener, len(listeners)))
listeners.add(listener)
def removeListener(self, listener):
"""
Remove a listener from the listeners queue.
:param listener: listener to remove
:type listener: an object that implements IMailboxListener
"""
self.listeners.remove(listener)
def getFlags(self):
"""
Returns the flags defined for this mailbox.
:returns: tuple of flags for this mailbox
:rtype: tuple of str
"""
flags = self.collection.mbox_wrapper.flags
if not flags:
flags = self.init_flags
flags_str = map(str, flags)
return flags_str
def setFlags(self, flags):
"""
Sets flags for this mailbox.
:param flags: a tuple with the flags
:type flags: tuple of str
"""
# XXX this is setting (overriding) old flags.
# Better pass a mode flag
leap_assert(isinstance(flags, tuple),
"flags expected to be a tuple")
return self.collection.set_mbox_attr("flags", flags)
def getUIDValidity(self):
"""
Return the unique validity identifier for this mailbox.
:return: unique validity identifier
:rtype: int
"""
return self.collection.get_mbox_attr("created")
def getUID(self, message_number):
"""
Return the UID of a message in the mailbox
.. note:: this implementation does not make much sense RIGHT NOW,
but in the future will be useful to get absolute UIDs from
message sequence numbers.
:param message: the message sequence number.
:type message: int
:rtype: int
:return: the UID of the message.
"""
# TODO support relative sequences. The (imap) message should
# receive a sequence number attribute: a deferred is not expected
return message_number
def getUIDNext(self):
"""
Return the likely UID for the next message added to this
mailbox. Currently it returns the higher UID incremented by
one.
:return: deferred with int
:rtype: Deferred
"""
d = self.collection.get_uid_next()
return d
def getMessageCount(self):
"""
Returns the total count of messages in this mailbox.
:return: deferred with int
:rtype: Deferred
"""
return self.collection.count()
def getUnseenCount(self):
"""
Returns the number of messages with the 'Unseen' flag.
:return: count of messages flagged `unseen`
:rtype: int
"""
return self.collection.count_unseen()
def getRecentCount(self):
"""
Returns the number of messages with the 'Recent' flag.
:return: count of messages flagged `recent`
:rtype: int
"""
return self.collection.count_recent()
def isWriteable(self):
"""
Get the read/write status of the mailbox.
:return: 1 if mailbox is read-writeable, 0 otherwise.
:rtype: int
"""
# XXX We don't need to store it in the mbox doc, do we?
# return int(self.collection.get_mbox_attr('rw'))
return self.rw
def getHierarchicalDelimiter(self):
"""
Returns the character used to delimite hierarchies in mailboxes.
:rtype: str
"""
return '/'
def requestStatus(self, names):
"""
Handles a status request by gathering the output of the different
status commands.
:param names: a list of strings containing the status commands
:type names: iter
"""
r = {}
maybe = defer.maybeDeferred
if self.CMD_MSG in names:
r[self.CMD_MSG] = maybe(self.getMessageCount)
if self.CMD_RECENT in names:
r[self.CMD_RECENT] = maybe(self.getRecentCount)
if self.CMD_UIDNEXT in names:
r[self.CMD_UIDNEXT] = maybe(self.getUIDNext)
if self.CMD_UIDVALIDITY in names:
r[self.CMD_UIDVALIDITY] = maybe(self.getUIDValidity)
if self.CMD_UNSEEN in names:
r[self.CMD_UNSEEN] = maybe(self.getUnseenCount)
def as_a_dict(values):
return dict(zip(r.keys(), values))
d = defer.gatherResults(r.values())
d.addCallback(as_a_dict)
return d
def addMessage(self, message, flags, date=None):
"""
Adds a message to this mailbox.
:param message: the raw message
:type message: str
:param flags: flag list
:type flags: list of str
:param date: timestamp
:type date: str, or None
:return: a deferred that will be triggered with the UID of the added
message.
"""
# TODO should raise ReadOnlyMailbox if not rw.
# TODO have a look at the cases for internal date in the rfc
# XXX we could treat the message as an IMessage from here
# TODO -- fast appends should be definitely solved by Blobs.
# A better solution will probably involve implementing MULTIAPPEND
# extension or patching imap server to support pipelining.
if isinstance(message,
(cStringIO.OutputType, StringIO.StringIO, io.BytesIO)):
message = message.getvalue()
leap_assert_type(message, basestring)
if flags is None:
flags = tuple()
else:
flags = tuple(str(flag) for flag in flags)
if date is None:
date = formatdate(time.time())
d = self.collection.add_msg(message, flags, date=date)
d.addCallback(lambda message: message.get_uid())
d.addErrback(
lambda failure: self.log.failure('Error while adding msg'))
return d
def notify_new(self, *args):
"""
Notify of new messages to all the listeners.
This will be called indirectly by the underlying collection, that will
notify this IMAPMailbox whenever there are changes in the number of
messages in the collection, since we have added ourselves to the
collection listeners.
:param args: ignored.
"""
def cbNotifyNew(result):
exists, recent = result
for listener in self.listeners:
listener.newMessages(exists, recent)
d = self._get_notify_count()
d.addCallback(cbNotifyNew)
d.addCallback(self.collection.cb_signal_unread_to_ui)
d.addErrback(lambda failure: self.log.failure('Error while notify'))
def _get_notify_count(self):
"""
Get message count and recent count for this mailbox.
:return: a deferred that will fire with a tuple, with number of
messages and number of recent messages.
:rtype: Deferred
"""
# XXX this is way too expensive in cases like multiple APPENDS.
# We should have a way of keep a cache or do a self-increment for that
# kind of calls.
d_exists = defer.maybeDeferred(self.getMessageCount)
d_recent = defer.maybeDeferred(self.getRecentCount)
d_list = [d_exists, d_recent]
def log_num_msg(result):
exists, recent = tuple(result)
self.log.debug(
'NOTIFY (%r): there are %s messages, %s recent' % (
self.mbox_name, exists, recent))
return result
d = defer.gatherResults(d_list)
d.addCallback(log_num_msg)
return d
# commands, do not rename methods
def destroy(self):
"""
Called before this mailbox is permanently deleted.
Should cleanup resources, and set the \\Noselect flag
on the mailbox.
"""
# XXX this will overwrite all the existing flags
# should better simply addFlag
self.setFlags((MessageFlags.NOSELECT_FLAG,))
def remove_mbox(_):
uuid = self.collection.mbox_uuid
d = self.collection.mbox_wrapper.delete(self.collection.store)
d.addCallback(
lambda _: self.collection.mbox_indexer.delete_table(uuid))
return d
d = self.deleteAllDocs()
d.addCallback(remove_mbox)
return d
def expunge(self):
"""
Remove all messages flagged \\Deleted
"""
if not self.isWriteable():
raise imap4.ReadOnlyMailbox
return self.collection.delete_all_flagged()
def _get_message_fun(self, uid):
"""
Return the proper method to get a message for this mailbox, depending
on the passed uid flag.
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:rtype: callable
"""
get_message_fun = [
self.collection.get_message_by_sequence_number,
self.collection.get_message_by_uid][uid]
return get_message_fun
def _get_messages_range(self, messages_asked, uid=True):
def get_range(messages_asked):
return self._filter_msg_seq(messages_asked)
d = self._bound_seq(messages_asked, uid)
if uid:
d.addCallback(get_range)
d.addErrback(
lambda f: self.log.failure('Error getting msg range'))
return d
def _bound_seq(self, messages_asked, uid):
"""
Put an upper bound to a messages sequence if this is open.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:return: a Deferred that will fire with a MessageSet
"""
def set_last_uid(last_uid):
messages_asked.last = last_uid
return messages_asked
def set_last_seq(all_uid):
messages_asked.last = len(all_uid)
return messages_asked
if not messages_asked.last:
try:
iter(messages_asked)
except TypeError:
# looks like we cannot iterate
if uid:
d = self.collection.get_last_uid()
d.addCallback(set_last_uid)
else:
d = self.collection.all_uid_iter()
d.addCallback(set_last_seq)
return d
return defer.succeed(messages_asked)
def _filter_msg_seq(self, messages_asked):
"""
Filter a message sequence returning only the ones that do exist in the
collection.
:param messages_asked: IDs of the messages.
:type messages_asked: MessageSet
:rtype: set
"""
# TODO we could pass the asked sequence to the indexer
# all_uid_iter, and bound the sql query instead.
def filter_by_asked(all_msg_uid):
set_asked = set(messages_asked)
set_exist = set(all_msg_uid)
return set_asked.intersection(set_exist)
d = self.collection.all_uid_iter()
d.addCallback(filter_by_asked)
return d
def fetch(self, messages_asked, uid):
"""
Retrieve one or more messages in this mailbox.
from rfc 3501: The data items to be fetched can be either a single atom
or a parenthesized list.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If true, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: bool
:rtype: deferred with a generator that yields...
"""
get_msg_fun = self._get_message_fun(uid)
getimapmsg = self.get_imap_message
def get_imap_messages_for_range(msg_range):
def _get_imap_msg(messages):
d_imapmsg = []
# just in case we got bad data in here
for msg in filter(None, messages):
d_imapmsg.append(getimapmsg(msg))
return defer.gatherResults(d_imapmsg, consumeErrors=True)
def _zip_msgid(imap_messages):
zipped = zip(
list(msg_range), imap_messages)
return (item for item in zipped)
# XXX not called??
def _unset_recent(sequence):
reactor.callLater(0, self.unset_recent_flags, sequence)
return sequence
d_msg = []
for msgid in msg_range:
# XXX We want cdocs because we "probably" are asked for the
# body. We should be smarter at do_FETCH and pass a parameter
# to this method in order not to prefetch cdocs if they're not
# going to be used.
d_msg.append(get_msg_fun(msgid, get_cdocs=True))
d = defer.gatherResults(d_msg, consumeErrors=True)
d.addCallback(_get_imap_msg)
d.addCallback(_zip_msgid)
d.addErrback(
lambda failure: self.log.error(
'Error getting msg for range'))
return d
d = self._get_messages_range(messages_asked, uid)
d.addCallback(get_imap_messages_for_range)
d.addErrback(
lambda failure: self.log.failure('Error on fetch'))
return d
def fetch_flags(self, messages_asked, uid):
"""
A fast method to fetch all flags, tricking just the
needed subset of the MIME interface that's needed to satisfy
a generic FLAGS query.
Given how LEAP Mail is supposed to work without local cache,
this query is going to be quite common, and also we expect
it to be in the form 1:* at the beginning of a session, so
it's not bad to fetch all the FLAGS docs at once.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If 1, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: int
:return: A tuple of two-tuples of message sequence numbers and
flagsPart, which is a only a partial implementation of
MessagePart.
:rtype: tuple
"""
# is_sequence = True if uid == 0 else False
# XXX FIXME -----------------------------------------------------
# imap/tests, or muas like mutt, it will choke until we implement
# sequence numbers. This is an easy hack meanwhile.
is_sequence = False
# ---------------------------------------------------------------
if is_sequence:
raise NotImplementedError(
"FETCH FLAGS NOT IMPLEMENTED FOR MESSAGE SEQUENCE NUMBERS YET")
d = defer.Deferred()
reactor.callLater(0, self._do_fetch_flags, messages_asked, uid, d)
return d
def _do_fetch_flags(self, messages_asked, uid, d):
"""
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If 1, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: int
:param d: deferred whose callback will be called with result.
:type d: Deferred
:rtype: A generator that yields two-tuples of message sequence numbers
and flagsPart
"""
class flagsPart(object):
def __init__(self, uid, flags):
self.uid = uid
self.flags = flags
def getUID(self):
return self.uid
def getFlags(self):
return map(str, self.flags)
def pack_flags(result):
_uid, _flags = result
return _uid, flagsPart(_uid, _flags)
def get_flags_for_seq(sequence):
d_all_flags = []
for msgid in sequence:
# TODO implement sequence numbers here too
d_flags_per_uid = self.collection.get_flags_by_uid(msgid)
d_flags_per_uid.addCallback(pack_flags)
d_all_flags.append(d_flags_per_uid)
gotflags = defer.gatherResults(d_all_flags)
gotflags.addCallback(get_uid_flag_generator)
return gotflags
def get_uid_flag_generator(result):
generator = (item for item in result)
d.callback(generator)
d_seq = self._get_messages_range(messages_asked, uid)
d_seq.addCallback(get_flags_for_seq)
return d_seq
@defer.inlineCallbacks
def fetch_headers(self, messages_asked, uid):
"""
A fast method to fetch all headers, tricking just the
needed subset of the MIME interface that's needed to satisfy
a generic HEADERS query.
Given how LEAP Mail is supposed to work without local cache,
this query is going to be quite common, and also we expect
it to be in the form 1:* at the beginning of a session, so
**MAYBE** it's not too bad to fetch all the HEADERS docs at once.
:param messages_asked: IDs of the messages to retrieve information
about
:type messages_asked: MessageSet
:param uid: If true, the IDs are UIDs. They are message sequence IDs
otherwise.
:type uid: bool
:return: A tuple of two-tuples of message sequence numbers and
headersPart, which is a only a partial implementation of
MessagePart.
:rtype: tuple
"""
# TODO implement sequences
is_sequence = True if uid == 0 else False
if is_sequence:
raise NotImplementedError(
"FETCH HEADERS NOT IMPLEMENTED FOR SEQUENCE NUMBER YET")
class headersPart(object):
def __init__(self, uid, headers):
self.uid = uid
self.headers = headers
def getUID(self):
return self.uid
def getHeaders(self, _):
return dict(
(str(key), str(value))
for key, value in
self.headers.items())
messages_asked = yield self._bound_seq(messages_asked, uid)
seq_messg = yield self._filter_msg_seq(messages_asked)
result = []
for msgid in seq_messg:
msg = yield self.collection.get_message_by_uid(msgid)
headers = headersPart(msgid, msg.get_headers())
result.append((msgid, headers))
defer.returnValue(iter(result))
def store(self, messages_asked, flags, mode, uid):
|
def _do_store(self, messages_asked, flags, mode, uid, observer):
"""
Helper method, invoke set_flags method in the IMAPMessageCollection.
See the documentation for the `store` method for the parameters.
:param observer: a deferred that will be called with the dictionary
mapping UIDs to flags after the operation has been
done.
:type observer: deferred
"""
# TODO we should prevent client from setting Recent flag
get_msg_fun = self._get_message_fun(uid)
leap_assert(not isinstance(flags, basestring),
"flags cannot be a string")
flags = tuple(flags)
def set_flags_for_seq(sequence):
def return_result_dict(list_of_flags):
result = dict(zip(list(sequence), list_of_flags))
observer.callback(result)
return result
d_all_set = []
for msgid in sequence:
d = get_msg_fun(msgid)
d.addCallback(lambda msg: self.collection.update_flags(
msg, flags, mode))
d_all_set.append(d)
got_flags_setted = defer.gatherResults(d_all_set)
got_flags_setted.addCallback(return_result_dict)
return got_flags_setted
d_seq = self._get_messages_range(messages_asked, uid)
d_seq.addCallback(set_flags_for_seq)
return d_seq
# ISearchableMailbox
def search(self, query, uid):
"""
Search for messages that meet the given query criteria.
Warning: this is half-baked, and it might give problems since
it offers the SearchableInterface.
We'll be implementing it asap.
:param query: The search criteria
:type query: list
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:return: A list of message sequence numbers or message UIDs which
match the search criteria or a C{Deferred} whose callback
will be invoked with such a list.
:rtype: C{list} or C{Deferred}
"""
# TODO see if we can raise w/o interrupting flow
# :raise IllegalQueryError: Raised when query is not valid.
# example query:
# ['UNDELETED', 'HEADER', 'Message-ID',
# XXX fixme, does not exist
# '52D44F11.9060107@dev.bitmask.net']
# TODO hardcoding for now! -- we'll support generic queries later on
# but doing a quickfix for avoiding duplicate saves in the draft
# folder. # See issue #4209
if len(query) > 2:
if query[1] == 'HEADER' and query[2].lower() == "message-id":
msgid = str(query[3]).strip()
self.log.debug('Searching for %s' % (msgid,))
d = self.collection.get_uid_from_msgid(str(msgid))
d.addCallback(lambda result: [result])
return d
# nothing implemented for any other query
self.log.warn('Cannot process query: %s' % (query,))
return []
# IMessageCopier
def copy(self, message):
"""
Copy the given message object into this mailbox.
:param message: an IMessage implementor
:type message: LeapMessage
:return: a deferred that will be fired with the message
uid when the copy succeed.
:rtype: Deferred
"""
d = self.collection.copy_msg(
message.message, self.collection.mbox_uuid)
return d
# convenience fun
def deleteAllDocs(self):
"""
Delete all docs in this mailbox
"""
# FIXME not implemented
return self.collection.delete_all_docs()
def unset_recent_flags(self, uid_seq):
"""
Unset Recent flag for a sequence of UIDs.
"""
# FIXME not implemented
return self.collection.unset_recent_flags(uid_seq)
def __repr__(self):
"""
Representation string for this mailbox.
"""
return u"<IMAPMailbox: mbox '%s' (%s)>" % (
self.mbox_name, self.collection.count())
_INBOX_RE = re.compile(INBOX_NAME, re.IGNORECASE)
def normalize_mailbox(name):
"""
Return a normalized representation of the mailbox ``name``.
This method ensures that an eventual initial 'inbox' part of a
mailbox name is made uppercase.
:param name: the name of the mailbox
:type name: unicode
:rtype: unicode
"""
# XXX maybe it would make sense to normalize common folders too:
# trash, sent, drafts, etc...
if _INBOX_RE.match(name):
# ensure inital INBOX is uppercase
return INBOX_NAME + name[len(INBOX_NAME):]
return name
| """
Sets the flags of one or more messages.
:param messages: The identifiers of the messages to set the flags
:type messages: A MessageSet object with the list of messages requested
:param flags: The flags to set, unset, or add.
:type flags: sequence of str
:param mode: If mode is -1, these flags should be removed from the
specified messages. If mode is 1, these flags should be
added to the specified messages. If mode is 0, all
existing flags should be cleared and these flags should be
added.
:type mode: -1, 0, or 1
:param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
:type uid: bool
:return: A deferred, that will be called with a dict mapping message
sequence numbers to sequences of str representing the flags
set on the message after this operation has been performed.
:rtype: deferred
:raise ReadOnlyMailbox: Raised if this mailbox is not open for
read-write.
"""
if not self.isWriteable():
self.log.info('Read only mailbox!')
raise imap4.ReadOnlyMailbox
d = defer.Deferred()
reactor.callLater(0, self._do_store, messages_asked, flags,
mode, uid, d)
d.addCallback(self.collection.cb_signal_unread_to_ui)
d.addErrback(lambda f: self.log.error('Error on store'))
return d | identifier_body |
namespaced-enum-glob-import-no-impls.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod m2 {
pub enum Foo {
A,
B(isize),
C { a: isize },
}
impl Foo {
pub fn foo() {}
pub fn bar(&self) {}
}
}
mod m {
pub use m2::Foo::*;
}
pub fn main() {
use m2::Foo::*;
foo(); //~ ERROR unresolved name `foo`
m::foo(); //~ ERROR unresolved name `m::foo`
bar(); //~ ERROR unresolved name `bar` | } | m::bar(); //~ ERROR unresolved name `m::bar` | random_line_split |
namespaced-enum-glob-import-no-impls.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod m2 {
pub enum Foo {
A,
B(isize),
C { a: isize },
}
impl Foo {
pub fn | () {}
pub fn bar(&self) {}
}
}
mod m {
pub use m2::Foo::*;
}
pub fn main() {
use m2::Foo::*;
foo(); //~ ERROR unresolved name `foo`
m::foo(); //~ ERROR unresolved name `m::foo`
bar(); //~ ERROR unresolved name `bar`
m::bar(); //~ ERROR unresolved name `m::bar`
}
| foo | identifier_name |
dashboard.js | console.log(URL_FILE_SERVER);
var lastID = 0;
var getLastId = function() {
return lastID;
};
var loadNextFile = function() { | url: URL_FILE_SERVER + getLastId(),
dataType: 'json',
method: "GET",
success: function(result) {
console.log(result)
lastID = parseInt(result.lastId);
$('#conteudo').append(result.html);
if ($("#conteudo .imagem").length <= 10) {
loadNextFile();
}
},
error: function(e) {
console.log("ERROR:");
console.log(e);
}
})
};
$(function() {
if ($("#conteudo .imagem").length <= 1) {
loadNextFile();
}
var win = $(window);
// Each time the user scrolls
win.scroll(function() {
// End of the document reached?
if ($(document).height() - win.height() == win.scrollTop()) {
//$('#loading').show();
console.log("SCOLL");
loadNextFile();
}
});
}); | console.log(URL_FILE_SERVER + lastID);
$.ajax({ | random_line_split |
dashboard.js | console.log(URL_FILE_SERVER);
var lastID = 0;
var getLastId = function() {
return lastID;
};
var loadNextFile = function() {
console.log(URL_FILE_SERVER + lastID);
$.ajax({
url: URL_FILE_SERVER + getLastId(),
dataType: 'json',
method: "GET",
success: function(result) {
console.log(result)
lastID = parseInt(result.lastId);
$('#conteudo').append(result.html);
if ($("#conteudo .imagem").length <= 10) |
},
error: function(e) {
console.log("ERROR:");
console.log(e);
}
})
};
$(function() {
if ($("#conteudo .imagem").length <= 1) {
loadNextFile();
}
var win = $(window);
// Each time the user scrolls
win.scroll(function() {
// End of the document reached?
if ($(document).height() - win.height() == win.scrollTop()) {
//$('#loading').show();
console.log("SCOLL");
loadNextFile();
}
});
}); | {
loadNextFile();
} | conditional_block |
plugin.py | import StringIO
class Plugin(object):
| ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def PlugIntoApp(cls, app):
pass
@classmethod
def GenerateHTML(cls, root_url="/"):
out = StringIO.StringIO()
for js_file in cls.JS_FILES:
js_file = js_file.lstrip("/")
out.write('<script src="%s%s"></script>\n' % (root_url, js_file))
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue() | identifier_body | |
plugin.py | import StringIO
class Plugin(object):
ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def PlugIntoApp(cls, app):
pass
@classmethod
def GenerateHTML(cls, root_url="/"):
out = StringIO.StringIO()
for js_file in cls.JS_FILES:
|
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue()
| js_file = js_file.lstrip("/")
out.write('<script src="%s%s"></script>\n' % (root_url, js_file)) | conditional_block |
plugin.py | import StringIO
class Plugin(object):
ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def PlugIntoApp(cls, app):
pass
@classmethod | for js_file in cls.JS_FILES:
js_file = js_file.lstrip("/")
out.write('<script src="%s%s"></script>\n' % (root_url, js_file))
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue() | def GenerateHTML(cls, root_url="/"):
out = StringIO.StringIO() | random_line_split |
plugin.py | import StringIO
class Plugin(object):
ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def | (cls, app):
pass
@classmethod
def GenerateHTML(cls, root_url="/"):
out = StringIO.StringIO()
for js_file in cls.JS_FILES:
js_file = js_file.lstrip("/")
out.write('<script src="%s%s"></script>\n' % (root_url, js_file))
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue()
| PlugIntoApp | identifier_name |
Row.ts | import Cell, { ISerializedCell } from './Cell';
import { MODE } from '../store/types';
export interface ISerializedRow {
active: boolean;
cells: ISerializedCell[];
index: number;
}
export default class Row {
private cells: Cell[];
private index: number;
private active: boolean;
private constructor(previous?: Row) {
this.index = previous ? previous.index : -1;
this.active = previous ? previous.active : false;
this.cells = previous ? previous.cells : [];
}
static create(data: [number, boolean][], index: number): Row {
const row = new Row();
row.cells = [...data.map(([value, given], i) => Cell.create(value, index, i + 1, given))];
row.index = index;
return row;
}
static deserialize(data: ISerializedRow): Row {
const row = new Row();
row.active = data.active;
row.index = data.index;
row.cells = data.cells.map(Cell.deserialize);
return row;
}
public getCells(): Cell[] {
return this.cells;
}
public validate(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.validate());
return row;
}
public getIndex(): number {
return this.index;
}
public toggleCell(index: number, column: number): Row {
if (this.index !== index && !this.active) {
return this;
}
const row = new Row(this);
if (this.active) {
if (this.index === index) {
row.cells = this.cells.map(c => {
return c.setActive(c.isActive() ? false : c.getColumn() === column);
});
} else {
row.active = false;
row.cells = this.cells.map(c => (c.isActive() ? c.setActive(false) : c));
}
} else if (this.index === index) {
row.active = true;
row.cells = this.cells.map(c => (c.getColumn() === column ? c.setActive(true) : c));
}
return row;
}
public isActive(): boolean {
return this.active;
}
public setDigit(digit: number, mode: MODE): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.setDigit(digit, mode));
return row;
}
public removeDigit(): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.removeDigit());
return row;
}
public | (): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.clearNotes());
return row;
}
}
| clearCandidates | identifier_name |
Row.ts | import Cell, { ISerializedCell } from './Cell';
import { MODE } from '../store/types';
export interface ISerializedRow {
active: boolean;
cells: ISerializedCell[];
index: number;
}
export default class Row {
private cells: Cell[];
private index: number;
private active: boolean;
private constructor(previous?: Row) {
this.index = previous ? previous.index : -1;
this.active = previous ? previous.active : false;
this.cells = previous ? previous.cells : [];
}
static create(data: [number, boolean][], index: number): Row {
const row = new Row();
row.cells = [...data.map(([value, given], i) => Cell.create(value, index, i + 1, given))];
row.index = index;
return row;
}
static deserialize(data: ISerializedRow): Row {
const row = new Row();
row.active = data.active;
row.index = data.index;
row.cells = data.cells.map(Cell.deserialize);
return row;
}
public getCells(): Cell[] {
return this.cells;
}
public validate(): Row |
public getIndex(): number {
return this.index;
}
public toggleCell(index: number, column: number): Row {
if (this.index !== index && !this.active) {
return this;
}
const row = new Row(this);
if (this.active) {
if (this.index === index) {
row.cells = this.cells.map(c => {
return c.setActive(c.isActive() ? false : c.getColumn() === column);
});
} else {
row.active = false;
row.cells = this.cells.map(c => (c.isActive() ? c.setActive(false) : c));
}
} else if (this.index === index) {
row.active = true;
row.cells = this.cells.map(c => (c.getColumn() === column ? c.setActive(true) : c));
}
return row;
}
public isActive(): boolean {
return this.active;
}
public setDigit(digit: number, mode: MODE): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.setDigit(digit, mode));
return row;
}
public removeDigit(): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.removeDigit());
return row;
}
public clearCandidates(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.clearNotes());
return row;
}
}
| {
const row = new Row(this);
row.cells = this.cells.map(c => c.validate());
return row;
} | identifier_body |
Row.ts | import Cell, { ISerializedCell } from './Cell';
import { MODE } from '../store/types';
export interface ISerializedRow {
active: boolean;
cells: ISerializedCell[];
index: number;
}
export default class Row {
private cells: Cell[];
private index: number;
private active: boolean;
private constructor(previous?: Row) {
this.index = previous ? previous.index : -1;
this.active = previous ? previous.active : false;
this.cells = previous ? previous.cells : [];
}
static create(data: [number, boolean][], index: number): Row {
const row = new Row();
row.cells = [...data.map(([value, given], i) => Cell.create(value, index, i + 1, given))];
row.index = index;
return row;
}
static deserialize(data: ISerializedRow): Row {
const row = new Row();
row.active = data.active;
row.index = data.index;
row.cells = data.cells.map(Cell.deserialize);
return row;
}
public getCells(): Cell[] {
return this.cells;
}
public validate(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.validate());
return row;
}
public getIndex(): number {
return this.index;
}
public toggleCell(index: number, column: number): Row {
if (this.index !== index && !this.active) {
return this;
}
const row = new Row(this);
if (this.active) {
if (this.index === index) {
row.cells = this.cells.map(c => {
return c.setActive(c.isActive() ? false : c.getColumn() === column);
});
} else {
row.active = false;
row.cells = this.cells.map(c => (c.isActive() ? c.setActive(false) : c));
}
} else if (this.index === index) {
row.active = true;
row.cells = this.cells.map(c => (c.getColumn() === column ? c.setActive(true) : c));
}
return row;
}
public isActive(): boolean {
return this.active;
}
public setDigit(digit: number, mode: MODE): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.setDigit(digit, mode));
return row;
}
public removeDigit(): Row { | if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.removeDigit());
return row;
}
public clearCandidates(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.clearNotes());
return row;
}
} | random_line_split | |
Row.ts | import Cell, { ISerializedCell } from './Cell';
import { MODE } from '../store/types';
export interface ISerializedRow {
active: boolean;
cells: ISerializedCell[];
index: number;
}
export default class Row {
private cells: Cell[];
private index: number;
private active: boolean;
private constructor(previous?: Row) {
this.index = previous ? previous.index : -1;
this.active = previous ? previous.active : false;
this.cells = previous ? previous.cells : [];
}
static create(data: [number, boolean][], index: number): Row {
const row = new Row();
row.cells = [...data.map(([value, given], i) => Cell.create(value, index, i + 1, given))];
row.index = index;
return row;
}
static deserialize(data: ISerializedRow): Row {
const row = new Row();
row.active = data.active;
row.index = data.index;
row.cells = data.cells.map(Cell.deserialize);
return row;
}
public getCells(): Cell[] {
return this.cells;
}
public validate(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.validate());
return row;
}
public getIndex(): number {
return this.index;
}
public toggleCell(index: number, column: number): Row {
if (this.index !== index && !this.active) {
return this;
}
const row = new Row(this);
if (this.active) {
if (this.index === index) {
row.cells = this.cells.map(c => {
return c.setActive(c.isActive() ? false : c.getColumn() === column);
});
} else {
row.active = false;
row.cells = this.cells.map(c => (c.isActive() ? c.setActive(false) : c));
}
} else if (this.index === index) |
return row;
}
public isActive(): boolean {
return this.active;
}
public setDigit(digit: number, mode: MODE): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.setDigit(digit, mode));
return row;
}
public removeDigit(): Row {
if (!this.active) {
return this;
}
const row = new Row(this);
row.cells = this.cells.map(c => c.removeDigit());
return row;
}
public clearCandidates(): Row {
const row = new Row(this);
row.cells = this.cells.map(c => c.clearNotes());
return row;
}
}
| {
row.active = true;
row.cells = this.cells.map(c => (c.getColumn() === column ? c.setActive(true) : c));
} | conditional_block |
fortios_api_firewall_ippool.py | #!/usr/bin/python
#
# Ansible module for managing Fortigate devices via API
# (c) 2017, Will Wagner <willwagner602@gmail.com> and Eugene Opredelennov <eoprede@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: fortios_api_firewall_ippool
extends_documentation_fragment: fortios_api_doc
version_added: "2.4"
short_description: Manages Firewall IP pool configuration.
description:
- Manages Firewall IP pool configuration.
author:
- Will Wagner (@willwagner602)
Eugene Opredelennov (@eoprede)
notes:
- Tested against Fortigate v5.4.5 VM
options:
ippool:
description:
- Full list of IP pools to be applied to the Firewall. Note that any ip pool not present in the list will be DELETED.
required: true
suboptions:
name:
description:
- Name of the IP pool (must be unique)
required: true
type:
description:
- Type of the IP pool
required: true
startip:
description:
- Start IP of the pool
required: true
endip:
description:
- End IP of the pool
required: true
'''
EXAMPLES = '''
---
name: set firewall IP pool
fortios_api_firewall_ippool:
conn_params:
fortigate_username: admin
fortigate_password: test
fortigate_ip: 1.2.3.4
port: 10080
verify: false
secure: false
proxies:
http: socks5://127.0.0.1:9000
ippool:
- name: test_pool
type: one-to-one
startip: 1.2.3.4
endip: 1.2.3.4
- name: test_pool_overload
type: overload
startip: 2.2.3.4
endip: 2.2.3.5
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module and sent to the device for changes
returned: always
type: list
existing:
description: k/v pairs of existing configuration
returned: always
type: list
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: list
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
'''
from ansible.module_utils.fortios_api import API
system_global_api_args = {
'endpoint': 'cmdb/firewall/ippool',
'list_identifier': 'ippool',
'object_identifier': 'name',
'default_ignore_params': []
}
def main():
|
if __name__ == "__main__":
main()
| forti_api = API(system_global_api_args)
forti_api.apply_configuration_to_endpoint() | identifier_body |
fortios_api_firewall_ippool.py | #!/usr/bin/python
#
# Ansible module for managing Fortigate devices via API
# (c) 2017, Will Wagner <willwagner602@gmail.com> and Eugene Opredelennov <eoprede@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: fortios_api_firewall_ippool
extends_documentation_fragment: fortios_api_doc
version_added: "2.4"
short_description: Manages Firewall IP pool configuration.
description:
- Manages Firewall IP pool configuration.
author:
- Will Wagner (@willwagner602)
Eugene Opredelennov (@eoprede)
notes:
- Tested against Fortigate v5.4.5 VM
options:
ippool:
description:
- Full list of IP pools to be applied to the Firewall. Note that any ip pool not present in the list will be DELETED.
required: true
suboptions:
name:
description:
- Name of the IP pool (must be unique)
required: true
type:
description:
- Type of the IP pool
required: true
startip:
description:
- Start IP of the pool
required: true
endip:
description:
- End IP of the pool
required: true
'''
EXAMPLES = '''
---
name: set firewall IP pool
fortios_api_firewall_ippool:
conn_params:
fortigate_username: admin
fortigate_password: test
fortigate_ip: 1.2.3.4
port: 10080
verify: false
secure: false
proxies:
http: socks5://127.0.0.1:9000
ippool:
- name: test_pool
type: one-to-one
startip: 1.2.3.4
endip: 1.2.3.4
- name: test_pool_overload
type: overload
startip: 2.2.3.4
endip: 2.2.3.5
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module and sent to the device for changes
returned: always
type: list
existing:
description: k/v pairs of existing configuration
returned: always
type: list
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: list
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
'''
from ansible.module_utils.fortios_api import API
system_global_api_args = {
'endpoint': 'cmdb/firewall/ippool',
'list_identifier': 'ippool',
'object_identifier': 'name',
'default_ignore_params': []
}
def main():
forti_api = API(system_global_api_args)
forti_api.apply_configuration_to_endpoint()
if __name__ == "__main__":
| main() | conditional_block | |
fortios_api_firewall_ippool.py | #!/usr/bin/python
#
# Ansible module for managing Fortigate devices via API
# (c) 2017, Will Wagner <willwagner602@gmail.com> and Eugene Opredelennov <eoprede@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: fortios_api_firewall_ippool
extends_documentation_fragment: fortios_api_doc
version_added: "2.4"
short_description: Manages Firewall IP pool configuration.
description:
- Manages Firewall IP pool configuration.
author:
- Will Wagner (@willwagner602)
Eugene Opredelennov (@eoprede)
notes:
- Tested against Fortigate v5.4.5 VM
options:
ippool:
description:
- Full list of IP pools to be applied to the Firewall. Note that any ip pool not present in the list will be DELETED.
required: true
suboptions:
name:
description:
- Name of the IP pool (must be unique)
required: true
type:
description:
- Type of the IP pool
required: true
startip:
description:
- Start IP of the pool
required: true
endip:
description:
- End IP of the pool
required: true
'''
EXAMPLES = '''
---
name: set firewall IP pool
fortios_api_firewall_ippool:
conn_params:
fortigate_username: admin
fortigate_password: test
fortigate_ip: 1.2.3.4
port: 10080
verify: false
secure: false
proxies:
http: socks5://127.0.0.1:9000
ippool:
- name: test_pool
type: one-to-one
startip: 1.2.3.4
endip: 1.2.3.4
- name: test_pool_overload
type: overload
startip: 2.2.3.4
endip: 2.2.3.5
'''
RETURN = '''
proposed: | description: k/v pairs of parameters passed into module and sent to the device for changes
returned: always
type: list
existing:
description: k/v pairs of existing configuration
returned: always
type: list
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: list
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
'''
from ansible.module_utils.fortios_api import API
system_global_api_args = {
'endpoint': 'cmdb/firewall/ippool',
'list_identifier': 'ippool',
'object_identifier': 'name',
'default_ignore_params': []
}
def main():
forti_api = API(system_global_api_args)
forti_api.apply_configuration_to_endpoint()
if __name__ == "__main__":
main() | random_line_split | |
fortios_api_firewall_ippool.py | #!/usr/bin/python
#
# Ansible module for managing Fortigate devices via API
# (c) 2017, Will Wagner <willwagner602@gmail.com> and Eugene Opredelennov <eoprede@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: fortios_api_firewall_ippool
extends_documentation_fragment: fortios_api_doc
version_added: "2.4"
short_description: Manages Firewall IP pool configuration.
description:
- Manages Firewall IP pool configuration.
author:
- Will Wagner (@willwagner602)
Eugene Opredelennov (@eoprede)
notes:
- Tested against Fortigate v5.4.5 VM
options:
ippool:
description:
- Full list of IP pools to be applied to the Firewall. Note that any ip pool not present in the list will be DELETED.
required: true
suboptions:
name:
description:
- Name of the IP pool (must be unique)
required: true
type:
description:
- Type of the IP pool
required: true
startip:
description:
- Start IP of the pool
required: true
endip:
description:
- End IP of the pool
required: true
'''
EXAMPLES = '''
---
name: set firewall IP pool
fortios_api_firewall_ippool:
conn_params:
fortigate_username: admin
fortigate_password: test
fortigate_ip: 1.2.3.4
port: 10080
verify: false
secure: false
proxies:
http: socks5://127.0.0.1:9000
ippool:
- name: test_pool
type: one-to-one
startip: 1.2.3.4
endip: 1.2.3.4
- name: test_pool_overload
type: overload
startip: 2.2.3.4
endip: 2.2.3.5
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module and sent to the device for changes
returned: always
type: list
existing:
description: k/v pairs of existing configuration
returned: always
type: list
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: list
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
'''
from ansible.module_utils.fortios_api import API
system_global_api_args = {
'endpoint': 'cmdb/firewall/ippool',
'list_identifier': 'ippool',
'object_identifier': 'name',
'default_ignore_params': []
}
def | ():
forti_api = API(system_global_api_args)
forti_api.apply_configuration_to_endpoint()
if __name__ == "__main__":
main()
| main | identifier_name |
BugIcon.tsx | /*
* SonarQube
* Copyright (C) 2009-2019 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import * as React from 'react';
import Icon, { IconProps } from './Icon';
export default function | ({ className, fill = 'currentColor', size }: IconProps) {
return (
<Icon className={className} size={size}>
<path
d="M8.01 10.9885h1v-5h-1v5zm3-2h1.265l.46.771.775-.543-.733-1.228H11.01v-.316l2-2.343v-2.341h-1v1.972l-1 1.172v-1.144h-.029c-.101-.826-.658-1.52-1.436-1.853l1.472-1.349-.676-.736-1.831 1.678-1.831-1.678-.676.736 1.472 1.349c-.778.333-1.335 1.027-1.436 1.853H6.01v1.144l-1-1.172v-1.972h-1v2.341l2 2.343v.316H4.243l-.733 1.228.775.543.46-.771H6.01v.287l-2 1.912v2.801h1v-2.374l1.003-.959c.018 1.289 1.07 2.333 2.363 2.333h.768c.741 0 1.418-.347 1.767-.907.258-.411.304-.887.16-1.365l.939.898v2.374h1v-2.801l-2-1.912v-.287z"
fillRule="evenodd"
style={{ fill }}
/>
</Icon>
);
}
| BugIcon | identifier_name |
BugIcon.tsx | /*
* SonarQube
* Copyright (C) 2009-2019 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import * as React from 'react';
import Icon, { IconProps } from './Icon';
export default function BugIcon({ className, fill = 'currentColor', size }: IconProps) {
return (
<Icon className={className} size={size}>
<path
d="M8.01 10.9885h1v-5h-1v5zm3-2h1.265l.46.771.775-.543-.733-1.228H11.01v-.316l2-2.343v-2.341h-1v1.972l-1 1.172v-1.144h-.029c-.101-.826-.658-1.52-1.436-1.853l1.472-1.349-.676-.736-1.831 1.678-1.831-1.678-.676.736 1.472 1.349c-.778.333-1.335 1.027-1.436 1.853H6.01v1.144l-1-1.172v-1.972h-1v2.341l2 2.343v.316H4.243l-.733 1.228.775.543.46-.771H6.01v.287l-2 1.912v2.801h1v-2.374l1.003-.959c.018 1.289 1.07 2.333 2.363 2.333h.768c.741 0 1.418-.347 1.767-.907.258-.411.304-.887.16-1.365l.939.898v2.374h1v-2.801l-2-1.912v-.287z"
fillRule="evenodd"
style={{ fill }}
/>
</Icon>
);
} | * version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of | random_line_split |
BugIcon.tsx | /*
* SonarQube
* Copyright (C) 2009-2019 SonarSource SA
* mailto:info AT sonarsource DOT com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
import * as React from 'react';
import Icon, { IconProps } from './Icon';
export default function BugIcon({ className, fill = 'currentColor', size }: IconProps) | {
return (
<Icon className={className} size={size}>
<path
d="M8.01 10.9885h1v-5h-1v5zm3-2h1.265l.46.771.775-.543-.733-1.228H11.01v-.316l2-2.343v-2.341h-1v1.972l-1 1.172v-1.144h-.029c-.101-.826-.658-1.52-1.436-1.853l1.472-1.349-.676-.736-1.831 1.678-1.831-1.678-.676.736 1.472 1.349c-.778.333-1.335 1.027-1.436 1.853H6.01v1.144l-1-1.172v-1.972h-1v2.341l2 2.343v.316H4.243l-.733 1.228.775.543.46-.771H6.01v.287l-2 1.912v2.801h1v-2.374l1.003-.959c.018 1.289 1.07 2.333 2.363 2.333h.768c.741 0 1.418-.347 1.767-.907.258-.411.304-.887.16-1.365l.939.898v2.374h1v-2.801l-2-1.912v-.287z"
fillRule="evenodd"
style={{ fill }}
/>
</Icon>
);
} | identifier_body | |
model.js | /*
* Copyright (C) 2013 Huub de Beer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
var model = function(name, config) {
"use strict";
var _model = {name: name},
_appendix = {};
// ## Data invariant and initialization
//
// This model describes a dynamic phenomenon in terms of changing
// quantities over time.
//
//
// This description starts at `T_START` milliseconds
// (ms), defaulting to 0 ms and ends at `T_END` ms. If no end is specified
// it is assumed that the phenomenon does not end or is still ongoing in
// the real world (RW). The phenomenon's change is tracked by "measuring"
// the changing quantities at consecutive moments in time. These moments
// are `T_STEP` apart, defaulting to 1 ms, and are tracked by order
// number.
var T_START = config.time.start || 0,
T_END = config.time.end || Infinity,
T_STEP = config.time.step || 1;
function set_end(seconds) {
T_END = seconds*1000;
}
_model.set_end = set_end;
// To translate from a moment's order number to its corresponding time in
// ms and vice versa, two helper functions are defined, `time_to_moment`
// and `moment_to_time`, as well as a shorthand name for these two helper
// functions, respectively, `t2m` and `m2t`.
_model.time_to_moment = function(time) {
return Math.floor(time / T_STEP);
};
var t2m = _model.time_to_moment;
_model.moment_to_time = function(moment) {
return moment * T_STEP;
};
var m2t = _model.moment_to_time;
// When I use "measured" I mean to denote that the values of the
// quantities describing the phenomenon have been captured, computed,
// downloaded, measured, or otherwise obtained. This `model` function is
// intended to be applicable for describing purely theoretical models of a
// phenomenon as well as real-time measurements of a phenomenon.
//
// "Measuring" a moment is left to the `measure_moment` function. Each
// model has to (re)implement this function to specify the relationship
// between the phenomenon's quantities of interest at each moment during
// the phenomenon.
_model.measure_moment = function(moment) {
// to be implemented in an object implementing model
};
// The model has the following data invariant:
//
// (∀m: 0 ≤ m ≤ |`moments`|: `moment_computed`(`moments`[m]))
//
// stating that the phenomenon has been described quantitatively for all
// moments. These "measurements" are stored in a list of `moments` and can
// be accessed through a moment's order number.
var moments = [];
_model.get_moment = function(moment) {
return moments[moment];
};
_model.number_of_moments = function() {
return moments.length;
};
// A moment can only be inspected if it already has been "measured".
// Following the data invariant, a moment has been measured when its order
// number is smaller or equal to the number of measured moments.
_model.moment_measured = function(moment) {
return (moment <= (moments.length - 1));
};
// Furthermore, the current moment of interest, or `now`, points to an
// already "measured" moment during the phenomenon's duration. Hence, the
// data invariant is extended as follows:
//
// `t2m`(`T_START`) ≤ `now` ≤ `t2m`(`T_END`) → `moment_computed`(`now`)
var now;
// To ensure this data invariant, `now` is set to a moment before the
// phenomenon started.
now = t2m(T_START) - 1;
// ## Inspecting and running a model
// Inspection through registerd views
var views = [];
var update_views = function() {
var update_view = function(view) {
view.update(_model.name);
};
views.forEach(update_view);
};
_model.update_views = update_views;
var update_all_views = function() {
var update_view = function(view) {
if (view.update_all) {
view.update_all();
} else {
view.update(_model.name);
}
};
views.forEach(update_view);
}; |
_model.register = function(view) {
var view_found = views.indexOf(view);
if (view_found === -1) {
views.push(view);
views.forEach(function(v) { if(v.update_all) v.update_all();});
}
};
_model.get_views_of_type = function(view_type) {
return views.filter(function(v) {
return v.type === view_type;
});
};
_model.unregister = function(view) {
if (arguments.length === 0) {
var unregister = function(view) {
view.unregister(_model.name);
};
views.forEach(unregister);
} else {
var view_found = views.indexOf(view);
if (view_found !== -1) {
views.slice(view_found, 1);
}
}
};
// As a model can be inspected repeatedly, as is one
// of the reasons to model a phenomenon using a computer, we introduce a
// `reset` function to resets `now` to a moment before the phenomenon
// started.
_model.reset = function() {
now = t2m(T_START) - 1;
_model.step();
update_views();
};
// Once a model has been started, the current moment will be measured as
// well as all moments before since the start. These moments can be
// inspected.
//
_model.has_started = function() {
return now >= 0;
};
// The `step` function will advance `now` to the next moment if the end of
// the phenomenon has not been reached yet. If that moment has not been
// "measured" earlier, "measure" it now.
_model.step = function(do_not_update_views) {
if (m2t(now) + T_STEP <= T_END) {
now++;
if (!_model.moment_measured(now)) {
var moment = _model.measure_moment(now);
moment._time_ = m2t(now);
moments.push(moment);
}
}
if (!do_not_update_views) {
update_views();
}
return now;
};
// If the phenomenon is a finite process or the "measuring" process cannot
// go further `T_END` will have a value that is not `Infinity`.
_model.can_finish = function() {
return Math.abs(T_END) !== Infinity;
};
// To inspect the whole phenomenon at once or inspect the last moment,
// `finish`ing the model will ensure that all moments during the
// phenomenon have been "measured".
_model.finish = function() {
var DO_NOT_UPDATE_VIEWS = true;
if (_model.can_finish()) {
while ((moments.length - 1) < t2m(T_END)) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
}
now = moments.length - 1;
_model.update_views();
return now;
};
// We call the model finished if the current moment, or `now`, is the
// phenomenon's last moment.
_model.is_finished = function() {
return _model.can_finish() && m2t(now) >= T_END;
};
function reset_model() {
moments = [];
_model.action("reset").callback(_model)();
// _model.reset();
}
_model.reset_model = reset_model;
/**
* ## Actions on the model
*
*/
_model.actions = {};
_model.add_action = function( action ) {
_model.actions[action.name] = action;
_model.actions[action.name].install = function() {
return action.callback(_model);
};
};
if (config.actions) {
var add_action = function(action_name) {
_model.add_action(config.actions[action_name]);
};
Object.keys(config.actions).forEach(add_action);
}
_model.action = function( action_name ) {
if (_model.actions[action_name]) {
return _model.actions[action_name];
}
};
_model.remove_action = function( action ) {
if (_model.actions[action.name]) {
delete _model.actions[action.name];
}
};
_model.disable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = false;
}
};
_model.enable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = true;
}
};
_model.toggle_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled =
!_model.action[action_name].enabled;
}
};
// ## Coordinating quantities
//
// All quantities that describe the phenomenon being modeled change in
// coordination with time's change. Add the model's time as a quantity to
// the list with quantities. To allow people to model time as part of
// their model, for example to describe the phenomenon accelerated, the
// internal time is added as quantity `_time_` and, as a result, "_time_"
// is not allowed as a quantity name.
_model.quantities = config.quantities || {};
_model.quantities._time_ = {
hidden: true,
minimum: T_START,
maximum: T_END,
value: m2t(now),
stepsize: T_STEP,
unit: "ms",
label: "internal time",
monotone: true
};
_model.get_minimum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var minima = {},
add_minimum = function(quantity) {
minima[quantity] = parseFloat(_model.quantities[quantity].minimum);
};
Object.keys(_model.quantities).forEach(add_minimum);
return minima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].minimum);
}
};
_model.get_maximum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var maxima = {},
add_maximum = function(quantity) {
maxima[quantity] = parseFloat(_model.quantities[quantity].maximum);
};
Object.keys(_model.quantities).forEach(add_maximum);
return maxima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].maximum);
}
};
_model.find_moment = function(quantity, value, EPSILON) {
if (moments.length === 0) {
// no moment are measured yet, so there is nothing to be found
return -1;
} else {
var val = _appendix.quantity_value(quantity);
// pre: quantity is monotone
// determine if it is increasing or decreasing
// determine type of monotone
//
// As the first moment has been measured and we do know the
// minimum of this quantity, type of monotone follows.
var start = val(0),
INCREASING = (start !== _model.get_maximum(quantity));
// Use a stupid linear search to find the moment that approaches the
// value best
var m = 0,
n = moments.length - 1,
lowerbound,
upperbound;
if (INCREASING) {
lowerbound = function(moment) {
return val(moment) < value;
};
upperbound = function(moment) {
return val(moment) > value;
};
} else {
lowerbound = function(moment) {
return val(moment) > value;
};
upperbound = function(moment) {
return val(moment) < value;
};
}
// Increasing "function", meaning
//
// (∀m: 0 ≤ m < |`moments`|: `val`(m) <= `val`(m+1))
//
// Therefore,
//
// (∃m, n: 0 ≤ m < n ≤ |`moments`|:
// `val`(m) ≤ value ≤ `val`(n) ⋀
// (∀p: m < p < n: `val`(p) = value))
//
// `find_moment` finds those moments m and n and returns the
// one closest to value or, when even close, the last moment
// decreasing is reverse.
while (lowerbound(m)) {
m++;
if (m>n) {
//
return -1;
}
}
return m;
//m--;
/*
while (upperbound(n)) {
n--;
if (n<m) {
return -1;
}
}
//n++;
return (Math.abs(val(n)-value) < Math.abs(val(m)-value))?n:m;
*/
}
};
_model.get = function(quantity) {
if (now < 0) {
return undefined;
} else {
return moments[now][quantity];
}
};
_model.set = function(quantity, value) {
var q = _model.quantities[quantity];
if (value < parseFloat(q.minimum)) {
value = parseFloat(q.minimum);
} else if (value > parseFloat(q.maximum)) {
value = parseFloat(q.maximum);
}
// q.minimum ≤ value ≤ q.maximum
// has value already been "measured"?
// As some quantities can have the same value more often, there are
// potentially many moments that fit the bill. There can be an unknown
// amount of moments that aren't measured as well.
//
// However, some quantities will be strictly increasing or decreasing
// and no value will appear twice. For example, the internal time will
// only increase. Those quantities with property `monotone`
// `true`, only one value will be searched for
var approx = _appendix.approximates(),
moment = -1;
if (q.monotone) {
moment = _model.find_moment(quantity, value);
if (moment === -1) {
// not yet "measured"
var DO_NOT_UPDATE_VIEWS = true;
_model.step(DO_NOT_UPDATE_VIEWS);
// THIS DOES WORK ONLY FOR INCREASING QUANTITIES. CHANGE THIS
// ALTER WITH FIND FUNCTION !!!!
while((moments[now][quantity] < value) && !_model.is_finished()) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
} else {
now = moment;
}
update_views();
return moments[now];
}
};
_model.data = function() {
return moments.slice(0, now + 1);
};
_model.current_moment = function(moment_only) {
if (moment_only) {
return now;
} else {
return moments[now];
}
};
_model.graphs_shown = {
tailpoints: false,
line: false,
arrows: false
};
_model.show_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function show_this_graph(g) {
switch(kind) {
case "line":
g.show_line(_model.name);
break;
case "tailpoints":
g.show_tailpoints(_model.name);
break;
case "arrows":
g.show_arrows(_model.name);
break;
}
}
graphs.forEach(show_this_graph);
_model.graphs_shown[kind] = true;
};
_model.hide_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function hide_this_graph(g) {
switch(kind) {
case "line":
g.hide_line(_model.name);
break;
case "tailpoints":
g.hide_tailpoints(_model.name);
break;
case "arrows":
g.hide_arrows(_model.name);
break;
}
}
graphs.forEach(hide_this_graph);
_model.graphs_shown[kind] = false;
};
_model.graph_is_shown = function(kind) {
return _model.graphs_shown[kind];
};
// ## _appendix H: helper functions
_appendix.approximates = function(epsilon) {
var EPSILON = epsilon || 0.001,
fn = function(a, b) {
return Math.abs(a - b) <= EPSILON;
};
fn.EPSILON = EPSILON;
return fn;
};
_appendix.quantity_value = function(quantity) {
return function(moment) {
return moments[moment][quantity];
};
};
var step = (config.step_size || T_STEP)*5 ;
function step_size(size) {
if (arguments.length === 1) {
step = size;
}
return step;
}
_model.step_size = step_size;
function random_color() {
var hexes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'],
colors = [],
i = 0;
while (i < 6) {
colors.push(hexes[Math.round(Math.random()*(hexes.length - 1))]);
i++;
}
return "#"+ colors.join("");
}
var color = random_color();
_model.color = function(c) {
if (arguments.length === 1) {
if (c === "random") {
color = random_color();
} else {
color = c;
}
}
return color;
};
return _model;
};
module.exports = model; | _model.update_all_views = update_all_views; | random_line_split |
model.js | /*
* Copyright (C) 2013 Huub de Beer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
var model = function(name, config) {
"use strict";
var _model = {name: name},
_appendix = {};
// ## Data invariant and initialization
//
// This model describes a dynamic phenomenon in terms of changing
// quantities over time.
//
//
// This description starts at `T_START` milliseconds
// (ms), defaulting to 0 ms and ends at `T_END` ms. If no end is specified
// it is assumed that the phenomenon does not end or is still ongoing in
// the real world (RW). The phenomenon's change is tracked by "measuring"
// the changing quantities at consecutive moments in time. These moments
// are `T_STEP` apart, defaulting to 1 ms, and are tracked by order
// number.
var T_START = config.time.start || 0,
T_END = config.time.end || Infinity,
T_STEP = config.time.step || 1;
function set_end(seconds) {
T_END = seconds*1000;
}
_model.set_end = set_end;
// To translate from a moment's order number to its corresponding time in
// ms and vice versa, two helper functions are defined, `time_to_moment`
// and `moment_to_time`, as well as a shorthand name for these two helper
// functions, respectively, `t2m` and `m2t`.
_model.time_to_moment = function(time) {
return Math.floor(time / T_STEP);
};
var t2m = _model.time_to_moment;
_model.moment_to_time = function(moment) {
return moment * T_STEP;
};
var m2t = _model.moment_to_time;
// When I use "measured" I mean to denote that the values of the
// quantities describing the phenomenon have been captured, computed,
// downloaded, measured, or otherwise obtained. This `model` function is
// intended to be applicable for describing purely theoretical models of a
// phenomenon as well as real-time measurements of a phenomenon.
//
// "Measuring" a moment is left to the `measure_moment` function. Each
// model has to (re)implement this function to specify the relationship
// between the phenomenon's quantities of interest at each moment during
// the phenomenon.
_model.measure_moment = function(moment) {
// to be implemented in an object implementing model
};
// The model has the following data invariant:
//
// (∀m: 0 ≤ m ≤ |`moments`|: `moment_computed`(`moments`[m]))
//
// stating that the phenomenon has been described quantitatively for all
// moments. These "measurements" are stored in a list of `moments` and can
// be accessed through a moment's order number.
var moments = [];
_model.get_moment = function(moment) {
return moments[moment];
};
_model.number_of_moments = function() {
return moments.length;
};
// A moment can only be inspected if it already has been "measured".
// Following the data invariant, a moment has been measured when its order
// number is smaller or equal to the number of measured moments.
_model.moment_measured = function(moment) {
return (moment <= (moments.length - 1));
};
// Furthermore, the current moment of interest, or `now`, points to an
// already "measured" moment during the phenomenon's duration. Hence, the
// data invariant is extended as follows:
//
// `t2m`(`T_START`) ≤ `now` ≤ `t2m`(`T_END`) → `moment_computed`(`now`)
var now;
// To ensure this data invariant, `now` is set to a moment before the
// phenomenon started.
now = t2m(T_START) - 1;
// ## Inspecting and running a model
// Inspection through registerd views
var views = [];
var update_views = function() {
var update_view = function(view) {
view.update(_model.name);
};
views.forEach(update_view);
};
_model.update_views = update_views;
var update_all_views = function() {
var update_view = function(view) {
if (view.update_all) {
view.update_all();
} else {
view.update(_model.name);
}
};
views.forEach(update_view);
};
_model.update_all_views = update_all_views;
_model.register = function(view) {
var view_found = views.indexOf(view);
if (view_found === -1) {
views.push(view);
views.forEach(function(v) { if(v.update_all) v.update_all();});
}
};
_model.get_views_of_type = function(view_type) {
return views.filter(function(v) {
return v.type === view_type;
});
};
_model.unregister = function(view) {
if (arguments.length === 0) {
var unregister = function(view) {
view.unregister(_model.name);
};
views.forEach(unregister);
} else {
var view_found = views.indexOf(view);
if (view_found !== -1) {
views.slice(view_found, 1);
}
}
};
// As a model can be inspected repeatedly, as is one
// of the reasons to model a phenomenon using a computer, we introduce a
// `reset` function to resets `now` to a moment before the phenomenon
// started.
_model.reset = function() {
now = t2m(T_START) - 1;
_model.step();
update_views();
};
// Once a model has been started, the current moment will be measured as
// well as all moments before since the start. These moments can be
// inspected.
//
_model.has_started = function() {
return now >= 0;
};
// The `step` function will advance `now` to the next moment if the end of
// the phenomenon has not been reached yet. If that moment has not been
// "measured" earlier, "measure" it now.
_model.step = function(do_not_update_views) {
if (m2t(now) + T_STEP <= T_END) {
now++;
if (!_model.moment_measured(now)) {
var moment = _model.measure_moment(now);
moment._time_ = m2t(now);
moments.push(moment);
}
}
if (!do_not_update_views) {
update_views();
}
return now;
};
// If the phenomenon is a finite process or the "measuring" process cannot
// go further `T_END` will have a value that is not `Infinity`.
_model.can_finish = function() {
return Math.abs(T_END) !== Infinity;
};
// To inspect the whole phenomenon at once or inspect the last moment,
// `finish`ing the model will ensure that all moments during the
// phenomenon have been "measured".
_model.finish = function() {
var DO_NOT_UPDATE_VIEWS = true;
if (_model.can_finish()) {
while ((moments.length - 1) < t2m(T_END)) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
}
now = moments.length - 1;
_model.update_views();
return now;
};
// We call the model finished if the current moment, or `now`, is the
// phenomenon's last moment.
_model.is_finished = function() {
return _model.can_finish() && m2t(now) >= T_END;
};
function reset_model() {
moments = [];
_model.action("reset").callback(_model)();
// _model.reset();
}
_model.reset_model = reset_model;
/**
* ## Actions on the model
*
*/
_model.actions = {};
_model.add_action = function( action ) {
_model.actions[action.name] = action;
_model.actions[action.name].install = function() {
return action.callback(_model);
};
};
if (config.actions) {
var add_action = function(action_name) {
_model.add_action(config.actions[action_name]);
};
Object.keys(config.actions).forEach(add_action);
}
_model.action = function( action_name ) {
if (_model.actions[action_name]) {
return _model.actions[action_name];
}
};
_model.remove_action = function( action ) {
if (_model.actions[action.name]) {
delete _model.actions[action.name];
}
};
_model.disable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = false;
}
};
_model.enable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = true;
}
};
_model.toggle_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled =
!_model.action[action_name].enabled;
}
};
// ## Coordinating quantities
//
// All quantities that describe the phenomenon being modeled change in
// coordination with time's change. Add the model's time as a quantity to
// the list with quantities. To allow people to model time as part of
// their model, for example to describe the phenomenon accelerated, the
// internal time is added as quantity `_time_` and, as a result, "_time_"
// is not allowed as a quantity name.
_model.quantities = config.quantities || {};
_model.quantities._time_ = {
hidden: true,
minimum: T_START,
maximum: T_END,
value: m2t(now),
stepsize: T_STEP,
unit: "ms",
label: "internal time",
monotone: true
};
_model.get_minimum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var minima = {},
add_minimum = function(quantity) {
minima[quantity] = parseFloat(_model.quantities[quantity].minimum);
};
Object.keys(_model.quantities).forEach(add_minimum);
return minima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].minimum);
}
};
_model.get_maximum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var maxima = {},
add_maximum = function(quantity) {
maxima[quantity] = parseFloat(_model.quantities[quantity].maximum);
};
Object.keys(_model.quantities).forEach(add_maximum);
return maxima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].maximum);
}
};
_model.find_moment = function(quantity, value, EPSILON) {
if (moments.length === 0) {
// no moment are measured yet, so there is nothing to be found
return -1;
} else {
var val = _appendix.quantity_value(quantity);
// pre: quantity is monotone
// determine if it is increasing or decreasing
// determine type of monotone
//
// As the first moment has been measured and we do know the
// minimum of this quantity, type of monotone follows.
var start = val(0),
INCREASING = (start !== _model.get_maximum(quantity));
// Use a stupid linear search to find the moment that approaches the
// value best
var m = 0,
n = moments.length - 1,
lowerbound,
upperbound;
if (INCREASING) {
lowerbound = function(moment) {
return val(moment) < value;
};
upperbound = function(moment) {
return val(moment) > value;
};
} else {
lowerbound = function(moment) {
return val(moment) > value;
};
upperbound = function(moment) {
return val(moment) < value;
};
}
// Increasing "function", meaning
//
// (∀m: 0 ≤ m < |`moments`|: `val`(m) <= `val`(m+1))
//
// Therefore,
//
// (∃m, n: 0 ≤ m < n ≤ |`moments`|:
// `val`(m) ≤ value ≤ `val`(n) ⋀
// (∀p: m < p < n: `val`(p) = value))
//
// `find_moment` finds those moments m and n and returns the
// one closest to value or, when even close, the last moment
// decreasing is reverse.
while (lowerbound(m)) {
m++;
if (m>n) {
//
return -1;
}
}
return m;
//m--;
/*
while (upperbound(n)) {
n--;
if (n<m) {
return -1;
}
}
//n++;
return (Math.abs(val(n)-value) < Math.abs(val(m)-value))?n:m;
*/
}
};
_model.get = function(quantity) {
if (now < 0) {
return undefined;
} else {
return moments[now][quantity];
}
};
_model.set = function(quantity, value) {
var q = _model.quantities[quantity];
if (value < parseFloat(q.minimum)) {
value = parseFloat(q.minimum);
} else if (value > parseFloat(q.maximum)) {
value = parseFloat(q.maximum);
}
// q.minimum ≤ value ≤ q.maximum
// has value already been "measured"?
// As some quantities can have the same value more often, there are
// potentially many moments that fit the bill. There can be an unknown
// amount of moments that aren't measured as well.
//
// However, some quantities will be strictly increasing or decreasing
// and no value will appear twice. For example, the internal time will
// only increase. Those quantities with property `monotone`
// `true`, only one value will be searched for
var approx = _appendix.approximates(),
moment = -1;
if (q.monotone) {
moment = _model.find_moment(quantity, value);
if (moment === -1) {
// not yet "measured"
var DO_NOT_UPDATE_VIEWS = true;
_model.step(DO_NOT_UPDATE_VIEWS);
// THIS DOES WORK ONLY FOR INCREASING QUANTITIES. CHANGE THIS
// ALTER WITH FIND FUNCTION !!!!
while((moments[now][quantity] < value) && !_model.is_finished()) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
} else {
now = moment;
}
update_views();
return moments[now];
}
};
_model.data = function() {
return moments.slice(0, now + 1);
};
_model.current_moment = function(moment_only) {
if (moment_only) {
return now;
} else {
return moments[now];
}
};
_model.graphs_shown = {
tailpoints: false,
line: false,
arrows: false
};
_model.show_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function show_this_graph(g) {
switch(kind) {
case "line":
g.show_line(_model.name);
break;
case "tailpoints":
g.show_tailpoints(_model.name);
break;
case "arrows":
g.show_arrows(_model.name);
break;
}
}
graphs.forEach(show_this_graph);
_model.graphs_shown[kind] = true;
};
_model.hide_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function hide_this_graph(g) {
switch(kind) {
case "line":
g.hide_line(_model.name);
break;
case "tailpoints":
g.hide_tailpoints(_model.name);
break;
case "arrows":
g.hide_arrows(_model.name);
break;
}
}
graphs.forEach(hide_this_graph);
_model.graphs_shown[kind] = false;
};
_model.graph_is_shown = function(kind) {
return _model.graphs_shown[kind];
};
// ## _appendix H: helper functions
_appendix.approximates = function(epsilon) {
var EPSILON = epsilon || 0.001,
fn = function(a, b) {
return Math.abs(a - b) <= EPSILON;
};
fn.EPSILON = EPSILON;
return fn;
};
_appendix.quantity_value = function(quantity) {
return function(moment) {
return moments[moment][quantity];
};
};
var step = (config.step_size || T_STEP)*5 ;
function step_size(size) {
if (argu | gth === 1) {
step = size;
}
return step;
}
_model.step_size = step_size;
function random_color() {
var hexes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'],
colors = [],
i = 0;
while (i < 6) {
colors.push(hexes[Math.round(Math.random()*(hexes.length - 1))]);
i++;
}
return "#"+ colors.join("");
}
var color = random_color();
_model.color = function(c) {
if (arguments.length === 1) {
if (c === "random") {
color = random_color();
} else {
color = c;
}
}
return color;
};
return _model;
};
module.exports = model;
| ments.len | identifier_name |
model.js | /*
* Copyright (C) 2013 Huub de Beer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
var model = function(name, config) {
"use strict";
var _model = {name: name},
_appendix = {};
// ## Data invariant and initialization
//
// This model describes a dynamic phenomenon in terms of changing
// quantities over time.
//
//
// This description starts at `T_START` milliseconds
// (ms), defaulting to 0 ms and ends at `T_END` ms. If no end is specified
// it is assumed that the phenomenon does not end or is still ongoing in
// the real world (RW). The phenomenon's change is tracked by "measuring"
// the changing quantities at consecutive moments in time. These moments
// are `T_STEP` apart, defaulting to 1 ms, and are tracked by order
// number.
var T_START = config.time.start || 0,
T_END = config.time.end || Infinity,
T_STEP = config.time.step || 1;
function set_end(seconds) {
T_END = seconds*1000;
}
_model.set_end = set_end;
// To translate from a moment's order number to its corresponding time in
// ms and vice versa, two helper functions are defined, `time_to_moment`
// and `moment_to_time`, as well as a shorthand name for these two helper
// functions, respectively, `t2m` and `m2t`.
_model.time_to_moment = function(time) {
return Math.floor(time / T_STEP);
};
var t2m = _model.time_to_moment;
_model.moment_to_time = function(moment) {
return moment * T_STEP;
};
var m2t = _model.moment_to_time;
// When I use "measured" I mean to denote that the values of the
// quantities describing the phenomenon have been captured, computed,
// downloaded, measured, or otherwise obtained. This `model` function is
// intended to be applicable for describing purely theoretical models of a
// phenomenon as well as real-time measurements of a phenomenon.
//
// "Measuring" a moment is left to the `measure_moment` function. Each
// model has to (re)implement this function to specify the relationship
// between the phenomenon's quantities of interest at each moment during
// the phenomenon.
_model.measure_moment = function(moment) {
// to be implemented in an object implementing model
};
// The model has the following data invariant:
//
// (∀m: 0 ≤ m ≤ |`moments`|: `moment_computed`(`moments`[m]))
//
// stating that the phenomenon has been described quantitatively for all
// moments. These "measurements" are stored in a list of `moments` and can
// be accessed through a moment's order number.
var moments = [];
_model.get_moment = function(moment) {
return moments[moment];
};
_model.number_of_moments = function() {
return moments.length;
};
// A moment can only be inspected if it already has been "measured".
// Following the data invariant, a moment has been measured when its order
// number is smaller or equal to the number of measured moments.
_model.moment_measured = function(moment) {
return (moment <= (moments.length - 1));
};
// Furthermore, the current moment of interest, or `now`, points to an
// already "measured" moment during the phenomenon's duration. Hence, the
// data invariant is extended as follows:
//
// `t2m`(`T_START`) ≤ `now` ≤ `t2m`(`T_END`) → `moment_computed`(`now`)
var now;
// To ensure this data invariant, `now` is set to a moment before the
// phenomenon started.
now = t2m(T_START) - 1;
// ## Inspecting and running a model
// Inspection through registerd views
var views = [];
var update_views = function() {
var update_view = function(view) {
view.update(_model.name);
};
views.forEach(update_view);
};
_model.update_views = update_views;
var update_all_views = function() {
var update_view = function(view) {
if (view.update_all) {
view.update_all();
} else {
view.update(_model.name);
}
};
views.forEach(update_view);
};
_model.update_all_views = update_all_views;
_model.register = function(view) {
var view_found = views.indexOf(view);
if (view_found === -1) {
views.push(view);
views.forEach(function(v) { if(v.update_all) v.update_all();});
}
};
_model.get_views_of_type = function(view_type) {
return views.filter(function(v) {
return v.type === view_type;
});
};
_model.unregister = function(view) {
if (arguments.length === 0) {
var unregister = function(view) {
view.unregister(_model.name);
};
views.forEach(unregister);
} else {
var view_found = views.indexOf(view);
if (view_found !== -1) {
views.slice(view_found, 1);
}
}
};
// As a model can be inspected repeatedly, as is one
// of the reasons to model a phenomenon using a computer, we introduce a
// `reset` function to resets `now` to a moment before the phenomenon
// started.
_model.reset = function() {
now = t2m(T_START) - 1;
_model.step();
update_views();
};
// Once a model has been started, the current moment will be measured as
// well as all moments before since the start. These moments can be
// inspected.
//
_model.has_started = function() {
return now >= 0;
};
// The `step` function will advance `now` to the next moment if the end of
// the phenomenon has not been reached yet. If that moment has not been
// "measured" earlier, "measure" it now.
_model.step = function(do_not_update_views) {
if (m2t(now) + T_STEP <= T_END) {
now++;
if (!_model.moment_measured(now)) {
var moment = _model.measure_moment(now);
moment._time_ = m2t(now);
moments.push(moment);
}
}
if (!do_not_update_views) {
update_views();
}
return now;
};
// If the phenomenon is a finite process or the "measuring" process cannot
// go further `T_END` will have a value that is not `Infinity`.
_model.can_finish = function() {
return Math.abs(T_END) !== Infinity;
};
// To inspect the whole phenomenon at once or inspect the last moment,
// `finish`ing the model will ensure that all moments during the
// phenomenon have been "measured".
_model.finish = function() {
var DO_NOT_UPDATE_VIEWS = true;
if (_model.can_finish()) {
while ((moments.length - 1) < t2m(T_END)) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
}
now = moments.length - 1;
_model.update_views();
return now;
};
// We call the model finished if the current moment, or `now`, is the
// phenomenon's last moment.
_model.is_finished = function() {
return _model.can_finish() && m2t(now) >= T_END;
};
function reset_model() {
moments = [];
_model.action("reset").callback(_model)();
// _model.reset();
}
_model.reset_model = reset_model;
/**
* ## Actions on the model
*
*/
_model.actions = {};
_model.add_action = function( action ) {
_model.actions[action.name] = action;
_model.actions[action.name].install = function() {
return action.callback(_model);
};
};
if (config.actions) {
var add_action = function(action_name) {
_model.add_action(config.actions[action_name]);
};
Object.keys(config.actions).forEach(add_action);
}
_model.action = function( action_name ) {
if (_model.actions[action_name]) {
return _model.actions[action_name];
}
};
_model.remove_action = function( action ) {
if (_model.actions[action.name]) {
delete _model.actions[action.name];
}
};
_model.disable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = false;
}
};
_model.enable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = true;
}
};
_model.toggle_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled =
!_model.action[action_name].enabled;
}
};
// ## Coordinating quantities
//
// All quantities that describe the phenomenon being modeled change in
// coordination with time's change. Add the model's time as a quantity to
// the list with quantities. To allow people to model time as part of
// their model, for example to describe the phenomenon accelerated, the
// internal time is added as quantity `_time_` and, as a result, "_time_"
// is not allowed as a quantity name.
_model.quantities = config.quantities || {};
_model.quantities._time_ = {
hidden: true,
minimum: T_START,
maximum: T_END,
value: m2t(now),
stepsize: T_STEP,
unit: "ms",
label: "internal time",
monotone: true
};
_model.get_minimum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var minima = {},
add_minimum = function(quantity) {
minima[quantity] = parseFloat(_model.quantities[quantity].minimum);
};
Object.keys(_model.quantities).forEach(add_minimum);
return minima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].minimum);
}
};
_model.get_maximum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var maxima = {},
add_maximum = function(quantity) {
maxima[quantity] = parseFloat(_model.quantities[quantity].maximum);
};
Object.keys(_model.quantities).forEach(add_maximum);
return maxima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].maximum);
}
};
_model.find_moment = function(quantity, value, EPSILON) {
if (moments.length === 0) {
// no moment are measured yet, so there is nothing to be found
return -1;
} else {
var val = _appendix.quantity_value(quantity);
// pre: quantity is monotone
// determine if it is increasing or decreasing
// determine type of monotone
//
// As the first moment has been measured and we do know the
// minimum of this quantity, type of monotone follows.
var start = val(0),
INCREASING = (start !== _model.get_maximum(quantity));
// Use a stupid linear search to find the moment that approaches the
// value best
var m = 0,
n = moments.length - 1,
lowerbound,
upperbound;
if (INCREASING) {
lowerbound = function(moment) {
return val(moment) < value;
};
upperbound = function(moment) {
return val(moment) > value;
};
} else {
lowerbound = function(moment) {
return val(moment) > value;
};
upperbound = function(moment) {
return val(moment) < value;
};
}
// Increasing "function", meaning
//
// (∀m: 0 ≤ m < |`moments`|: `val`(m) <= `val`(m+1))
//
// Therefore,
//
// (∃m, n: 0 ≤ m < n ≤ |`moments`|:
// `val`(m) ≤ value ≤ `val`(n) ⋀
// (∀p: m < p < n: `val`(p) = value))
//
// `find_moment` finds those moments m and n and returns the
// one closest to value or, when even close, the last moment
// decreasing is reverse.
while (lowerbound(m)) {
m++;
if (m>n) {
//
return -1;
}
}
return m;
//m--;
/*
while (upperbound(n)) {
n--;
if (n<m) {
return -1;
}
}
//n++;
return (Math.abs(val(n)-value) < Math.abs(val(m)-value))?n:m;
*/
}
};
_model.get = function(quantity) {
if (now < 0) {
return undefined;
} else {
return moments[now][quantity];
}
};
_model.set = function(quantity, value) {
var q = _model.quantities[quantity];
if (value < parseFloat(q.minimum)) {
value = parseFloat(q.minimum);
} else if (value > parseFloat(q.maximum)) {
value = parseFloat(q.maximum);
}
// q.minimum ≤ value ≤ q.maximum
// has value already been "measured"?
// As some quantities can have the same value more often, there are
// potentially many moments that fit the bill. There can be an unknown
// amount of moments that aren't measured as well.
//
// However, some quantities will be strictly increasing or decreasing
// and no value will appear twice. For example, the internal time will
// only increase. Those quantities with property `monotone`
// `true`, only one value will be searched for
var approx = _appendix.approximates(),
moment = -1;
if (q.monotone) {
moment = _model.find_moment(quantity, value);
if (moment === -1) {
// not yet "measured"
var DO_NOT_UPDATE_VIEWS = true;
_model.step(DO_NOT_UPDATE_VIEWS);
// THIS DOES WORK ONLY FOR INCREASING QUANTITIES. CHANGE THIS
// ALTER WITH FIND FUNCTION !!!!
while((moments[now][quantity] < value) && !_model.is_finished()) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
} else {
now = moment;
}
update_views();
return moments[now];
}
};
_model.data = function() {
return moments.slice(0, now + 1);
};
_model.current_moment = function(moment_only) {
if (moment_only) {
return now;
} else {
return moments[now];
}
};
_model.graphs_shown = {
tailpoints: false,
line: false,
arrows: false
};
_model.show_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function show_this_graph(g) {
switch(kind) {
case "line":
g.show_line(_model.name);
break;
case "tailpoints":
g.show_tailpoints(_model.name);
break;
case "arrows":
g.show_arrows(_model.name);
break;
}
}
graphs.forEach(show_this_graph);
_model.graphs_shown[kind] = true;
};
_model.hide_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function hide_this_graph(g) {
switch(kind) {
case "line":
g.hide_line(_model.name);
break;
case "tailpoints":
g.hide_tailpoints(_model.name);
break;
case "arrows":
g.hide_arrows(_model.name);
break;
}
}
graphs.forEach(hide_this_graph);
_model.graphs_shown[kind] = false;
};
_model.graph_is_shown = function(kind) {
return _model.graphs_shown[kind];
};
// ## _appendix H: helper functions
_appendix.approximates = function(epsilon) {
var EPSILON = epsilon || 0.001,
fn = function(a, b) {
return Math.abs(a - b) <= EPSILON;
};
fn.EPSILON = EPSILON;
return fn;
};
_appendix.quantity_value = function(quantity) {
return function(moment) {
return moments[moment][quantity];
};
};
var step = (config.step_size || T_STEP)*5 ;
function step_size(size) {
if (arguments.length === |
function random_color() {
var hexes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'],
colors = [],
i = 0;
while (i < 6) {
colors.push(hexes[Math.round(Math.random()*(hexes.length - 1))]);
i++;
}
return "#"+ colors.join("");
}
var color = random_color();
_model.color = function(c) {
if (arguments.length === 1) {
if (c === "random") {
color = random_color();
} else {
color = c;
}
}
return color;
};
return _model;
};
module.exports = model;
| 1) {
step = size;
}
return step;
}
_model.step_size = step_size; | identifier_body |
model.js | /*
* Copyright (C) 2013 Huub de Beer
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
var model = function(name, config) {
"use strict";
var _model = {name: name},
_appendix = {};
// ## Data invariant and initialization
//
// This model describes a dynamic phenomenon in terms of changing
// quantities over time.
//
//
// This description starts at `T_START` milliseconds
// (ms), defaulting to 0 ms and ends at `T_END` ms. If no end is specified
// it is assumed that the phenomenon does not end or is still ongoing in
// the real world (RW). The phenomenon's change is tracked by "measuring"
// the changing quantities at consecutive moments in time. These moments
// are `T_STEP` apart, defaulting to 1 ms, and are tracked by order
// number.
var T_START = config.time.start || 0,
T_END = config.time.end || Infinity,
T_STEP = config.time.step || 1;
function set_end(seconds) {
T_END = seconds*1000;
}
_model.set_end = set_end;
// To translate from a moment's order number to its corresponding time in
// ms and vice versa, two helper functions are defined, `time_to_moment`
// and `moment_to_time`, as well as a shorthand name for these two helper
// functions, respectively, `t2m` and `m2t`.
_model.time_to_moment = function(time) {
return Math.floor(time / T_STEP);
};
var t2m = _model.time_to_moment;
_model.moment_to_time = function(moment) {
return moment * T_STEP;
};
var m2t = _model.moment_to_time;
// When I use "measured" I mean to denote that the values of the
// quantities describing the phenomenon have been captured, computed,
// downloaded, measured, or otherwise obtained. This `model` function is
// intended to be applicable for describing purely theoretical models of a
// phenomenon as well as real-time measurements of a phenomenon.
//
// "Measuring" a moment is left to the `measure_moment` function. Each
// model has to (re)implement this function to specify the relationship
// between the phenomenon's quantities of interest at each moment during
// the phenomenon.
_model.measure_moment = function(moment) {
// to be implemented in an object implementing model
};
// The model has the following data invariant:
//
// (∀m: 0 ≤ m ≤ |`moments`|: `moment_computed`(`moments`[m]))
//
// stating that the phenomenon has been described quantitatively for all
// moments. These "measurements" are stored in a list of `moments` and can
// be accessed through a moment's order number.
var moments = [];
_model.get_moment = function(moment) {
return moments[moment];
};
_model.number_of_moments = function() {
return moments.length;
};
// A moment can only be inspected if it already has been "measured".
// Following the data invariant, a moment has been measured when its order
// number is smaller or equal to the number of measured moments.
_model.moment_measured = function(moment) {
return (moment <= (moments.length - 1));
};
// Furthermore, the current moment of interest, or `now`, points to an
// already "measured" moment during the phenomenon's duration. Hence, the
// data invariant is extended as follows:
//
// `t2m`(`T_START`) ≤ `now` ≤ `t2m`(`T_END`) → `moment_computed`(`now`)
var now;
// To ensure this data invariant, `now` is set to a moment before the
// phenomenon started.
now = t2m(T_START) - 1;
// ## Inspecting and running a model
// Inspection through registerd views
var views = [];
var update_views = function() {
var update_view = function(view) {
view.update(_model.name);
};
views.forEach(update_view);
};
_model.update_views = update_views;
var update_all_views = function() {
var update_view = function(view) {
if (view.update_all) {
view.update_all();
} else {
view.update(_model.name);
}
};
views.forEach(update_view);
};
_model.update_all_views = update_all_views;
_model.register = function(view) {
var view_found = views.indexOf(view);
if (view_found === -1) {
views.push(view);
views.forEach(function(v) { if(v.update_all) v.update_all();});
}
};
_model.get_views_of_type = function(view_type) {
return views.filter(function(v) {
return v.type === view_type;
});
};
_model.unregister = function(view) {
if (arguments.length === 0) {
var unregister = function(view) {
view.unregister(_model.name);
};
views.forEach(unregister);
} else {
var view_found = views.indexOf(view);
if (view_found !== -1) {
views.slice(view_found, 1);
}
}
};
// As a model can be inspected repeatedly, as is one
// of the reasons to model a phenomenon using a computer, we introduce a
// `reset` function to resets `now` to a moment before the phenomenon
// started.
_model.reset = function() {
now = t2m(T_START) - 1;
_model.step();
update_views();
};
// Once a model has been started, the current moment will be measured as
// well as all moments before since the start. These moments can be
// inspected.
//
_model.has_started = function() {
return now >= 0;
};
// The `step` function will advance `now` to the next moment if the end of
// the phenomenon has not been reached yet. If that moment has not been
// "measured" earlier, "measure" it now.
_model.step = function(do_not_update_views) {
if (m2t(now) + T_STEP <= T_END) {
now++;
if (!_model.moment_measured(now)) {
var moment = _model.measure_moment(now);
moment._time_ = m2t(now);
moments.push(moment);
}
}
if (!do_not_update_views) {
update_views();
}
return now;
};
// If the phenomenon is a finite process or the "measuring" process cannot
// go further `T_END` will have a value that is not `Infinity`.
_model.can_finish = function() {
return Math.abs(T_END) !== Infinity;
};
// To inspect the whole phenomenon at once or inspect the last moment,
// `finish`ing the model will ensure that all moments during the
// phenomenon have been "measured".
_model.finish = function() {
var DO_NOT_UPDATE_VIEWS = true;
if (_model.can_finish()) {
while ((moments.length - 1) < t2m(T_END)) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
}
now = moments.length - 1;
_model.update_views();
return now;
};
// We call the model finished if the current moment, or `now`, is the
// phenomenon's last moment.
_model.is_finished = function() {
return _model.can_finish() && m2t(now) >= T_END;
};
function reset_model() {
moments = [];
_model.action("reset").callback(_model)();
// _model.reset();
}
_model.reset_model = reset_model;
/**
* ## Actions on the model
*
*/
_model.actions = {};
_model.add_action = function( action ) {
_model.actions[action.name] = action;
_model.actions[action.name].install = function() {
return action.callback(_model);
};
};
if (config.actions) {
var add_action = function(action_name) {
_model.add_action(config.actions[action_name]);
};
Object.keys(config.actions).forEach(add_action);
}
_model.action = function( action_name ) {
if (_model.actions[action_name]) {
return _model.actions[action_name];
}
};
_model.remove_action = function( action ) {
if (_model.actions[action.name]) {
delete _model.actions[action.name];
}
};
_model.disable_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled = false;
}
};
_model.enable_action = function( action_name ) {
if (_model.actions[action_name]) {
| _model.toggle_action = function( action_name ) {
if (_model.actions[action_name]) {
_model.actions[action_name].enabled =
!_model.action[action_name].enabled;
}
};
// ## Coordinating quantities
//
// All quantities that describe the phenomenon being modeled change in
// coordination with time's change. Add the model's time as a quantity to
// the list with quantities. To allow people to model time as part of
// their model, for example to describe the phenomenon accelerated, the
// internal time is added as quantity `_time_` and, as a result, "_time_"
// is not allowed as a quantity name.
_model.quantities = config.quantities || {};
_model.quantities._time_ = {
hidden: true,
minimum: T_START,
maximum: T_END,
value: m2t(now),
stepsize: T_STEP,
unit: "ms",
label: "internal time",
monotone: true
};
_model.get_minimum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var minima = {},
add_minimum = function(quantity) {
minima[quantity] = parseFloat(_model.quantities[quantity].minimum);
};
Object.keys(_model.quantities).forEach(add_minimum);
return minima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].minimum);
}
};
_model.get_maximum = function(quantity) {
if (arguments.length===0) {
// called without any arguments: return all minima
var maxima = {},
add_maximum = function(quantity) {
maxima[quantity] = parseFloat(_model.quantities[quantity].maximum);
};
Object.keys(_model.quantities).forEach(add_maximum);
return maxima;
} else {
// return quantity's minimum
return parseFloat(_model.quantities[quantity].maximum);
}
};
_model.find_moment = function(quantity, value, EPSILON) {
if (moments.length === 0) {
// no moment are measured yet, so there is nothing to be found
return -1;
} else {
var val = _appendix.quantity_value(quantity);
// pre: quantity is monotone
// determine if it is increasing or decreasing
// determine type of monotone
//
// As the first moment has been measured and we do know the
// minimum of this quantity, type of monotone follows.
var start = val(0),
INCREASING = (start !== _model.get_maximum(quantity));
// Use a stupid linear search to find the moment that approaches the
// value best
var m = 0,
n = moments.length - 1,
lowerbound,
upperbound;
if (INCREASING) {
lowerbound = function(moment) {
return val(moment) < value;
};
upperbound = function(moment) {
return val(moment) > value;
};
} else {
lowerbound = function(moment) {
return val(moment) > value;
};
upperbound = function(moment) {
return val(moment) < value;
};
}
// Increasing "function", meaning
//
// (∀m: 0 ≤ m < |`moments`|: `val`(m) <= `val`(m+1))
//
// Therefore,
//
// (∃m, n: 0 ≤ m < n ≤ |`moments`|:
// `val`(m) ≤ value ≤ `val`(n) ⋀
// (∀p: m < p < n: `val`(p) = value))
//
// `find_moment` finds those moments m and n and returns the
// one closest to value or, when even close, the last moment
// decreasing is reverse.
while (lowerbound(m)) {
m++;
if (m>n) {
//
return -1;
}
}
return m;
//m--;
/*
while (upperbound(n)) {
n--;
if (n<m) {
return -1;
}
}
//n++;
return (Math.abs(val(n)-value) < Math.abs(val(m)-value))?n:m;
*/
}
};
_model.get = function(quantity) {
if (now < 0) {
return undefined;
} else {
return moments[now][quantity];
}
};
_model.set = function(quantity, value) {
var q = _model.quantities[quantity];
if (value < parseFloat(q.minimum)) {
value = parseFloat(q.minimum);
} else if (value > parseFloat(q.maximum)) {
value = parseFloat(q.maximum);
}
// q.minimum ≤ value ≤ q.maximum
// has value already been "measured"?
// As some quantities can have the same value more often, there are
// potentially many moments that fit the bill. There can be an unknown
// amount of moments that aren't measured as well.
//
// However, some quantities will be strictly increasing or decreasing
// and no value will appear twice. For example, the internal time will
// only increase. Those quantities with property `monotone`
// `true`, only one value will be searched for
var approx = _appendix.approximates(),
moment = -1;
if (q.monotone) {
moment = _model.find_moment(quantity, value);
if (moment === -1) {
// not yet "measured"
var DO_NOT_UPDATE_VIEWS = true;
_model.step(DO_NOT_UPDATE_VIEWS);
// THIS DOES WORK ONLY FOR INCREASING QUANTITIES. CHANGE THIS
// ALTER WITH FIND FUNCTION !!!!
while((moments[now][quantity] < value) && !_model.is_finished()) {
_model.step(DO_NOT_UPDATE_VIEWS);
}
} else {
now = moment;
}
update_views();
return moments[now];
}
};
_model.data = function() {
return moments.slice(0, now + 1);
};
_model.current_moment = function(moment_only) {
if (moment_only) {
return now;
} else {
return moments[now];
}
};
_model.graphs_shown = {
tailpoints: false,
line: false,
arrows: false
};
_model.show_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function show_this_graph(g) {
switch(kind) {
case "line":
g.show_line(_model.name);
break;
case "tailpoints":
g.show_tailpoints(_model.name);
break;
case "arrows":
g.show_arrows(_model.name);
break;
}
}
graphs.forEach(show_this_graph);
_model.graphs_shown[kind] = true;
};
_model.hide_graph = function(kind) {
var graphs = _model.get_views_of_type("graph");
function hide_this_graph(g) {
switch(kind) {
case "line":
g.hide_line(_model.name);
break;
case "tailpoints":
g.hide_tailpoints(_model.name);
break;
case "arrows":
g.hide_arrows(_model.name);
break;
}
}
graphs.forEach(hide_this_graph);
_model.graphs_shown[kind] = false;
};
_model.graph_is_shown = function(kind) {
return _model.graphs_shown[kind];
};
// ## _appendix H: helper functions
_appendix.approximates = function(epsilon) {
var EPSILON = epsilon || 0.001,
fn = function(a, b) {
return Math.abs(a - b) <= EPSILON;
};
fn.EPSILON = EPSILON;
return fn;
};
_appendix.quantity_value = function(quantity) {
return function(moment) {
return moments[moment][quantity];
};
};
var step = (config.step_size || T_STEP)*5 ;
function step_size(size) {
if (arguments.length === 1) {
step = size;
}
return step;
}
_model.step_size = step_size;
function random_color() {
var hexes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'],
colors = [],
i = 0;
while (i < 6) {
colors.push(hexes[Math.round(Math.random()*(hexes.length - 1))]);
i++;
}
return "#"+ colors.join("");
}
var color = random_color();
_model.color = function(c) {
if (arguments.length === 1) {
if (c === "random") {
color = random_color();
} else {
color = c;
}
}
return color;
};
return _model;
};
module.exports = model;
| _model.actions[action_name].enabled = true;
}
};
| conditional_block |
credentials-in-url.https.window.js | // META: script=/service-workers/service-worker/resources/test-helpers.sub.js
// META: script=resources/utils.js
'use strict';
// "If parsedURL includes credentials, then throw a TypeError."
// https://fetch.spec.whatwg.org/#dom-request
// (Added by https://github.com/whatwg/fetch/issues/26).
// "A URL includes credentials if its username or password is not the empty
// string."
// https://url.spec.whatwg.org/#include-credentials
backgroundFetchTest((t, bgFetch) => { | return bgFetch.fetch(uniqueTag(), 'https://example.com');
}, 'fetch without credentials in URL should register ok');
backgroundFetchTest((t, bgFetch) => {
return promise_rejects(
t, new TypeError(),
bgFetch.fetch(uniqueTag(), 'https://username:password@example.com'));
}, 'fetch with username and password in URL should reject');
backgroundFetchTest((t, bgFetch) => {
return promise_rejects(
t, new TypeError(),
bgFetch.fetch(uniqueTag(), 'https://username:@example.com'));
}, 'fetch with username and empty password in URL should reject');
backgroundFetchTest((t, bgFetch) => {
return promise_rejects(
t, new TypeError(),
bgFetch.fetch(uniqueTag(), 'https://:password@example.com'));
}, 'fetch with empty username and password in URL should reject'); | random_line_split | |
result-info-native-replay-summary.py | #!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('result_info_file',
help='Result info file',
type=argparse.FileType('r'))
parser.add_argument('--dump-unknowns',
dest='dump_unknowns',
action='store_true')
parser.add_argument('--dump-timeouts',
dest='dump_timeouts',
action='store_true')
DriverUtil.parserAddLoggerArg(parser)
pargs = parser.parse_args()
DriverUtil.handleLoggerArgs(pargs, parser)
_logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
_logger.info('Loading complete')
# Check the misc data
if resultInfoMisc is None:
_logger.error('Expected result info to have misc data')
return 1
if resultInfoMisc['runner'] != 'NativeReplay':
_logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
resultInfoMisc['runner']))
return 1
errorTypeToErrorListMap = dict()
multipeOutcomeList = []
for result_index, r in enumerate(resultInfos):
_logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
raw_result = r.GetInternalRepr()
program_path = r.RawInvocationInfo['program']
outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
error_list = None
try:
error_list = errorTypeToErrorListMap[type(outcome)]
except KeyError:
error_list = []
errorTypeToErrorListMap[type(outcome)] = error_list
error_list.append(outcome)
# Print report
print('#'*70)
print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
for ty, error_list in errorTypeToErrorListMap.items():
print("# of {}: {}".format(ty, len(error_list)))
if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
for error in error_list:
print(error) | for error in error_list:
print(error)
# Now emit as YAML
#as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
#pargs.output_yaml.write(as_yaml)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv)) | if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts: | random_line_split |
result-info-native-replay-summary.py | #!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def main(args):
|
if __name__ == '__main__':
sys.exit(main(sys.argv))
| parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('result_info_file',
help='Result info file',
type=argparse.FileType('r'))
parser.add_argument('--dump-unknowns',
dest='dump_unknowns',
action='store_true')
parser.add_argument('--dump-timeouts',
dest='dump_timeouts',
action='store_true')
DriverUtil.parserAddLoggerArg(parser)
pargs = parser.parse_args()
DriverUtil.handleLoggerArgs(pargs, parser)
_logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
_logger.info('Loading complete')
# Check the misc data
if resultInfoMisc is None:
_logger.error('Expected result info to have misc data')
return 1
if resultInfoMisc['runner'] != 'NativeReplay':
_logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
resultInfoMisc['runner']))
return 1
errorTypeToErrorListMap = dict()
multipeOutcomeList = []
for result_index, r in enumerate(resultInfos):
_logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
raw_result = r.GetInternalRepr()
program_path = r.RawInvocationInfo['program']
outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
error_list = None
try:
error_list = errorTypeToErrorListMap[type(outcome)]
except KeyError:
error_list = []
errorTypeToErrorListMap[type(outcome)] = error_list
error_list.append(outcome)
# Print report
print('#'*70)
print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
for ty, error_list in errorTypeToErrorListMap.items():
print("# of {}: {}".format(ty, len(error_list)))
if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
for error in error_list:
print(error)
if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts:
for error in error_list:
print(error)
# Now emit as YAML
#as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
#pargs.output_yaml.write(as_yaml)
return 0 | identifier_body |
result-info-native-replay-summary.py | #!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def | (args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('result_info_file',
help='Result info file',
type=argparse.FileType('r'))
parser.add_argument('--dump-unknowns',
dest='dump_unknowns',
action='store_true')
parser.add_argument('--dump-timeouts',
dest='dump_timeouts',
action='store_true')
DriverUtil.parserAddLoggerArg(parser)
pargs = parser.parse_args()
DriverUtil.handleLoggerArgs(pargs, parser)
_logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
_logger.info('Loading complete')
# Check the misc data
if resultInfoMisc is None:
_logger.error('Expected result info to have misc data')
return 1
if resultInfoMisc['runner'] != 'NativeReplay':
_logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
resultInfoMisc['runner']))
return 1
errorTypeToErrorListMap = dict()
multipeOutcomeList = []
for result_index, r in enumerate(resultInfos):
_logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
raw_result = r.GetInternalRepr()
program_path = r.RawInvocationInfo['program']
outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
error_list = None
try:
error_list = errorTypeToErrorListMap[type(outcome)]
except KeyError:
error_list = []
errorTypeToErrorListMap[type(outcome)] = error_list
error_list.append(outcome)
# Print report
print('#'*70)
print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
for ty, error_list in errorTypeToErrorListMap.items():
print("# of {}: {}".format(ty, len(error_list)))
if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
for error in error_list:
print(error)
if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts:
for error in error_list:
print(error)
# Now emit as YAML
#as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
#pargs.output_yaml.write(as_yaml)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| main | identifier_name |
result-info-native-replay-summary.py | #!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('result_info_file',
help='Result info file',
type=argparse.FileType('r'))
parser.add_argument('--dump-unknowns',
dest='dump_unknowns',
action='store_true')
parser.add_argument('--dump-timeouts',
dest='dump_timeouts',
action='store_true')
DriverUtil.parserAddLoggerArg(parser)
pargs = parser.parse_args()
DriverUtil.handleLoggerArgs(pargs, parser)
_logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
_logger.info('Loading complete')
# Check the misc data
if resultInfoMisc is None:
_logger.error('Expected result info to have misc data')
return 1
if resultInfoMisc['runner'] != 'NativeReplay':
_logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
resultInfoMisc['runner']))
return 1
errorTypeToErrorListMap = dict()
multipeOutcomeList = []
for result_index, r in enumerate(resultInfos):
_logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
raw_result = r.GetInternalRepr()
program_path = r.RawInvocationInfo['program']
outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
error_list = None
try:
error_list = errorTypeToErrorListMap[type(outcome)]
except KeyError:
error_list = []
errorTypeToErrorListMap[type(outcome)] = error_list
error_list.append(outcome)
# Print report
print('#'*70)
print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
for ty, error_list in errorTypeToErrorListMap.items():
print("# of {}: {}".format(ty, len(error_list)))
if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
for error in error_list:
print(error)
if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts:
for error in error_list:
|
# Now emit as YAML
#as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
#pargs.output_yaml.write(as_yaml)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| print(error) | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.